From c9c22396e7669bad2821c413bcdc2c2a78f69edf Mon Sep 17 00:00:00 2001 From: khs1994 Date: Sat, 20 Jan 2024 09:03:38 +0800 Subject: [PATCH] Sync from docker/docker.github.io@8fc0e3c by PCIT --- SUMMARY.md | 64 +- .../github.com/docker/cli/docs/deprecated.md | 131 +- .../docker/cli/docs/extend/config.md | 191 +- .../docker/cli/docs/extend/index.md | 127 +- .../docker/cli/docs/extend/legacy_plugins.md | 24 +- .../docker/cli/docs/extend/plugin_api.md | 41 +- .../cli/docs/extend/plugins_authorization.md | 31 +- .../docker/cli/docs/extend/plugins_logging.md | 67 +- .../docker/cli/docs/extend/plugins_metrics.md | 34 +- .../docker/cli/docs/extend/plugins_network.md | 23 +- .../docker/cli/docs/extend/plugins_volume.md | 78 +- .../cli/docs/reference/commandline/cli.md | 151 +- .../cli/docs/reference/commandline/dockerd.md | 868 +- .../docker/cli/docs/reference/run.md | 1166 +- .../v2/docs/reference/compose_events.md | 2 +- .../docs/reference/docker_compose_events.yaml | 2 +- .../github.com/moby/moby/docs/api/v1.18.md | 2 +- .../github.com/moby/moby/docs/api/v1.19.md | 2 +- .../github.com/moby/moby/docs/api/v1.20.md | 2 +- .../github.com/moby/moby/docs/api/v1.21.md | 2 +- .../github.com/moby/moby/docs/api/v1.22.md | 2 +- .../github.com/moby/moby/docs/api/v1.23.md | 2 +- .../github.com/moby/moby/docs/api/v1.24.md | 2 +- .../github.com/moby/moby/docs/api/v1.25.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.26.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.27.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.28.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.29.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.30.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.31.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.32.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.33.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.34.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.35.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.36.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.37.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.38.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.39.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.40.yaml | 10 +- .../github.com/moby/moby/docs/api/v1.41.yaml | 6 +- .../github.com/moby/moby/docs/api/v1.42.yaml | 4 + .../github.com/moby/moby/docs/api/v1.43.yaml | 4 + .../github.com/moby/moby/docs/api/v1.44.yaml | 12300 ++++++++++++++++ .../moby/moby/docs/api/version-history.md | 77 +- _vendor/modules.txt | 6 +- assets/images/docs-logo-white-full.svg | 32 + content/build/building/packaging.md | 2 +- content/build/guide/export.md | 2 +- content/build/guide/intro.md | 4 +- .../compose/compose-file/compose-file-v2.md | 10 +- .../compose/compose-file/compose-file-v3.md | 6 +- content/compose/release-notes.md | 4 +- .../config/containers/logging/configure.md | 3 +- .../config/containers/logging/logentries.md | 70 - .../config/containers/resource_constraints.md | 2 +- content/config/containers/runmetrics.md | 2 +- content/config/filter.md | 4 +- content/config/labels-custom-metadata.md | 8 +- content/desktop/backup-and-restore.md | 14 +- .../settings-management/configure.md | 2 +- content/desktop/networking.md | 4 +- .../desktop/previous-versions/archive-mac.md | 4 +- .../previous-versions/archive-windows.md | 4 +- .../previous-versions/edge-releases-mac.md | 4 +- .../edge-releases-windows.md | 4 +- content/desktop/release-notes.md | 2 +- .../develop/develop-images/instructions.md | 2 +- content/docker-hub/builds/advanced.md | 6 +- content/engine/api/_index.md | 16 +- 
content/engine/api/v1.44.md | 3 + .../engine/reference/commandline/attach.md | 16 - content/engine/reference/commandline/build.md | 16 - .../reference/commandline/builder_build.md | 14 - .../engine/reference/commandline/commit.md | 16 - .../reference/commandline/container_attach.md | 3 +- .../reference/commandline/container_commit.md | 3 +- .../reference/commandline/container_cp.md | 3 +- .../reference/commandline/container_create.md | 3 +- .../reference/commandline/container_diff.md | 3 +- .../reference/commandline/container_exec.md | 4 +- .../reference/commandline/container_export.md | 3 +- .../reference/commandline/container_kill.md | 3 +- .../reference/commandline/container_logs.md | 3 +- .../reference/commandline/container_ls.md | 4 +- .../reference/commandline/container_pause.md | 3 +- .../reference/commandline/container_port.md | 3 +- .../reference/commandline/container_rename.md | 3 +- .../commandline/container_restart.md | 3 +- .../reference/commandline/container_rm.md | 3 +- .../reference/commandline/container_run.md | 4 +- .../reference/commandline/container_start.md | 3 +- .../reference/commandline/container_stats.md | 3 +- .../reference/commandline/container_stop.md | 3 +- .../reference/commandline/container_top.md | 3 +- .../commandline/container_unpause.md | 3 +- .../reference/commandline/container_update.md | 3 +- .../reference/commandline/container_wait.md | 3 +- content/engine/reference/commandline/cp.md | 16 - .../engine/reference/commandline/create.md | 16 - content/engine/reference/commandline/diff.md | 16 - .../engine/reference/commandline/events.md | 16 - content/engine/reference/commandline/exec.md | 16 - .../engine/reference/commandline/export.md | 16 - .../engine/reference/commandline/history.md | 16 - .../reference/commandline/image_build.md | 5 +- .../reference/commandline/image_history.md | 3 +- .../reference/commandline/image_import.md | 3 +- .../reference/commandline/image_load.md | 3 +- .../engine/reference/commandline/image_ls.md | 4 +- .../reference/commandline/image_pull.md | 4 +- .../reference/commandline/image_push.md | 4 +- .../engine/reference/commandline/image_rm.md | 3 +- .../reference/commandline/image_save.md | 3 +- .../engine/reference/commandline/image_tag.md | 3 +- .../engine/reference/commandline/images.md | 16 - .../engine/reference/commandline/import.md | 16 - content/engine/reference/commandline/info.md | 34 - content/engine/reference/commandline/kill.md | 16 - content/engine/reference/commandline/load.md | 16 - content/engine/reference/commandline/logs.md | 16 - content/engine/reference/commandline/pause.md | 16 - content/engine/reference/commandline/port.md | 16 - content/engine/reference/commandline/ps.md | 16 - content/engine/reference/commandline/pull.md | 16 - content/engine/reference/commandline/push.md | 16 - .../engine/reference/commandline/rename.md | 16 - .../engine/reference/commandline/restart.md | 16 - content/engine/reference/commandline/rm.md | 16 - content/engine/reference/commandline/rmi.md | 16 - content/engine/reference/commandline/run.md | 20 - content/engine/reference/commandline/save.md | 16 - content/engine/reference/commandline/start.md | 16 - content/engine/reference/commandline/stats.md | 16 - content/engine/reference/commandline/stop.md | 16 - .../reference/commandline/system_events.md | 3 +- .../reference/commandline/system_info.md | 4 +- content/engine/reference/commandline/tag.md | 16 - content/engine/reference/commandline/top.md | 16 - .../engine/reference/commandline/unpause.md | 16 - 
.../engine/reference/commandline/update.md | 16 - content/engine/reference/commandline/wait.md | 16 - content/engine/release-notes/24.0.md | 5 - content/engine/release-notes/25.0.md | 115 + content/engine/security/rootless.md | 95 +- content/get-started/resources.md | 6 + .../guides/use-case/genai-pdf-bot/_index.md | 16 + .../use-case/genai-pdf-bot/containerize.md | 133 + .../guides/use-case/genai-pdf-bot/develop.md | 247 + .../walkthroughs/access-local-folder.md | 2 +- .../guides/walkthroughs/publish-your-image.md | 4 +- content/language/rust/build-images.md | 2 +- content/language/rust/run-containers.md | 2 +- content/network/_index.md | 2 +- content/network/network-tutorial-overlay.md | 4 +- content/scout/policy/_index.md | 12 +- content/storage/bind-mounts.md | 26 +- content/storage/storagedriver/btrfs-driver.md | 2 +- .../storagedriver/device-mapper-driver.md | 2 +- .../storagedriver/select-storage-driver.md | 39 +- data/buildx/docker_buildx_build.yaml | 70 +- data/buildx/docker_buildx_debug_build.yaml | 10 +- data/buildx/docker_buildx_du.yaml | 96 +- data/buildx/docker_buildx_ls.yaml | 72 +- data/buildx/docker_buildx_rm.yaml | 4 +- data/engine-cli/docker_attach.yaml | 150 +- data/engine-cli/docker_build.yaml | 705 +- data/engine-cli/docker_checkpoint.yaml | 14 +- data/engine-cli/docker_commit.yaml | 86 +- data/engine-cli/docker_config_create.yaml | 2 +- data/engine-cli/docker_config_inspect.yaml | 2 +- data/engine-cli/docker_config_ls.yaml | 4 +- data/engine-cli/docker_config_rm.yaml | 8 +- data/engine-cli/docker_container_attach.yaml | 146 +- data/engine-cli/docker_container_commit.yaml | 83 +- data/engine-cli/docker_container_cp.yaml | 104 +- data/engine-cli/docker_container_create.yaml | 93 +- data/engine-cli/docker_container_diff.yaml | 39 +- data/engine-cli/docker_container_exec.yaml | 110 +- data/engine-cli/docker_container_export.yaml | 19 +- data/engine-cli/docker_container_kill.yaml | 54 +- data/engine-cli/docker_container_logs.yaml | 49 +- data/engine-cli/docker_container_ls.yaml | 429 +- data/engine-cli/docker_container_pause.yaml | 16 +- data/engine-cli/docker_container_port.yaml | 31 +- data/engine-cli/docker_container_prune.yaml | 2 +- data/engine-cli/docker_container_rename.yaml | 6 +- data/engine-cli/docker_container_restart.yaml | 6 +- data/engine-cli/docker_container_rm.yaml | 94 +- data/engine-cli/docker_container_run.yaml | 1425 +- data/engine-cli/docker_container_start.yaml | 6 +- data/engine-cli/docker_container_stats.yaml | 166 +- data/engine-cli/docker_container_stop.yaml | 10 +- data/engine-cli/docker_container_top.yaml | 2 +- data/engine-cli/docker_container_unpause.yaml | 13 +- data/engine-cli/docker_container_update.yaml | 95 +- data/engine-cli/docker_container_wait.yaml | 30 +- data/engine-cli/docker_context_create.yaml | 34 +- data/engine-cli/docker_context_export.yaml | 11 - data/engine-cli/docker_context_update.yaml | 20 - data/engine-cli/docker_context_use.yaml | 2 +- data/engine-cli/docker_cp.yaml | 106 +- data/engine-cli/docker_create.yaml | 93 +- data/engine-cli/docker_diff.yaml | 39 +- data/engine-cli/docker_events.yaml | 394 +- data/engine-cli/docker_exec.yaml | 112 +- data/engine-cli/docker_export.yaml | 19 +- data/engine-cli/docker_history.yaml | 57 - data/engine-cli/docker_image_build.yaml | 702 +- data/engine-cli/docker_image_history.yaml | 59 +- data/engine-cli/docker_image_import.yaml | 58 +- data/engine-cli/docker_image_load.yaml | 40 +- data/engine-cli/docker_image_ls.yaml | 325 +- data/engine-cli/docker_image_prune.yaml | 10 +- 
data/engine-cli/docker_image_pull.yaml | 222 +- data/engine-cli/docker_image_push.yaml | 100 +- data/engine-cli/docker_image_rm.yaml | 86 +- data/engine-cli/docker_image_save.yaml | 41 +- data/engine-cli/docker_image_tag.yaml | 73 +- data/engine-cli/docker_images.yaml | 325 +- data/engine-cli/docker_import.yaml | 58 +- data/engine-cli/docker_info.yaml | 155 +- data/engine-cli/docker_inspect.yaml | 4 +- data/engine-cli/docker_kill.yaml | 54 +- data/engine-cli/docker_load.yaml | 40 +- data/engine-cli/docker_login.yaml | 24 +- data/engine-cli/docker_logs.yaml | 49 +- data/engine-cli/docker_manifest.yaml | 14 +- data/engine-cli/docker_network_connect.yaml | 2 +- data/engine-cli/docker_network_create.yaml | 26 +- data/engine-cli/docker_node_ps.yaml | 5 +- data/engine-cli/docker_node_rm.yaml | 2 +- data/engine-cli/docker_pause.yaml | 16 +- data/engine-cli/docker_plugin_create.yaml | 4 +- data/engine-cli/docker_plugin_install.yaml | 2 +- data/engine-cli/docker_plugin_ls.yaml | 2 +- data/engine-cli/docker_plugin_rm.yaml | 4 +- data/engine-cli/docker_plugin_set.yaml | 1 + data/engine-cli/docker_port.yaml | 29 - data/engine-cli/docker_ps.yaml | 427 - data/engine-cli/docker_pull.yaml | 222 +- data/engine-cli/docker_push.yaml | 100 +- data/engine-cli/docker_rename.yaml | 6 +- data/engine-cli/docker_restart.yaml | 4 - data/engine-cli/docker_rm.yaml | 92 - data/engine-cli/docker_rmi.yaml | 86 +- data/engine-cli/docker_run.yaml | 892 +- data/engine-cli/docker_save.yaml | 41 +- data/engine-cli/docker_search.yaml | 111 +- data/engine-cli/docker_secret_inspect.yaml | 4 +- data/engine-cli/docker_secret_ls.yaml | 2 +- data/engine-cli/docker_secret_rm.yaml | 1 + data/engine-cli/docker_service_create.yaml | 55 +- data/engine-cli/docker_service_ls.yaml | 14 +- data/engine-cli/docker_service_ps.yaml | 11 +- data/engine-cli/docker_service_update.yaml | 12 +- data/engine-cli/docker_stack_config.yaml | 8 +- data/engine-cli/docker_stack_deploy.yaml | 2 +- data/engine-cli/docker_stack_ps.yaml | 4 +- data/engine-cli/docker_start.yaml | 4 - data/engine-cli/docker_stats.yaml | 166 +- data/engine-cli/docker_stop.yaml | 10 +- data/engine-cli/docker_swarm_init.yaml | 173 +- data/engine-cli/docker_system_df.yaml | 12 +- data/engine-cli/docker_system_events.yaml | 189 +- data/engine-cli/docker_system_info.yaml | 152 +- data/engine-cli/docker_system_prune.yaml | 8 +- data/engine-cli/docker_tag.yaml | 73 +- .../engine-cli/docker_trust_key_generate.yaml | 4 +- data/engine-cli/docker_trust_key_load.yaml | 2 +- data/engine-cli/docker_trust_revoke.yaml | 2 +- data/engine-cli/docker_trust_sign.yaml | 2 +- data/engine-cli/docker_trust_signer_add.yaml | 2 +- .../docker_trust_signer_remove.yaml | 11 +- data/engine-cli/docker_unpause.yaml | 13 +- data/engine-cli/docker_update.yaml | 95 +- data/engine-cli/docker_version.yaml | 12 +- data/engine-cli/docker_volume_create.yaml | 29 +- data/engine-cli/docker_volume_ls.yaml | 4 +- data/engine-cli/docker_volume_rm.yaml | 2 +- data/engine-cli/docker_wait.yaml | 28 - data/redirects.yml | 28 +- data/toc.yaml | 86 +- go.mod | 9 +- go.sum | 6 + hugo.yaml | 6 +- layouts/_default/cli.html | 23 +- layouts/partials/header.html | 5 +- static/assets/images/docker-docs-logo.svg | 14 - 288 files changed, 19942 insertions(+), 8173 deletions(-) create mode 100644 _vendor/github.com/moby/moby/docs/api/v1.44.yaml create mode 100644 assets/images/docs-logo-white-full.svg delete mode 100644 content/config/containers/logging/logentries.md create mode 100644 content/engine/api/v1.44.md delete mode 100644 
content/engine/reference/commandline/attach.md delete mode 100644 content/engine/reference/commandline/build.md delete mode 100644 content/engine/reference/commandline/builder_build.md delete mode 100644 content/engine/reference/commandline/commit.md delete mode 100644 content/engine/reference/commandline/cp.md delete mode 100644 content/engine/reference/commandline/create.md delete mode 100644 content/engine/reference/commandline/diff.md delete mode 100644 content/engine/reference/commandline/events.md delete mode 100644 content/engine/reference/commandline/exec.md delete mode 100644 content/engine/reference/commandline/export.md delete mode 100644 content/engine/reference/commandline/history.md delete mode 100644 content/engine/reference/commandline/images.md delete mode 100644 content/engine/reference/commandline/import.md delete mode 100644 content/engine/reference/commandline/info.md delete mode 100644 content/engine/reference/commandline/kill.md delete mode 100644 content/engine/reference/commandline/load.md delete mode 100644 content/engine/reference/commandline/logs.md delete mode 100644 content/engine/reference/commandline/pause.md delete mode 100644 content/engine/reference/commandline/port.md delete mode 100644 content/engine/reference/commandline/ps.md delete mode 100644 content/engine/reference/commandline/pull.md delete mode 100644 content/engine/reference/commandline/push.md delete mode 100644 content/engine/reference/commandline/rename.md delete mode 100644 content/engine/reference/commandline/restart.md delete mode 100644 content/engine/reference/commandline/rm.md delete mode 100644 content/engine/reference/commandline/rmi.md delete mode 100644 content/engine/reference/commandline/run.md delete mode 100644 content/engine/reference/commandline/save.md delete mode 100644 content/engine/reference/commandline/start.md delete mode 100644 content/engine/reference/commandline/stats.md delete mode 100644 content/engine/reference/commandline/stop.md delete mode 100644 content/engine/reference/commandline/tag.md delete mode 100644 content/engine/reference/commandline/top.md delete mode 100644 content/engine/reference/commandline/unpause.md delete mode 100644 content/engine/reference/commandline/update.md delete mode 100644 content/engine/reference/commandline/wait.md create mode 100644 content/engine/release-notes/25.0.md create mode 100644 content/guides/use-case/genai-pdf-bot/_index.md create mode 100644 content/guides/use-case/genai-pdf-bot/containerize.md create mode 100644 content/guides/use-case/genai-pdf-bot/develop.md delete mode 100644 static/assets/images/docker-docs-logo.svg diff --git a/SUMMARY.md b/SUMMARY.md index 8e72e7ed7..bf45bbbe4 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -9,23 +9,23 @@ - Cli - Docs - Extend - * [Config](_vendor/github.com/docker/cli/docs/extend/config.md) + * [Plugin Config Version 1 Of Plugin V 2](_vendor/github.com/docker/cli/docs/extend/config.md) * [EBS Volume](_vendor/github.com/docker/cli/docs/extend/EBS_volume.md) - * [Index](_vendor/github.com/docker/cli/docs/extend/index.md) - * [Legacy Plugins](_vendor/github.com/docker/cli/docs/extend/legacy_plugins.md) - * [Plugin Api](_vendor/github.com/docker/cli/docs/extend/plugin_api.md) - * [Plugins Authorization](_vendor/github.com/docker/cli/docs/extend/plugins_authorization.md) + * [Docker Engine Managed Plugin System](_vendor/github.com/docker/cli/docs/extend/index.md) + * [Use Docker Engine Plugins](_vendor/github.com/docker/cli/docs/extend/legacy_plugins.md) + * [Docker Plugin 
API](_vendor/github.com/docker/cli/docs/extend/plugin_api.md) + * [Access Authorization Plugin](_vendor/github.com/docker/cli/docs/extend/plugins_authorization.md) * [Plugins Graphdriver](_vendor/github.com/docker/cli/docs/extend/plugins_graphdriver.md) - * [Plugins Logging](_vendor/github.com/docker/cli/docs/extend/plugins_logging.md) - * [Plugins Metrics](_vendor/github.com/docker/cli/docs/extend/plugins_metrics.md) - * [Plugins Network](_vendor/github.com/docker/cli/docs/extend/plugins_network.md) + * [Docker Log Driver Plugins](_vendor/github.com/docker/cli/docs/extend/plugins_logging.md) + * [Docker Metrics Collector Plugins](_vendor/github.com/docker/cli/docs/extend/plugins_metrics.md) + * [Docker Network Driver Plugins](_vendor/github.com/docker/cli/docs/extend/plugins_network.md) * [Plugins And Services](_vendor/github.com/docker/cli/docs/extend/plugins_services.md) - * [Plugins Volume](_vendor/github.com/docker/cli/docs/extend/plugins_volume.md) + * [Docker Volume Plugins](_vendor/github.com/docker/cli/docs/extend/plugins_volume.md) - Reference - Commandline * [Use The Docker Command Line](_vendor/github.com/docker/cli/docs/reference/commandline/cli.md) * [Dockerd](_vendor/github.com/docker/cli/docs/reference/commandline/dockerd.md) - * [Run](_vendor/github.com/docker/cli/docs/reference/run.md) + * [Running Containers](_vendor/github.com/docker/cli/docs/reference/run.md) * [Deprecated](_vendor/github.com/docker/cli/docs/deprecated.md) - Compose - V 2 @@ -313,7 +313,6 @@ * [JSON File Logging Driver](content/config/containers/logging/json-file.md) * [Local File Logging Driver](content/config/containers/logging/local.md) * [Customize Log Driver Output](content/config/containers/logging/log_tags.md) - * [Logentries Logging Driver Deprecated](content/config/containers/logging/logentries.md) * [Use A Logging Driver Plugin](content/config/containers/logging/plugins.md) * [Splunk Logging Driver](content/config/containers/logging/splunk.md) * [Syslog Logging Driver](content/config/containers/logging/syslog.md) @@ -576,6 +575,7 @@ * [v1.41](content/engine/api/v1.41.md) * [v1.42](content/engine/api/v1.42.md) * [v1.43](content/engine/api/v1.43.md) + * [v1.44](content/engine/api/v1.44.md) - Context * [Docker Contexts](content/engine/context/working-with-contexts.md) - Install @@ -592,9 +592,6 @@ * [Install Docker Engine On Ubuntu](content/engine/install/ubuntu.md) - Reference - [Commandline](content/engine/reference/commandline/README.md) - * [Docker Attach](content/engine/reference/commandline/attach.md) - * [Docker Build](content/engine/reference/commandline/build.md) - * [Docker Builder Build](content/engine/reference/commandline/builder_build.md) * [Docker Builder Prune](content/engine/reference/commandline/builder_prune.md) * [Docker Builder](content/engine/reference/commandline/builder.md) * [Docker Buildx Bake](content/engine/reference/commandline/buildx_bake.md) @@ -618,7 +615,6 @@ * [Docker Checkpoint Ls](content/engine/reference/commandline/checkpoint_ls.md) * [Docker Checkpoint Rm](content/engine/reference/commandline/checkpoint_rm.md) * [Docker Checkpoint](content/engine/reference/commandline/checkpoint.md) - * [Docker Commit](content/engine/reference/commandline/commit.md) * [Docker Compose Alpha Dry Run](content/engine/reference/commandline/compose_alpha_dry-run.md) * [Docker Compose Alpha Publish](content/engine/reference/commandline/compose_alpha_publish.md) * [Docker Compose Alpha Scale](content/engine/reference/commandline/compose_alpha_scale.md) @@ -693,14 +689,7 @@ * 
[Docker Context Update](content/engine/reference/commandline/context_update.md) * [Docker Context Use](content/engine/reference/commandline/context_use.md) * [Docker Context](content/engine/reference/commandline/context.md) - * [Docker Cp](content/engine/reference/commandline/cp.md) - * [Docker Create](content/engine/reference/commandline/create.md) - * [Docker Diff](content/engine/reference/commandline/diff.md) * [Docker](content/engine/reference/commandline/docker.md) - * [Docker Events](content/engine/reference/commandline/events.md) - * [Docker Exec](content/engine/reference/commandline/exec.md) - * [Docker Export](content/engine/reference/commandline/export.md) - * [Docker History](content/engine/reference/commandline/history.md) * [Docker Image Build](content/engine/reference/commandline/image_build.md) * [Docker Image History](content/engine/reference/commandline/image_history.md) * [Docker Image Import](content/engine/reference/commandline/image_import.md) @@ -714,16 +703,10 @@ * [Docker Image Save](content/engine/reference/commandline/image_save.md) * [Docker Image Tag](content/engine/reference/commandline/image_tag.md) * [Docker Image](content/engine/reference/commandline/image.md) - * [Docker Images](content/engine/reference/commandline/images.md) - * [Docker Import](content/engine/reference/commandline/import.md) - * [Docker Info](content/engine/reference/commandline/info.md) * [Docker Init](content/engine/reference/commandline/init.md) * [Docker Inspect](content/engine/reference/commandline/inspect.md) - * [Docker Kill](content/engine/reference/commandline/kill.md) - * [Docker Load](content/engine/reference/commandline/load.md) * [Docker Login](content/engine/reference/commandline/login.md) * [Docker Logout](content/engine/reference/commandline/logout.md) - * [Docker Logs](content/engine/reference/commandline/logs.md) * [Docker Manifest Annotate](content/engine/reference/commandline/manifest_annotate.md) * [Docker Manifest Create](content/engine/reference/commandline/manifest_create.md) * [Docker Manifest Inspect](content/engine/reference/commandline/manifest_inspect.md) @@ -746,7 +729,6 @@ * [Docker Node Rm](content/engine/reference/commandline/node_rm.md) * [Docker Node Update](content/engine/reference/commandline/node_update.md) * [Docker Node](content/engine/reference/commandline/node.md) - * [Docker Pause](content/engine/reference/commandline/pause.md) * [Docker Plugin Create](content/engine/reference/commandline/plugin_create.md) * [Docker Plugin Disable](content/engine/reference/commandline/plugin_disable.md) * [Docker Plugin Enable](content/engine/reference/commandline/plugin_enable.md) @@ -758,16 +740,6 @@ * [Docker Plugin Set](content/engine/reference/commandline/plugin_set.md) * [Docker Plugin Upgrade](content/engine/reference/commandline/plugin_upgrade.md) * [Docker Plugin](content/engine/reference/commandline/plugin.md) - * [Docker Port](content/engine/reference/commandline/port.md) - * [Docker Ps](content/engine/reference/commandline/ps.md) - * [Docker Pull](content/engine/reference/commandline/pull.md) - * [Docker Push](content/engine/reference/commandline/push.md) - * [Docker Rename](content/engine/reference/commandline/rename.md) - * [Docker Restart](content/engine/reference/commandline/restart.md) - * [Docker Rm](content/engine/reference/commandline/rm.md) - * [Docker Rmi](content/engine/reference/commandline/rmi.md) - * [Docker Run](content/engine/reference/commandline/run.md) - * [Docker Save](content/engine/reference/commandline/save.md) * [Docker Scout 
Cache Df](content/engine/reference/commandline/scout_cache_df.md) * [Docker Scout Cache Prune](content/engine/reference/commandline/scout_cache_prune.md) * [Docker Scout Cache](content/engine/reference/commandline/scout_cache.md) @@ -815,9 +787,6 @@ * [Docker Stack Rm](content/engine/reference/commandline/stack_rm.md) * [Docker Stack Services](content/engine/reference/commandline/stack_services.md) * [Docker Stack](content/engine/reference/commandline/stack.md) - * [Docker Start](content/engine/reference/commandline/start.md) - * [Docker Stats](content/engine/reference/commandline/stats.md) - * [Docker Stop](content/engine/reference/commandline/stop.md) * [Docker Swarm Ca](content/engine/reference/commandline/swarm_ca.md) * [Docker Swarm Init](content/engine/reference/commandline/swarm_init.md) * [Docker Swarm Join Token](content/engine/reference/commandline/swarm_join-token.md) @@ -832,8 +801,6 @@ * [Docker System Info](content/engine/reference/commandline/system_info.md) * [Docker System Prune](content/engine/reference/commandline/system_prune.md) * [Docker System](content/engine/reference/commandline/system.md) - * [Docker Tag](content/engine/reference/commandline/tag.md) - * [Docker Top](content/engine/reference/commandline/top.md) * [Docker Trust Inspect](content/engine/reference/commandline/trust_inspect.md) * [Docker Trust Key Generate](content/engine/reference/commandline/trust_key_generate.md) * [Docker Trust Key Load](content/engine/reference/commandline/trust_key_load.md) @@ -844,8 +811,6 @@ * [Docker Trust Signer Remove](content/engine/reference/commandline/trust_signer_remove.md) * [Docker Trust Signer](content/engine/reference/commandline/trust_signer.md) * [Docker Trust](content/engine/reference/commandline/trust.md) - * [Docker Unpause](content/engine/reference/commandline/unpause.md) - * [Docker Update](content/engine/reference/commandline/update.md) * [Docker Version](content/engine/reference/commandline/version.md) * [Docker Volume Create](content/engine/reference/commandline/volume_create.md) * [Docker Volume Inspect](content/engine/reference/commandline/volume_inspect.md) @@ -854,7 +819,6 @@ * [Docker Volume Rm](content/engine/reference/commandline/volume_rm.md) * [Docker Volume Update](content/engine/reference/commandline/volume_update.md) * [Docker Volume](content/engine/reference/commandline/volume.md) - * [Docker Wait](content/engine/reference/commandline/wait.md) - Release Notes * [Docker Engine 17.03 release notes](content/engine/release-notes/17.03.md) * [Docker Engine 17.04 release notes](content/engine/release-notes/17.04.md) @@ -876,6 +840,7 @@ * [Docker Engine 20.10 release notes](content/engine/release-notes/20.10.md) * [Docker Engine 23.0 release notes](content/engine/release-notes/23.0.md) * [Docker Engine 24.0 release notes](content/engine/release-notes/24.0.md) + * [Docker Engine 25.0 release notes](content/engine/release-notes/25.0.md) * [Docker Engine Release Notes](content/engine/release-notes/prior-releases.md) - Sbom * [Generate The SBOM For Docker Images](content/engine/sbom/_index.md) @@ -970,6 +935,11 @@ * [Educational Resources](content/get-started/resources.md) * [Deploy To Swarm](content/get-started/swarm-deploy.md) - Guides + - Use Case + - Genai Pdf Bot + * [Generative AI Guide](content/guides/use-case/genai-pdf-bot/_index.md) + * [Containerize A Generative AI Application](content/guides/use-case/genai-pdf-bot/containerize.md) + * [Use Containers For Generative AI Development](content/guides/use-case/genai-pdf-bot/develop.md) - Walkthroughs 
* [Access A Local Folder From A Container](content/guides/walkthroughs/access-local-folder.md) * [Containerize Your Application](content/guides/walkthroughs/containerize-your-app.md) diff --git a/_vendor/github.com/docker/cli/docs/deprecated.md b/_vendor/github.com/docker/cli/docs/deprecated.md index 4a9cd2376..7a68c0f60 100644 --- a/_vendor/github.com/docker/cli/docs/deprecated.md +++ b/_vendor/github.com/docker/cli/docs/deprecated.md @@ -50,8 +50,11 @@ The table below provides an overview of the current status of deprecated feature | Status | Feature | Deprecated | Remove | |------------|------------------------------------------------------------------------------------------------------------------------------------|------------|--------| -| Deprecated | [logentries logging driver](#logentries-logging-driver) | v24.0 | v25.0 | -| Deprecated | [OOM-score adjust for the daemon](#oom-score-adjust-for-the-daemon) | v24.0 | v25.0 | +| Deprecated | [Deprecate legacy API versions](#deprecate-legacy-api-versions) | v25.0 | v26.0 | +| Deprecated | [Container short ID in network Aliases field](#container-short-id-in-network-aliases-field) | v25.0 | v26.0 | +| Deprecated | [IsAutomated field, and "is-automated" filter on docker search](#isautomated-field-and-is-automated-filter-on-docker-search) | v25.0 | v26.0 | +| Removed | [logentries logging driver](#logentries-logging-driver) | v24.0 | v25.0 | +| Removed | [OOM-score adjust for the daemon](#oom-score-adjust-for-the-daemon) | v24.0 | v25.0 | | Removed | [Buildkit build information](#buildkit-build-information) | v23.0 | v24.0 | | Deprecated | [Legacy builder for Linux images](#legacy-builder-for-linux-images) | v23.0 | - | | Deprecated | [Legacy builder fallback](#legacy-builder-fallback) | v23.0 | - | @@ -76,7 +79,7 @@ The table below provides an overview of the current status of deprecated feature | Removed | [Support for the `overlay2.override_kernel_check` storage option](#support-for-the-overlay2override_kernel_check-storage-option) | v19.03 | v24.0 | | Removed | [AuFS storage driver](#aufs-storage-driver) | v19.03 | v24.0 | | Removed | [Legacy "overlay" storage driver](#legacy-overlay-storage-driver) | v18.09 | v24.0 | -| Disabled | [Device mapper storage driver](#device-mapper-storage-driver) | v18.09 | - | +| Removed | [Device mapper storage driver](#device-mapper-storage-driver) | v18.09 | v25.0 | | Removed | [Use of reserved namespaces in engine labels](#use-of-reserved-namespaces-in-engine-labels) | v18.06 | v20.10 | | Removed | [`--disable-legacy-registry` override daemon option](#--disable-legacy-registry-override-daemon-option) | v17.12 | v19.03 | | Removed | [Interacting with V1 registries](#interacting-with-v1-registries) | v17.06 | v17.12 | @@ -107,18 +110,104 @@ The table below provides an overview of the current status of deprecated feature | Removed | [`--run` flag on `docker commit`](#--run-flag-on-docker-commit) | v0.10 | v1.13 | | Removed | [Three arguments form in `docker import`](#three-arguments-form-in-docker-import) | v0.6.7 | v1.12 | +### Deprecate legacy API versions + +**Deprecated in Release: v25.0** +**Target For Removal In Release: v26.0** + +The Docker daemon provides a versioned API for backward compatibility with old +clients. Docker clients can perform API-version negotiation to select the most +recent API version supported by the daemon (downgrading to and older version of +the API when necessary). 
API version negotiation was introduced in Docker v1.12.0
+(API 1.24), and clients before that used a fixed API version.
+
+Docker Engine versions through v25.0 provide support for all [API versions](https://docs.docker.com/engine/api/#api-version-matrix)
+included in stable releases for a given platform. For Docker daemons on Linux,
+the earliest supported API version is 1.12 (corresponding with Docker Engine
+v1.0.0), whereas for Docker daemons on Windows, the earliest supported API
+version is 1.24 (corresponding with Docker Engine v1.12.0).
+
+Support for legacy API versions (providing old API versions on current versions
+of the Docker Engine) is primarily intended to provide compatibility with recent,
+but still supported versions of the client, which is a common scenario (the Docker
+daemon may be updated to the latest release, but not all clients may be up-to-date
+or vice versa). Support for API versions before that (API versions provided by
+EOL versions of the Docker daemon) is provided on a "best effort" basis.
+
+Use of old API versions is very rare, and support for legacy API versions
+involves significant complexity (Docker 1.0.0 having been released 10 years ago).
+Because of this, we'll start deprecating support for legacy API versions.
+
+Docker Engine v25.0 by default disables API versions older than 1.24 (aligning
+the minimum supported API version between Linux and Windows daemons). When
+connecting with a client that uses an API version older than 1.24,
+the daemon returns an error. The following example configures the Docker
+CLI to use API version 1.23, which produces an error:
+
+```console
+DOCKER_API_VERSION=1.23 docker version
+Error response from daemon: client version 1.23 is too old. Minimum supported API version is 1.24, please upgrade your client to a newer version
+```
+
+An environment variable (`DOCKER_MIN_API_VERSION`) is introduced that allows
+re-enabling older API versions in the daemon. This environment variable must
+be set in the daemon's environment (for example, through a [systemd override
+file](https://docs.docker.com/config/daemon/systemd/)), and the specified
+API version must be supported by the daemon (`1.12` or higher on Linux, or
+`1.24` or higher on Windows).
+
+Support for API versions lower than `1.24` will be permanently removed in Docker
+Engine v26, and the minimum supported API version will be incrementally raised
+in releases following that.
+
+We do not recommend depending on the `DOCKER_MIN_API_VERSION` environment
+variable other than for exceptional cases where it's not possible to update
+old clients, and those clients must be supported.
+
+### Container short ID in network Aliases field
+
+**Deprecated in Release: v25.0**
+**Target For Removal In Release: v26.0**
+
+The `Aliases` field returned by `docker inspect` contains the container short
+ID once the container is started. This behavior is deprecated in v25.0 but
+kept until the next release, v26.0. Starting with that version, the `Aliases`
+field will only contain the aliases set through the `docker container create`
+and `docker run` flag `--network-alias`.
+
+A new field `DNSNames` containing the container name (if one was specified),
+the hostname, the network aliases, as well as the container short ID, has been
+introduced in v25.0 and should be used instead of the `Aliases` field.
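+
+As a rough illustration, assuming a user-defined network named `mynet` and a
+container named `c1` (both arbitrary names), the new `DNSNames` field can be
+read with `docker inspect`; the output below is illustrative and the short ID
+will differ on your system:
+
+```console
+$ docker network create mynet
+$ docker run -d --name c1 --network mynet --network-alias web nginx:alpine
+$ docker inspect --format '{{json .NetworkSettings.Networks.mynet.DNSNames}}' c1
+["c1","web","4f66ad9a47c1"]
+```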
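+
+The `DOCKER_MIN_API_VERSION` variable described above must be set in the
+daemon's environment rather than on the client. As a minimal sketch, assuming
+the daemon runs under systemd as `docker.service` (the drop-in file name is
+arbitrary), an override could be created as follows:
+
+```console
+$ sudo mkdir -p /etc/systemd/system/docker.service.d
+$ printf '[Service]\nEnvironment="DOCKER_MIN_API_VERSION=1.12"\n' | sudo tee /etc/systemd/system/docker.service.d/api-version.conf
+$ sudo systemctl daemon-reload
+$ sudo systemctl restart docker
+```
+
+After the restart, clients pinned to an older API version (`1.12` or higher on
+Linux) should be able to connect again.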
+ +### IsAutomated field, and "is-automated" filter on docker search + +**Deprecated in Release: v25.0** +**Target For Removal In Release: v26.0** + +The "is_automated" field has been deprecated by Docker Hub's search API. +Consequently, the `IsAutomated` field in image search will always be set +to `false` in future, and searching for "is-automated=true" will yield no +results. + +The `AUTOMATED` column has been removed from the default `docker search` +and `docker image search` output in v25.0, and the corresponding `IsAutomated` +templating option will be removed in v26.0. + ### Logentries logging driver -**Target For Removal In Release: v25.0** +**Deprecated in Release: v24.0** +**Removed in Release: v25.0** The logentries service SaaS was shut down on November 15, 2022, rendering this logging driver non-functional. Users should no longer use this logging -driver, and the driver will be removed in Docker 25.0. +driver, and the driver has been removed in Docker 25.0. Existing containers +using this logging-driver are migrated to use the "local" logging driver +after upgrading. ### OOM-score adjust for the daemon **Deprecated in Release: v24.0** -**Target For Removal In Release: v25.0** +**Removed in Release: v25.0** The `oom-score-adjust` option was added to prevent the daemon from being OOM-killed before other processes. This option was mostly added as a @@ -389,9 +478,6 @@ Given that the old file format encourages insecure storage of credentials Docker v1.7.0 has created this file, support for this file, and its format has been removed. -A warning is printed in situations where the CLI would fall back to the old file, -notifying the user that the legacy file is present, but ignored. - ### Configuration options for experimental CLI features **Deprecated in Release: v19.03** @@ -571,11 +657,12 @@ backported), there is no reason to keep maintaining the `overlay` storage driver **Deprecated in Release: v18.09** **Disabled by default in Release: v23.0** +**Removed in Release: v25.0** -The `devicemapper` storage driver is deprecated in favor of `overlay2`, and will -be removed in a future release. Users of the `devicemapper` storage driver are -recommended to migrate to a different storage driver, such as `overlay2`, which -is now the default storage driver. +The `devicemapper` storage driver is deprecated in favor of `overlay2`, and has +been removed in Docker Engine v25.0. Users of the `devicemapper` storage driver +must migrate to a different storage driver, such as `overlay2`, before upgrading +to Docker Engine v25.0. The `devicemapper` storage driver facilitates running Docker on older (3.x) kernels that have no support for other storage drivers (such as overlay2, or btrfs). @@ -584,24 +671,6 @@ Now that support for `overlay2` is added to all supported distros (as they are either on kernel 4.x, or have support for multiple lowerdirs backported), there is no reason to continue maintenance of the `devicemapper` storage driver. -#### Disabled by default in v23.0 - -Docker already prevented deprecated storage drivers from being automatically -selected on new installations, but continued to use these drivers when upgrading -existing installations. 
Starting with the v23.0 release, the Docker Engine will -fail to start if a deprecated storage driver is used (see [moby#43378](https://github.com/moby/moby/pull/43378): - -```console -failed to start daemon: error initializing graphdriver: prior storage driver -devicemapper is deprecated and will be removed in a future release; update the the daemon -configuration and explicitly choose this storage driver to continue using it; -visit https://docs.docker.com/go/storage-driver/ for more information. -``` - -To continue using the storage driver, update the daemon configuration to use -explicitly use the given storage driver. Users are encouraged to migrate to -different storage driver. - ### Use of reserved namespaces in engine labels **Deprecated in Release: v18.06** diff --git a/_vendor/github.com/docker/cli/docs/extend/config.md b/_vendor/github.com/docker/cli/docs/extend/config.md index 6af92c55b..1f96d3e00 100644 --- a/_vendor/github.com/docker/cli/docs/extend/config.md +++ b/_vendor/github.com/docker/cli/docs/extend/config.md @@ -1,192 +1,185 @@ --- description: "How to develop and use a plugin with the managed plugin system" keywords: "API, Usage, plugins, documentation, developer" +title: Plugin Config Version 1 of Plugin V2 --- - - - -# Plugin Config Version 1 of Plugin V2 - This document outlines the format of the V0 plugin configuration. -Plugin configs describe the various constituents of a docker plugin. Plugin -configs can be serialized to JSON format with the following media types: +Plugin configs describe the various constituents of a Docker engine plugin. +Plugin configs can be serialized to JSON format with the following media types: | Config Type | Media Type | |-------------|-----------------------------------------| -| config | "application/vnd.docker.plugin.v1+json" | +| config | `application/vnd.docker.plugin.v1+json` | -## *Config* Field Descriptions +## Config Field Descriptions -Config provides the base accessible fields for working with V0 plugin format - in the registry. +Config provides the base accessible fields for working with V0 plugin format in +the registry. -- **`description`** *string* +- `description` string - description of the plugin + Description of the plugin -- **`documentation`** *string* +- `documentation` string - link to the documentation about the plugin + Link to the documentation about the plugin -- **`interface`** *PluginInterface* +- `interface` PluginInterface - interface implemented by the plugins, struct consisting of the following fields + Interface implemented by the plugins, struct consisting of the following fields: - - **`types`** *string array* + - `types` string array - types indicate what interface(s) the plugin currently implements. + Types indicate what interface(s) the plugin currently implements. - currently supported: + Supported types: - - **docker.volumedriver/1.0** + - `docker.volumedriver/1.0` - - **docker.networkdriver/1.0** + - `docker.networkdriver/1.0` - - **docker.ipamdriver/1.0** + - `docker.ipamdriver/1.0` - - **docker.authz/1.0** + - `docker.authz/1.0` - - **docker.logdriver/1.0** + - `docker.logdriver/1.0` - - **docker.metricscollector/1.0** + - `docker.metricscollector/1.0` - - **`socket`** *string* + - `socket` string - socket is the name of the socket the engine should use to communicate with the plugins. - the socket will be created in `/run/docker/plugins`. + Socket is the name of the socket the engine should use to communicate with the plugins. + the socket will be created in `/run/docker/plugins`. 
+- `entrypoint` string array -- **`entrypoint`** *string array* + Entrypoint of the plugin, see [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#entrypoint) - entrypoint of the plugin, see [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#entrypoint) +- `workdir` string -- **`workdir`** *string* + Working directory of the plugin, see [`WORKDIR`](https://docs.docker.com/engine/reference/builder/#workdir) - workdir of the plugin, see [`WORKDIR`](https://docs.docker.com/engine/reference/builder/#workdir) +- `network` PluginNetwork -- **`network`** *PluginNetwork* + Network of the plugin, struct consisting of the following fields: - network of the plugin, struct consisting of the following fields + - `type` string - - **`type`** *string* + Network type. - network type. + Supported types: - currently supported: + - `bridge` + - `host` + - `none` - - **bridge** - - **host** - - **none** +- `mounts` PluginMount array -- **`mounts`** *PluginMount array* + Mount of the plugin, struct consisting of the following fields. + See [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts). - mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts) + - `name` string - - **`name`** *string* + Name of the mount. - name of the mount. + - `description` string - - **`description`** *string* + Description of the mount. - description of the mount. + - `source` string - - **`source`** *string* + Source of the mount. - source of the mount. + - `destination` string - - **`destination`** *string* + Destination of the mount. - destination of the mount. + - `type` string - - **`type`** *string* + Mount type. - mount type. + - `options` string array - - **`options`** *string array* + Options of the mount. - options of the mount. +- `ipchost` Boolean -- **`ipchost`** *boolean* Access to host ipc namespace. -- **`pidhost`** *boolean* - Access to host pid namespace. -- **`propagatedMount`** *string* +- `pidhost` Boolean + + Access to host PID namespace. + +- `propagatedMount` string - path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins. - This path will be bind-mounted outside of the plugin rootfs so it's contents - are preserved on upgrade. + Path to be mounted as rshared, so that mounts under that path are visible to + Docker. This is useful for volume plugins. This path will be bind-mounted + outside of the plugin rootfs so it's contents are preserved on upgrade. -- **`env`** *PluginEnv array* +- `env` PluginEnv array - env of the plugin, struct consisting of the following fields + Environment variables of the plugin, struct consisting of the following fields: - - **`name`** *string* + - `name` string - name of the env. + Name of the environment variable. - - **`description`** *string* + - `description` string - description of the env. + Description of the environment variable. - - **`value`** *string* + - `value` string - value of the env. + Value of the environment variable. -- **`args`** *PluginArgs* +- `args` PluginArgs - args of the plugin, struct consisting of the following fields + Arguments of the plugin, struct consisting of the following fields: - - **`name`** *string* + - `name` string - name of the args. + Name of the arguments. - - **`description`** *string* + - `description` string - description of the args. + Description of the arguments. 
- - **`value`** *string array* + - `value` string array - values of the args. + Values of the arguments. -- **`linux`** *PluginLinux* +- `linux` PluginLinux - - **`capabilities`** *string array* + - `capabilities` string array - capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) + Capabilities of the plugin (Linux only), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) - - **`allowAllDevices`** *boolean* + - `allowAllDevices` Boolean - If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. + If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. - - **`devices`** *PluginDevice array* + - `devices` PluginDevice array - device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices) + Device of the plugin, (Linux only), struct consisting of the following fields. + See [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices). - - **`name`** *string* + - `name` string - name of the device. + Name of the device. - - **`description`** *string* + - `description` string - description of the device. + Description of the device. - - **`path`** *string* + - `path` string - path of the device. + Path of the device. ## Example Config -*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* +The following example shows the 'tiborvass/sample-volume-plugin' plugin config. ```json { diff --git a/_vendor/github.com/docker/cli/docs/extend/index.md b/_vendor/github.com/docker/cli/docs/extend/index.md index b0c2f2958..22ab0f3f3 100644 --- a/_vendor/github.com/docker/cli/docs/extend/index.md +++ b/_vendor/github.com/docker/cli/docs/extend/index.md @@ -1,24 +1,14 @@ --- +title: Docker Engine managed plugin system description: Develop and use a plugin with the managed plugin system keywords: "API, Usage, plugins, documentation, developer" --- - - -# Docker Engine managed plugin system - - [Installing and using a plugin](index.md#installing-and-using-a-plugin) - [Developing a plugin](index.md#developing-a-plugin) - [Debugging plugins](index.md#debugging-plugins) -Docker Engine's plugin system allows you to install, start, stop, and remove +Docker Engine's plugin system lets you install, start, stop, and remove plugins using Docker Engine. For information about legacy (non-managed) plugins, refer to @@ -49,78 +39,78 @@ enabled, and use it to create a volume. > **Note** > > This example is intended for instructional purposes only. Once the volume is -> created, your SSH password to the remote host will be exposed as plaintext -> when inspecting the volume. You should delete the volume as soon as you are -> done with the example. +> created, your SSH password to the remote host is exposed as plaintext when +> inspecting the volume. Delete the volume as soon as you are done with the +> example. -1. Install the `sshfs` plugin. +1. Install the `sshfs` plugin. - ```console - $ docker plugin install vieux/sshfs + ```console + $ docker plugin install vieux/sshfs - Plugin "vieux/sshfs" is requesting the following privileges: - - network: [host] - - capabilities: [CAP_SYS_ADMIN] - Do you grant the above permissions? 
[y/N] y + Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - capabilities: [CAP_SYS_ADMIN] + Do you grant the above permissions? [y/N] y - vieux/sshfs - ``` + vieux/sshfs + ``` - The plugin requests 2 privileges: + The plugin requests 2 privileges: - - It needs access to the `host` network. - - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run - the `mount` command. + - It needs access to the `host` network. + - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run + the `mount` command. -2. Check that the plugin is enabled in the output of `docker plugin ls`. +2. Check that the plugin is enabled in the output of `docker plugin ls`. - ```console - $ docker plugin ls + ```console + $ docker plugin ls - ID NAME TAG DESCRIPTION ENABLED - 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true - ``` + ID NAME TAG DESCRIPTION ENABLED + 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true + ``` -3. Create a volume using the plugin. - This example mounts the `/remote` directory on host `1.2.3.4` into a - volume named `sshvolume`. +3. Create a volume using the plugin. + This example mounts the `/remote` directory on host `1.2.3.4` into a + volume named `sshvolume`. - This volume can now be mounted into containers. + This volume can now be mounted into containers. - ```console - $ docker volume create \ - -d vieux/sshfs \ - --name sshvolume \ - -o sshcmd=user@1.2.3.4:/remote \ - -o password=$(cat file_containing_password_for_remote_host) + ```console + $ docker volume create \ + -d vieux/sshfs \ + --name sshvolume \ + -o sshcmd=user@1.2.3.4:/remote \ + -o password=$(cat file_containing_password_for_remote_host) - sshvolume - ``` + sshvolume + ``` -4. Verify that the volume was created successfully. +4. Verify that the volume was created successfully. - ```console - $ docker volume ls + ```console + $ docker volume ls - DRIVER NAME - vieux/sshfs sshvolume - ``` + DRIVER NAME + vieux/sshfs sshvolume + ``` -5. Start a container that uses the volume `sshvolume`. +5. Start a container that uses the volume `sshvolume`. - ```console - $ docker run --rm -v sshvolume:/data busybox ls /data + ```console + $ docker run --rm -v sshvolume:/data busybox ls /data - - ``` + + ``` -6. Remove the volume `sshvolume` +6. Remove the volume `sshvolume` - ```console - $ docker volume rm sshvolume + ```console + $ docker volume rm sshvolume - sshvolume - ``` + sshvolume + ``` To disable a plugin, use the `docker plugin disable` command. To completely remove it, use the `docker plugin remove` command. For other available @@ -134,8 +124,10 @@ commands and options, see the The `rootfs` directory represents the root filesystem of the plugin. In this example, it was created from a Dockerfile: -> **Note:** The `/run/docker/plugins` directory is mandatory inside of the -> plugin's filesystem for docker to communicate with the plugin. +> **Note** +> +> The `/run/docker/plugins` directory is mandatory inside of the +> plugin's filesystem for Docker to communicate with the plugin. ```console $ git clone https://github.com/vieux/docker-volume-sshfs @@ -219,11 +211,10 @@ INFO[0421] Path Called... Returned path /data/samplevol plugin=f52a3df433b9a INFO[0421] Unmount Called... Unmounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 ``` -#### Using docker-runc to obtain logfiles and shell into the plugin. +#### Using runc to obtain logfiles and shell into the plugin. 
-`docker-runc`, the default docker container runtime can be used for debugging -plugins. This is specifically useful to collect plugin logs if they are -redirected to a file. +Use `runc`, the default docker container runtime, for debugging plugins by +collecting plugin logs redirected to a file. ```console $ sudo runc --root /run/docker/runtime-runc/plugins.moby list diff --git a/_vendor/github.com/docker/cli/docs/extend/legacy_plugins.md b/_vendor/github.com/docker/cli/docs/extend/legacy_plugins.md index 6c842ac47..0086761c1 100644 --- a/_vendor/github.com/docker/cli/docs/extend/legacy_plugins.md +++ b/_vendor/github.com/docker/cli/docs/extend/legacy_plugins.md @@ -1,21 +1,11 @@ --- +title: Use Docker Engine plugins aliases: - "/engine/extend/plugins/" description: "How to add additional functionality to Docker with plugins extensions" keywords: "Examples, Usage, plugins, docker, documentation, user guide" --- - - -# Use Docker Engine plugins - This document describes the Docker Engine plugins generally available in Docker Engine. To view information on plugins managed by Docker, refer to [Docker Engine plugin system](index.md). @@ -40,19 +30,15 @@ Follow the instructions in the plugin's documentation. ## Finding a plugin -The sections below provide an inexhaustive overview of available plugins. - - +The sections below provide an overview of available third-party plugins. ### Network plugins | Plugin | Description | -|:-----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| :--------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. | | [Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well. | -| [Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. 
| +| [Kathará Network Plugin](https://github.com/KatharaFramework/NetworkPlugin) | Docker Network Plugin used by Kathará, an open source container-based network emulation system for showing interactive demos/lessons, testing production networks in a sandbox environment, or developing new network protocols. | ### Volume plugins @@ -101,4 +87,4 @@ of the plugin for help. The Docker team may not be able to assist you. ## Writing a plugin If you are interested in writing a plugin for Docker, or seeing how they work -under the hood, see the [docker plugins reference](plugin_api.md). +under the hood, see the [Docker plugins reference](plugin_api.md). diff --git a/_vendor/github.com/docker/cli/docs/extend/plugin_api.md b/_vendor/github.com/docker/cli/docs/extend/plugin_api.md index 812b46508..4600992c4 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugin_api.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugin_api.md @@ -1,19 +1,9 @@ --- +title: Docker Plugin API description: "How to write Docker plugins extensions " keywords: "API, Usage, plugins, documentation, developer" --- - - -# Docker Plugin API - Docker plugins are out-of-process extensions which add capabilities to the Docker Engine. @@ -26,8 +16,8 @@ If you just want to learn about or use Docker plugins, look ## What plugins are -A plugin is a process running on the same or a different host as the docker daemon, -which registers itself by placing a file on the same docker host in one of the plugin +A plugin is a process running on the same or a different host as the Docker daemon, +which registers itself by placing a file on the daemon host in one of the plugin directories described in [Plugin discovery](#plugin-discovery). Plugins have human-readable names, which are short, lowercase strings. For @@ -43,26 +33,26 @@ user or container tries to use one by name. There are three types of files which can be put in the plugin directory. -* `.sock` files are UNIX domain sockets. +* `.sock` files are Unix domain sockets. * `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`. * `.json` files are text files containing a full json specification for the plugin. -Plugins with UNIX domain socket files must run on the same docker host, whereas -plugins with spec or json files can run on a different host if a remote URL is specified. +Plugins with Unix domain socket files must run on the same host as the Docker daemon. +Plugins with `.spec` or `.json` files can run on a different host if you specify a remote URL. -UNIX domain socket files must be located under `/run/docker/plugins`, whereas +Unix domain socket files must be located under `/run/docker/plugins`, whereas spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. The name of the file (excluding the extension) determines the plugin name. -For example, the `flocker` plugin might create a UNIX socket at +For example, the `flocker` plugin might create a Unix socket at `/run/docker/plugins/flocker.sock`. You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only mount `/run/docker/plugins/flocker` inside the `flocker` container. -Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under +Docker always searches for Unix sockets in `/run/docker/plugins` first. 
It checks for spec or json files under `/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as soon as it finds the first plugin definition with the given name. @@ -87,7 +77,7 @@ The `TLSConfig` field is optional and TLS will only be verified if this configur ## Plugin lifecycle -Plugins should be started before Docker, and stopped after Docker. For +Plugins should be started before Docker, and stopped after Docker. For example, when packaging a plugin for a platform which supports `systemd`, you might use [`systemd` dependencies]( https://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to @@ -103,7 +93,7 @@ When a plugin is first referred to -- either by a user referring to it by name use a plugin being started -- Docker looks for the named plugin in the plugin directory and activates it with a handshake. See Handshake API below. -Plugins are *not* activated automatically at Docker daemon startup. Rather, +Plugins are not activated automatically at Docker daemon startup. Rather, they are activated only lazily, or on-demand, when they are needed. ## Systemd socket activation @@ -149,8 +139,8 @@ or if one of the plugin goes down accidentally). The Plugin API is RPC-style JSON over HTTP, much like webhooks. -Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to -implement an HTTP server and bind this to the UNIX socket mentioned in the +Requests flow from the Docker daemon to the plugin. The plugin needs to +implement an HTTP server and bind this to the Unix socket mentioned in the "plugin discovery" section. All requests are HTTP `POST` requests. @@ -164,9 +154,9 @@ Plugins are activated via the following "handshake" API call. ### /Plugin.Activate -**Request:** empty body +Request: empty body -**Response:** +Response: ```json { @@ -183,7 +173,6 @@ Possible values are: * [`NetworkDriver`](plugins_network.md) * [`VolumeDriver`](plugins_volume.md) - ## Plugin retries Attempts to call a method on a plugin are retried with an exponential backoff diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_authorization.md b/_vendor/github.com/docker/cli/docs/extend/plugins_authorization.md index 203c1bf2e..8a646e50f 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_authorization.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_authorization.md @@ -1,22 +1,12 @@ --- +title: Access authorization plugin description: "How to create authorization plugins to manage access control to your Docker daemon." keywords: "security, authorization, authentication, docker, documentation, plugin, extend" aliases: - "/engine/extend/authorization/" --- - - -# Access authorization plugin - -This document describes the Docker Engine plugins generally available in Docker +This document describes the Docker Engine plugins available in Docker Engine. To view information on plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). @@ -41,7 +31,7 @@ third-party components using a generic API. The access authorization subsystem was built using this mechanism. Using this subsystem, you don't need to rebuild the Docker daemon to add an -authorization plugin. You can add a plugin to an installed Docker daemon. You do +authorization plugin. You can add a plugin to an installed Docker daemon. You do need to restart the Docker daemon to add a new plugin. 
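As a rough sketch of that workflow, assuming a systemd-managed host and a hypothetical plugin named `my-authz-plugin` (the `--authorization-plugin` flag itself is listed in the [`dockerd` reference](dockerd.md)):

```console
# Hypothetical plugin name; substitute the authorization plugin you installed.
$ sudo dockerd --authorization-plugin=my-authz-plugin

# On a systemd-managed host, add the flag to the daemon configuration
# instead, then restart the daemon so the new plugin is loaded.
$ sudo systemctl restart docker
```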
An authorization plugin approves or denies requests to the Docker daemon based @@ -158,7 +148,7 @@ should implement the following two methods: #### /AuthZPlugin.AuthZReq -**Request**: +Request ```json { @@ -171,7 +161,7 @@ should implement the following two methods: } ``` -**Response**: +Response ```json { @@ -183,7 +173,7 @@ should implement the following two methods: #### /AuthZPlugin.AuthZRes -**Request**: +Request: ```json { @@ -199,7 +189,7 @@ should implement the following two methods: } ``` -**Response**: +Response: ```json { @@ -224,7 +214,6 @@ Request URI | string | The HTTP request URI including API Request headers | map[string]string | Request headers as key value pairs (without the authorization header) Request body | []byte | Raw request body - #### Plugin -> Daemon Name | Type | Description @@ -239,7 +228,6 @@ The plugin must support two authorization messages formats, one from the daemon #### Daemon -> Plugin - Name | Type | Description ----------------------- |------------------ |---------------------------------------------------- User | string | The user identification @@ -248,10 +236,9 @@ Request method | string | The HTTP method (GET/DELETE/POST) Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) Request headers | map[string]string | Request headers as key value pairs (without the authorization header) Request body | []byte | Raw request body -Response status code | int | Status code from the docker daemon +Response status code | int | Status code from the Docker daemon Response headers | map[string]string | Response headers as key value pairs -Response body | []byte | Raw docker daemon response body - +Response body | []byte | Raw Docker daemon response body #### Plugin -> Daemon diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_logging.md b/_vendor/github.com/docker/cli/docs/extend/plugins_logging.md index d6ce96cfb..2d30571fe 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_logging.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_logging.md @@ -1,19 +1,9 @@ --- +title: Docker log driver plugins description: "Log driver plugins." keywords: "Examples, Usage, plugins, docker, documentation, user guide, logging" --- - - -# Docker log driver plugins - This document describes logging driver plugins for Docker. Logging drivers enables users to forward container logs to another service for @@ -46,20 +36,21 @@ receiving logs for. Logs will be streamed over the defined file in the request. On Linux this file is a FIFO. Logging plugins are not currently supported on Windows. -**Request**: +Request: + ```json { - "File": "/path/to/file/stream", - "Info": { - "ContainerID": "123456" - } + "File": "/path/to/file/stream", + "Info": { + "ContainerID": "123456" + } } ``` `File` is the path to the log stream that needs to be consumed. Each call to `StartLogging` should provide a different file path, even if it's a container that the plugin has already received logs for prior. The file is created by -docker with a randomly generated name. +Docker with a randomly generated name. `Info` is details about the container that's being logged. This is fairly free-form, but is defined by the following struct definition: @@ -81,14 +72,14 @@ type Info struct { } ``` - `ContainerID` will always be supplied with this struct, but other fields may be empty or missing. -**Response** +Response: + ```json { - "Err": "" + "Err": "" } ``` @@ -102,12 +93,12 @@ write to its stdio streams. 
Log stream messages are encoded as protocol buffers. The protobuf definitions are in the -[docker repository](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/entry.proto). +[moby repository](https://github.com/moby/moby/blob/master/api/types/plugins/logdriver/entry.proto). Since protocol buffers are not self-delimited you must decode them from the stream using the following stream format: -``` +```text [size][message] ``` @@ -127,17 +118,19 @@ losing log data. Requests on this endpoint does not mean that the container has been removed only that it has stopped. -**Request**: +Request: + ```json { - "File": "/path/to/file/stream" + "File": "/path/to/file/stream" } ``` -**Response**: +Response: + ```json { - "Err": "" + "Err": "" } ``` @@ -154,15 +147,17 @@ Logging plugins can implement two extra logging endpoints: Defines the capabilities of the log driver. You must implement this endpoint for Docker to be able to take advantage of any of the defined capabilities. -**Request**: +Request: + ```json {} ``` -**Response**: +Response: + ```json { - "ReadLogs": true + "ReadLogs": true } ``` @@ -180,14 +175,14 @@ called. In order for Docker to use this endpoint, the plugin must specify as much when `/LogDriver.Capabilities` is called. +Request: -**Request**: ```json { - "ReadConfig": {}, - "Info": { - "ContainerID": "123456" - } + "ReadConfig": {}, + "Info": { + "ContainerID": "123456" + } } ``` @@ -210,9 +205,9 @@ as they come in once the existing logs have been read. `Info` is the same type defined in `/LogDriver.StartLogging`. It should be used to determine what set of logs to read. -**Response**: +Response: -``` +```text {{ log stream }} ``` diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_metrics.md b/_vendor/github.com/docker/cli/docs/extend/plugins_metrics.md index c698a5a13..dc47e4692 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_metrics.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_metrics.md @@ -1,20 +1,10 @@ --- +title: Docker metrics collector plugins description: "Metrics plugins." keywords: "Examples, Usage, plugins, docker, documentation, user guide, metrics" --- - - -# Docker metrics collector plugins - -Docker exposes internal metrics based on the prometheus format. Metrics plugins +Docker exposes internal metrics based on the Prometheus format. Metrics plugins enable accessing these metrics in a consistent way by providing a Unix socket at a predefined path where the plugin can scrape the metrics. @@ -44,17 +34,19 @@ plugin's rootfs. Signals to the plugin that the metrics socket is now available for scraping -**Request** +Request: + ```json {} ``` -The request has no playload. +The request has no payload. + +Response: -**Response** ```json { - "Err": "" + "Err": "" } ``` @@ -67,17 +59,19 @@ or an empty value for the `Err` field. Errors will only be logged. Signals to the plugin that the metrics socket is no longer available. This may happen when the daemon is shutting down. -**Request** +Request: + ```json {} ``` -The request has no playload. +The request has no payload. 
+ +Response: -**Response** ```json { - "Err": "" + "Err": "" } ``` diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_network.md b/_vendor/github.com/docker/cli/docs/extend/plugins_network.md index 9d92a9640..e24fd2625 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_network.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_network.md @@ -1,19 +1,9 @@ --- +title: Docker network driver plugins description: "Network driver plugins." keywords: "Examples, Usage, plugins, docker, documentation, user guide" --- - - -# Docker network driver plugins - This document describes Docker Engine network driver plugins generally available in Docker Engine. To view information on plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). @@ -26,11 +16,11 @@ LibNetwork, which shares plugin infrastructure with Engine. Effectively, network driver plugins are activated in the same way as other plugins, and use the same kind of protocol. -## Network plugins and swarm mode +## Network plugins and Swarm mode -[Legacy plugins](legacy_plugins.md) do not work in swarm mode. However, -plugins written using the [v2 plugin system](index.md) do work in swarm mode, as -long as they are installed on each swarm worker node. +[Legacy plugins](legacy_plugins.md) do not work in Swarm mode. However, +plugins written using the [v2 plugin system](index.md) do work in Swarm mode, as +long as they are installed on each Swarm worker node. ## Use network driver plugins @@ -55,12 +45,11 @@ referring to that network will be sent to the plugin, $ docker run --network=mynet busybox top ``` - ## Find network plugins Network plugins are written by third parties, and are published by those third parties, either on -[Docker Store](https://store.docker.com/search?category=network&q=&type=plugin) +[Docker Hub](https://hub.docker.com/search?q=&type=plugin) or on the third party's site. ## Write a network plugin diff --git a/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md b/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md index 23e7cddf9..3e42bc75a 100644 --- a/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md +++ b/_vendor/github.com/docker/cli/docs/extend/plugins_volume.md @@ -1,19 +1,9 @@ --- +title: Docker volume plugins description: "How to manage data with external volume plugins" keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api" --- - - -# Docker volume plugins - Docker Engine volume plugins enable Engine deployments to be integrated with external storage systems such as Amazon EBS, and enable data volumes to persist beyond the lifetime of a single Docker host. See the @@ -50,7 +40,7 @@ beyond the lifetime of a single Docker host. See the ## Command-line changes To give a container access to a volume, use the `--volume` and `--volume-driver` -flags on the `docker container run` command. The `--volume` (or `-v`) flag +flags on the `docker container run` command. The `--volume` (or `-v`) flag accepts a volume name and path on the host, and the `--volume-driver` flag accepts a driver type. @@ -98,7 +88,8 @@ the volumes available by bind-mounting the provided paths into the containers. ### `/VolumeDriver.Create` -**Request**: +Request: + ```json { "Name": "volume_name", @@ -111,18 +102,20 @@ specified volume name. The plugin does not need to actually manifest the volume on the filesystem yet (until `Mount` is called). `Opts` is a map of driver specific options passed through from the user request. 
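For context, the driver-specific options in `Opts` typically come from `-o`/`--opt` flags on the user's command line; a sketch with a hypothetical driver and option name:

```console
# Hypothetical driver and option, shown only to illustrate where the
# "Opts" map in the Create request originates.
$ docker volume create --driver=my-volume-driver -o size=20GB my_volume
```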
-**Response**: +Response: + ```json { "Err": "" } ``` -Respond with a string error if an error occurred. + Respond with a string error if an error occurred. ### `/VolumeDriver.Remove` -**Request**: +Request: + ```json { "Name": "volume_name" @@ -132,7 +125,8 @@ Respond with a string error if an error occurred. Delete the specified volume from disk. This request is issued when a user invokes `docker rm -v` to remove volumes associated with a container. -**Response**: +Response: + ```json { "Err": "" @@ -143,7 +137,8 @@ Respond with a string error if an error occurred. ### `/VolumeDriver.Mount` -**Request**: +Request: + ```json { "Name": "volume_name", @@ -158,9 +153,9 @@ at the first mount request and deprovision at the last corresponding unmount req `ID` is a unique ID for the caller that is requesting the mount. -**Response**: +Response: -- **v1**: +- v1 ```json { @@ -169,7 +164,7 @@ at the first mount request and deprovision at the last corresponding unmount req } ``` -- **v2**: +- v2 ```json { @@ -185,7 +180,7 @@ has been made available. ### `/VolumeDriver.Path` -**Request**: +Request: ```json { @@ -195,9 +190,9 @@ has been made available. Request the path to the volume with the given `volume_name`. -**Response**: +Response: -- **v1**: +- v1 ```json { @@ -206,7 +201,7 @@ Request the path to the volume with the given `volume_name`. } ``` -- **v2**: +- v2 ```json { @@ -223,7 +218,8 @@ is not provided. ### `/VolumeDriver.Unmount` -**Request**: +Request: + ```json { "Name": "volume_name", @@ -237,7 +233,8 @@ this point. `ID` is a unique ID for the caller that is requesting the mount. -**Response**: +Response: + ```json { "Err": "" @@ -246,10 +243,10 @@ this point. Respond with a string error if an error occurred. - ### `/VolumeDriver.Get` -**Request**: +Request: + ```json { "Name": "volume_name" @@ -258,10 +255,9 @@ Respond with a string error if an error occurred. Get info about `volume_name`. +Response: -**Response**: - -- **v1**: +- v1 ```json { @@ -274,7 +270,7 @@ Get info about `volume_name`. } ``` -- **v2**: +- v2 ```json { @@ -293,16 +289,17 @@ optional. ### /VolumeDriver.List -**Request**: +Request: + ```json {} ``` Get the list of volumes registered with the plugin. -**Response**: +Response: -- **v1**: +- v1 ```json { @@ -316,7 +313,7 @@ Get the list of volumes registered with the plugin. } ``` -- **v2**: +- v2 ```json { @@ -330,12 +327,12 @@ Get the list of volumes registered with the plugin. } ``` - Respond with a string error if an error occurred. `Mountpoint` is optional. ### /VolumeDriver.Capabilities -**Request**: +Request: + ```json {} ``` @@ -345,7 +342,8 @@ Get the list of capabilities the driver supports. The driver is not required to implement `Capabilities`. If it is not implemented, the default values are used. -**Response**: +Response: + ```json { "Capabilities": { diff --git a/_vendor/github.com/docker/cli/docs/reference/commandline/cli.md b/_vendor/github.com/docker/cli/docs/reference/commandline/cli.md index 1fdeb0d7a..dbfe2a5e4 100644 --- a/_vendor/github.com/docker/cli/docs/reference/commandline/cli.md +++ b/_vendor/github.com/docker/cli/docs/reference/commandline/cli.md @@ -10,105 +10,8 @@ aliases: - /engine/reference/commandline/engine_update/ --- - - -# docker - -To list available commands, either run `docker` with no parameters -or execute `docker help`: - - -The base command for the Docker CLI. 
- -### Subcommands - -| Name | Description | -|:------------------------------|:------------------------------------------------------------------------------| -| [`attach`](attach.md) | Attach local standard input, output, and error streams to a running container | -| [`build`](build.md) | Build an image from a Dockerfile | -| [`builder`](builder.md) | Manage builds | -| [`checkpoint`](checkpoint.md) | Manage checkpoints | -| [`commit`](commit.md) | Create a new image from a container's changes | -| [`config`](config.md) | Manage Swarm configs | -| [`container`](container.md) | Manage containers | -| [`context`](context.md) | Manage contexts | -| [`cp`](cp.md) | Copy files/folders between a container and the local filesystem | -| [`create`](create.md) | Create a new container | -| [`diff`](diff.md) | Inspect changes to files or directories on a container's filesystem | -| [`events`](events.md) | Get real time events from the server | -| [`exec`](exec.md) | Execute a command in a running container | -| [`export`](export.md) | Export a container's filesystem as a tar archive | -| [`history`](history.md) | Show the history of an image | -| [`image`](image.md) | Manage images | -| [`images`](images.md) | List images | -| [`import`](import.md) | Import the contents from a tarball to create a filesystem image | -| [`info`](info.md) | Display system-wide information | -| [`inspect`](inspect.md) | Return low-level information on Docker objects | -| [`kill`](kill.md) | Kill one or more running containers | -| [`load`](load.md) | Load an image from a tar archive or STDIN | -| [`login`](login.md) | Log in to a registry | -| [`logout`](logout.md) | Log out from a registry | -| [`logs`](logs.md) | Fetch the logs of a container | -| [`manifest`](manifest.md) | Manage Docker image manifests and manifest lists | -| [`network`](network.md) | Manage networks | -| [`node`](node.md) | Manage Swarm nodes | -| [`pause`](pause.md) | Pause all processes within one or more containers | -| [`plugin`](plugin.md) | Manage plugins | -| [`port`](port.md) | List port mappings or a specific mapping for the container | -| [`ps`](ps.md) | List containers | -| [`pull`](pull.md) | Download an image from a registry | -| [`push`](push.md) | Upload an image to a registry | -| [`rename`](rename.md) | Rename a container | -| [`restart`](restart.md) | Restart one or more containers | -| [`rm`](rm.md) | Remove one or more containers | -| [`rmi`](rmi.md) | Remove one or more images | -| [`run`](run.md) | Create and run a new container from an image | -| [`save`](save.md) | Save one or more images to a tar archive (streamed to STDOUT by default) | -| [`search`](search.md) | Search Docker Hub for images | -| [`secret`](secret.md) | Manage Swarm secrets | -| [`service`](service.md) | Manage Swarm services | -| [`stack`](stack.md) | Manage Swarm stacks | -| [`start`](start.md) | Start one or more stopped containers | -| [`stats`](stats.md) | Display a live stream of container(s) resource usage statistics | -| [`stop`](stop.md) | Stop one or more running containers | -| [`swarm`](swarm.md) | Manage Swarm | -| [`system`](system.md) | Manage Docker | -| [`tag`](tag.md) | Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE | -| [`top`](top.md) | Display the running processes of a container | -| [`trust`](trust.md) | Manage trust on Docker images | -| [`unpause`](unpause.md) | Unpause all processes within one or more containers | -| [`update`](update.md) | Update configuration of one or more containers | -| [`version`](version.md) | Show 
the Docker version information | -| [`volume`](volume.md) | Manage volumes | -| [`wait`](wait.md) | Block until one or more containers stop, then print their exit codes | - - -### Options - -| Name | Type | Default | Description | -|:---------------------------------|:---------|:-------------------------|:--------------------------------------------------------------------------------------------------------------------------------------| -| `--config` | `string` | `/root/.docker` | Location of client config files | -| `-c`, `--context` | `string` | | Name of the context to use to connect to the daemon (overrides DOCKER_HOST env var and default context set with `docker context use`) | -| `-D`, `--debug` | | | Enable debug mode | -| [`-H`](#host), [`--host`](#host) | `list` | | Daemon socket to connect to | -| `-l`, `--log-level` | `string` | `info` | Set the logging level (`debug`, `info`, `warn`, `error`, `fatal`) | -| `--tls` | | | Use TLS; implied by --tlsverify | -| `--tlscacert` | `string` | `/root/.docker/ca.pem` | Trust certs signed only by this CA | -| `--tlscert` | `string` | `/root/.docker/cert.pem` | Path to TLS certificate file | -| `--tlskey` | `string` | `/root/.docker/key.pem` | Path to TLS key file | -| `--tlsverify` | | | Use TLS and verify the remote | - - - - -## Description +The base command for the Docker CLI is `docker`. For information about the +available flags and subcommands, refer to the [CLI reference](docker.md) Depending on your Docker system configuration, you may be required to preface each `docker` command with `sudo`. To avoid having to use `sudo` with the @@ -124,7 +27,7 @@ The following list of environment variables are supported by the `docker` comman line: | Variable | Description | -|:------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| :---------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `DOCKER_API_VERSION` | Override the negotiated API version to use for debugging (e.g. `1.19`) | | `DOCKER_CERT_PATH` | Location of your authentication keys. This variable is used both by the `docker` CLI and the [`dockerd` daemon](dockerd.md) | | `DOCKER_CONFIG` | The location of your client configuration files. | @@ -136,7 +39,7 @@ line: | `DOCKER_HOST` | Daemon socket to connect to. | | `DOCKER_TLS` | Enable TLS for connections made by the `docker` CLI (equivalent of the `--tls` command-line option). Set to a non-empty value to enable TLS. Note that TLS is enabled automatically if any of the other TLS options are set. | | `DOCKER_TLS_VERIFY` | When set Docker uses TLS and verifies the remote. This variable is used both by the `docker` CLI and the [`dockerd` daemon](dockerd.md) | -| `BUILDKIT_PROGRESS` | Set type of progress output (`auto`, `plain`, `tty`) when [building](build.md) with [BuildKit backend](https://docs.docker.com/build/buildkit/). Use plain to show container output (default `auto`). | +| `BUILDKIT_PROGRESS` | Set type of progress output (`auto`, `plain`, `tty`) when [building](image_build.md) with [BuildKit backend](https://docs.docker.com/build/buildkit/). 
Use plain to show container output (default `auto`). | Because Docker is developed using Go, you can also use any environment variables used by the Go runtime. In particular, you may find these useful: @@ -156,7 +59,7 @@ By default, the Docker command line stores its configuration files in a directory called `.docker` within your `$HOME` directory. Docker manages most of the files in the configuration directory -and you should not modify them. However, you can modify the +and you shouldn't modify them. However, you can modify the `config.json` file to control certain aspects of how the `docker` command behaves. @@ -167,7 +70,6 @@ and the `--config` flag are set, the flag takes precedent over the environment variable. Command line options override environment variables and environment variables override properties you specify in a `config.json` file. - ### Change the `.docker` directory To specify a different directory, use the `DOCKER_CONFIG` @@ -201,7 +103,7 @@ By default, configuration file is stored in `~/.docker/config.json`. Refer to th different location. > **Warning** -> +> > The configuration file and other files inside the `~/.docker` configuration > directory may contain sensitive information, such as authentication information > for proxies or, depending on your credential store, credentials for your image @@ -210,40 +112,39 @@ different location. ### Customize the default output format for commands -These fields allow you to customize the default output format for some commands +These fields lets you customize the default output format for some commands if no `--format` flag is provided. | Property | Description | | :--------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `configFormat` | Custom default format for `docker config ls` output. See [`docker config ls`](config_ls.md#format) for a list of supported formatting directives. | -| `imagesFormat` | Custom default format for `docker images` / `docker image ls` output. See [`docker images`](images.md#format) for a list of supported formatting directives. | +| `imagesFormat` | Custom default format for `docker images` / `docker image ls` output. See [`docker images`](image_ls.md#format) for a list of supported formatting directives. | | `networksFormat` | Custom default format for `docker network ls` output. See [`docker network ls`](network_ls.md#format) for a list of supported formatting directives. | | `nodesFormat` | Custom default format for `docker node ls` output. See [`docker node ls`](node_ls.md#format) for a list of supported formatting directives. | | `pluginsFormat` | Custom default format for `docker plugin ls` output. See [`docker plugin ls`](plugin_ls.md#format) for a list of supported formatting directives. | -| `psFormat` | Custom default format for `docker ps` / `docker container ps` output. See [`docker ps`](ps.md#format) for a list of supported formatting directives. | +| `psFormat` | Custom default format for `docker ps` / `docker container ps` output. See [`docker ps`](container_ls.md#format) for a list of supported formatting directives. | | `secretFormat` | Custom default format for `docker secret ls` output. See [`docker secret ls`](secret_ls.md#format) for a list of supported formatting directives. | | `serviceInspectFormat` | Custom default format for `docker service inspect` output. 
See [`docker service inspect`](service_inspect.md#format) for a list of supported formatting directives. | | `servicesFormat` | Custom default format for `docker service ls` output. See [`docker service ls`](service_ls.md#format) for a list of supported formatting directives. | -| `statsFormat` | Custom default format for `docker stats` output. See [`docker stats`](stats.md#format) for a list of supported formatting directives. | +| `statsFormat` | Custom default format for `docker stats` output. See [`docker stats`](container_stats.md#format) for a list of supported formatting directives. | | `tasksFormat` | Custom default format for `docker stack ps` output. See [`docker stack ps`](stack_ps.md#format) for a list of supported formatting directives. | | `volumesFormat` | Custom default format for `docker volume ls` output. See [`docker volume ls`](volume_ls.md#format) for a list of supported formatting directives. | ### Custom HTTP headers The property `HttpHeaders` specifies a set of headers to include in all messages -sent from the Docker client to the daemon. Docker does not try to interpret or +sent from the Docker client to the daemon. Docker doesn't try to interpret or understand these headers; it simply puts them into the messages. Docker does not allow these headers to change any headers it sets for itself. - ### Credential store options The property `credsStore` specifies an external binary to serve as the default credential store. When this property is set, `docker login` will attempt to store credentials in the binary specified by `docker-credential-` which -is visible on `$PATH`. If this property is not set, credentials will be stored -in the `auths` property of the config. For more information, see the -[**Credential stores** section in the `docker login` documentation](login.md#credential-stores) +is visible on `$PATH`. If this property isn't set, credentials are stored +in the `auths` property of the CLI configuration file. For more information, +see the [**Credential stores** section in the `docker login` documentation](login.md#credential-stores) The property `credHelpers` specifies a set of credential helpers to use preferentially over `credsStore` or `auths` when storing and retrieving @@ -252,14 +153,13 @@ credentials for specific registries. If this property is set, the binary for a specific registry. For more information, see the [**Credential helpers** section in the `docker login` documentation](login.md#credential-helpers) - ### Automatic proxy configuration for containers The property `proxies` specifies proxy environment variables to be automatically set on containers, and set as `--build-arg` on containers used during `docker build`. -A `"default"` set of proxies can be configured, and will be used for any docker -daemon that the client connects to, or a configuration per host (docker daemon), -for example, "https://docker-daemon1.example.com". The following properties can +A `"default"` set of proxies can be configured, and will be used for any Docker +daemon that the client connects to, or a configuration per host (Docker daemon), +for example, `https://docker-daemon1.example.com`. The following properties can be set for each environment: | Property | Description | @@ -276,11 +176,12 @@ used as proxy settings for the `docker` CLI or the `dockerd` daemon. Refer to th sections for configuring proxy settings for the cli and daemon. > **Warning** -> +> > Proxy settings may contain sensitive information (for example, if the proxy > requires authentication). 
Environment variables are stored as plain text in > the container's configuration, and as such can be inspected through the remote > API or committed to an image when using `docker commit`. +{ .warning } ### Default key-sequence to detach from containers @@ -294,7 +195,7 @@ a letter [a-Z], or the `ctrl-` combined with any of the following: * `@` (at sign) * `[` (left bracket) * `\\` (two backward slashes) -* `_` (underscore) +* `_` (underscore) * `^` (caret) Your customization applies to all containers started in with your Docker client. @@ -302,13 +203,12 @@ Users can override your custom or the default key sequence on a per-container basis. To do this, the user specifies the `--detach-keys` flag with the `docker attach`, `docker exec`, `docker run` or `docker start` command. -### CLI Plugin options +### CLI plugin options The property `plugins` contains settings specific to CLI plugins. The key is the plugin name, while the value is a further map of options, which are specific to that plugin. - ### Sample configuration file Following is a sample `config.json` file to illustrate the format used for @@ -372,7 +272,7 @@ and require no configuration to enable them. If using your own notary server and a self-signed certificate or an internal Certificate Authority, you need to place the certificate at -`tls//ca.crt` in your docker config directory. +`tls//ca.crt` in your Docker config directory. Alternatively you can trust the certificate globally by adding it to your system's list of root Certificate Authorities. @@ -429,6 +329,13 @@ to the `/var/run/docker.sock` Unix socket on the SSH host. $ docker -H ssh://user@192.168.64.5 ps ``` +You can optionally specify the location of the socket by appending a path +component to the end of the SSH address. + +```console +$ docker -H ssh://user@192.168.64.5/var/run/docker.sock ps +``` + ### Display help text To list the help on any command just execute the command, followed by the diff --git a/_vendor/github.com/docker/cli/docs/reference/commandline/dockerd.md b/_vendor/github.com/docker/cli/docs/reference/commandline/dockerd.md index 1c9f8e183..9e9fd332f 100644 --- a/_vendor/github.com/docker/cli/docs/reference/commandline/dockerd.md +++ b/_vendor/github.com/docker/cli/docs/reference/commandline/dockerd.md @@ -29,6 +29,7 @@ Options: --authorization-plugin list Authorization plugins to load --bip string Specify network bridge IP -b, --bridge string Attach containers to a network bridge + --cdi-spec-dir list CDI specification directories to use --cgroup-parent string Set parent cgroup for all containers --config-file string Daemon configuration file (default "/etc/docker/daemon.json") --containerd string containerd grpc address @@ -124,25 +125,28 @@ To run the daemon with debug output, use `dockerd --debug` or add `"debug": true to [the `daemon.json` file](#daemon-configuration-file). > **Enabling experimental features** -> +> > Enable experimental features by starting `dockerd` with the `--experimental` > flag or adding `"experimental": true` to the `daemon.json` file. ### Environment variables -For easy reference, the following list of environment variables are supported -by the `dockerd` command line: +The following list of environment variables are supported by the `dockerd` daemon. +Some of these environment variables are supported both by the Docker Daemon and +the `docker` CLI. Refer to [Environment variables](cli.md#environment-variables) +in the CLI section to learn about environment variables supported by the +`docker` CLI. 
| Variable | Description | |:--------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `DOCKER_CERT_PATH` | Location of your authentication keys. This variable is used both by the [`docker` CLI](cli.md) and the `dockerd` daemon. | | `DOCKER_DRIVER` | The storage driver to use. | -| `DOCKER_RAMDISK` | If set this disables 'pivot_root'. | +| `DOCKER_RAMDISK` | If set this disables `pivot_root`. | | `DOCKER_TLS_VERIFY` | When set Docker uses TLS and verifies the remote. This variable is used both by the [`docker` CLI](cli.md) and the `dockerd` daemon. | | `DOCKER_TMPDIR` | Location for temporary files created by the daemon. | | `HTTP_PROXY` | Proxy URL for HTTP requests unless overridden by NoProxy. See the [Go specification](https://pkg.go.dev/golang.org/x/net/http/httpproxy#Config) for details. | | `HTTPS_PROXY` | Proxy URL for HTTPS requests unless overridden by NoProxy. See the [Go specification](https://pkg.go.dev/golang.org/x/net/http/httpproxy#Config) for details. | -| `MOBY_DISABLE_PIGZ` | Disables the use of [`unpigz`](https://linux.die.net/man/1/pigz) to decompress layers in parallel when pulling images, even if it is installed. | | +| `MOBY_DISABLE_PIGZ` | Disables the use of [`unpigz`](https://linux.die.net/man/1/pigz) to decompress layers in parallel when pulling images, even if it is installed. | | `NO_PROXY` | Comma-separated values specifying hosts that should be excluded from proxying. See the [Go specification](https://pkg.go.dev/golang.org/x/net/http/httpproxy#Config) for details. | ## Examples @@ -150,7 +154,7 @@ by the `dockerd` command line: ### Proxy configuration > **Note** -> +> > Refer to the [Docker Desktop manual](https://docs.docker.com/desktop/networking/#httphttps-proxy-support) > if you are running [Docker Desktop](https://docs.docker.com/desktop/). @@ -160,10 +164,10 @@ operations such as pulling and pushing images. The daemon can be configured in three ways: 1. Using environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`). -2. Using the "http-proxy", "https-proxy", and "no-proxy" fields in the - [daemon configuration file](#daemon-configuration-file) (Docker Engine 23.0 or newer). +2. Using the `http-proxy`, `https-proxy`, and `no-proxy` fields in the + [daemon configuration file](#daemon-configuration-file) (Docker Engine version 23.0 or later). 3. Using the `--http-proxy`, `--https-proxy`, and `--no-proxy` command-line - options. (Docker Engine 23.0 or newer). + options. (Docker Engine version 23.0 or later). The command-line and configuration file options take precedence over environment variables. Refer to [control and configure Docker with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy) @@ -178,11 +182,11 @@ By default, a `unix` domain socket (or IPC socket) is created at `/var/run/docker.sock`, requiring either `root` permission, or `docker` group membership. -If you need to access the Docker daemon remotely, you need to enable the `tcp` -Socket. Beware that the default setup provides un-encrypted and -un-authenticated direct access to the Docker daemon - and should be secured -either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by -putting a secure web proxy in front of it. You can listen on port `2375` on all +If you need to access the Docker daemon remotely, you need to enable the tcp +Socket. 
When using a TCP socket, the Docker daemon provides un-encrypted and +un-authenticated direct access to the Docker daemon by default. You should secure +the daemon either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/protect-access/), +or by putting a secure web proxy in front of it. You can listen on port `2375` on all network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375` for un-encrypted, and port `2376` for encrypted @@ -191,28 +195,28 @@ communication with the daemon. > **Note** > > If you're using an HTTPS encrypted socket, keep in mind that only -> TLS1.0 and greater are supported. Protocols SSLv3 and under are not -> supported anymore for security reasons. +> TLS version 1.0 and higher is supported. Protocols SSLv3 and below are not +> supported for security reasons. -On Systemd based systems, you can communicate with the daemon via -[Systemd socket activation](https://0pointer.de/blog/projects/socket-activation.html), -use `dockerd -H fd://`. Using `fd://` will work perfectly for most setups but +On systemd based systems, you can communicate with the daemon via +[systemd socket activation](https://0pointer.de/blog/projects/socket-activation.html), +with `dockerd -H fd://`. Using `fd://` works for most setups, but you can also specify individual sockets: `dockerd -H fd://3`. If the -specified socket activated files aren't found, then Docker will exit. You can -find examples of using Systemd socket activation with Docker and Systemd in the +specified socket activated files aren't found, the daemon exits. You can +find examples of using systemd socket activation with Docker and systemd in the [Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). You can configure the Docker daemon to listen to multiple sockets at the same time using multiple `-H` options: -The example below runs the daemon listening on the default unix socket, and +The example below runs the daemon listening on the default Unix socket, and on 2 specific IP addresses on this host: ```console $ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 ``` -The Docker client will honor the `DOCKER_HOST` environment variable to set the +The Docker client honors the `DOCKER_HOST` environment variable to set the `-H` flag for the client. Use **one** of the following commands: ```console @@ -236,7 +240,7 @@ $ export DOCKER_TLS_VERIFY=1 $ docker ps ``` -The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +The Docker client honors the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes precedence over `HTTP_PROXY`. @@ -258,29 +262,28 @@ supported. If your key is protected with passphrase, you need to set up > **Warning** > -> Changing the default `docker` daemon binding to a -> TCP port or Unix *docker* user group will increase your security risks -> by allowing non-root users to gain *root* access on the host. Make sure -> you control access to `docker`. If you are binding -> to a TCP port, anyone with access to that port has full Docker access; -> so it is not advisable on an open network. -{: .warning :} - -With `-H` it is possible to make the Docker daemon to listen on a -specific IP and port. By default, it will listen on -`unix:///var/run/docker.sock` to allow only local connections by the -*root* user. 
You *could* set it to `0.0.0.0:2375` or a specific host IP -to give access to everybody, but that is **not recommended** because -then it is trivial for someone to gain root access to the host where the -daemon is running. +> Changing the default `docker` daemon binding to a TCP port or Unix `docker` +> user group introduces security risks, as it may allow non-root users to gain +> root access on the host. Make sure you control access to `docker`. If you are +> binding to a TCP port, anyone with access to that port has full Docker +> access; so it's not advisable on an open network. +{ .warning } + +With `-H` it's possible to make the Docker daemon to listen on a specific IP +and port. By default, it listens on `unix:///var/run/docker.sock` to allow +only local connections by the root user. You could set it to `0.0.0.0:2375` or +a specific host IP to give access to everybody, but that isn't recommended +because someone could gain root access to the host where the daemon is running. Similarly, the Docker client can use `-H` to connect to a custom port. -The Docker client will default to connecting to `unix:///var/run/docker.sock` +The Docker client defaults to connecting to `unix:///var/run/docker.sock` on Linux, and `tcp://127.0.0.1:2376` on Windows. `-H` accepts host and port assignment in the following format: - tcp://[host]:[port][path] or unix://path +```text +tcp://[host]:[port][path] or unix://path +``` For example: @@ -293,7 +296,7 @@ For example: - `unix://path/to/socket` -> Unix socket located at `path/to/socket` -`-H`, when empty, will default to the same value as +`-H`, when empty, defaults to the same value as when no `-H` was passed in. `-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` @@ -324,7 +327,7 @@ $ docker -H tcp://127.0.0.1:2375 pull ubuntu ### Daemon storage-driver On Linux, the Docker daemon has support for several different image layer storage -drivers: `overlay2`, `fuse-overlayfs`, `btrfs`, `zfs`, and `devicemapper`. +drivers: `overlay2`, `fuse-overlayfs`, `btrfs`, and `zfs`. `overlay2` is the preferred storage driver for all currently supported Linux distributions, and is selected by default. Unless users have a strong reason to prefer another storage driver, @@ -337,448 +340,15 @@ On Windows, the Docker daemon only supports the `windowsfilter` storage driver. ### Options per storage driver Particular storage-driver can be configured with options specified with -`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`, -options for `zfs` start with `zfs`, and options for `btrfs` start with `btrfs`. - -#### Devicemapper options - -This is an example of the configuration file for devicemapper on Linux: - -```json -{ - "storage-driver": "devicemapper", - "storage-opts": [ - "dm.thinpooldev=/dev/mapper/thin-pool", - "dm.use_deferred_deletion=true", - "dm.use_deferred_removal=true" - ] -} -``` - -##### `dm.thinpooldev` - -Specifies a custom block storage device to use for the thin pool. - -If using a block device for device mapper storage, it is best to use `lvm` -to create and manage the thin-pool volume. This volume is then handed to Docker -to exclusively create snapshot volumes needed for images and containers. - -Managing the thin-pool outside of Engine makes for the most feature-rich -method of having Docker utilize device mapper thin provisioning as the -backing storage for Docker containers. 
The highlights of the lvm-based -thin-pool management feature include: automatic or interactive thin-pool -resize support, dynamically changing thin-pool features, automatic thinp -metadata checking when lvm activates the thin-pool, etc. - -As a fallback if no thin pool is provided, loopback files are -created. Loopback is very slow, but can be used without any -pre-configuration of storage. It is strongly recommended that you do -not use loopback in production. Ensure your Engine daemon has a -`--storage-opt dm.thinpooldev` argument provided. - -###### Example: - -```console -$ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool -``` - -##### `dm.directlvm_device` - -As an alternative to providing a thin pool as above, Docker can setup a block -device for you. - -###### Example: - -```console -$ sudo dockerd --storage-opt dm.directlvm_device=/dev/xvdf -``` - -##### `dm.thinp_percent` - -Sets the percentage of passed in block device to use for storage. - -###### Example: - -```console -$ sudo dockerd --storage-opt dm.thinp_percent=95 -``` - -##### `dm.thinp_metapercent` - -Sets the percentage of the passed in block device to use for metadata storage. - -###### Example: - -```console -$ sudo dockerd --storage-opt dm.thinp_metapercent=1 -``` - -##### `dm.thinp_autoextend_threshold` - -Sets the value of the percentage of space used before `lvm` attempts to -autoextend the available space [100 = disabled] - -###### Example: - -```console -$ sudo dockerd --storage-opt dm.thinp_autoextend_threshold=80 -``` - -##### `dm.thinp_autoextend_percent` - -Sets the value percentage value to increase the thin pool by when `lvm` -attempts to autoextend the available space [100 = disabled] - -###### Example: - -```console -$ sudo dockerd --storage-opt dm.thinp_autoextend_percent=20 -``` - - -##### `dm.basesize` - -Specifies the size to use when creating the base device, which limits the -size of images and containers. The default value is 10G. Note, thin devices -are inherently "sparse", so a 10G device which is mostly empty doesn't use -10 GB of space on the pool. However, the filesystem will use more space for -the empty case the larger the device is. - -The base device size can be increased at daemon restart which will allow -all future images and containers (based on those new images) to be of the -new base device size. - -###### Examples - -```console -$ sudo dockerd --storage-opt dm.basesize=50G -``` - -This will increase the base device size to 50G. The Docker daemon will throw an -error if existing base device size is larger than 50G. A user can use -this option to expand the base device size however shrinking is not permitted. - -This value affects the system-wide "base" empty filesystem -that may already be initialized and inherited by pulled images. Typically, -a change to this value requires additional steps to take effect: - -```console -$ sudo service docker stop - -$ sudo rm -rf /var/lib/docker - -$ sudo service docker start -``` - - -##### `dm.loopdatasize` - -> **Note** -> -> This option configures devicemapper loopback, which should not -> be used in production. - -Specifies the size to use when creating the loopback file for the -"data" device which is used for the thin pool. The default size is -100G. The file is sparse, so it will not initially take up this -much space. 
- -###### Example - -```console -$ sudo dockerd --storage-opt dm.loopdatasize=200G -``` - -##### `dm.loopmetadatasize` - -> **Note** -> -> This option configures devicemapper loopback, which should not -> be used in production. - -Specifies the size to use when creating the loopback file for the -"metadata" device which is used for the thin pool. The default size -is 2G. The file is sparse, so it will not initially take up -this much space. - -###### Example - -```console -$ sudo dockerd --storage-opt dm.loopmetadatasize=4G -``` - -##### `dm.fs` - -Specifies the filesystem type to use for the base device. The supported -options are "ext4" and "xfs". The default is "xfs" - -###### Example - -```console -$ sudo dockerd --storage-opt dm.fs=ext4 -``` - -##### `dm.mkfsarg` - -Specifies extra mkfs arguments to be used when creating the base device. - -###### Example - -```console -$ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal" -``` - -##### `dm.mountopt` - -Specifies extra mount options used when mounting the thin devices. - -###### Example - -```console -$ sudo dockerd --storage-opt dm.mountopt=nodiscard -``` - -##### `dm.datadev` - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for data for the thin pool. - -If using a block device for device mapper storage, ideally both `datadev` and -`metadatadev` should be specified to completely avoid using the loopback -device. - -###### Example - -```console -$ sudo dockerd \ - --storage-opt dm.datadev=/dev/sdb1 \ - --storage-opt dm.metadatadev=/dev/sdc1 -``` - -##### `dm.metadatadev` - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for metadata for the thin pool. - -For best performance the metadata should be on a different spindle than the -data, or even better on an SSD. - -If setting up a new metadata pool it is required to be valid. This can be -achieved by zeroing the first 4k to indicate empty metadata, like this: - -```console -$ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 -``` - -###### Example - -```console -$ sudo dockerd \ - --storage-opt dm.datadev=/dev/sdb1 \ - --storage-opt dm.metadatadev=/dev/sdc1 -``` - -##### `dm.blocksize` - -Specifies a custom blocksize to use for the thin pool. The default -blocksize is 64K. - -###### Example - -```console -$ sudo dockerd --storage-opt dm.blocksize=512K -``` - -##### `dm.blkdiscard` - -Enables or disables the use of `blkdiscard` when removing devicemapper -devices. This is enabled by default (only) if using loopback devices and is -required to resparsify the loopback file on image/container removal. - -Disabling this on loopback can lead to *much* faster container removal -times, but will make the space used in `/var/lib/docker` directory not be -returned to the system for other use when containers are removed. - -###### Examples - -```console -$ sudo dockerd --storage-opt dm.blkdiscard=false -``` - -##### `dm.override_udev_sync_check` - -Overrides the `udev` synchronization checks between `devicemapper` and `udev`. -`udev` is the device manager for the Linux kernel. - -To view the `udev` sync support of a Docker daemon that is using the -`devicemapper` driver, run: - -```console -$ docker info -<...> -Udev Sync Supported: true -<...> -``` - -When `udev` sync support is `true`, then `devicemapper` and udev can -coordinate the activation and deactivation of devices for containers. - -When `udev` sync support is `false`, a race condition occurs between -the`devicemapper` and `udev` during create and cleanup. 
The race condition -results in errors and failures. (For information on these failures, see -[docker#4036](https://github.com/docker/docker/issues/4036)) - -To allow the `docker` daemon to start, regardless of `udev` sync not being -supported, set `dm.override_udev_sync_check` to true: - -```console -$ sudo dockerd --storage-opt dm.override_udev_sync_check=true -``` - -When this value is `true`, the `devicemapper` continues and simply warns -you the errors are happening. - -> **Note** -> -> The ideal is to pursue a `docker` daemon and environment that does -> support synchronizing with `udev`. For further discussion on this -> topic, see [docker#4036](https://github.com/docker/docker/issues/4036). -> Otherwise, set this flag for migrating existing Docker daemons to -> a daemon with a supported environment. - -##### `dm.use_deferred_removal` - -Enables use of deferred device removal if `libdm` and the kernel driver -support the mechanism. - -Deferred device removal means that if device is busy when devices are -being removed/deactivated, then a deferred removal is scheduled on -device. And devices automatically go away when last user of the device -exits. - -For example, when a container exits, its associated thin device is removed. -If that device has leaked into some other mount namespace and can't be -removed, the container exit still succeeds and this option causes the -system to schedule the device for deferred removal. It does not wait in a -loop trying to remove a busy device. - -###### Example - -```console -$ sudo dockerd --storage-opt dm.use_deferred_removal=true -``` - -##### `dm.use_deferred_deletion` - -Enables use of deferred device deletion for thin pool devices. By default, -thin pool device deletion is synchronous. Before a container is deleted, -the Docker daemon removes any associated devices. If the storage driver -can not remove a device, the container deletion fails and daemon returns. - -```console -Error deleting container: Error response from daemon: Cannot destroy container -``` - -To avoid this failure, enable both deferred device deletion and deferred -device removal on the daemon. - -```console -$ sudo dockerd \ - --storage-opt dm.use_deferred_deletion=true \ - --storage-opt dm.use_deferred_removal=true -``` - -With these two options enabled, if a device is busy when the driver is -deleting a container, the driver marks the device as deleted. Later, when -the device isn't in use, the driver deletes it. - -In general it should be safe to enable this option by default. It will help -when unintentional leaking of mount point happens across multiple mount -namespaces. - -##### `dm.min_free_space` - -Specifies the min free space percent in a thin pool require for new device -creation to succeed. This check applies to both free data space as well -as free metadata space. Valid values are from 0% - 99%. Value 0% disables -free space checking logic. If user does not specify a value for this option, -the Engine uses a default value of 10%. - -Whenever a new a thin pool device is created (during `docker pull` or during -container creation), the Engine checks if the minimum free space is -available. If sufficient space is unavailable, then device creation fails -and any relevant `docker` operation fails. - -To recover from this error, you must create more free space in the thin pool -to recover from the error. You can create free space by deleting some images -and containers from the thin pool. You can also add more storage to the thin -pool. 
- -To add more space to a LVM (logical volume management) thin pool, just add -more storage to the volume group container thin pool; this should automatically -resolve any errors. If your configuration uses loop devices, then stop the -Engine daemon, grow the size of loop files and restart the daemon to resolve -the issue. - -###### Example - -```console -$ sudo dockerd --storage-opt dm.min_free_space=10% -``` - -##### `dm.xfs_nospace_max_retries` - -Specifies the maximum number of retries XFS should attempt to complete -IO when ENOSPC (no space) error is returned by underlying storage device. - -By default XFS retries infinitely for IO to finish and this can result -in unkillable process. To change this behavior one can set -xfs_nospace_max_retries to say 0 and XFS will not retry IO after getting -ENOSPC and will shutdown filesystem. - -###### Example - -```console -$ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 -``` - -##### `dm.libdm_log_level` - -Specifies the maxmimum `libdm` log level that will be forwarded to the -`dockerd` log (as specified by `--log-level`). This option is primarily -intended for debugging problems involving `libdm`. Using values other than the -defaults may cause false-positive warnings to be logged. - -Values specified must fall within the range of valid `libdm` log levels. At the -time of writing, the following is the list of `libdm` log levels as well as -their corresponding levels when output by `dockerd`. - -| `libdm` Level | Value | `--log-level` | -|---------------|------:|---------------| -| `_LOG_FATAL` | 2 | error | -| `_LOG_ERR` | 3 | error | -| `_LOG_WARN` | 4 | warn | -| `_LOG_NOTICE` | 5 | info | -| `_LOG_INFO` | 6 | info | -| `_LOG_DEBUG` | 7 | debug | - -###### Example - -```console -$ sudo dockerd \ - --log-level debug \ - --storage-opt dm.libdm_log_level=7 -``` +`--storage-opt` flags. Options for `zfs` start with `zfs`, and options for +`btrfs` start with `btrfs`. #### ZFS options ##### `zfs.fsname` -Set zfs filesystem under which docker will create its own datasets. -By default docker will pick up the zfs filesystem where docker graph -(`/var/lib/docker`) is located. +Specifies the ZFS filesystem that the daemon should use to create its datasets. +By default, the ZFS filesystem in `/var/lib/docker` is used. ###### Example @@ -792,8 +362,8 @@ $ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker Specifies the minimum size to use when creating the subvolume which is used for containers. If user uses disk quota for btrfs when creating or running -a container with **--storage-opt size** option, docker should ensure the -**size** cannot be smaller than **btrfs.min_space**. +a container with **--storage-opt size** option, Docker should ensure the +**size** can't be smaller than **btrfs.min_space**. ###### Example @@ -806,8 +376,8 @@ $ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G ##### `overlay2.size` Sets the default max size of the container. It is supported only when the -backing fs is `xfs` and mounted with `pquota` mount option. Under these -conditions the user can pass any size less than the backing fs size. +backing filesystem is `xfs` and mounted with `pquota` mount option. Under these +conditions the user can pass any size less than the backing filesystem size. 
###### Example @@ -1119,7 +689,7 @@ To set the DNS search domain for all Docker containers, use: $ sudo dockerd --dns-search example.com ``` -### Allow push of nondistributable artifacts +### Allow push of non-distributable artifacts Some images (e.g., Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted @@ -1129,38 +699,41 @@ To override this behavior for specific registries, use the `--allow-nondistributable-artifacts` option in one of the following forms: * `--allow-nondistributable-artifacts myregistry:5000` tells the Docker daemon - to push nondistributable artifacts to myregistry:5000. + to push non-distributable artifacts to myregistry:5000. * `--allow-nondistributable-artifacts 10.1.0.0/16` tells the Docker daemon to - push nondistributable artifacts to all registries whose resolved IP address + push non-distributable artifacts to all registries whose resolved IP address is within the subnet described by the CIDR syntax. This option can be used multiple times. -This option is useful when pushing images containing nondistributable artifacts +This option is useful when pushing images containing non-distributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. -> **Warning**: Nondistributable artifacts typically have restrictions on how +> **Warning** +> +> Non-distributable artifacts typically have restrictions on how > and where they can be distributed and shared. Only use this feature to push > artifacts to private registries and ensure that you are in compliance with -> any terms that cover redistributing nondistributable artifacts. +> any terms that cover redistributing non-distributable artifacts. +{ .warning } ### Insecure registries -Docker considers a private registry either secure or insecure. In the rest of -this section, *registry* is used for *private registry*, and `myregistry:5000` -is a placeholder example for a private registry. +In this section, "registry" refers to a private registry, and `myregistry:5000` +is a placeholder example of a private registry. +Docker considers a private registry either secure or insecure. A secure registry uses TLS and a copy of its CA certificate is placed on the Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure registry is either not using TLS (i.e., listening on plain text HTTP), or is using TLS with a CA certificate not known by the Docker daemon. The latter can -happen when the certificate was not found under +happen when the certificate wasn't found under `/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification failed (i.e., wrong CA). -By default, Docker assumes all, but local (see local registries below), -registries are secure. Communicating with an insecure registry is not possible +By default, Docker assumes all registries to be secure, except for local registries. +Communicating with an insecure registry isn't possible if Docker assumes that registry is secure. In order to communicate with an insecure registry, the Docker daemon requires `--insecure-registry` in one of the following two forms: @@ -1174,34 +747,33 @@ the following two forms: The flag can be used multiple times to allow multiple registries to be marked as insecure. 
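+
+For example, to mark the placeholder registry used in this section as insecure,
+you could start the daemon as follows (a sketch; substitute your own registry
+address, which can also be listed under the `insecure-registries` key in
+`daemon.json`):
+
+```console
+$ sudo dockerd --insecure-registry myregistry:5000
+```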
-If an insecure registry is not marked as insecure, `docker pull`, -`docker push`, and `docker search` will result in an error message prompting +If an insecure registry isn't marked as insecure, `docker pull`, +`docker push`, and `docker search` result in error messages, prompting the user to either secure or pass the `--insecure-registry` flag to the Docker daemon as described above. Local registries, whose IP address falls in the 127.0.0.0/8 range, are -automatically marked as insecure as of Docker 1.3.2. It is not recommended to +automatically marked as insecure as of Docker 1.3.2. It isn't recommended to rely on this, as it may change in the future. Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted -communication, can be useful when running a local registry. However, -because its use creates security vulnerabilities it should ONLY be enabled for -testing purposes. For increased security, users should add their CA to their +communication, can be useful when running a local registry. However, +because its use creates security vulnerabilities it should only be enabled for +testing purposes. For increased security, users should add their CA to their system's list of trusted CAs instead of enabling `--insecure-registry`. #### Legacy Registries Operations against registries supporting only the legacy v1 protocol are no longer -supported. Specifically, the daemon will not attempt `push`, `pull` and `login` +supported. Specifically, the daemon doesn't attempt to push, pull or sign in to v1 registries. The exception to this is `search` which can still be performed on v1 registries. - ### Running a Docker daemon behind an HTTPS_PROXY -When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub -certificates will be replaced by the proxy's certificates. These certificates -need to be added to your Docker host's configuration: +When running inside a LAN that uses an `HTTPS` proxy, the proxy's certificates +replace Docker Hub's certificates. These certificates must be added to your +Docker host's configuration: 1. Install the `ca-certificates` package for your distribution 2. Ask your network admin for the proxy's CA certificate and append them to @@ -1210,21 +782,20 @@ need to be added to your Docker host's configuration: The `username:` and `password@` are optional - and are only needed if your proxy is set up to require authentication. -This will only add the proxy and authentication to the Docker daemon's requests - -your `docker build`s and running containers will need extra configuration to -use the proxy +This only adds the proxy and authentication to the Docker daemon's requests. +To use the proxy when building images and running containers, see +[Configure Docker to use a proxy server](https://docs.docker.com/network/proxy/) ### Default `ulimit` settings -`--default-ulimit` allows you to set the default `ulimit` options to use for +The `--default-ulimit` flag lets you set the default `ulimit` options to use for all containers. It takes the same options as `--ulimit` for `docker run`. If -these defaults are not set, `ulimit` settings will be inherited, if not set on -`docker run`, from the Docker daemon. Any `--ulimit` options passed to -`docker run` will overwrite these defaults. +these defaults aren't set, `ulimit` settings are inherited from the Docker daemon. +Any `--ulimit` options passed to `docker run` override the daemon defaults. 
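+
+For example, to raise the default open-file limit for all containers (the
+`soft:hard` value below is only an illustration, not a recommendation):
+
+```console
+$ sudo dockerd --default-ulimit nofile=65535:65535
+```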
-Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to
-set the maximum number of processes available to a user, not to a container. For details
-please check the [run](run.md) reference.
+Be careful setting `nproc` with the `ulimit` flag, as `nproc` is designed by Linux to
+set the maximum number of processes available to a user, not to a container.
+For details, see [`docker run` reference](run.md#ulimit).
 
 ### Access authorization
 
@@ -1250,17 +821,16 @@ allow the request for it to complete.
 For information about how to create an authorization plugin, refer to the
 [authorization plugin](../../extend/plugins_authorization.md) section.
 
-
 ### Daemon user namespace options
 
 The Linux kernel [user namespace support](https://man7.org/linux/man-pages/man7/user_namespaces.7.html)
 provides additional security by enabling a process, and therefore a container,
 to have a unique range of user and group IDs which are outside the traditional
-user and group range utilized by the host system. Potentially the most important
-security improvement is that, by default, container processes running as the
-`root` user will have expected administrative privilege (with some restrictions)
-inside the container but will effectively be mapped to an unprivileged `uid` on
+user and group range utilized by the host system. One of the most important
+security improvements is that, by default, container processes running as the
+`root` user have the administrative privileges they expect (with some restrictions)
+inside the container, but are effectively mapped to an unprivileged `uid` on
 the host.
 
 For details about how to use this feature, as well as limitations, see
@@ -1285,32 +855,64 @@ $ docker run -it --add-host host.docker.internal:host-gateway \
 PING host.docker.internal (192.0.2.0): 56 data bytes
 ```
 
+### Enable CDI devices
+
+> **Note**
+>
+> This is an experimental feature and as such doesn't represent a stable API.
+>
+> This feature isn't enabled by default. To enable this feature, set `features.cdi` to
+> `true` in the `daemon.json` configuration file.
+
+Container Device Interface (CDI) is a
+[standardized](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md)
+mechanism for container runtimes to create containers which are able to
+interact with third-party devices.
+
+The Docker daemon supports running containers with CDI devices if the requested
+device specifications are available on the filesystem of the daemon.
+
+The default specification directories are:
+
+- `/etc/cdi/` for static CDI Specs
+- `/var/run/cdi` for generated CDI Specs
+
+Alternatively, you can set custom locations for CDI specifications using the
+`cdi-spec-dirs` option in the `daemon.json` configuration file, or the
+`--cdi-spec-dir` flag for the `dockerd` CLI.
+
+```json
+{
+  "features": {
+    "cdi": true
+  },
+  "cdi-spec-dirs": ["/etc/cdi/", "/var/run/cdi"]
+}
+```
+
+When CDI is enabled for a daemon, you can view the configured CDI specification
+directories using the `docker info` command.
+
 ### Miscellaneous options
 
 IP masquerading uses address translation to allow containers without a public
-IP to talk to other machines on the Internet. This may interfere with some
-network topologies and can be disabled with `--ip-masq=false`.
+IP to talk to other machines on the internet. This may interfere with some
+network topologies, and can be disabled with `--ip-masq=false`.
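+
+For example, a minimal invocation that starts the daemon with IP masquerading
+disabled:
+
+```console
+$ sudo dockerd --ip-masq=false
+```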
-Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and +Docker supports soft links for the Docker data directory (`/var/lib/docker`) and for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be set like this: -```console -$ DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd --data-root /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 -``` - -or - ```console $ export DOCKER_TMPDIR=/mnt/disk2/tmp -$ /usr/local/bin/dockerd --data-root /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 +$ sudo -E dockerd --data-root /var/lib/docker -H unix:// ```` #### Default cgroup parent -The `--cgroup-parent` option allows you to set the default cgroup parent -to use for containers. If this option is not set, it defaults to `/docker` for -fs cgroup driver and `system.slice` for systemd cgroup driver. +The `--cgroup-parent` option lets you set the default cgroup parent +for containers. If this option isn't set, it defaults to `/docker` for +the cgroupfs driver, and `system.slice` for the systemd cgroup driver. If the cgroup has a leading forward slash (`/`), the cgroup is created under the root cgroup, otherwise the cgroup is created under the daemon @@ -1321,7 +923,7 @@ Assuming the daemon is running in cgroup `daemoncgroup`, `/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar` creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` -The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd +The systemd cgroup driver has different rules for `--cgroup-parent`. systemd represents hierarchy by slice and the name of the slice encodes the location in the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A name can consist of a dash-separated series of names, which describes the path @@ -1335,7 +937,7 @@ the `--cgroup-parent` option on the daemon. #### Daemon metrics -The `--metrics-addr` option takes a tcp address to serve the metrics API. +The `--metrics-addr` option takes a TCP address to serve the metrics API. This feature is still experimental, therefore, the daemon must be running in experimental mode for this feature to work. @@ -1345,34 +947,24 @@ allowing you to make requests on the API at `127.0.0.1:9323/metrics` to receive Port `9323` is the [default port associated with Docker metrics](https://github.com/prometheus/prometheus/wiki/Default-port-allocations) -to avoid collisions with other prometheus exporters and services. - -If you are running a prometheus server you can add this address to your scrape configs -to have prometheus collect metrics on Docker. For more information -on prometheus refer to the [prometheus website](https://prometheus.io/). +to avoid collisions with other Prometheus exporters and services. -```yaml -scrape_configs: - - job_name: 'docker' - static_configs: - - targets: ['127.0.0.1:9323'] -``` - -Please note that this feature is still marked as experimental as metrics and metric -names could change while this feature is still in experimental. Please provide -feedback on what you would like to see collected in the API. +If you are running a Prometheus server you can add this address to your scrape configs +to have Prometheus collect metrics on Docker. For more information, see +[Collect Docker metrics with Prometheus](https://docs.docker.com/config/daemon/prometheus/). 
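+
+For example, a sketch of a `daemon.json` configuration that serves the metrics
+API on the address shown above (the `experimental` key is required while this
+feature is experimental):
+
+```json
+{
+  "experimental": true,
+  "metrics-addr": "127.0.0.1:9323"
+}
+```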
-#### Node Generic Resources +#### Node generic resources The `--node-generic-resources` option takes a list of key-value pair (`key=value`) that allows you to advertise user defined resources -in a swarm cluster. +in a Swarm cluster. The current expected use case is to advertise NVIDIA GPUs so that services requesting `NVIDIA-GPU=[0-16]` can land on a node that has enough GPUs for the task to run. Example of usage: + ```json { "node-generic-resources": [ @@ -1390,8 +982,8 @@ except for flags that allow several entries, where it uses the plural of the flag name, e.g., `labels` for the `label` flag. The options set in the configuration file must not conflict with options set -via flags. The docker daemon fails to start if an option is duplicated between -the file and the flags, regardless of their value. We do this to avoid +using flags. The Docker daemon fails to start if an option is duplicated between +the file and the flags, regardless of their value. This is intentional, and avoids silently ignore changes introduced in configuration reloads. For example, the daemon fails to start if you set daemon labels in the configuration file and also set daemon labels via the `--label` flag. @@ -1415,14 +1007,13 @@ $ echo $? 1 ``` - ##### On Linux The default location of the configuration file on Linux is -`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a - non-default location. +`/etc/docker/daemon.json`. Use the `--config-file` flag to specify a +non-default location. -This is a full example of the allowed configuration options on Linux: +The following is a full example of the allowed configuration options on Linux: ```json { @@ -1431,6 +1022,17 @@ This is a full example of the allowed configuration options on Linux: "authorization-plugins": [], "bip": "", "bridge": "", + "builder": { + "gc": { + "enabled": true, + "defaultKeepStorage": "10GB", + "policy": [ + { "keepStorage": "10GB", "filter": ["unused-for=2200h"] }, + { "keepStorage": "50GB", "filter": ["unused-for=3300h"] }, + { "keepStorage": "100GB", "all": true } + ] + } + }, "cgroup-parent": "", "containerd": "/run/containerd/containerd.sock", "containerd-namespace": "docker", @@ -1541,22 +1143,22 @@ This is a full example of the allowed configuration options on Linux: } ``` -> **Note:** +> **Note** > -> You cannot set options in `daemon.json` that have already been set on +> You can't set options in `daemon.json` that have already been set on > daemon startup as a flag. -> On systems that use `systemd` to start the Docker daemon, `-H` is already set, so -> you cannot use the `hosts` key in `daemon.json` to add listening addresses. -> See ["custom Docker daemon options"](https://docs.docker.com/config/daemon/systemd/#custom-docker-daemon-options) for how -> to accomplish this task with a systemd drop-in file. +> On systems that use systemd to start the Docker daemon, `-H` is already set, so +> you can't use the `hosts` key in `daemon.json` to add listening addresses. +> See [custom Docker daemon options](https://docs.docker.com/config/daemon/systemd/#custom-docker-daemon-options) +> for an example on how to configure the daemon using systemd drop-in files. ##### On Windows The default location of the configuration file on Windows is - `%programdata%\docker\config\daemon.json`. The `--config-file` flag can be - used to specify a non-default location. +`%programdata%\docker\config\daemon.json`. Use the `--config-file` flag +to specify a non-default location. 
-This is a full example of the allowed configuration options on Windows: +The following is a full example of the allowed configuration options on Windows: ```json { @@ -1602,7 +1204,8 @@ This is a full example of the allowed configuration options on Windows: } ``` -The `default-runtime` option is by default unset, in which case dockerd will auto-detect the runtime. This detection is currently based on if the `containerd` flag is set. +The `default-runtime` option is by default unset, in which case dockerd automatically detects the runtime. +This detection is based on if the `containerd` flag is set. Accepted values: @@ -1610,60 +1213,69 @@ Accepted values: - `io.containerd.runhcs.v1` - This is uses the containerd `runhcs` shim to run the container and uses the v2 HCS API's in Windows. #### Feature options -The optional field `features` in `daemon.json` allows users to enable or disable specific -daemon features. For example, `{"features":{"buildkit": true}}` enables `buildkit` as the -default docker image builder. -The list of currently supported feature options: -- `buildkit`: It enables `buildkit` as default builder when set to `true` or disables it by -`false`. Note that if this option is not explicitly set in the daemon config file, then it -is up to the cli to determine which builder to invoke. +The optional field `features` in `daemon.json` lets you enable or disable specific +daemon features. + +```json +{ + "features": { + "some-feature": true, + "some-disabled-feature-enabled-by-default": false + } +} +``` + +The list of feature options include: + +- `containerd-snapshotter`: when set to `true`, the daemon uses containerd + snapshotters instead of the classic storage drivers for storing image and + container data. For more information, see + [containerd storage](https://docs.docker.com/storage/containerd/). #### Configuration reload behavior Some options can be reconfigured when the daemon is running without requiring -to restart the process. We use the `SIGHUP` signal in Linux to reload, and a global event -in Windows with the key `Global\docker-daemon-config-$PID`. The options can -be modified in the configuration file but still will check for conflicts with -the provided flags. The daemon fails to reconfigure itself -if there are conflicts, but it won't stop execution. +to restart the process. The daemon uses the `SIGHUP` signal in Linux to reload, +and a global event in Windows with the key `Global\docker-daemon-config-$PID`. +You can modify the options in the configuration file, but the daemon still +checks for conflicting settings with the specified CLI flags. The daemon fails +to reconfigure itself if there are conflicts, but it won't stop execution. The list of currently supported options that can be reconfigured is this: -- `debug`: it changes the daemon to debug mode when set to true. -- `labels`: it replaces the daemon labels with a new set of labels. -- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/config/containers/live-restore/). -- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull. -- `max-concurrent-uploads`: it updates the max concurrent uploads for each push. -- `max-download-attempts`: it updates the max download attempts for each pull. -- `default-runtime`: it updates the runtime to be used if not is - specified at container creation. It defaults to "default" which is - the runtime shipped with the official docker packages. 
-- `runtimes`: it updates the list of available OCI runtimes that can
-  be used to run containers.
-- `authorization-plugin`: it specifies the authorization plugins to use.
-- `allow-nondistributable-artifacts`: Replaces the set of registries to which the daemon will push nondistributable artifacts with a new set of registries.
-- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure registries, these existing ones will be removed from daemon's config.
-- `registry-mirrors`: it replaces the daemon registry mirrors with a new set of registry mirrors. If some existing registry mirrors in daemon's configuration are not in newly reloaded registry mirrors, these existing ones will be removed from daemon's config.
-- `shutdown-timeout`: it replaces the daemon's existing configuration timeout with a new timeout for shutting down all containers.
-- `features`: it explicitly enables or disables specific features.
+| Option                             | Description                                                                                |
+| ---------------------------------- | ------------------------------------------------------------------------------------------ |
+| `debug`                            | Toggles debug mode of the daemon.                                                          |
+| `labels`                           | Replaces the daemon labels with a new set of labels.                                       |
+| `live-restore`                     | Toggles [live restore](https://docs.docker.com/config/containers/live-restore/).           |
+| `max-concurrent-downloads`         | Configures the max concurrent downloads for each pull.                                     |
+| `max-concurrent-uploads`           | Configures the max concurrent uploads for each push.                                       |
+| `max-download-attempts`            | Configures the max download attempts for each pull.                                        |
+| `default-runtime`                  | Configures the runtime to use if one isn't specified at container creation.                |
+| `runtimes`                         | Configures the list of available OCI runtimes that can be used to run containers.          |
+| `authorization-plugin`             | Specifies the authorization plugins to use.                                                |
+| `allow-nondistributable-artifacts` | Specifies a list of registries to which the daemon will push non-distributable artifacts.  |
+| `insecure-registries`              | Specifies a list of registries that the daemon should consider insecure.                   |
+| `registry-mirrors`                 | Specifies a list of registry mirrors.                                                      |
+| `shutdown-timeout`                 | Configures the timeout for shutting down all containers.                                   |
+| `features`                         | Enables or disables specific features.                                                     |
 
 ### Run multiple daemons
 
-> **Note:**
+> **Note**
 >
-> Running multiple daemons on a single host is considered as "experimental". The user should be aware of
-> unsolved problems. This solution may not work properly in some cases. Solutions are currently under development
-> and will be delivered in the near future.
+> Running multiple daemons on a single host is considered experimental.
+> You may encounter unsolved problems, and things may not work as expected in some cases.
 
 This section describes how to run multiple Docker daemons on a single host. To
-run multiple daemons, you must configure each daemon so that it does not
+run multiple daemons, you must configure each daemon so that it doesn't
 conflict with other daemons on the same host. You can set these options either
 by providing them as flags, or by using a [daemon configuration
 file](#daemon-configuration-file).
The following daemon options must be configured for each daemon: -```console +```text -b, --bridge= Attach containers to a network bridge --exec-root=/var/run/docker Root of the Docker execdriver --data-root=/var/lib/docker Root of persisted Docker data @@ -1677,30 +1289,32 @@ The following daemon options must be configured for each daemon: ``` When your daemons use different values for these flags, you can run them on the same host without any problems. -It is very important to properly understand the meaning of those options and to use them correctly. - -- The `-b, --bridge=` flag is set to `docker0` as default bridge network. It is created automatically when you install Docker. -If you are not using the default, you must create and configure the bridge manually or just set it to 'none': `--bridge=none` -- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for -your running daemon here. +It is important that you understand the meaning of these options and to use them correctly. + +- The `-b, --bridge=` flag is set to `docker0` as default bridge network. + It is created automatically when you install Docker. + If you aren't using the default, you must create and configure the bridge manually, or set it to 'none': `--bridge=none` +- `--exec-root` is the path where the container state is stored. + The default value is `/var/run/docker`. + Specify the path for your running daemon here. - `--data-root` is the path where persisted data such as images, volumes, and -cluster state are stored. The default value is `/var/lib/docker`. To avoid any -conflict with other daemons, set this parameter separately for each daemon. -- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your -pid file here. -- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`. -- `--iptables=false` prevents the Docker daemon from adding iptables rules. If -multiple daemons manage iptables rules, they may overwrite rules set by another -daemon. Be aware that disabling this option requires you to manually add -iptables rules to expose container ports. If you prevent Docker from adding -iptables rules, Docker will also not add IP masquerading rules, even if you set -`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be -able to connect to external hosts or the internet when using network other than -default bridge. -- `--config-file=/etc/docker/daemon.json` is the path where configuration file is stored. You can use it instead of -daemon flags. Specify the path for each daemon. + cluster state are stored. The default value is `/var/lib/docker`. To avoid any + conflict with other daemons, set this parameter separately for each daemon. +- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. + Specify the path for your PID file here. +- `--host=[]` specifies where the Docker daemon listens for client connections. + If unspecified, it defaults to `/var/run/docker.sock`. +- `--iptables=false` prevents the Docker daemon from adding iptables rules. If + multiple daemons manage iptables rules, they may overwrite rules set by another + daemon. Be aware that disabling this option requires you to manually add + iptables rules to expose container ports. 
If you prevent Docker from adding
+  iptables rules, Docker also doesn't add IP masquerading rules, even if you set
+  `--ip-masq` to `true`. Without IP masquerading rules, Docker containers can't
+  connect to external hosts or the internet when using a network other than the default bridge.
+- `--config-file=/etc/docker/daemon.json` is the path where the configuration file is stored.
+  You can use it instead of daemon flags. Specify the path for each daemon.
 - `--tls*` Docker daemon supports `--tlsverify` mode that enforces encrypted and authenticated remote connections.
-The `--tls*` options enable use of specific certificates for individual daemons.
+  The `--tls*` options enable the use of specific certificates for individual daemons.
 
 Example script for a separate “bootstrap” instance of the Docker daemon without network:
 
diff --git a/_vendor/github.com/docker/cli/docs/reference/run.md b/_vendor/github.com/docker/cli/docs/reference/run.md
index 200ef75f2..41fd2c0dc 100644
--- a/_vendor/github.com/docker/cli/docs/reference/run.md
+++ b/_vendor/github.com/docker/cli/docs/reference/run.md
@@ -1,199 +1,47 @@
 ---
-description: "Configure containers at runtime"
-keywords: "docker, run, configure, runtime"
+description: "Running and configuring containers with the Docker CLI"
+keywords: "docker, run, cli"
 aliases:
 - /reference/run/
+title: Running containers
 ---
 
-
-
-# Docker run reference
-
 Docker runs processes in isolated containers. A container is a process
-which runs on a host. The host may be local or remote. When an operator
-executes `docker run`, the container process that runs is isolated in
+which runs on a host. The host may be local or remote. When you
+execute `docker run`, the container process that runs is isolated in
 that it has its own file system, its own networking, and its own isolated
 process tree separate from the host.
 
-This page details how to use the `docker run` command to define the
-container's resources at runtime.
+This page details how to use the `docker run` command to run containers.
 
 ## General form
 
-The basic `docker run` command takes this form:
-
-    $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
-
-The `docker run` command must specify an [*IMAGE*](https://docs.docker.com/glossary/#image)
-to derive the container from. An image developer can define image
-defaults related to:
-
- * detached or foreground running
- * container identification
- * network settings
- * runtime constraints on CPU and memory
-
-With the `docker run [OPTIONS]` an operator can add to or override the
-image defaults set by a developer. And, additionally, operators can
-override nearly all the defaults set by the Docker runtime itself. The
-operator's ability to override image and Docker runtime defaults is why
-[*run*](commandline/run.md) has more options than any
-other `docker` command.
-
-To learn how to interpret the types of `[OPTIONS]`, see
-[*Option types*](commandline/cli.md#option-types).
-
-> **Note**
->
-> Depending on your Docker system configuration, you may be
-> required to preface the `docker run` command with `sudo`. To avoid
-> having to use `sudo` with the `docker` command, your system
-> administrator can create a Unix group called `docker` and add users to
-> it. For more information about this configuration, refer to the Docker
-> installation documentation for your operating system.
-
-
-## Operator exclusive options
-
-Only the operator (the person executing `docker run`) can set the
-following options.
- - - [Detached vs foreground](#detached-vs-foreground) - - [Detached (-d)](#detached--d) - - [Foreground](#foreground) - - [Container identification](#container-identification) - - [Name (--name)](#name---name) - - [PID equivalent](#pid-equivalent) - - [IPC settings (--ipc)](#ipc-settings---ipc) - - [Network settings](#network-settings) - - [Restart policies (--restart)](#restart-policies---restart) - - [Clean up (--rm)](#clean-up---rm) - - [Runtime constraints on resources](#runtime-constraints-on-resources) - - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities) - -## Detached vs foreground - -When starting a Docker container, you must first decide if you want to -run the container in the background in a "detached" mode or in the -default foreground mode: - - -d=false: Detached mode: Run container in the background, print new container id - -### Detached (-d) - -To start a container in detached mode, you use `-d=true` or just `-d` option. By -design, containers started in detached mode exit when the root process used to -run the container exits, unless you also specify the `--rm` option. If you use -`-d` with `--rm`, the container is removed when it exits **or** when the daemon -exits, whichever happens first. - -Do not pass a `service x start` command to a detached container. For example, this -command attempts to start the `nginx` service. - - $ docker run -d -p 80:80 my_image service nginx start - -This succeeds in starting the `nginx` service inside the container. However, it -fails the detached container paradigm in that, the root process (`service nginx -start`) returns and the detached container stops as designed. As a result, the -`nginx` service is started but could not be used. Instead, to start a process -such as the `nginx` web server do the following: - - $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' - -To do input/output with a detached container use network connections or shared -volumes. These are required because the container is no longer listening to the -command line where `docker run` was run. - -To reattach to a detached container, use `docker` -[*attach*](commandline/attach.md) command. - -### Foreground - -In foreground mode (the default when `-d` is not specified), `docker -run` can start the process in the container and attach the console to -the process's standard input, output, and standard error. It can even -pretend to be a TTY (this is what most command line executables expect) -and pass along signals. All of that is configurable: - - -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` - -t : Allocate a pseudo-tty - --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only) - -i : Keep STDIN open even if not attached - -If you do not specify `-a` then Docker will [attach to both stdout and stderr -]( https://github.com/docker/docker/blob/4118e0c9eebda2412a09ae66e90c34b85fae3275/runconfig/opts/parse.go#L267). -You can specify to which of the three standard streams (`STDIN`, `STDOUT`, -`STDERR`) you'd like to connect instead, as in: - -```console -$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash -``` - -For interactive processes (like a shell), you must use `-i -t` together in -order to allocate a tty for the container process. `-i -t` is often written `-it` -as you'll see in later examples. 
Specifying `-t` is forbidden when the client -is receiving its standard input from a pipe, as in: +A `docker run` command takes the following form: ```console -$ echo test | docker run -i busybox cat +$ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] ``` -> **Note** -> -> A process running as PID 1 inside a container is treated specially by Linux: -> it ignores any signal with the default action. As a result, the process will -> not terminate on `SIGINT` or `SIGTERM` unless it is coded to do so. - -## Container identification - -### Name (--name) - -The operator can identify a container in three ways: - -| Identifier type | Example value | -|:----------------------|:-------------------------------------------------------------------| -| UUID long identifier | "f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778" | -| UUID short identifier | "f78375b1c487" | -| Name | "evil_ptolemy" | - -The UUID identifiers come from the Docker daemon. If you do not assign a -container name with the `--name` option, then the daemon generates a random -string name for you. Defining a `name` can be a handy way to add meaning to a -container. If you specify a `name`, you can use it when referencing the -container within a Docker network. This works for both background and foreground -Docker containers. +The `docker run` command must specify an [image reference](#image-references) +to create the container from. -> **Note** -> -> Containers on the default bridge network must be linked to communicate by name. - -### PID equivalent - -Finally, to help with automation, you can have Docker write the -container ID out to a file of your choosing. This is similar to how some -programs might write out their process ID to a file (you've seen them as -PID files): +### Image references - --cidfile="": Write the container ID to the file +The image reference is the name and version of the image. You can use the image +reference to create or run a container based on an image. -### Image[:tag] +- `docker run IMAGE[:TAG][@DIGEST]` +- `docker create IMAGE[:TAG][@DIGEST]` -While not strictly a means of identifying a container, you can specify a version of an -image you'd like to run the container with by adding `image[:tag]` to the command. For -example, `docker run ubuntu:22.04`. +An image tag is the image version, which defaults to `latest` when omitted. Use +the tag to run a container from specific version of an image. For example, to +run version `23.10` of the `ubuntu` image: `docker run ubuntu:23.10`. -### Image[@digest] +#### Image digests Images using the v2 or later image format have a content-addressable identifier called a digest. As long as the input used to generate the image is unchanged, -the digest value is predictable and referenceable. +the digest value is predictable. The following example runs a container from the `alpine` image with the `sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0` digest: @@ -202,415 +50,230 @@ The following example runs a container from the `alpine` image with the $ docker run alpine@sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 date ``` -## PID settings (--pid) - - --pid="" : Set the PID (Process) Namespace mode for the container, - 'container:': joins another container's PID namespace - 'host': use the host's PID namespace inside the container - -By default, all containers have the PID namespace enabled. - -PID namespace provides separation of processes. 
The PID Namespace removes the -view of the system processes, and allows process ids to be reused including -pid 1. +### Options -In certain cases you want your container to share the host's process namespace, -basically allowing processes within the container to see all of the processes -on the system. For example, you could build a container with debugging tools -like `strace` or `gdb`, but want to use these tools when debugging processes -within the container. +`[OPTIONS]` let you configure options for the container. For example, you can +give the container a name (`--name`), or run it as a background process (`-d`). +You can also set options to control things like resource constraints and +networking. -### Example: run htop inside a container +### Commands and arguments -Create this Dockerfile: - -```dockerfile -FROM alpine:latest -RUN apk add --update htop && rm -rf /var/cache/apk/* -CMD ["htop"] -``` - -Build the Dockerfile and tag the image as `myhtop`: - -```console -$ docker build -t myhtop . -``` - -Use the following command to run `htop` inside a container: +You can use the `[COMMAND]` and `[ARG...]` positional arguments to specify +commands and arguments for the container to run when it starts up. For example, +you can specify `sh` as the `[COMMAND]`, combined with the `-i` and `-t` flags, +to start an interactive shell in the container (if the image you select has an +`sh` executable on `PATH`). ```console -$ docker run -it --rm --pid=host myhtop +$ docker run -it IMAGE sh ``` -Joining another container's pid namespace can be used for debugging that container. +> **Note** +> +> Depending on your Docker system configuration, you may be +> required to preface the `docker run` command with `sudo`. To avoid +> having to use `sudo` with the `docker` command, your system +> administrator can create a Unix group called `docker` and add users to +> it. For more information about this configuration, refer to the Docker +> installation documentation for your operating system. -### Example +## Foreground and background -Start a container running a redis server: +When you start a container, the container runs in the foreground by default. +If you want to run the container in the background instead, you can use the +`--detach` (or `-d`) flag. This starts the container without occupying your +terminal window. ```console -$ docker run --name my-redis -d redis +$ docker run -d ``` -Debug the redis container by running another container that has strace in it: +While the container runs in the background, you can interact with the container +using other CLI commands. For example, `docker logs` lets you view the logs for +the container, and `docker attach` brings it to the foreground. ```console -$ docker run -it --pid=container:my-redis my_strace_docker_image bash -$ strace -p 1 -``` - -## UTS settings (--uts) - - --uts="" : Set the UTS namespace mode for the container, - 'host': use the host's UTS namespace inside the container - -The UTS namespace is for setting the hostname and the domain that is visible -to running processes in that namespace. By default, all containers, including -those with `--network=host`, have their own UTS namespace. The `host` setting will -result in the container using the same UTS namespace as the host. Note that -`--hostname` and `--domainname` are invalid in `host` UTS mode. - -You may wish to share the UTS namespace with the host if you would like the -hostname of the container to change as the hostname of the host changes. 
A -more advanced use case would be changing the host's hostname from a container. - -## IPC settings (--ipc) - - --ipc="MODE" : Set the IPC mode for the container - -The following values are accepted: - -| Value | Description | -|:---------------------------|:----------------------------------------------------------------------------------| -| "" | Use daemon's default. | -| "none" | Own private IPC namespace, with /dev/shm not mounted. | -| "private" | Own private IPC namespace. | -| "shareable" | Own private IPC namespace, with a possibility to share it with other containers. | -| "container:<_name-or-ID_>" | Join another ("shareable") container's IPC namespace. | -| "host" | Use the host system's IPC namespace. | - -If not specified, daemon default is used, which can either be `"private"` -or `"shareable"`, depending on the daemon version and configuration. - -IPC (POSIX/SysV IPC) namespace provides separation of named shared memory -segments, semaphores and message queues. - -Shared memory segments are used to accelerate inter-process communication at -memory speed, rather than through pipes or through the network stack. Shared -memory is commonly used by databases and custom-built (typically C/OpenMPI, -C++/using boost libraries) high performance applications for scientific -computing and financial services industries. If these types of applications -are broken into multiple containers, you might need to share the IPC mechanisms -of the containers, using `"shareable"` mode for the main (i.e. "donor") -container, and `"container:"` for other containers. - -## Network settings - - --dns=[] : Set custom dns servers for the container - --network="bridge" : Connect a container to a network - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --network-alias=[] : Add network-scoped alias for the container - --add-host="" : Add a line to /etc/hosts (host:IP) - --mac-address="" : Sets the container's Ethernet device's MAC address - --ip="" : Sets the container's Ethernet device's IPv4 address - --ip6="" : Sets the container's Ethernet device's IPv6 address - --link-local-ip=[] : Sets one or more container's Ethernet device's link local IPv4/IPv6 addresses - -By default, all containers have networking enabled and they can make any -outgoing connections. The operator can completely disable networking -with `docker run --network none` which disables all incoming and outgoing -networking. In cases like this, you would perform I/O through files or -`STDIN` and `STDOUT` only. - -Publishing ports and linking to other containers only works with the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking. - -Your container will use the same DNS servers as the host by default, but -you can override this with `--dns`. - -By default, the MAC address is generated using the IP address allocated to the -container. You can set the container's MAC address explicitly by providing a -MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).Be -aware that Docker does not check if manually specified MAC addresses are unique. 
- -Supported networks : +$ docker run -d nginx +0246aa4d1448a401cabd2ce8f242192b6e7af721527e48a810463366c7ff54f1 +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +0246aa4d1448 nginx "/docker-entrypoint.…" 2 seconds ago Up 1 second 80/tcp pedantic_liskov +$ docker logs -n 5 0246aa4d1448 +2023/11/06 15:58:23 [notice] 1#1: start worker process 33 +2023/11/06 15:58:23 [notice] 1#1: start worker process 34 +2023/11/06 15:58:23 [notice] 1#1: start worker process 35 +2023/11/06 15:58:23 [notice] 1#1: start worker process 36 +2023/11/06 15:58:23 [notice] 1#1: start worker process 37 +$ docker attach 0246aa4d1448 +^C +2023/11/06 15:58:40 [notice] 1#1: signal 2 (SIGINT) received, exiting +... +``` + +For more information about `docker run` flags related to foreground and +background modes, see: + +- [`docker run --detach`](commandline/container_run.md#detach): run container in background +- [`docker run --attach`](commandline/container_run.md#attach): attach to `stdin`, `stdout`, and `stderr` +- [`docker run --tty`](commandline/container_run.md#tty): allocate a pseudo-tty +- [`docker run --interactive`](commandline/container_run.md#interactive): keep `stdin` open even if not attached + +For more information about re-attaching to a background container, see +[`docker attach`](commandline/container_attach.md). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Network | Description |
-|:--------|:------------|
-| `none` | No networking in the container. |
-| `bridge` (default) | Connect the container to the bridge via veth interfaces. |
-| `host` | Use the host's network stack inside the container. |
-| `container:<name\|id>` | Use the network stack of another container, specified via its name or id. |
-| NETWORK | Connects the container to a user created network (using `docker network create` command) |
- -#### Network: none - -With the network is `none` a container will not have -access to any external routes. The container will still have a -`loopback` interface enabled in the container but it does not have any -routes to external traffic. - -#### Network: bridge - -With the network set to `bridge` a container will use docker's -default networking setup. A bridge is setup on the host, commonly named -`docker0`, and a pair of `veth` interfaces will be created for the -container. One side of the `veth` pair will remain on the host attached -to the bridge while the other side of the pair will be placed inside the -container's namespaces in addition to the `loopback` interface. An IP -address will be allocated for containers on the bridge's network and -traffic will be routed though this bridge to the container. - -Containers can communicate via their IP addresses by default. To communicate by -name, they must be linked. - -#### Network: host - -With the network set to `host` a container will share the host's -network stack and all interfaces from the host will be available to the -container. The container's hostname will match the hostname on the host -system. Note that `--mac-address` is invalid in `host` netmode. Even in `host` -network mode a container has its own UTS namespace by default. As such -`--hostname` and `--domainname` are allowed in `host` network mode and will -only change the hostname and domain name inside the container. -Similar to `--hostname`, the `--add-host`, `--dns`, `--dns-search`, and -`--dns-option` options can be used in `host` network mode. These options update -`/etc/hosts` or `/etc/resolv.conf` inside the container. No change are made to -`/etc/hosts` and `/etc/resolv.conf` on the host. - -Compared to the default `bridge` mode, the `host` mode gives *significantly* -better networking performance since it uses the host's native networking stack -whereas the bridge has to go through one level of virtualization through the -docker daemon. It is recommended to run containers in this mode when their -networking performance is critical, for example, a production Load Balancer -or a High Performance Web Server. - -> **Note** -> -> `--network="host"` gives the container full access to local system services -> such as D-bus and is therefore considered insecure. - -#### Network: container +## Container identification -With the network set to `container` a container will share the -network stack of another container. The other container's name must be -provided in the format of `--network container:`. Note that `--add-host` -`--hostname` `--dns` `--dns-search` `--dns-option` and `--mac-address` are -invalid in `container` netmode, and `--publish` `--publish-all` `--expose` are -also invalid in `container` netmode. +You can identify a container in three ways: -Example running a Redis container with Redis binding to `localhost` then -running the `redis-cli` command and connecting to the Redis server over the -`localhost` interface. +| Identifier type | Example value | +|:----------------------|:-------------------------------------------------------------------| +| UUID long identifier | `f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778` | +| UUID short identifier | `f78375b1c487` | +| Name | `evil_ptolemy` | + +The UUID identifier is a random ID assigned to the container by the daemon. + +The daemon generates a random string name for containers automatically. 
You can
+also define a custom name using [the `--name` flag](./commandline/container_run.md#name).
+Defining a `name` can be a handy way to add meaning to a container. If you
+specify a `name`, you can use it when referring to the container in a
+user-defined network. This works for both background and foreground Docker
+containers.
+
+A container identifier is not the same thing as an image reference. The image
+reference specifies which image to use when you run a container. You can't run
+`docker exec nginx:alpine sh` to open a shell in a container based on the
+`nginx:alpine` image, because `docker exec` expects a container identifier
+(name or ID), not an image.
+
+While the image used by a container is not an identifier for the container, you can
+find out the IDs of containers using an image by using the `--filter` flag. For
+example, the following `docker ps` command gets the IDs of all running
+containers based on the `nginx:alpine` image:
 
 ```console
-$ docker run -d --name redis example/redis --bind 127.0.0.1
-$ # use the redis container's network stack to access localhost
-$ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1
+$ docker ps -q --filter ancestor=nginx:alpine
 ```
 
-#### User-defined network
+For more information about using filters, see
+[Filtering](https://docs.docker.com/config/filter/).
 
-You can create a network using a Docker network driver or an external network
-driver plugin. You can connect multiple containers to the same network. Once
-connected to a user-defined network, the containers can communicate easily using
-only another container's IP address or name.
+## Container networking
 
-For `overlay` networks or custom plugins that support multi-host connectivity,
-containers connected to the same multi-host network but launched from different
-Engines can also communicate in this way.
+Containers have networking enabled by default, and they can make outgoing
+connections. If you're running multiple containers that need to communicate
+with each other, you can create a custom network and attach the containers to
+the network.
 
-The following example creates a network using the built-in `bridge` network
-driver and running a container in the created network
+When multiple containers are attached to the same custom network, they can
+communicate with each other using the container names as a DNS hostname. The
+following example creates a custom network named `my-net`, and runs two
+containers that attach to the network.
 
 ```console
-$ docker network create -d bridge my-net
-$ docker run --network=my-net -itd --name=container3 busybox
+$ docker network create my-net
+$ docker run -d --name web --network my-net nginx:alpine
+$ docker run --rm -it --network my-net busybox
+/ # ping web
+PING web (172.18.0.2): 56 data bytes
+64 bytes from 172.18.0.2: seq=0 ttl=64 time=0.326 ms
+64 bytes from 172.18.0.2: seq=1 ttl=64 time=0.257 ms
+64 bytes from 172.18.0.2: seq=2 ttl=64 time=0.281 ms
+^C
+--- web ping statistics ---
+3 packets transmitted, 3 packets received, 0% packet loss
+round-trip min/avg/max = 0.257/0.288/0.326 ms
 ```
 
-### Managing /etc/hosts
+For more information about container networking, see [Networking
+overview](https://docs.docker.com/network/).
 
-Your container will have lines in `/etc/hosts` which define the hostname of the
-container itself as well as `localhost` and a few other common things. The
-`--add-host` flag can be used to add additional lines to `/etc/hosts`. 
+## Filesystem mounts -```console -$ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts - -172.17.0.22 09d03f76bf2c -fe00::0 ip6-localnet -ff00::0 ip6-mcastprefix -ff02::1 ip6-allnodes -ff02::2 ip6-allrouters -127.0.0.1 localhost -::1 localhost ip6-localhost ip6-loopback -86.75.30.9 db-static -``` +By default, the data in a container is stored in an ephemeral, writable +container layer. Removing the container also removes its data. If you want to +use persistent data with containers, you can use filesystem mounts to store the +data persistently on the host system. Filesystem mounts can also let you share +data between containers and the host. -If a container is connected to the default bridge network and `linked` -with other containers, then the container's `/etc/hosts` file is updated -with the linked container's name. +Docker supports two main categories of mounts: -> **Note** -> -> Since Docker may live update the container’s `/etc/hosts` file, there -> may be situations when processes inside the container can end up reading an -> empty or incomplete `/etc/hosts` file. In most cases, retrying the read again -> should fix the problem. +- Volume mounts +- Bind mounts -## Restart policies (--restart) +Volume mounts are great for persistently storing data for containers, and for +sharing data between containers. Bind mounts, on the other hand, are for +sharing data between a container and the host. -Using the `--restart` flag on Docker run you can specify a restart policy for -how a container should or should not be restarted on exit. +You can add a filesystem mount to a container using the `--mount` flag for the +`docker run` command. -When a restart policy is active on a container, it will be shown as either `Up` -or `Restarting` in [`docker ps`](commandline/ps.md). It can also be -useful to use [`docker events`](commandline/events.md) to see the -restart policy in effect. +The following sections show basic examples of how to create volumes and bind +mounts. For more in-depth examples and descriptions, refer to the section of +the [storage section](https://docs.docker.com/storage/) in the documentation. -Docker supports the following restart policies: +### Volume mounts - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Policy | Result |
-|:-------|:-------|
-| `no` | Do not automatically restart the container when it exits. This is the default. |
-| `on-failure[:max-retries]` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. |
-| `always` | Always restart the container regardless of the exit status. When you specify `always`, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container. |
-| `unless-stopped` | Always restart the container regardless of the exit status, including on daemon startup, except if the container was put into a stopped state before the Docker daemon was stopped. |
-
-An increasing delay (double the previous delay, starting at 100 milliseconds)
-is added before each restart to prevent flooding the server.
-This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600,
-and so on until either the `on-failure` limit, the maximum delay of 1 minute is
-hit, or when you `docker stop` or `docker rm -f` the container.
-
-If a container is successfully restarted (the container is started and runs
-for at least 10 seconds), the delay is reset to its default value of 100 ms.
-
-You can specify the maximum amount of times Docker will try to restart the
-container when using the **on-failure** policy. The default is that Docker
-will try forever to restart the container. The number of (attempted) restarts
-for a container can be obtained via [`docker inspect`](commandline/inspect.md). For example, to get the number of restarts
-for container "my-container";
+To create a volume mount:
 
 ```console
-$ docker inspect -f "{{ .RestartCount }}" my-container
-# 2
+$ docker run --mount source=[VOLUME],target=[PATH] [IMAGE] [COMMAND...]
 ```
 
-Or, to get the last time the container was (re)started;
+The `--mount` flag takes two parameters in this case: `source` and `target`.
+The value for the `source` parameter is the name of the volume. The value of
+`target` is the mount location of the volume inside the container. Once you've
+created the volume, any data you write to the volume is persisted, even if you
+stop or remove the container:
 
 ```console
-$ docker inspect -f "{{ .State.StartedAt }}" my-container
-# 2015-03-04T23:47:07.691840179Z
+$ docker run --rm --mount source=my_volume,target=/foo busybox \
+  sh -c 'echo "hello, volume!" > /foo/hello.txt'
+$ docker run --mount source=my_volume,target=/bar busybox \
+  cat /bar/hello.txt
+hello, volume!
 ```
 
-Combining `--restart` (restart policy) with the `--rm` (clean up) flag results
-in an error. On container restart, attached clients are disconnected. See the
-examples on using the [`--rm` (clean up)](#clean-up---rm) flag later in this page.
+The `target` must always be an absolute path, such as `/src/docs`. An absolute
+path starts with a `/` (forward slash). Volume names must start with an
+alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or
+`-` (hyphen).
+
+### Bind mounts
 
-### Examples
+To create a bind mount:
 
 ```console
-$ docker run --restart=always redis
+$ docker run -it --mount type=bind,source=[PATH],target=[PATH] busybox
 ```
 
-This will run the `redis` container with a restart policy of **always**
-so that if the container exits, Docker will restart it.
+In this case, the `--mount` flag takes three parameters: a type (`bind`), and
+two paths. The `source` path is the location on the host that you want to
+bind mount into the container. The `target` path is the mount destination
+inside the container.
+
+Bind mounts are read-write by default, meaning that you can both read and write
+files to and from the mounted location from the container. Changes that you
+make, such as adding or editing files, are reflected on the host filesystem:
 
 ```console
-$ docker run --restart=on-failure:10 redis
+$ docker run -it --mount type=bind,source=.,target=/foo busybox
+/ # echo "hello from container" > /foo/hello.txt
+/ # exit
+$ cat hello.txt
+hello from container
 ```
 
-This will run the `redis` container with a restart policy of **on-failure**
-and a maximum restart count of 10. 
- -## Exit Status +## Exit status  The exit code from `docker run` gives information about why the container -failed to run or why it exited. When `docker run` exits with a non-zero code, -the exit codes follow the `chroot` standard, see below: +failed to run or why it exited. The following sections describe the meanings of +different container exit code values. -**_125_** if the error is with Docker daemon **_itself_** +### 125 + +Exit code `125` indicates that the error is with the Docker daemon itself. ```console $ docker run --foo busybox; echo $? @@ -620,7 +283,10 @@ See 'docker run --help'. 125 ``` -**_126_** if the **_contained command_** cannot be invoked +### 126 + +Exit code `126` indicates that the specified contained command can't be invoked. +The container command in the following example is `/etc`. ```console $ docker run busybox /etc; echo $? @@ -629,7 +295,9 @@ docker: Error response from daemon: Container command '/etc' could not be invoke 126 ``` -**_127_** if the **_contained command_** cannot be found +### 127 + +Exit code `127` indicates that the contained command can't be found. ```console $ docker run busybox foo; echo $? @@ -638,7 +306,10 @@ docker: Error response from daemon: Container command 'foo' not found or does no 127 ``` -**_Exit code_** of **_contained command_** otherwise +### Other exit codes + +Any exit code other than `125`, `126`, and `127` represents the exit code of the +provided container command. ```console $ docker run busybox /bin/sh -c 'exit 3' @@ -646,110 +317,6 @@ $ echo $? 3 ``` -## Clean up (--rm) - -By default a container's file system persists even after the container -exits. This makes debugging a lot easier (since you can inspect the -final state) and you retain all your data by default. But if you are -running short-term **foreground** processes, these container file -systems can really pile up. If instead you'd like Docker to -**automatically clean up the container and remove the file system when -the container exits**, you can add the `--rm` flag: - -    --rm=false: Automatically remove the container when it exits - -> **Note** -> -> If you set the `--rm` flag, Docker also removes the anonymous volumes -> associated with the container when the container is removed. This is similar -> to running `docker rm -v my-container`. Only volumes that are specified without -> a name are removed. For example, when running: -> -> ```console -> $ docker run --rm -v /foo -v awesome:/bar busybox top -> ``` -> -> the volume for `/foo` will be removed, but the volume for `/bar` will not. -> Volumes inherited via `--volumes-from` will be removed with the same logic: if -> the original volume was specified with a name it will **not** be removed.
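+
+The examples above print the exit code in the same shell session using
+`echo $?`. You can also read the exit code of a container that has already
+exited from `docker inspect`; the container name `exit-demo` here is only an
+illustrative placeholder:
+
+```console
+$ docker run --name exit-demo busybox /bin/sh -c 'exit 3'
+$ docker inspect --format '{{.State.ExitCode}}' exit-demo
+3
+```
+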
- -## Security configuration - -| Option | Description | -|:------------------------------------------|:--------------------------------------------------------------------------| -| `--security-opt="label=user:USER"` | Set the label user for the container | -| `--security-opt="label=role:ROLE"` | Set the label role for the container | -| `--security-opt="label=type:TYPE"` | Set the label type for the container | -| `--security-opt="label=level:LEVEL"` | Set the label level for the container | -| `--security-opt="label=disable"` | Turn off label confinement for the container | -| `--security-opt="apparmor=PROFILE"` | Set the apparmor profile to be applied to the container | -| `--security-opt="no-new-privileges=true"` | Disable container processes from gaining new privileges | -| `--security-opt="seccomp=unconfined"` | Turn off seccomp confinement for the container | -| `--security-opt="seccomp=profile.json"` | White-listed syscalls seccomp Json file to be used as a seccomp filter | - - -You can override the default labeling scheme for each container by specifying -the `--security-opt` flag. Specifying the level in the following command -allows you to share the same content between containers. - -```console -$ docker run --security-opt label=level:s0:c100,c200 -it fedora bash -``` - -> **Note** -> -> Automatic translation of MLS labels is not currently supported. - -To disable the security labeling for this container versus running with the -`--privileged` flag, use the following command: - -```console -$ docker run --security-opt label=disable -it fedora bash -``` - -If you want a tighter security policy on the processes within a container, -you can specify an alternate type for the container. You could run a container -that is only allowed to listen on Apache ports by executing the following -command: - -```console -$ docker run --security-opt label=type:svirt_apache_t -it centos bash -``` - -> **Note** -> -> You would have to write policy defining a `svirt_apache_t` type. - -If you want to prevent your container processes from gaining additional -privileges, you can execute the following command: - -```console -$ docker run --security-opt no-new-privileges -it centos bash -``` - -This means that commands that raise privileges such as `su` or `sudo` will no longer work. -It also causes any seccomp filters to be applied later, after privileges have been dropped -which may mean you can have a more restrictive set of filters. -For more details, see the [kernel documentation](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt). - -## Specify an init process - -You can use the `--init` flag to indicate that an init process should be used as -the PID 1 in the container. Specifying an init process ensures the usual -responsibilities of an init system, such as reaping zombie processes, are -performed inside the created container. - -The default init process used is the first `docker-init` executable found in the -system path of the Docker daemon process. This `docker-init` binary, included in -the default installation, is backed by [tini](https://github.com/krallin/tini). - -## Specify custom cgroups - -Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a -container in. This allows you to create and manage cgroups on their own. You can -define custom resources for those cgroups and put containers under a common -parent group. 
- ## Runtime constraints on resources The operator can also adjust the performance parameters of the @@ -1397,94 +964,68 @@ drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git The default seccomp profile will adjust to the selected capabilities, in order to allow use of facilities allowed by the capabilities, so you should not have to adjust this. -## Logging drivers (--log-driver) - -The container can have a different logging driver than the Docker daemon. Use -the `--log-driver=VALUE` with the `docker run` command to configure the -container's logging driver. The following options are supported: - -| Driver | Description | -|:-------------|:-------------------------------------------------------------------------------------------------------------------------------| -| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | -| `local` | Logs are stored in a custom format designed for minimal overhead. | -| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. | -| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | -| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | -| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. | -| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | -| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs. | -| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using Event Http Collector. | -| `etwlogs` | Event Tracing for Windows (ETW) events. Writes log messages as Event Tracing for Windows (ETW) events. Only Windows platforms. | -| `gcplogs` | Google Cloud Platform (GCP) Logging. Writes log messages to Google Cloud Platform (GCP) Logging. | -| `logentries` | Rapid7 Logentries. Writes log messages to Rapid7 Logentries. | - -The `docker logs` command is available only for the `json-file` and `journald` -logging drivers. For detailed information on working with logging drivers, see -[Configure logging drivers](https://docs.docker.com/config/containers/logging/configure/). - - -## Overriding Dockerfile image defaults - -When a developer builds an image from a [*Dockerfile*](https://docs.docker.com/engine/reference/builder/) -or when committing it, the developer can set a number of default parameters -that take effect when the image starts up as a container. - -Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, -`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override -in `docker run`. We'll go through what the developer might have set in each -Dockerfile instruction and how the operator can override that setting. 
- - - [CMD (Default Command or Options)](#cmd-default-command-or-options) - - [ENTRYPOINT (Default Command to Execute at Runtime)]( - #entrypoint-default-command-to-execute-at-runtime) - - [EXPOSE (Incoming Ports)](#expose-incoming-ports) - - [ENV (Environment Variables)](#env-environment-variables) - - [HEALTHCHECK](#healthcheck) - - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) - - [USER](#user) - - [WORKDIR](#workdir) - -### CMD (default command or options) - -Recall the optional `COMMAND` in the Docker -commandline: +## Overriding image defaults + +When you build an image from a [Dockerfile](https://docs.docker.com/engine/reference/builder/), +or when committing a container, you can set a number of default parameters that take +effect when the image starts up as a container. When you run an image, you can +override those defaults using flags for the `docker run` command. + +- [Default entrypoint](#default-entrypoint) +- [Default command and options](#default-command-and-options) +- [Exposed ports](#exposed-ports) +- [Environment variables](#environment-variables) +- [Healthchecks](#healthchecks) +- [User](#user) +- [Working directory](#working-directory) + +### Default command and options + +The command syntax for `docker run` supports optionally specifying commands and +arguments to the container's entrypoint, represented as `[COMMAND]` and +`[ARG...]` in the following synopsis example: ```console $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] ``` -This command is optional because the person who created the `IMAGE` may -have already provided a default `COMMAND` using the Dockerfile `CMD` -instruction. As the operator (the person running a container from the -image), you can override that `CMD` instruction just by specifying a new -`COMMAND`. +This command is optional because whoever created the `IMAGE` may have already +provided a default `COMMAND`, using the Dockerfile `CMD` instruction. When you +run a container, you can override that `CMD` instruction just by specifying a +new `COMMAND`. If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` get appended as arguments to the `ENTRYPOINT`. -### ENTRYPOINT (default command to execute at runtime) +### Default entrypoint -```console - --entrypoint="": Overwrite the default entrypoint set by the image +```text +--entrypoint="": Overwrite the default entrypoint set by the image ``` -The `ENTRYPOINT` of an image is similar to a `COMMAND` because it -specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The `ENTRYPOINT` gives a -container its default nature or behavior, so that when you set an -`ENTRYPOINT` you can run the container *as if it were that binary*, -complete with default options, and you can pass in more options via the -`COMMAND`. But, sometimes an operator may want to run something else -inside the container, so you can override the default `ENTRYPOINT` at -runtime by using a string to specify the new `ENTRYPOINT`. Here is an -example of how to run a shell in a container that has been set up to -automatically run something else (like `/usr/bin/redis-server`): +The entrypoint refers to the default executable that's invoked when you run a +container. A container's entrypoint is defined using the Dockerfile +`ENTRYPOINT` instruction.
It's similar to specifying a default command, +but the difference is that you need to pass an explicit flag to +override the entrypoint, whereas you can override the default command with +positional arguments. The entrypoint defines a container's default behavior, with the idea +that when you set an entrypoint you can run the container *as if it were that +binary*, complete with default options, and you can pass in more options as +command arguments. But there are cases where you may want to run something else inside +the container. This is when overriding the default entrypoint at runtime comes +in handy, using the `--entrypoint` flag for the `docker run` command. + +The `--entrypoint` flag expects a string value, representing the name or path +of the binary that you want to invoke when the container starts. The following +example shows you how to run a Bash shell in a container that has been set up +to automatically run some other binary (like `/usr/bin/redis-server`): ```console $ docker run -it --entrypoint /bin/bash example/redis ``` -or two examples of how to pass more parameters to that ENTRYPOINT: +The following examples show how to pass additional parameters to the custom +entrypoint, using the positional command arguments: ```console $ docker run -it --entrypoint /bin/bash example/redis -c ls -l @@ -1499,69 +1040,39 @@ $ docker run -it --entrypoint="" mysql bash > **Note** > -> Passing `--entrypoint` will clear out any default command set on the -> image (i.e. any `CMD` instruction in the Dockerfile used to build it). - -### EXPOSE (incoming ports) - -The following `run` command options work with container networking: - -    --expose=[]: Expose a port or a range of ports inside the container. -                 These are additional to those exposed by the `EXPOSE` instruction -    -P        : Publish all exposed ports to the host interfaces -    -p=[]     : Publish a container's port or a range of ports to the host -                   format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort -                   Both hostPort and containerPort can be specified as a -                   range of ports. When specifying ranges for both, the -                   number of container ports in the range must match the -                   number of host ports in the range, for example: -                       -p 1234-1236:1234-1236/tcp - -                   When specifying a range for hostPort only, the -                   containerPort must not be a range.  In this case the -                   container port is published somewhere within the -                   specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`) - -                   (use 'docker port' to see the actual mapping) - -    --link=""  : Add link to another container (:alias or ) - -With the exception of the `EXPOSE` directive, an image developer hasn't -got much control over networking. The `EXPOSE` instruction defines the -initial incoming ports that provide services. These ports are available -to processes inside the container. An operator can use the `--expose` -option to add to the exposed ports. - -To expose a container's internal port, an operator can start the -container with the `-P` or `-p` flag. The exposed port is accessible on -the host and the ports are available to any client that can reach the -host. - -The `-P` option publishes all the ports to the host interfaces. Docker -binds each exposed port to a random port on the host. The range of -ports are within an *ephemeral port range* defined by -`/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to -explicitly map a single port or range of ports.
- -The port number inside the container (where the service listens) does -not need to match the port number exposed on the outside of the -container (where clients connect). For example, inside the container an -HTTP service is listening on port 80 (and so the image developer -specifies `EXPOSE 80` in the Dockerfile). At runtime, the port might be -bound to 42800 on the host. To find the mapping between the host ports -and the exposed ports, use `docker port`. - -If the operator uses `--link` when starting a new client container in the -default bridge network, then the client container can access the exposed -port via a private networking interface. -If `--link` is used when starting a container in a user-defined network as -described in [*Networking overview*](https://docs.docker.com/network/), -it will provide a named alias for the container being linked to. - -### ENV (environment variables) +> Passing `--entrypoint` clears out any default command set on the image. That +> is, any `CMD` instruction in the Dockerfile used to build it. + +### Exposed ports + +By default, when you run a container, none of the container's ports are exposed +to the host. This means you won't be able to access any ports that the +container might be listening on. To make a container's ports accessible from +the host, you need to publish the ports. + +You can start the container with the `-P` or `-p` flags to expose its ports: + +- The `-P` (or `--publish-all`) flag publishes all the exposed ports to the + host. Docker binds each exposed port to a random port on the host. + + The `-P` flag only publishes port numbers that are explicitly flagged as + exposed, either using the Dockerfile `EXPOSE` instruction or the `--expose` + flag for the `docker run` command. + +- The `-p` (or `--publish`) flag lets you explicitly map a single port or range + of ports in the container to the host. + +The port number inside the container (where the service listens) doesn't need +to match the port number published on the outside of the container (where +clients connect). For example, inside the container an HTTP service might be +listening on port 80. At runtime, the port might be bound to 42800 on the host. +To find the mapping between the host ports and the exposed ports, use the +`docker port` command. + +### Environment variables Docker automatically sets some environment variables when creating a Linux -container. Docker does not set any environment variables when creating a Windows +container. Docker doesn't set any environment variables when creating a Windows container. The following environment variables are set for Linux containers: @@ -1574,11 +1085,13 @@ The following environment variables are set for Linux containers: | `TERM` | `xterm` if the container is allocated a pseudo-TTY | -Additionally, the operator can **set any environment variable** in the -container by using one or more `-e` flags, even overriding those mentioned -above, or already defined by the developer with a Dockerfile `ENV`. If the -operator names an environment variable without specifying a value, then the -current value of the named variable is propagated into the container's environment: +Additionally, you can set any environment variable in the container by using +one or more `-e` flags. You can even override the variables mentioned above, or +variables defined using a Dockerfile `ENV` instruction when building the image. 
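+
+For example, you can pass an explicit value with `-e` to replace one of the
+default variables inside the container; the `/tmp` value here is only an
+illustration:
+
+```console
+$ docker run --rm -e HOME=/tmp busybox sh -c 'echo $HOME'
+/tmp
+```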
+ +If you name an environment variable without specifying a value, the current +value of the named variable on the host is propagated into the container's +environment: ```console $ export today=Wednesday @@ -1626,18 +1139,20 @@ USERPROFILE=C:\Users\ContainerAdministrator windir=C:\Windows ``` -Similarly the operator can set the **HOSTNAME** (Linux) or **COMPUTERNAME** (Windows) with `-h`. +### Healthchecks -### HEALTHCHECK +The following flags for the `docker run` command let you control the parameters +for container healthchecks: -``` -    --health-cmd            Command to run to check health -    --health-interval       Time between running the check -    --health-retries        Consecutive failures needed to report unhealthy -    --health-timeout        Maximum time to allow one check to run -    --health-start-period   Start period for the container to initialize before starting health-retries countdown -    --no-healthcheck        Disable any container-specified HEALTHCHECK -``` +| Option | Description | +|:---------------------------|:---------------------------------------------------------------------------------------| +| `--health-cmd` | Command to run to check health | +| `--health-interval` | Time between running the check | +| `--health-retries` | Consecutive failures needed to report unhealthy | +| `--health-timeout` | Maximum time to allow one check to run | +| `--health-start-period` | Start period for the container to initialize before starting health-retries countdown | +| `--health-start-interval` | Time between running the check during the start period | +| `--no-healthcheck` | Disable any container-specified `HEALTHCHECK` | Example: @@ -1690,88 +1205,39 @@ $ sleep 2; docker inspect --format='{{json .State.Health}}' test The health status is also displayed in the `docker ps` output. -### TMPFS (mount tmpfs filesystems) - -```console ---tmpfs=[]: Create a tmpfs mount with: container-dir[:], -     where the options are identical to the Linux -     'mount -t tmpfs -o' command. -``` +### User -The example below mounts an empty tmpfs into the container with the `rw`, -`noexec`, `nosuid`, and `size=65536k` options. +The default user within a container is `root` (uid = 0). You can set a default +user to run the first process with the Dockerfile `USER` instruction. When +starting a container, you can override the `USER` instruction by passing the +`-u` option. -```console -$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image +```text +-u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. ``` -### VOLUME (shared filesystems) - -    -v, --volume=[host-src:]container-dest[:]: Bind mount a volume. -        The comma-delimited `options` are [rw|ro], [z|Z], -        [[r]shared|[r]slave|[r]private], and [nocopy]. -        The 'host-src' is an absolute path or a name value. - -        If neither 'rw' or 'ro' is specified then the volume is mounted in -        read-write mode. +The following examples are all valid: -        The `nocopy` mode is used to disable automatically copying the requested volume -        path in the container to the volume storage location. -        For named volumes, `copy` is the default mode. Copy modes are not supported -        for bind-mounted volumes. - -   --volumes-from="": Mount all volumes from the given container(s) +```text +--user=[ user | user:group | uid | uid:gid | user:gid | uid:group ] +``` > **Note** > -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`.
The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - -The volumes commands are complex enough to have their own documentation -in section [*Use volumes*](https://docs.docker.com/storage/volumes/). A developer can define -one or more `VOLUME`'s associated with an image, but only the operator -can give access from one container to another (or from a container to a -volume mounted on the host). - -The `container-dest` must always be an absolute path such as `/src/docs`. -The `host-src` can either be an absolute path or a `name` value. If you -supply an absolute path for the `host-src`, Docker bind-mounts to the path -you specify. If you supply a `name`, Docker creates a named volume by that `name`. - -A `name` value must start with an alphanumeric character, -followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). -An absolute path starts with a `/` (forward slash). - -For example, you can specify either `/foo` or `foo` for a `host-src` value. -If you supply the `/foo` value, Docker creates a bind mount. If you supply -the `foo` specification, Docker creates a named volume. -### USER +> If you pass a numeric user ID, it must be in the range of 0-2147483647. If +> you pass a username, the user must exist in the container. -`root` (id = 0) is the default user within a container. The image developer can -create additional users. Those users are accessible by name. When passing a numeric -ID, the user does not have to exist in the container. - -The developer can set a default user to run the first process with the -Dockerfile `USER` instruction. When starting a container, the operator can override -the `USER` instruction by passing the `-u` option. - -    -u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. - -    The followings examples are all valid: -    --user=[ user | user:group | uid | uid:gid | user:gid | uid:group ] - -> **Note:** if you pass a numeric uid, it must be in the range of 0-2147483647. -> If you pass a username, the user must exist in the container. - -### WORKDIR +### Working directory  The default working directory for running binaries within a container is the -root directory (`/`). It is possible to set a different working directory with the -Dockerfile `WORKDIR` command. The operator can override this with: +root directory (`/`). The default working directory of an image is set using +the Dockerfile `WORKDIR` command. You can override the default working +directory for an image using the `-w` (or `--workdir`) flag for the `docker +run` command: + +```console +$ docker run --rm -w /my/workdir alpine pwd +/my/workdir +``` - -w="", --workdir="": Working directory inside the container +If the directory doesn't already exist in the container, it's created. diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_events.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_events.md index 3c231fbc4..86c492e84 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_events.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_events.md @@ -33,4 +33,4 @@ With the `--json` flag, a json object is printed one per line with the format: } ``` -The events that can be received using this can be seen [here](https://docs.docker.com/engine/reference/commandline/events/#object-types).
+The events that can be received using this can be seen [here](https://docs.docker.com/engine/reference/commandline/system_events/#object-types). diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_events.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_events.yaml index ce7af72cc..cd51372f7 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_events.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_events.yaml @@ -19,7 +19,7 @@ long: |- } ``` - The events that can be received using this can be seen [here](/engine/reference/commandline/events/#object-types). + The events that can be received using this can be seen [here](/engine/reference/commandline/system_events/#object-types). usage: docker compose events [OPTIONS] [SERVICE...] pname: docker compose plink: docker_compose.yaml diff --git a/_vendor/github.com/moby/moby/docs/api/v1.18.md b/_vendor/github.com/moby/moby/docs/api/v1.18.md index 494b113fc..650c56203 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.18.md +++ b/_vendor/github.com/moby/moby/docs/api/v1.18.md @@ -377,7 +377,7 @@ Return low-level information on the container `id` "WorkingDir": "" }, "Created": "2015-01-06T15:47:31.485331387Z", - "Driver": "devicemapper", + "Driver": "overlay2", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { diff --git a/_vendor/github.com/moby/moby/docs/api/v1.19.md b/_vendor/github.com/moby/moby/docs/api/v1.19.md index d46b05673..8523f5480 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.19.md +++ b/_vendor/github.com/moby/moby/docs/api/v1.19.md @@ -387,7 +387,7 @@ Return low-level information on the container `id` "WorkingDir": "" }, "Created": "2015-01-06T15:47:31.485331387Z", - "Driver": "devicemapper", + "Driver": "overlay2", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { diff --git a/_vendor/github.com/moby/moby/docs/api/v1.20.md b/_vendor/github.com/moby/moby/docs/api/v1.20.md index 630c8b727..55a099f32 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.20.md +++ b/_vendor/github.com/moby/moby/docs/api/v1.20.md @@ -390,7 +390,7 @@ Return low-level information on the container `id` "WorkingDir": "" }, "Created": "2015-01-06T15:47:31.485331387Z", - "Driver": "devicemapper", + "Driver": "overlay2", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { diff --git a/_vendor/github.com/moby/moby/docs/api/v1.21.md b/_vendor/github.com/moby/moby/docs/api/v1.21.md index 9f118259b..a67fe1919 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.21.md +++ b/_vendor/github.com/moby/moby/docs/api/v1.21.md @@ -412,7 +412,7 @@ Return low-level information on the container `id` "StopSignal": "SIGTERM" }, "Created": "2015-01-06T15:47:31.485331387Z", - "Driver": "devicemapper", + "Driver": "overlay2", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { diff --git a/_vendor/github.com/moby/moby/docs/api/v1.22.md b/_vendor/github.com/moby/moby/docs/api/v1.22.md index a2a2dd91a..4f099624d 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.22.md +++ b/_vendor/github.com/moby/moby/docs/api/v1.22.md @@ -529,7 +529,7 @@ Return low-level information on the container `id` "StopSignal": "SIGTERM" }, "Created": "2015-01-06T15:47:31.485331387Z", - "Driver": "devicemapper", + "Driver": "overlay2", "ExecIDs": null, "HostConfig": { "Binds": null, diff --git a/_vendor/github.com/moby/moby/docs/api/v1.23.md b/_vendor/github.com/moby/moby/docs/api/v1.23.md index e74245881..d191fb76e 100644 --- 
a/_vendor/github.com/moby/moby/docs/api/v1.23.md +++ b/_vendor/github.com/moby/moby/docs/api/v1.23.md @@ -555,7 +555,7 @@ Return low-level information on the container `id` "StopSignal": "SIGTERM" }, "Created": "2015-01-06T15:47:31.485331387Z", - "Driver": "devicemapper", + "Driver": "overlay2", "ExecIDs": null, "HostConfig": { "Binds": null, diff --git a/_vendor/github.com/moby/moby/docs/api/v1.24.md b/_vendor/github.com/moby/moby/docs/api/v1.24.md index 6e9fcd384..45b4b3fdb 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.24.md +++ b/_vendor/github.com/moby/moby/docs/api/v1.24.md @@ -597,7 +597,7 @@ Return low-level information on the container `id` "StopSignal": "SIGTERM" }, "Created": "2015-01-06T15:47:31.485331387Z", - "Driver": "devicemapper", + "Driver": "overlay2", "ExecIDs": null, "HostConfig": { "Binds": null, diff --git a/_vendor/github.com/moby/moby/docs/api/v1.25.yaml b/_vendor/github.com/moby/moby/docs/api/v1.25.yaml index 37cec560f..48a679b2d 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.25.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.25.yaml @@ -2995,7 +2995,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -6149,6 +6149,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.26.yaml b/_vendor/github.com/moby/moby/docs/api/v1.26.yaml index b6bc59610..454921263 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.26.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.26.yaml @@ -3000,7 +3000,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -6158,6 +6158,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.27.yaml b/_vendor/github.com/moby/moby/docs/api/v1.27.yaml index 4736a6743..7fdbbdd3a 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.27.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.27.yaml @@ -3060,7 +3060,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -6229,6 +6229,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.28.yaml b/_vendor/github.com/moby/moby/docs/api/v1.28.yaml index 44bb67f02..b2b070115 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.28.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.28.yaml @@ -3150,7 +3150,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -6358,6 +6358,10 @@ paths: example: Id: 
"22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.29.yaml b/_vendor/github.com/moby/moby/docs/api/v1.29.yaml index ed69b6889..a33f57a55 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.29.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.29.yaml @@ -3184,7 +3184,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -6396,6 +6396,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.30.yaml b/_vendor/github.com/moby/moby/docs/api/v1.30.yaml index 535a6205e..87aa2a190 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.30.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.30.yaml @@ -3410,7 +3410,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -6659,6 +6659,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.31.yaml b/_vendor/github.com/moby/moby/docs/api/v1.31.yaml index 8535ffe23..c0ed81af7 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.31.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.31.yaml @@ -3480,7 +3480,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -6757,6 +6757,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.32.yaml b/_vendor/github.com/moby/moby/docs/api/v1.32.yaml index 5f130bf11..6b78c710a 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.32.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.32.yaml @@ -3720,10 +3720,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. 
type: "string" example: "17.06.0-ce" ClusterStore: @@ -4720,7 +4716,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -7800,6 +7796,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.33.yaml b/_vendor/github.com/moby/moby/docs/api/v1.33.yaml index 09c1a9bf7..00d359ad2 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.33.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.33.yaml @@ -3725,10 +3725,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: @@ -4725,7 +4721,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -7809,6 +7805,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.34.yaml b/_vendor/github.com/moby/moby/docs/api/v1.34.yaml index afb0301aa..988382470 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.34.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.34.yaml @@ -3754,10 +3754,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: @@ -4754,7 +4750,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -7850,6 +7846,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.35.yaml b/_vendor/github.com/moby/moby/docs/api/v1.35.yaml index a82056067..6d06746b5 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.35.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.35.yaml @@ -3736,10 +3736,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. 
type: "string" example: "17.06.0-ce" ClusterStore: @@ -4736,7 +4732,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -7862,6 +7858,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.36.yaml b/_vendor/github.com/moby/moby/docs/api/v1.36.yaml index d16fe7d64..bcf04ffa6 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.36.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.36.yaml @@ -3749,10 +3749,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: @@ -4752,7 +4748,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -7904,6 +7900,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.37.yaml b/_vendor/github.com/moby/moby/docs/api/v1.37.yaml index d1a7dba18..0ef019fc9 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.37.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.37.yaml @@ -3769,10 +3769,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: @@ -4772,7 +4768,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -7947,6 +7943,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.38.yaml b/_vendor/github.com/moby/moby/docs/api/v1.38.yaml index dcee27c1b..162c7a919 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.38.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.38.yaml @@ -3823,10 +3823,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. 
type: "string" example: "17.06.0-ce" ClusterStore: @@ -4830,7 +4826,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" @@ -8008,6 +8004,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.39.yaml b/_vendor/github.com/moby/moby/docs/api/v1.39.yaml index 0bb1af5a6..a97eef76e 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.39.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.39.yaml @@ -4811,10 +4811,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: @@ -5822,7 +5818,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" @@ -8957,6 +8953,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.40.yaml b/_vendor/github.com/moby/moby/docs/api/v1.40.yaml index 67077a982..1121c53e7 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.40.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.40.yaml @@ -4947,10 +4947,6 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. 
type: "string" example: "17.06.0-ce" ClusterStore: @@ -6123,7 +6119,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" @@ -9294,6 +9290,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.41.yaml b/_vendor/github.com/moby/moby/docs/api/v1.41.yaml index d89efe3de..94071a2d6 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.41.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.41.yaml @@ -6311,7 +6311,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" @@ -9497,6 +9497,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.42.yaml b/_vendor/github.com/moby/moby/docs/api/v1.42.yaml index 0814e9385..1af14130c 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.42.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.42.yaml @@ -9875,6 +9875,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.43.yaml b/_vendor/github.com/moby/moby/docs/api/v1.43.yaml index 8dd8c984d..7ddb88e5f 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.43.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.43.yaml @@ -9893,6 +9893,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: "operation not supported for pre-defined networks" schema: diff --git a/_vendor/github.com/moby/moby/docs/api/v1.44.yaml b/_vendor/github.com/moby/moby/docs/api/v1.44.yaml new file mode 100644 index 000000000..567939580 --- /dev/null +++ b/_vendor/github.com/moby/moby/docs/api/v1.44.yaml @@ -0,0 +1,12300 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. 
+ +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.44" +info: + title: "Docker Engine API" + version: "1.44" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.44) is used. + For example, calling `/info` is the same as calling `/v1.44/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. 
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. 
+ + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. 
+ - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to true in conjunction). + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. 
If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. 
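+      # Not part of the upstream definition: a minimal, illustrative sketch of how
+      # several of the resource fields described here might be combined in a
+      # container-create request body. All values are examples only.
+      #
+      #   {
+      #     "Memory": 536870912,
+      #     "MemorySwap": 1073741824,
+      #     "CpuPeriod": 100000,
+      #     "CpuQuota": 50000,
+      #     "CpusetCpus": "0-3",
+      #     "BlkioWeight": 300,
+      #     "DeviceRequests": [
+      #       {"Driver": "nvidia", "Count": -1, "Capabilities": [["gpu"]]}
+      #     ]
+      #   }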
+ type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. 
Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. 
+ - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:`. Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `[:]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. 
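+        # Not part of the upstream definition: an illustrative HostConfig fragment
+        # combining the bind, port-binding, restart-policy, and logging settings
+        # described above. The log option shown is driver-specific and is only an
+        # example value.
+        #
+        #   {
+        #     "Binds": ["/host/data:/container/data:ro"],
+        #     "PortBindings": {"80/tcp": [{"HostIp": "0.0.0.0", "HostPort": "8080"}]},
+        #     "RestartPolicy": {"Name": "on-failure", "MaximumRetryCount": 3},
+        #     "LogConfig": {"Type": "json-file", "Config": {"max-size": "10m"}}
+        #   }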
+ additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. 
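+        # Not part of the upstream definition: an illustrative sketch of the
+        # UNIX-specific namespace and security fields described in this block.
+        # Capability names and addresses are examples only.
+        #
+        #   {
+        #     "CapAdd": ["NET_ADMIN"],
+        #     "CapDrop": ["MKNOD"],
+        #     "CgroupnsMode": "private",
+        #     "Dns": ["8.8.8.8"],
+        #     "ExtraHosts": ["somehost:198.51.100.10"],
+        #     "IpcMode": "private",
+        #     "ReadonlyRootfs": true,
+        #     "PublishAllPorts": false
+        #   }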
+ items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. + For example: + + ``` + {"net.ipv4.ip_forward": "1"} + ``` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + + When used as `ContainerConfig` field in an image, `ContainerConfig` is an + optional field containing the configuration of the container that was last + committed when creating the image. + + Previous versions of Docker builder used this field to store build cache, + and it is not in active use anymore. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. 
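+        # Not part of the upstream definition: an illustrative ContainerConfig
+        # fragment showing how the fields above (hostname, user, environment,
+        # exposed ports, TTY) fit together. Values are examples only.
+        #
+        #   {
+        #     "Hostname": "439f4e91bd1d",
+        #     "User": "nginx",
+        #     "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],
+        #     "ExposedPorts": {"80/tcp": {}},
+        #     "Tty": false,
+        #     "OpenStdin": false
+        #   }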
+ type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. 
+ EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. + type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `/`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + GraphDriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. 
+ + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: false + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + x-nullable: false + example: "2022-02-04T21:20:12.497794809Z" + Container: + description: | + The ID of the container that was used to create the image. + + Depending on how the image was created, this field may be empty. + + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. + type: "string" + example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" + ContainerConfig: + description: | + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. + $ref: "#/definitions/ContainerConfig" + DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "20.10.7" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). 
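+        # Not part of the upstream definition: an abbreviated, illustrative
+        # ImageInspect response assembled from the field examples above
+        # (digests shortened for readability).
+        #
+        #   {
+        #     "Id": "sha256:ec3f0931a6e6...",
+        #     "RepoTags": ["example:1.0", "example:latest"],
+        #     "RepoDigests": ["example@sha256:afcc7f1a..."],
+        #     "Created": "2022-02-04T21:20:12.497794809Z",
+        #     "Architecture": "arm",
+        #     "Variant": "v7",
+        #     "Os": "linux"
+        #   }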
+ type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 1239828 + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. 
+ + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds sinds EPOCH). + type: "integer" + x-nullable: false + example: "1644009612" + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: |- + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 172064416 + Labels: + description: "User-defined key/value metadata." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + This size is not calculated by default, and depends on which API endpoint + is used. `-1` indicates that the value has not been set / calculated. + x-nullable: false + type: "integer" + example: 2 + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." 
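+        # Not part of the upstream definition: an illustrative Volume object as it
+        # might appear in an inspect response, assembled from the field examples
+        # above. Values are examples only.
+        #
+        #   {
+        #     "Name": "tardis",
+        #     "Driver": "custom",
+        #     "Mountpoint": "/var/lib/docker/volumes/tardis",
+        #     "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+        #     "Labels": {"com.example.some-label": "some-value"},
+        #     "Scope": "local",
+        #     "Options": {}
+        #   }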
+ x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. 
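+        # Not part of the upstream definition: an illustrative volume-create request
+        # body based on the VolumeCreateOptions fields above. Driver options are
+        # passed through to the driver and are examples only.
+        #
+        #   {
+        #     "Name": "tardis",
+        #     "Driver": "local",
+        #     "DriverOpts": {"type": "tmpfs", "device": "tmpfs", "o": "size=100m,uid=1000"},
+        #     "Labels": {"com.example.some-label": "some-value"}
+        #   }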
+ items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + IPRange: + type: "string" + Gateway: + type: "string" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. + type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. 
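+      # Not part of the upstream definition: an illustrative IPAM object matching
+      # the IPAM/IPAMConfig definitions earlier in this block. Subnet and gateway
+      # are example values.
+      #
+      #   {
+      #     "Driver": "default",
+      #     "Config": [{"Subnet": "172.19.0.0/16", "Gateway": "172.19.0.1"}],
+      #     "Options": {}
+      #   }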
+ example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. 
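+        # Not part of the upstream definition: an illustrative EndpointSettings
+        # fragment as it might be supplied when connecting a container to a
+        # network, using only the configuration fields above. Values are examples.
+        #
+        #   {
+        #     "IPAMConfig": {"IPv4Address": "172.20.30.33"},
+        #     "Aliases": ["server_x"],
+        #     "MacAddress": "02:42:ac:11:00:04"
+        #   }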
+ type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." 
+ Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." 
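+ # Illustrative sketch, not part of the upstream spec: Plugin objects with the
+ # Settings and Config described here are returned by `GET /plugins` and
+ # `GET /plugins/{name}/json`; the privileges a plugin asks for can be listed
+ # before installing it, e.g.:
+ #   curl --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.44/plugins/privileges?remote=tiborvass/sample-volume-plugin:latest"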
+ type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. 
In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
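+ # Illustrative sketch, not part of the upstream spec: ObjectVersion is how the
+ # API implements this optimistic locking. For example, updating a node requires
+ # sending back the `Version.Index` read from `GET /nodes/{id}` (the ID and index
+ # below are the example values used elsewhere in this file):
+ #   curl --unix-socket /var/run/docker.sock -X POST \
+ #     -H "Content-Type: application/json" \
+ #     -d '{"Name":"node-name","Role":"manager","Availability":"active"}' \
+ #     "http://localhost/v1.44/nodes/24ifsmvkjbyhk/update?version=373531"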
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." 
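+ # Illustrative sketch, not part of the upstream spec: NodeDescription, NodeStatus
+ # and ManagerStatus are reported per node by the node-list endpoint, optionally
+ # filtered, e.g.:
+ #   curl --unix-socket /var/run/docker.sock -G \
+ #     --data-urlencode 'filters={"role":["manager"]}' \
+ #     http://localhost/v1.44/nodes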
+ type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. 
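+ # Illustrative sketch, not part of the upstream spec: a SwarmSpec is carried in
+ # the `Spec` field of `POST /swarm/init` and is the body of `POST /swarm/update`.
+ # A common pattern is to read the current spec and version from `GET /swarm`,
+ # modify it (here saved as a hypothetical modified-spec.json), and post it back:
+ #   curl --unix-socket /var/run/docker.sock http://localhost/v1.44/swarm
+ #   curl --unix-socket /var/run/docker.sock -X POST \
+ #     -H "Content-Type: application/json" -d @modified-spec.json \
+ #     "http://localhost/v1.44/swarm/update?version=<Version.Index from GET /swarm>"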
+ type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selectd log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. 
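+ # Illustrative sketch, not part of the upstream spec: the JoinTokens above are
+ # returned by `GET /swarm` on a manager and consumed by `POST /swarm/join` on
+ # the joining node (address and token below are placeholder example values):
+ #   curl --unix-socket /var/run/docker.sock -X POST \
+ #     -H "Content-Type: application/json" \
+ #     -d '{"RemoteAddrs":["192.0.2.10:2377"],"JoinToken":"SWMTKN-1-..."}' \
+ #     http://localhost/v1.44/swarm/join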
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. 
The + secret in the reference will be identified by its ID. + type: "string" + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namedspaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as the + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resources limits." + $ref: "#/definitions/Limit" + Reservations: + description: "Define resources reservation." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Windows is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + label descriptor, such as `engine.labels.az`. 
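+ # Illustrative sketch, not part of the upstream spec: placement constraints and
+ # preferences are set under `TaskTemplate.Placement` when creating or updating a
+ # service, mirroring the fields defined above, e.g.:
+ #   "Placement": {
+ #     "Constraints": ["node.role==worker", "node.labels.type==production"],
+ #     "Preferences": [{"Spread": {"SpreadDescriptor": "node.labels.datacenter"}}]
+ #   }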
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. 
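+ # Illustrative sketch, not part of the upstream spec: Task objects in the states
+ # enumerated above are listed by `GET /tasks`, optionally filtered, e.g.:
+ #   curl --unix-socket /var/run/docker.sock -G \
+ #     --data-urlencode 'filters={"desired-state":["running"]}' \
+ #     http://localhost/v1.44/tasks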
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
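+ # Illustrative sketch, not part of the upstream spec: a ServiceSpec like the one
+ # defined here is the request body of `POST /services/create`; a minimal
+ # replicated service could be created with:
+ #   curl --unix-socket /var/run/docker.sock -X POST \
+ #     -H "Content-Type: application/json" \
+ #     -d '{"Name":"web","TaskTemplate":{"ContainerSpec":{"Image":"nginx:alpine"}},"Mode":{"Replicated":{"Replicas":2}}}' \
+ #     http://localhost/v1.44/services/create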
+ type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +
+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. 
Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. 
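+ # Illustrative sketch, not part of the upstream spec: ImageDeleteResponseItem
+ # entries (defined above) are returned as an array by `DELETE /images/{name}`;
+ # a response of roughly this shape can be expected:
+ #   curl --unix-socket /var/run/docker.sock -X DELETE \
+ #     http://localhost/v1.44/images/nginx:alpine
+ #   [{"Untagged":"nginx:alpine"},{"Deleted":"sha256:..."}]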
+ type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerSummary: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. 
If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + config data. + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." 
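+ # Illustrative sketch, not part of the upstream spec: the ContainerState defined
+ # above is returned as the `.State` field of `GET /containers/{id}/json`
+ # (`my-ctr` is a placeholder name; jq is used only to pick out the status):
+ #   curl --unix-socket /var/run/docker.sock \
+ #     http://localhost/v1.44/containers/my-ctr/json | jq '.State.Status'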
+ type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "19.03.12" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "19.03.12" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.40" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.12" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.13.14" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "4.19.76-linuxkit" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. 
+ type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +


+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. 
+ type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSVersion: + description: | + Version of the host's operating system + +


+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +


+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "24.0.2" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + no-new-privileges. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. 
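The `SystemInfo` fields above map directly onto the payload of `GET /info`. A minimal sketch, under the same assumptions as before (daemon on `tcp://localhost:2375`, `requests` installed), that prints a few of the documented fields:

```python
# Minimal sketch: read selected SystemInfo fields via GET /info.
import requests

BASE = "http://localhost:2375/v1.44"  # hypothetical daemon address

info = requests.get(f"{BASE}/info").json()
print("Server version :", info.get("ServerVersion"))
print("Storage driver :", info.get("Driver"))
print("Cgroup driver  :", info.get("CgroupDriver"), "cgroup v" + info.get("CgroupVersion", "1"))
print("Default runtime:", info.get("DefaultRuntime"))
print("Security opts  :", ", ".join(info.get("SecurityOptions") or []))
print("Swarm state    :", (info.get("Swarm") or {}).get("LocalNodeState"))
```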
+ type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that since using CDI device injection requires the daemon to have + experimental enabled. For non-experimental daemons an empty list will + always be returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +


+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts.
+ type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. 
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. 
+ type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.docker.distribution.manifest.v2+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 3987495 + # TODO Not yet including these fields for now, as they are nil / omitted in our response. + # urls: + # description: | + # List of URLs from which this object MAY be downloaded. + # type: "array" + # items: + # type: "string" + # format: "uri" + # annotations: + # description: | + # Arbitrary metadata relating to the targeted content. + # type: "object" + # additionalProperties: + # type: "string" + # platform: + # $ref: "#/definitions/OCIPlatform" + + OCIPlatform: + type: "object" + x-go-name: Platform + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). 
+ type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. 
+ additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. 
+ properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. 
+ + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: +
NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - 
HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + $ref: "#/definitions/ContainerState" + Image: + description: "The container's image ID" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + Platform: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: | + The size of files that have been created or changed by this + container. + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Healthcheck: + Test: ["CMD-SHELL", "exit 0"] + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "overlay2" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + IpcMode: "" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" 
+ RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + Health: + Status: "healthy" + FailingStreak: 0 + Log: + - Start: "2019-12-22T10:59:05.6385933Z" + End: "2019-12-22T10:59:05.8078452Z" + ExitCode: 0 + Output: "" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: | + Each process running in the container, where each is process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. 
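A hedged sketch of the logs endpoint just described, assuming the usual `tcp://localhost:2375` daemon, the `requests` package, and a container named `api-example` that was created without a TTY, so the body uses the multiplexed stream format referenced above (8-byte frame headers):

```python
# Minimal sketch: fetch and de-multiplex logs from GET /containers/{id}/logs.
import struct
import requests

BASE = "http://localhost:2375/v1.44"  # hypothetical daemon address

raw = requests.get(f"{BASE}/containers/api-example/logs",
                   params={"stdout": "true", "stderr": "true", "tail": "50"}).content

offset = 0
while offset + 8 <= len(raw):
    # Frame header: 1 byte stream type, 3 padding bytes, 4-byte big-endian length.
    stream_type, frame_len = struct.unpack(">B3xI", raw[offset:offset + 8])
    payload = raw[offset + 8:offset + 8 + frame_len]
    label = "stdout" if stream_type == 1 else "stderr"
    print(label, payload.decode(errors="replace"), end="")
    offset += 8 + frame_len
```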
+ schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. 
+ + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. 
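For illustration, a minimal Go sketch that applies the formulas above to a single decoded stats sample. The struct below models only the fields the calculation needs and is an assumption, not the complete response schema; the JSON literal in `main` is fabricated for the example.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type cpuStats struct {
	CPUUsage struct {
		TotalUsage  uint64   `json:"total_usage"`
		PercpuUsage []uint64 `json:"percpu_usage"`
	} `json:"cpu_usage"`
	SystemCPUUsage uint64 `json:"system_cpu_usage"`
	OnlineCPUs     uint64 `json:"online_cpus"`
}

// statsSample is a hypothetical subset of the stats response.
type statsSample struct {
	CPUStats    cpuStats `json:"cpu_stats"`
	PreCPUStats cpuStats `json:"precpu_stats"`
	MemoryStats struct {
		Usage uint64            `json:"usage"`
		Limit uint64            `json:"limit"`
		Stats map[string]uint64 `json:"stats"`
	} `json:"memory_stats"`
}

// usagePercent returns CPU and memory usage percentages using the formulas
// documented for the stats endpoint.
func usagePercent(raw []byte) (cpuPct, memPct float64, err error) {
	var s statsSample
	if err = json.Unmarshal(raw, &s); err != nil {
		return 0, 0, err
	}

	// used_memory = memory_stats.usage - memory_stats.stats.cache
	usedMemory := float64(s.MemoryStats.Usage - s.MemoryStats.Stats["cache"])
	memPct = usedMemory / float64(s.MemoryStats.Limit) * 100.0

	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
	numberCPUs := float64(s.CPUStats.OnlineCPUs)
	if numberCPUs == 0 {
		// Fall back to the percpu_usage length for older daemons.
		numberCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	cpuPct = cpuDelta / systemDelta * numberCPUs * 100.0
	return cpuPct, memPct, nil
}

func main() {
	sample := []byte(`{
	 "cpu_stats":    {"cpu_usage": {"total_usage": 100215355}, "system_cpu_usage": 739306590000000, "online_cpus": 4},
	 "precpu_stats": {"cpu_usage": {"total_usage": 100093996}, "system_cpu_usage": 739306580000000, "online_cpus": 4},
	 "memory_stats": {"usage": 6537216, "limit": 67108864, "stats": {"cache": 0}}
	}`)
	cpu, mem, err := usagePercent(sample)
	if err != nil {
		panic(err)
	}
	fmt.Printf("cpu %.4f%%  mem %.2f%%\n", cpu, mem)
}
```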
+ type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). 
+ type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. 
+ + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). 
+ + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards.
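The five numbered steps above map directly onto code. The following Go sketch is one possible demultiplexer for the non-TTY stream format; the fabricated two-frame input in `main` exists only to make the example self-contained.

```go
package main

import (
	"encoding/binary"
	"io"
	"os"
	"strings"
)

// demux reads multiplexed frames from r and writes each payload to stdout
// or stderr according to the STREAM_TYPE byte of the 8-byte header.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		// 1. Read 8 bytes.
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		// 2. Choose stdout or stderr depending on the first byte.
		out := stdout
		if header[0] == 2 {
			out = stderr
		}
		// 3. Extract the frame size from the last four bytes (big endian uint32).
		size := binary.BigEndian.Uint32(header[4:8])
		// 4. Read the extracted size and output it on the correct output.
		if _, err := io.CopyN(out, r, int64(size)); err != nil {
			return err
		}
		// 5. Goto 1 (next iteration).
	}
}

func main() {
	// A fabricated stream: one 3-byte stdout frame and one 5-byte stderr frame.
	frames := string([]byte{1, 0, 0, 0, 0, 0, 0, 3}) + "hi\n" +
		string([]byte{2, 0, 0, 0, 0, 0, 0, 5}) + "oops\n"
	if err := demux(strings.NewReader(frames), os.Stdout, os.Stderr); err != nil {
		panic(err)
	}
}
```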
+ type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exited." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8.
Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." 
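As a rough sketch of consuming the `X-Docker-Container-Path-Stat` header in Go: the value is treated here as standard base64 wrapping a small JSON object, which is an assumption based on the description above; decoding into a generic map avoids guessing the exact field names.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// decodePathStat decodes an X-Docker-Container-Path-Stat header value into
// a generic map so no particular field list is assumed.
func decodePathStat(header string) (map[string]any, error) {
	raw, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		return nil, err
	}
	var stat map[string]any
	if err := json.Unmarshal(raw, &stat); err != nil {
		return nil, err
	}
	return stat, nil
}

func main() {
	// A fabricated header value, used only to make the sketch runnable.
	encoded := base64.StdEncoding.EncodeToString([]byte(`{"name":"data","size":4096}`))
	stat, err := decodePathStat(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(stat)
}
```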
+ schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into." + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list.
+ + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure."
+ type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
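For illustration, a small Go sketch assembling the `buildargs` query value and the `X-Registry-Config` header described above. The image name is a placeholder, the credentials are the dummy ones from the example above, and standard base64 is assumed for the header encoding.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// buildargs is a JSON map of string pairs, URI-component encoded into the query.
	args, _ := json.Marshal(map[string]string{"FOO": "bar"})
	query := url.Values{}
	query.Set("buildargs", string(args))
	query.Set("t", "example/app:latest") // placeholder image name
	fmt.Println("POST /build?" + query.Encode())

	// X-Registry-Config is a base64-encoded JSON object keyed by registry URL.
	// The credentials below are the dummy values from the example above.
	auth := map[string]map[string]string{
		"docker.example.com": {"username": "janedoe", "password": "hunter2"},
	}
	raw, _ := json.Marshal(auth)
	fmt.Println("X-Registry-Config: " + base64.StdEncoding.EncodeToString(raw))
}
```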
+ type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image."
+ type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + +


+ + > **Deprecated**: This field is deprecated and will always + > be "false" in the future. + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "busybox" + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` (deprecated, see below) + - `is-official=(true|false)` + - `stars=<number>` Matches images that have at least 'number' stars. + + The `is-automated` filter is deprecated. The `is_automated` field has + been deprecated by Docker Hub's search API. Consequently, searching + for `is-automated=true` will yield no results. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without a password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully."
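Filter parameters such as the ones above are sent as a JSON-encoded `map[string][]string` in the query string. Below is a short Go sketch of building such a value for `/images/prune`; the label value is borrowed from the label examples earlier in this file and the API version prefix is an assumption.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Each filter name maps to a list of values.
	filters := map[string][]string{
		"dangling": {"true"},
		"label":    {"com.example.vendor=Acme"},
	}
	raw, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}
	query := url.Values{}
	query.Set("filters", string(raw))
	fmt.Println("POST /v1.44/images/prune?" + query.Encode())
}
```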
+ schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: 
"Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. 
Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: + Detach: false + Tty: true + ConsoleSize: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." 
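The exec flow described above takes two calls: `POST /containers/{id}/exec` to create the instance, then `POST /exec/{id}/start` to run it. A minimal sketch follows; `my-container` is a placeholder, the socket path and `/v1.44` prefix are assumptions, and error handling is kept deliberately thin.

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	// 1. Create the exec instance (ExecConfig body as documented above).
	execConfig, _ := json.Marshal(map[string]any{
		"AttachStdout": true,
		"AttachStderr": true,
		"Cmd":          []string{"date"},
	})
	resp, err := client.Post("http://localhost/v1.44/containers/my-container/exec",
		"application/json", bytes.NewReader(execConfig))
	if err != nil {
		panic(err)
	}
	var created struct{ Id string } // IdResponse
	json.NewDecoder(resp.Body).Decode(&created)
	resp.Body.Close()

	// 2. Start it without detaching and read the output stream.
	start, _ := json.Marshal(map[string]any{"Detach": false, "Tty": false})
	resp, err = client.Post("http://localhost/v1.44/exec/"+created.Id+"/start",
		"application/json", bytes.NewReader(start))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// With Tty=false the stream is multiplexed (framed); a real client would
	// demultiplex it, but printing the raw bytes is enough for a sketch.
	out, _ := io.ReadAll(resp.Body)
	fmt.Printf("%q\n", out)
}
```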
+ operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. + - `label=` or `label=:` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. 
Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. 
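Stepping back to the volume endpoints above before the network section continues, here is a sketch of the basic lifecycle: create a named volume, then remove it. The volume name is a placeholder, and the socket path and `/v1.44` prefix are assumptions.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"strings"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	// POST /volumes/create with a minimal VolumeCreateOptions body.
	resp, err := client.Post("http://localhost/v1.44/volumes/create",
		"application/json", strings.NewReader(`{"Name": "example-vol", "Driver": "local"}`))
	if err != nil {
		panic(err)
	}
	created, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println("created:", string(created))

	// DELETE /volumes/{name} removes it again (force defaults to false).
	req, _ := http.NewRequest(http.MethodDelete,
		"http://localhost/v1.44/volumes/example-vol", nil)
	resp, err = client.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("delete status:", resp.Status) // 204 when the volume was removed
}
```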
For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
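A small sketch of the network list call described above, using the JSON-encoded `filters` parameter to match a driver. The struct fields decoded here correspond to fields shown in the example response above; socket path and `/v1.44` prefix are assumptions.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	// filters is a map[string][]string, as documented above.
	filters, _ := json.Marshal(map[string][]string{"driver": {"bridge"}})
	q := url.Values{}
	q.Set("filters", string(filters))

	resp, err := client.Get("http://localhost/v1.44/networks?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var networks []struct {
		Name   string
		Id     string
		Driver string
		Scope  string
	}
	if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil {
		panic(err)
	}
	for _, n := range networks {
		fmt.Printf("%s  %s  driver=%s scope=%s\n", n.Id[:12], n.Name, n.Driver, n.Scope)
	}
}
```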
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: | + Deprecated: CheckDuplicate is now always enabled. + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." 
+ type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `node.label=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/_vendor/github.com/moby/moby/docs/api/version-history.md b/_vendor/github.com/moby/moby/docs/api/version-history.md index da04e3f2d..94c821d7e 100644 --- a/_vendor/github.com/moby/moby/docs/api/version-history.md +++ b/_vendor/github.com/moby/moby/docs/api/version-history.md @@ -13,6 +13,73 @@ keywords: "API, Docker, rcli, REST, documentation" will be rejected. --> +## v1.44 API changes + +[Docker Engine API v1.44](https://docs.docker.com/engine/api/v1.44/) documentation + +* GET `/images/json` now accepts an `until` filter. This accepts a timestamp and + lists all images created before it. The `` can be Unix timestamps, + date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) + computed relative to the daemon machine’s time. This change is not versioned, + and affects all API versions if the daemon has this patch. 
+* The `VirtualSize` field in the `GET /images/{name}/json`, `GET /images/json`, + and `GET /system/df` responses is now omitted. Use the `Size` field instead, + which contains the same information. +* Deprecated: The `is_automated` field in the `GET /images/search` response has + been deprecated and will always be set to `false` in the future because Docker + Hub is deprecating the `is_automated` field in its search API. The deprecation + is not versioned, and applies to all API versions. +* Deprecated: The `is-automated` filter for the `GET /images/search` endpoint. + The `is_automated` field has been deprecated by Docker Hub's search API. + Consequently, searching for `is-automated=true` will yield no results. The + deprecation is not versioned, and applies to all API versions. +* Read-only bind mounts are now made recursively read-only on kernel >= 5.12 + with runtimes that support the feature. + `POST /containers/create`, `GET /containers/{id}/json`, and `GET /containers/json` now support + `BindOptions.ReadOnlyNonRecursive` and `BindOptions.ReadOnlyForceRecursive` to customize the behavior. +* `POST /containers/create` now accepts a `HealthConfig.StartInterval` to set the + interval for health checks during the start period. +* `GET /info` now includes a `CDISpecDirs` field indicating the configured CDI + specifications directories. Using this setting requires the daemon to have + experimental features enabled; for non-experimental daemons, an empty list is + always returned. +* `POST /networks/create` now returns a 400 if the `IPAMConfig` has invalid + values. Note that this change is _unversioned_ and applies to all API + versions on daemons that support version 1.44. +* `POST /networks/create` with a duplicated name now fails systematically. As + such, the `CheckDuplicate` field is now deprecated. Note that this change is + _unversioned_ and applies to all API versions on daemons that support version + 1.44. +* `POST /containers/create` now accepts multiple `EndpointSettings` in + `NetworkingConfig.EndpointSettings`. +* `POST /containers/create` and `POST /networks/{id}/connect` will now catch + validation errors that were previously only returned during `POST /containers/{id}/start`. + These endpoints will also return the full set of validation errors they find, + instead of returning only the first one. + Note that this change is _unversioned_ and applies to all API versions. +* `POST /services/create` and `POST /services/{id}/update` now accept `Seccomp` + and `AppArmor` fields in the `ContainerSpec.Privileges` object. This allows + some configuration of Seccomp and AppArmor in Swarm services. +* A new endpoint-specific `MacAddress` field has been added to `NetworkSettings.EndpointSettings` + on `POST /containers/create`, and to `EndpointConfig` on `POST /networks/{id}/connect`. + The container-wide `MacAddress` field in `Config`, on `POST /containers/create`, is now deprecated. +* The field `Networks` in the `POST /services/create` and `POST /services/{id}/update` + requests is now deprecated. You should instead use the field `TaskTemplate.Networks`. +* The `Container` and `ContainerConfig` fields in the `GET /images/{name}/json` + response are deprecated and will no longer be included in API v1.45. +* `GET /info` now includes `status` properties in `Runtimes`. +* A new field named `DNSNames`, containing all non-fully-qualified DNS names + a container takes on a specific network, has been added to `GET /containers/{name:.*}/json`.
+* The `Aliases` field returned in calls to `GET /containers/{name:.*}/json` in v1.44 and older + versions contains the short container ID. This will change in the next API version, v1.45. + Starting with that API version, this specific value will be removed from the `Aliases` field + such that this field will reflect exactly the values originally submitted to the + `POST /containers/create` endpoint. The newly introduced `DNSNames` should now be used instead. +* The fields `HairpinMode`, `LinkLocalIPv6Address`, `LinkLocalIPv6PrefixLen`, `SecondaryIPAddresses`, + `SecondaryIPv6Addresses` available in `NetworkSettings` when calling `GET /containers/{id}/json` are + deprecated and will be removed in a future release. You should instead look for the default network in + `NetworkSettings.Networks`. + ## v1.43 API changes [Docker Engine API v1.43](https://docs.docker.com/engine/api/v1.43/) documentation @@ -81,7 +148,7 @@ keywords: "API, Docker, rcli, REST, documentation" a default. This change is not versioned, and affects all API versions if the daemon has - this patch. + this patch. * `GET /_ping` and `HEAD /_ping` now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. @@ -104,7 +171,7 @@ keywords: "API, Docker, rcli, REST, documentation" versioned, and affects all API versions if the daemon has this patch. * `GET /containers/{id}/attach`, `GET /exec/{id}/start`, `GET /containers/{id}/logs` `GET /services/{id}/logs` and `GET /tasks/{id}/logs` now set Content-Type header - to `application/vnd.docker.multiplexed-stream` when a multiplexed stdout/stderr + to `application/vnd.docker.multiplexed-stream` when a multiplexed stdout/stderr stream is sent to client, `application/vnd.docker.raw-stream` otherwise. * `POST /volumes/create` now accepts a new `ClusterVolumeSpec` to create a cluster volume (CNI). This option can only be used if the daemon is a Swarm manager. @@ -117,7 +184,7 @@ keywords: "API, Docker, rcli, REST, documentation" * Volume information returned by `GET /volumes/{name}`, `GET /volumes` and `GET /system/df` can now contain a `ClusterVolume` if the volume is a cluster volume (requires the daemon to be a Swarm manager). -* The `Volume` type, as returned by `Added new `ClusterVolume` fields +* The `Volume` type, as returned by `Added new `ClusterVolume` fields * Added a new `PUT /volumes{name}` endpoint to update cluster volumes (CNI). Cluster volumes are only supported if the daemon is a Swarm manager. * `GET /containers/{name}/attach/ws` endpoint now accepts `stdin`, `stdout` and @@ -333,7 +400,7 @@ keywords: "API, Docker, rcli, REST, documentation" [Docker Engine API v1.36](https://docs.docker.com/engine/api/v1.36/) documentation -* `Get /events` now return `exec_die` event when an exec process terminates. +* `Get /events` now return `exec_die` event when an exec process terminates. ## v1.35 API changes @@ -541,7 +608,7 @@ keywords: "API, Docker, rcli, REST, documentation" * `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocate a pseudo-TTY in container. * `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS related configurations in resolver configuration file (resolv.conf) through `Nameservers`, `Search`, and `Options`. 
* `POST /services/create` and `POST /services/(id or name)/update` now support - `node.platform.arch` and `node.platform.os` constraints in the services + `node.platform.arch` and `node.platform.os` constraints in the services `TaskSpec.Placement.Constraints` field. * `GET /networks/(id or name)` now includes IP and name of all peers nodes for swarm mode overlay networks. * `GET /plugins` list plugins. diff --git a/_vendor/modules.txt b/_vendor/modules.txt index 9ac03f787..a45492c52 100644 --- a/_vendor/modules.txt +++ b/_vendor/modules.txt @@ -1,6 +1,6 @@ -# github.com/moby/moby v24.0.8-0.20240109122856-854ca341c0f6+incompatible +# github.com/moby/moby v25.0.0+incompatible # github.com/moby/buildkit v0.13.0-beta1.0.20240116143623-28ce478b1fde # github.com/docker/buildx v0.12.1 # github.com/docker/scout-cli v1.3.0 -# github.com/docker/cli v25.0.0-rc.3+incompatible -# github.com/docker/compose/v2 v2.24.1 +# github.com/docker/cli v25.0.1-0.20240119143135-01f933261885+incompatible +# github.com/docker/compose/v2 v2.24.2-0.20240119115212-388169011f47 diff --git a/assets/images/docs-logo-white-full.svg b/assets/images/docs-logo-white-full.svg new file mode 100644 index 000000000..54c1ee901 --- /dev/null +++ b/assets/images/docs-logo-white-full.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + diff --git a/content/build/building/packaging.md b/content/build/building/packaging.md index 2f230637c..ef393333a 100644 --- a/content/build/building/packaging.md +++ b/content/build/building/packaging.md @@ -38,7 +38,7 @@ Some projects may need distinct Dockerfiles for specific purposes. A common convention is to name these `.Dockerfile`. You can specify the Dockerfile filename using the `--file` flag for the `docker build` command. Refer to the -[`docker build` CLI reference](../../engine/reference/commandline/build.md#file) +[`docker build` CLI reference](../../engine/reference/commandline/image_build.md#file) to learn about the `--file` flag. > **Note** diff --git a/content/build/guide/export.md b/content/build/guide/export.md index 79b086914..674cf90a8 100644 --- a/content/build/guide/export.md +++ b/content/build/guide/export.md @@ -103,7 +103,7 @@ This is explored later on in this guide. Related information: -- [`docker build --output` CLI reference](../../engine/reference/commandline/build.md#output) +- [`docker build --output` CLI reference](../../engine/reference/commandline/image_build.md#output) - [Build exporters](../exporters/index.md) ## Next steps diff --git a/content/build/guide/intro.md b/content/build/guide/intro.md index 743c571a4..5a55da8cf 100644 --- a/content/build/guide/intro.md +++ b/content/build/guide/intro.md @@ -160,8 +160,8 @@ container image and created a container from it. Related information: - [Dockerfile reference](../../engine/reference/builder.md) -- [`docker build` CLI reference](../../engine/reference/commandline/build.md) -- [`docker run` CLI reference](../../engine/reference/commandline/run.md) +- [`docker build` CLI reference](../../engine/reference/commandline/image_build.md) +- [`docker run` CLI reference](../../engine/reference/commandline/container_run.md) ## Next steps diff --git a/content/compose/compose-file/compose-file-v2.md b/content/compose/compose-file/compose-file-v2.md index ba7511046..76b08dabd 100644 --- a/content/compose/compose-file/compose-file-v2.md +++ b/content/compose/compose-file/compose-file-v2.md @@ -273,7 +273,7 @@ An entry with the ip address and hostname is created in `/etc/hosts` inside cont Specify a build’s container isolation technology. 
On Linux, the only supported value is `default`. On Windows, acceptable values are `default`, `process` and `hyperv`. Refer to the -[Docker Engine docs](../../engine/reference/commandline/run.md#isolation) +[Docker Engine docs](../../engine/reference/commandline/container_run.md#isolation) for details. If unspecified, Compose will use the `isolation` value found in the service's definition @@ -778,7 +778,7 @@ host system to be added. An example of where this is useful is when multiple containers (running as different users) need to all read or write the same file on the host system. That file can be owned by a group shared by all the containers, and specified in `group_add`. See the -[Docker documentation](../../engine/reference/run.md#additional-groups) for more +[Docker documentation](../../engine/reference/commandline/container_run.md#additional-groups) for more details. A full example: @@ -900,7 +900,7 @@ services: Specify a container’s isolation technology. On Linux, the only supported value is `default`. On Windows, acceptable values are `default`, `process` and `hyperv`. Refer to the -[Docker Engine docs](../../engine/reference/commandline/run.md#isolation) +[Docker Engine docs](../../engine/reference/commandline/container_run.md#isolation) for details. ### labels @@ -1557,7 +1557,7 @@ restart: "unless-stopped" ### cpu_count, cpu_percent, cpu\_shares, cpu\_period, cpu\_quota, cpus, cpuset, domainname, hostname, ipc, mac\_address, mem\_limit, memswap\_limit, mem\_swappiness, mem\_reservation, oom_kill_disable, oom_score_adj, privileged, read\_only, shm\_size, stdin\_open, tty, user, working\_dir Each of these is a single value, analogous to its -[docker run](../../engine/reference/run.md#runtime-constraints-on-resources) counterpart. +[docker run](../../engine/reference/commandline/container_run.md#runtime-constraints-on-resources) counterpart. > Added in [version 2.2](compose-versioning.md#version-22) file format. > @@ -1984,4 +1984,4 @@ networks: - [Installing Compose](../install/index.md) - [Compose file versions and upgrading](compose-versioning.md) - [Sample apps with Compose](../samples-for-compose.md) -- [Command line reference](../reference/index.md) \ No newline at end of file +- [Command line reference](../reference/index.md) diff --git a/content/compose/compose-file/compose-file-v3.md b/content/compose/compose-file/compose-file-v3.md index 928a910ed..29f5a80cb 100644 --- a/content/compose/compose-file/compose-file-v3.md +++ b/content/compose/compose-file/compose-file-v3.md @@ -1339,7 +1339,7 @@ services: Specify a container’s isolation technology. On Linux, the only supported value is `default`. On Windows, acceptable values are `default`, `process` and `hyperv`. Refer to the -[Docker Engine docs](../../engine/reference/commandline/run.md#isolation) +[Docker Engine docs](../../engine/reference/commandline/container_run.md#isolation) for details. ### labels @@ -1904,7 +1904,7 @@ sysctls: You can only use sysctls that are namespaced in the kernel. Docker does not support changing sysctls inside a container that also modify the host system. For an overview of supported sysctls, refer to -[configure namespaced kernel parameters (sysctls) at runtime](../../engine/reference/commandline/run.md#sysctl). +[configure namespaced kernel parameters (sysctls) at runtime](../../engine/reference/commandline/container_run.md#sysctl). 
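For example, a namespaced parameter such as `net.core.somaxconn` can be set at runtime with the `--sysctl` flag of `docker run`; a minimal sketch, with the image and value chosen only for illustration:

```console
$ docker run --rm --sysctl net.core.somaxconn=1024 busybox sysctl net.core.somaxconn
net.core.somaxconn = 1024
```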
> Note when using docker stack deploy > @@ -2151,7 +2151,7 @@ services: ### domainname, hostname, ipc, mac\_address, privileged, read\_only, shm\_size, stdin\_open, tty, user, working\_dir Each of these is a single value, analogous to its -[docker run](../../engine/reference/run.md) counterpart. Note that `mac_address` is a legacy option. +[docker run](../../engine/reference/commandline/container_run.md) counterpart. Note that `mac_address` is a legacy option. ```yaml user: postgresql diff --git a/content/compose/release-notes.md b/content/compose/release-notes.md index 62e3e0930..aec8c2918 100644 --- a/content/compose/release-notes.md +++ b/content/compose/release-notes.md @@ -3245,8 +3245,8 @@ Several new configuration keys have been added to `docker-compose.yml`: - `pid: host`, like `docker run --pid=host`, lets you reuse the same PID namespace as the host machine. - `cpuset`, like `docker run --cpuset-cpus`, lets you specify which CPUs to allow execution in. - `read_only`, like `docker run --read-only`, lets you mount a container's filesystem as read-only. -- `security_opt`, like `docker run --security-opt`, lets you specify [security options](/engine/reference/run/#security-configuration). -- `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](/engine/reference/run/#logging-drivers---log-driver). +- `security_opt`, like `docker run --security-opt`, lets you specify [security options](/engine/reference/commandline/container_run/#security-opt). +- `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](/engine/reference/commandline/container_run/#log-driver). ### Bug fixes diff --git a/content/config/containers/logging/configure.md b/content/config/containers/logging/configure.md index 337307849..7b781689e 100644 --- a/content/config/containers/logging/configure.md +++ b/content/config/containers/logging/configure.md @@ -3,9 +3,11 @@ description: Learn how to configure logging driver for the Docker daemon keywords: docker, logging, driver title: Configure logging drivers aliases: + - /config/containers/logging/logentries/ - /engine/reference/logging/overview/ - /engine/reference/logging/ - /engine/admin/reference/logging/ + - /engine/admin/logging/logentries/ - /engine/admin/logging/overview/ --- @@ -196,7 +198,6 @@ see more options. | [`splunk`](splunk.md) | Writes log messages to `splunk` using the HTTP Event Collector. | | [`etwlogs`](etwlogs.md) | Writes log messages as Event Tracing for Windows (ETW) events. Only available on Windows platforms. | | [`gcplogs`](gcplogs.md) | Writes log messages to Google Cloud Platform (GCP) Logging. | -| [`logentries`](logentries.md) ([deprecated](../../../../engine/deprecated.md#logentries-logging-driver)) | Writes log messages to Rapid7 Logentries. | ## Limitations of logging drivers diff --git a/content/config/containers/logging/logentries.md b/content/config/containers/logging/logentries.md deleted file mode 100644 index 48b329753..000000000 --- a/content/config/containers/logging/logentries.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Logentries logging driver (deprecated) -description: Learn how to use the logentries logging driver with Docker Engine -keywords: logentries, docker, logging, driver -aliases: - - /engine/admin/logging/logentries/ ---- - -> **Deprecated** -> -> The logentries service is no longer in operation since November 15, 2022, -> and the logentries driver [has been deprecated](../../../engine/deprecated.md#logentries-logging-driver). 
-> -> This driver will be removed in Docker Engine v25.0, and you must migrate to -> a supported logging driver before upgrading to Docker Engine v25.0. Read the -> [Configure logging drivers](configure.md) page for an overview of supported -> logging drivers. -{ .warning } - - -The `logentries` logging driver sends container logs to the -[Logentries](https://logentries.com/) server. - -## Usage - -Some options are supported by specifying `--log-opt` as many times as needed: - -- `logentries-token`: specify the Logentries log set token -- `line-only`: send raw payload only - -Configure the default logging driver by passing the -`--log-driver` option to the Docker daemon: - -```console -$ dockerd --log-driver=logentries -``` - -To set the logging driver for a specific container, pass the -`--log-driver` option to `docker run`: - -```console -$ docker run --log-driver=logentries ... -``` - -Before using this logging driver, you need to create a new Log Set in the -Logentries web interface and pass the token of that log set to Docker: - -```console -$ docker run --log-driver=logentries --log-opt logentries-token=abcd1234-12ab-34cd-5678-0123456789ab -``` - -## Options - -Users can use the `--log-opt NAME=VALUE` flag to specify additional Logentries logging driver options. - -### logentries-token - -You need to provide your log set token for the Logentries driver to work: - -```console -$ docker run --log-driver=logentries --log-opt logentries-token=abcd1234-12ab-34cd-5678-0123456789ab -``` - -### line-only - -You could specify whether to send log message wrapped into container data (default) or to send raw log line - -```console -$ docker run --log-driver=logentries --log-opt logentries-token=abcd1234-12ab-34cd-5678-0123456789ab --log-opt line-only=true -``` diff --git a/content/config/containers/resource_constraints.md b/content/config/containers/resource_constraints.md index c8be3e9ab..f440ab3c8 100644 --- a/content/config/containers/resource_constraints.md +++ b/content/config/containers/resource_constraints.md @@ -15,7 +15,7 @@ on when you should set such limits and the possible implications of setting them Many of these features require your kernel to support Linux capabilities. To check for support, you can use the -[`docker info`](../../engine/reference/commandline/info.md) command. If a capability +[`docker info`](../../engine/reference/commandline/system_info.md) command. If a capability is disabled in your kernel, you may see a warning at the end of the output like the following: diff --git a/content/config/containers/runmetrics.md b/content/config/containers/runmetrics.md index 96241bd7f..dfa5183d0 100644 --- a/content/config/containers/runmetrics.md +++ b/content/config/containers/runmetrics.md @@ -25,7 +25,7 @@ redis1 0.07% 796 KB / 64 MB 1.21% redis2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B ``` -The [`docker stats`](../../engine/reference/commandline/stats.md) reference +The [`docker stats`](../../engine/reference/commandline/container_stats.md) reference page has more details about the `docker stats` command. 
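For example, a one-shot, scriptable snapshot of the same metrics can be taken with `--no-stream` and a format template; a minimal sketch, with the container names taken from the sample output above and the figures purely illustrative:

```console
$ docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" redis1 redis2
NAME      CPU %     MEM USAGE / LIMIT
redis1    0.07%     796KiB / 64MiB
redis2    0.07%     2.746MiB / 64MiB
```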
## Control groups diff --git a/content/config/filter.md b/content/config/filter.md index 317126e68..1029cc51e 100644 --- a/content/config/filter.md +++ b/content/config/filter.md @@ -95,13 +95,13 @@ description for commands that support the `--filter` flag: - [`docker config ls`](../engine/reference/commandline/config_ls.md) - [`docker container prune`](../engine/reference/commandline/container_prune.md) - [`docker image prune`](../engine/reference/commandline/image_prune.md) -- [`docker images`](../engine/reference/commandline/images.md) +- [`docker image ls`](../engine/reference/commandline/image_ls.md) - [`docker network ls`](../engine/reference/commandline/network_ls.md) - [`docker network prune`](../engine/reference/commandline/network_prune.md) - [`docker node ls`](../engine/reference/commandline/node_ls.md) - [`docker node ps`](../engine/reference/commandline/node_ps.md) - [`docker plugin ls`](../engine/reference/commandline/plugin_ls.md) -- [`docker ps`](../engine/reference/commandline/ps.md) +- [`docker container ls`](../engine/reference/commandline/container_ls.md) - [`docker search`](../engine/reference/commandline/search.md) - [`docker secret ls`](../engine/reference/commandline/secret_ls.md) - [`docker service ls`](../engine/reference/commandline/service_ls.md) diff --git a/content/config/labels-custom-metadata.md b/content/config/labels-custom-metadata.md index 6187a510b..4752ed36e 100644 --- a/content/config/labels-custom-metadata.md +++ b/content/config/labels-custom-metadata.md @@ -80,15 +80,15 @@ Labels on Swarm nodes and services can be updated dynamically. - Images and containers - [Adding labels to images](../engine/reference/builder.md#label) - - [Overriding a container's labels at runtime](../engine/reference/commandline/run.md#label) + - [Overriding a container's labels at runtime](../engine/reference/commandline/container_run.md#label) - [Inspecting labels on images or containers](../engine/reference/commandline/inspect.md) - - [Filtering images by label](../engine/reference/commandline/images.md#filter) - - [Filtering containers by label](../engine/reference/commandline/ps.md#filter) + - [Filtering images by label](../engine/reference/commandline/image_ls.md#filter) + - [Filtering containers by label](../engine/reference/commandline/container_ls.md#filter) - Local Docker daemons - [Adding labels to a Docker daemon at runtime](../engine/reference/commandline/dockerd.md) - - [Inspecting a Docker daemon's labels](../engine/reference/commandline/info.md) + - [Inspecting a Docker daemon's labels](../engine/reference/commandline/system_info.md) - Volumes diff --git a/content/desktop/backup-and-restore.md b/content/desktop/backup-and-restore.md index 8d1d0d67a..b47207061 100644 --- a/content/desktop/backup-and-restore.md +++ b/content/desktop/backup-and-restore.md @@ -14,7 +14,7 @@ computer, for example. ## Save your data -1. Commit your containers to an image with [`docker container commit`](../engine/reference/commandline/commit.md). +1. Commit your containers to an image with [`docker container commit`](../engine/reference/commandline/container_commit.md). Committing a container stores the container filesystem changes and some of the container's configuration, for example labels and environment-variables, as a local image. Be aware that environment variables may contain sensitive @@ -26,13 +26,13 @@ computer, for example. 
If you used a [named volume](../storage/index.md#more-details-about-mount-types) to store container data, such as databases, refer to the [back up, restore, or migrate data volumes](../storage/volumes.md#back-up-restore-or-migrate-data-volumes) page in the storage section. -2. Use [`docker push`](../engine/reference/commandline/push.md) to push any +2. Use [`docker push`](../engine/reference/commandline/image_push.md) to push any images you have built locally and want to keep to the [Docker Hub registry](../docker-hub/index.md). Make sure to configure the [repository's visibility as "private"](../docker-hub/repos/index.md) for images that should not be publicly accessible. - Alternatively, use [`docker image save -o images.tar image1 [image2 ...]`](../engine/reference/commandline/save.md) + Alternatively, use [`docker image save -o images.tar image1 [image2 ...]`](../engine/reference/commandline/image_save.md) to save any images you want to keep to a local tar file. After backing up your data, you can uninstall the current version of Docker Desktop @@ -40,13 +40,13 @@ and [install a different version](release-notes.md) or reset Docker Desktop to f ## Restore your data -1. Use [`docker pull`](../engine/reference/commandline/pull.md) to restore images +1. Use [`docker pull`](../engine/reference/commandline/image_pull.md) to restore images you pushed to Docker Hub. - If you backed up your images to a local tar file, use [`docker image load -i images.tar`](../engine/reference/commandline/load.md) + If you backed up your images to a local tar file, use [`docker image load -i images.tar`](../engine/reference/commandline/image_load.md) to restore previously saved images. -2. Re-create your containers if needed, using [`docker run`](../engine/reference/commandline/run.md), +2. Re-create your containers if needed, using [`docker run`](../engine/reference/commandline/container_run.md), or [Docker Compose](../compose/index.md). -Refer to the [backup, restore, or migrate data volumes](../storage/volumes.md#back-up-restore-or-migrate-data-volumes) page in the storage section to restore volume data. \ No newline at end of file +Refer to the [backup, restore, or migrate data volumes](../storage/volumes.md#back-up-restore-or-migrate-data-volumes) page in the storage section to restore volume data. diff --git a/content/desktop/hardened-desktop/settings-management/configure.md b/content/desktop/hardened-desktop/settings-management/configure.md index eaadd1a32..90a83f6d0 100644 --- a/content/desktop/hardened-desktop/settings-management/configure.md +++ b/content/desktop/hardened-desktop/settings-management/configure.md @@ -150,7 +150,7 @@ The following `admin-settings.json` code and table provides an example of the re |`scout`|| Setting `useBackgroundIndexing` to `false` disables automatic indexing of images loaded to the image store. Setting `sbomIndexing` to `false` prevents the manual indexing triggered by inspecting an image in Docker Desktop.

**Note**: Users can still use the `docker scout` CLI commands to index images, even if indexing is disabled in Settings Management. | | `allowExperimentalFeatures`| | If `value` is set to `false`, experimental features are disabled.| | `allowBetaFeatures`| | If `value` is set to `false`, beta features are disabled.| -| `blockDockerLoad` | | If `value` is set to `true`, users are no longer able to run [`docker load`](../../../engine/reference/commandline/load.md) and receive an error if they try to.| +| `blockDockerLoad` | | If `value` is set to `true`, users are no longer able to run [`docker load`](../../../engine/reference/commandline/image_load.md) and receive an error if they try to.| ### Step three: Re-launch Docker Desktop >**Note** diff --git a/content/desktop/networking.md b/content/desktop/networking.md index 9738ff10a..4274712ed 100644 --- a/content/desktop/networking.md +++ b/content/desktop/networking.md @@ -162,5 +162,5 @@ container to random ports on the host. $ docker run -d -P --name webserver nginx ``` -See the [run command](../engine/reference/commandline/run.md) for more details on -publish options used with `docker run`. \ No newline at end of file +See the [run command](../engine/reference/commandline/container_run.md) for more details on +publish options used with `docker run`. diff --git a/content/desktop/previous-versions/archive-mac.md b/content/desktop/previous-versions/archive-mac.md index 206acf16f..eac97cfdd 100644 --- a/content/desktop/previous-versions/archive-mac.md +++ b/content/desktop/previous-versions/archive-mac.md @@ -35,7 +35,7 @@ This page contains release notes for older versions of Docker Desktop for Mac. - Re-enable raw as the default disk format for users running macOS 10.13.4 and higher. Note this change only takes effect after a "reset to factory defaults" or "remove all data" (from the Whale menu -> Preferences -> Reset). Related to [docker/for-mac#2625](https://github.com/docker/for-mac/issues/2625) * Bug fixes and minor changes - - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset disk image (in Preferences > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/save/#examples) and [back up volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) + - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset disk image (in Preferences > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/image_save/#examples) and [back up volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) - OS X El Captain 10.11 is deprecated in Docker Desktop. You will not be able to install updates after Docker Desktop 18.06.x. We recommend upgrading to the latest version of macOS. - Fix bug which would cause VM logs to be written to RAM rather than disk in some cases, and the VM to hang. See [docker/for-mac#2984](https://github.com/docker/for-mac/issues/2984) - Fix network connection leak triggered by haproxy TCP health-checks [docker/for-mac#1132](https://github.com/docker/for-mac/issues/1132) @@ -562,4 +562,4 @@ events or unexpected unmounts. 
* Docker 1.12.0 * Docker Machine 0.8.0 -* Docker Compose 1.8.0 \ No newline at end of file +* Docker Compose 1.8.0 diff --git a/content/desktop/previous-versions/archive-windows.md b/content/desktop/previous-versions/archive-windows.md index 5bc9ff2aa..792c5d3a7 100644 --- a/content/desktop/previous-versions/archive-windows.md +++ b/content/desktop/previous-versions/archive-windows.md @@ -49,7 +49,7 @@ This page contains release notes for older versions of Docker Desktop for Window - Kubernetes Support. You can now run a single-node Kubernetes cluster from the "Kubernetes" Pane in Docker for Windows settings and use kubectl commands as well as Docker commands. See [the Kubernetes section](../kubernetes.md) * Bug fixes and minor changes - - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset the disk image (in Settings > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/save/#examples) and [backup volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) + - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset the disk image (in Settings > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/image_save/#examples) and [backup volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) - Fix bug which would in some cases cause virtual machine logs to be written to RAM rather than disk, and the virtual machine to hang. - Fix security issue with named pipe connection to docker service. - Fix VPNKit memory leak. Fixes [docker/for-win#2087](https://github.com/docker/for-win/issues/2087), [moby/vpnkit#371](https://github.com/moby/vpnkit/issues/371) @@ -527,4 +527,4 @@ We did not distribute a 1.12.4 stable release * Docker 1.12.0 * Docker Machine 0.8.0 -* Docker Compose 1.8.0 \ No newline at end of file +* Docker Compose 1.8.0 diff --git a/content/desktop/previous-versions/edge-releases-mac.md b/content/desktop/previous-versions/edge-releases-mac.md index 342080622..7379be6ec 100644 --- a/content/desktop/previous-versions/edge-releases-mac.md +++ b/content/desktop/previous-versions/edge-releases-mac.md @@ -732,7 +732,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - Add an experimental SOCKS server to allow access to container networks, see [docker/for-mac#2670](https://github.com/docker/for-mac/issues/2670#issuecomment-372365274). Also see [docker/for-mac#2721](https://github.com/docker/for-mac/issues/2721) * Bug fixes and minor changes - - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset disk image (in Preferences > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/save/#examples) and [back up volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) + - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. 
You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset disk image (in Preferences > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/image_save/#examples) and [back up volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) - Fix startup issue with AUFS [docker/for-mac#2804](https://github.com/docker/for-mac/issues/2804) - Fix status bug which could prevent the Kubernetes cluster from starting. Fixes [docker/for-mac#2990](https://github.com/docker/for-mac/issues/2990) - Fix bug which would cause virtual machine logs to be written to RAM rather than disk in some cases, and the virtual machine to hang. See [docker/for-mac#2984](https://github.com/docker/for-mac/issues/2984) @@ -2441,4 +2441,4 @@ work yet. - Fixed setting hostname -- Fixed permissions on `usr/local` symbolic links \ No newline at end of file +- Fixed permissions on `usr/local` symbolic links diff --git a/content/desktop/previous-versions/edge-releases-windows.md b/content/desktop/previous-versions/edge-releases-windows.md index ffcd5e046..eac960beb 100644 --- a/content/desktop/previous-versions/edge-releases-windows.md +++ b/content/desktop/previous-versions/edge-releases-windows.md @@ -953,7 +953,7 @@ This release contains a Kubernetes upgrade. Note that your local Kubernetes clus - New Kubernetes menu item allowing to switch Kubernetes context & connect to clusters other than the local one. * Bug fixes and minor changes - - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset disk image (in Settings > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/save/#examples) and [back up volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) + - AUFS storage driver is deprecated in Docker Desktop and AUFS support will be removed in the next major release. You can continue with AUFS in Docker Desktop 18.06.x, but you will need to reset disk image (in Settings > Reset menu) before updating to the next major update. You can check documentation to [save images](/engine/reference/commandline/image_save/#examples) and [back up volumes](../../storage/volumes.md#back-up-restore-or-migrate-data-volumes) - Fix startup issue with AUFS - Fix status bug which could prevent the kubernetes cluster from starting. - Fix bug which would cause VM logs to be written to RAM rather than disk in some cases, and the VM to hang. @@ -2602,4 +2602,4 @@ are working on a solution. **Networking** - - live debugging Node.js application \ No newline at end of file + - live debugging Node.js application diff --git a/content/desktop/release-notes.md b/content/desktop/release-notes.md index 470cda722..3a6be4a31 100644 --- a/content/desktop/release-notes.md +++ b/content/desktop/release-notes.md @@ -1897,7 +1897,7 @@ CVE-2021-44228](https://www.docker.com/blog/apache-log4j-2-cve-2021-44228/). Docker Dashboard incorrectly displays the container memory usage as zero on Hyper-V based machines. -You can use the [`docker stats`](../engine/reference/commandline/stats.md) +You can use the [`docker stats`](../engine/reference/commandline/container_stats.md) command on the command line as a workaround to view the actual memory usage. See [docker/for-mac#6076](https://github.com/docker/for-mac/issues/6076). 
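For the workaround above, a one-shot reading focused on memory is usually enough; a minimal sketch, with `webserver` standing in for the affected container and the figures purely illustrative:

```console
$ docker stats --no-stream --format "table {{.Name}}\t{{.MemUsage}}\t{{.MemPerc}}" webserver
NAME        MEM USAGE / LIMIT     MEM %
webserver   3.5MiB / 1.944GiB     0.18%
```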
diff --git a/content/develop/develop-images/instructions.md b/content/develop/develop-images/instructions.md index ebbcf337b..79c43aad5 100644 --- a/content/develop/develop-images/instructions.md +++ b/content/develop/develop-images/instructions.md @@ -330,7 +330,7 @@ as part of your build. `ADD` is better than manually adding files using something like `wget` and `tar`, because it ensures a more precise build cache. `ADD` also has built-in support for checksum validation of the remote resources, and a protocol for parsing branches, tags, and subdirectories from -[Git URLs](../../engine/reference/commandline/build.md#git-repositories). +[Git URLs](../../engine/reference/commandline/image_build.md#git-repositories). The following example uses `ADD` to download a .NET installer. Combined with multi-stage builds, only the .NET runtime remains in the final stage, no diff --git a/content/docker-hub/builds/advanced.md b/content/docker-hub/builds/advanced.md index d1b42daa8..d6e540775 100644 --- a/content/docker-hub/builds/advanced.md +++ b/content/docker-hub/builds/advanced.md @@ -115,11 +115,11 @@ $ docker build --build-arg CUSTOM=$VAR -f $DOCKERFILE_PATH -t $IMAGE_NAME . > **Important** > -> A `hooks/build` file overrides the basic [docker build](../../engine/reference/commandline/build.md) command used by the builder, so you must include a similar build command in the hook or +> A `hooks/build` file overrides the basic [docker build](../../engine/reference/commandline/image_build.md) command used by the builder, so you must include a similar build command in the hook or the automated build fails. { .important } -Refer to the [docker build documentation](../../engine/reference/commandline/build.md#build-arg) +Refer to the [docker build documentation](../../engine/reference/commandline/image_build.md#build-arg) to learn more about Docker build-time variables. #### Push to multiple repositories @@ -157,4 +157,4 @@ you do one of the following: ```console $ git fetch --unshallow origin - ``` \ No newline at end of file + ``` diff --git a/content/engine/api/_index.md b/content/engine/api/_index.md index c5eb1b38b..c00824755 100644 --- a/content/engine/api/_index.md +++ b/content/engine/api/_index.md @@ -151,14 +151,12 @@ You can specify the API version to use in any of the following ways: | 1.13.1 | [1.26](/engine/api/v1.26/) | [changes](/engine/api/version-history/#v126-api-changes) | | 1.13 | [1.25](/engine/api/v1.26/) | [changes](/engine/api/version-history/#v125-api-changes) | | 1.12 | [1.24](/engine/api/v1.24/) | [changes](/engine/api/version-history/#v124-api-changes) | -| 1.11 | [1.23](/engine/api/v1.23/) | [changes](/engine/api/version-history/#v123-api-changes) | -| 1.10 | [1.22](/engine/api/v1.22/) | [changes](/engine/api/version-history/#v122-api-changes) | -| 1.9 | [1.21](/engine/api/v1.21/) | [changes](/engine/api/version-history/#v121-api-changes) | -| 1.8 | [1.20](/engine/api/v1.20/) | [changes](/engine/api/version-history/#v120-api-changes) | -| 1.7 | [1.19](/engine/api/v1.19/) | [changes](/engine/api/version-history/#v119-api-changes) | -| 1.6 | [1.18](/engine/api/v1.18/) | [changes](/engine/api/version-history/#v118-api-changes) | -### Archived API versions +### Deprecated API versions -You can find archived documentation for older versions of the API -in the [Docker code repository on GitHub](https://github.com/moby/moby/tree/v1.9.1/docs/reference/api) \ No newline at end of file +API versions before v1.24 are [deprecated](/engine/deprecated/#deprecate-legacy-api-versions). 
+You can find archived documentation for deprecated versions of the API in the +code repository on GitHub: + +- [Documentation for API versions 1.23 and before](https://github.com/moby/moby/tree/v24.0.7/docs/api). +- [Documentation for API versions 1.6 and before](https://github.com/moby/moby/tree/v1.9.1/docs/reference/api). diff --git a/content/engine/api/v1.44.md b/content/engine/api/v1.44.md new file mode 100644 index 000000000..caa6d1129 --- /dev/null +++ b/content/engine/api/v1.44.md @@ -0,0 +1,3 @@ +--- +layout: engine-api +--- \ No newline at end of file diff --git a/content/engine/reference/commandline/attach.md b/content/engine/reference/commandline/attach.md deleted file mode 100644 index e41a6a6b0..000000000 --- a/content/engine/reference/commandline/attach.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_attach -title: docker attach -aliases: -- /edge/engine/reference/commandline/attach/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/build.md b/content/engine/reference/commandline/build.md deleted file mode 100644 index ae89d8d42..000000000 --- a/content/engine/reference/commandline/build.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_build -title: docker build -aliases: -- /edge/engine/reference/commandline/build/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/builder_build.md b/content/engine/reference/commandline/builder_build.md deleted file mode 100644 index 38bcb0379..000000000 --- a/content/engine/reference/commandline/builder_build.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_builder_build -title: docker builder build -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/commit.md b/content/engine/reference/commandline/commit.md deleted file mode 100644 index c72d265e6..000000000 --- a/content/engine/reference/commandline/commit.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_commit -title: docker commit -aliases: -- /edge/engine/reference/commandline/commit/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/container_attach.md b/content/engine/reference/commandline/container_attach.md index e28e847b2..ef3896d12 100644 --- a/content/engine/reference/commandline/container_attach.md +++ b/content/engine/reference/commandline/container_attach.md @@ -4,6 +4,7 @@ datafile: docker_container_attach title: docker container attach aliases: - /edge/engine/reference/commandline/container_attach/ +- /engine/reference/commandline/attach/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_commit.md b/content/engine/reference/commandline/container_commit.md index 0029f6fbb..2fa5b1551 100644 --- a/content/engine/reference/commandline/container_commit.md +++ b/content/engine/reference/commandline/container_commit.md @@ -4,6 +4,7 @@ datafile: docker_container_commit title: docker container commit aliases: - /edge/engine/reference/commandline/container_commit/ +- /engine/reference/commandline/commit/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on 
GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_cp.md b/content/engine/reference/commandline/container_cp.md index 92adc074a..94c595b5e 100644 --- a/content/engine/reference/commandline/container_cp.md +++ b/content/engine/reference/commandline/container_cp.md @@ -4,6 +4,7 @@ datafile: docker_container_cp title: docker container cp aliases: - /edge/engine/reference/commandline/container_cp/ +- /engine/reference/commandline/cp/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_create.md b/content/engine/reference/commandline/container_create.md index be14d03a5..72bf509ee 100644 --- a/content/engine/reference/commandline/container_create.md +++ b/content/engine/reference/commandline/container_create.md @@ -4,6 +4,7 @@ datafile: docker_container_create title: docker container create aliases: - /edge/engine/reference/commandline/container_create/ +- /engine/reference/commandline/create/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_diff.md b/content/engine/reference/commandline/container_diff.md index 9de56b116..3a93f545b 100644 --- a/content/engine/reference/commandline/container_diff.md +++ b/content/engine/reference/commandline/container_diff.md @@ -4,6 +4,7 @@ datafile: docker_container_diff title: docker container diff aliases: - /edge/engine/reference/commandline/container_diff/ +- /engine/reference/commandline/diff/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_exec.md b/content/engine/reference/commandline/container_exec.md index 17c79f2e9..c62528582 100644 --- a/content/engine/reference/commandline/container_exec.md +++ b/content/engine/reference/commandline/container_exec.md @@ -1,9 +1,11 @@ --- datafolder: engine-cli datafile: docker_container_exec +linkTitle: docker exec title: docker container exec aliases: - /edge/engine/reference/commandline/container_exec/ +- /engine/reference/commandline/exec/ layout: cli --- @@ -13,4 +15,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_export.md b/content/engine/reference/commandline/container_export.md index f375ab873..874c09007 100644 --- a/content/engine/reference/commandline/container_export.md +++ b/content/engine/reference/commandline/container_export.md @@ -4,6 +4,7 @@ datafile: docker_container_export title: docker container export aliases: - /edge/engine/reference/commandline/container_export/ +- /engine/reference/commandline/export/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git 
a/content/engine/reference/commandline/container_kill.md b/content/engine/reference/commandline/container_kill.md index 3087bdb89..e1cd73c8f 100644 --- a/content/engine/reference/commandline/container_kill.md +++ b/content/engine/reference/commandline/container_kill.md @@ -4,6 +4,7 @@ datafile: docker_container_kill title: docker container kill aliases: - /edge/engine/reference/commandline/container_kill/ +- /engine/reference/commandline/kill/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_logs.md b/content/engine/reference/commandline/container_logs.md index d1e782835..577273f09 100644 --- a/content/engine/reference/commandline/container_logs.md +++ b/content/engine/reference/commandline/container_logs.md @@ -4,6 +4,7 @@ datafile: docker_container_logs title: docker container logs aliases: - /edge/engine/reference/commandline/container_logs/ +- /engine/reference/commandline/logs/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_ls.md b/content/engine/reference/commandline/container_ls.md index b95d003b4..789bb9947 100644 --- a/content/engine/reference/commandline/container_ls.md +++ b/content/engine/reference/commandline/container_ls.md @@ -1,9 +1,11 @@ --- datafolder: engine-cli datafile: docker_container_ls +linkTitle: docker ps title: docker container ls aliases: - /edge/engine/reference/commandline/container_ls/ +- /engine/reference/commandline/ps/ layout: cli --- @@ -13,4 +15,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_pause.md b/content/engine/reference/commandline/container_pause.md index 7bda777dd..a0f997e92 100644 --- a/content/engine/reference/commandline/container_pause.md +++ b/content/engine/reference/commandline/container_pause.md @@ -4,6 +4,7 @@ datafile: docker_container_pause title: docker container pause aliases: - /edge/engine/reference/commandline/container_pause/ +- /engine/reference/commandline/pause/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_port.md b/content/engine/reference/commandline/container_port.md index a5f163858..a1dd29b26 100644 --- a/content/engine/reference/commandline/container_port.md +++ b/content/engine/reference/commandline/container_port.md @@ -4,6 +4,7 @@ datafile: docker_container_port title: docker container port aliases: - /edge/engine/reference/commandline/container_port/ +- /engine/reference/commandline/port/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_rename.md b/content/engine/reference/commandline/container_rename.md index 
595c5fdf9..9b724095b 100644 --- a/content/engine/reference/commandline/container_rename.md +++ b/content/engine/reference/commandline/container_rename.md @@ -4,6 +4,7 @@ datafile: docker_container_rename title: docker container rename aliases: - /edge/engine/reference/commandline/container_rename/ +- /engine/reference/commandline/rename/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_restart.md b/content/engine/reference/commandline/container_restart.md index af3d44c6a..ff2888b41 100644 --- a/content/engine/reference/commandline/container_restart.md +++ b/content/engine/reference/commandline/container_restart.md @@ -4,6 +4,7 @@ datafile: docker_container_restart title: docker container restart aliases: - /edge/engine/reference/commandline/container_restart/ +- /engine/reference/commandline/restart/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_rm.md b/content/engine/reference/commandline/container_rm.md index fad62d892..e9b5b968e 100644 --- a/content/engine/reference/commandline/container_rm.md +++ b/content/engine/reference/commandline/container_rm.md @@ -4,6 +4,7 @@ datafile: docker_container_rm title: docker container rm aliases: - /edge/engine/reference/commandline/container_rm/ +- /engine/reference/commandline/rm/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_run.md b/content/engine/reference/commandline/container_run.md index 7ee1d5bf0..5fc495711 100644 --- a/content/engine/reference/commandline/container_run.md +++ b/content/engine/reference/commandline/container_run.md @@ -1,9 +1,11 @@ --- datafolder: engine-cli datafile: docker_container_run +linkTitle: docker run title: docker container run aliases: - /edge/engine/reference/commandline/container_rm/ +- /engine/reference/commandline/run/ layout: cli --- @@ -13,4 +15,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_start.md b/content/engine/reference/commandline/container_start.md index 42a98a823..c9155b0fb 100644 --- a/content/engine/reference/commandline/container_start.md +++ b/content/engine/reference/commandline/container_start.md @@ -4,6 +4,7 @@ datafile: docker_container_start title: docker container start aliases: - /edge/engine/reference/commandline/container_start/ +- /engine/reference/commandline/start/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_stats.md b/content/engine/reference/commandline/container_stats.md index e59197cce..7e81bcaef 100644 --- a/content/engine/reference/commandline/container_stats.md +++ 
b/content/engine/reference/commandline/container_stats.md @@ -4,6 +4,7 @@ datafile: docker_container_stats title: docker container stats aliases: - /edge/engine/reference/commandline/container_stats/ +- /engine/reference/commandline/stats/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_stop.md b/content/engine/reference/commandline/container_stop.md index b14eb6406..1f6aca663 100644 --- a/content/engine/reference/commandline/container_stop.md +++ b/content/engine/reference/commandline/container_stop.md @@ -4,6 +4,7 @@ datafile: docker_container_stop title: docker container stop aliases: - /edge/engine/reference/commandline/container_stop/ +- /engine/reference/commandline/stop/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_top.md b/content/engine/reference/commandline/container_top.md index 6fc216518..5e2bbea95 100644 --- a/content/engine/reference/commandline/container_top.md +++ b/content/engine/reference/commandline/container_top.md @@ -4,6 +4,7 @@ datafile: docker_container_top title: docker container top aliases: - /edge/engine/reference/commandline/container_top/ +- /engine/reference/commandline/top/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_unpause.md b/content/engine/reference/commandline/container_unpause.md index 7b050663b..cd6352fd7 100644 --- a/content/engine/reference/commandline/container_unpause.md +++ b/content/engine/reference/commandline/container_unpause.md @@ -4,6 +4,7 @@ datafile: docker_container_unpause title: docker container unpause aliases: - /edge/engine/reference/commandline/container_unpause/ +- /engine/reference/commandline/unpause/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_update.md b/content/engine/reference/commandline/container_update.md index 29f2cc481..f968d108b 100644 --- a/content/engine/reference/commandline/container_update.md +++ b/content/engine/reference/commandline/container_update.md @@ -4,6 +4,7 @@ datafile: docker_container_update title: docker container update aliases: - /edge/engine/reference/commandline/container_update/ +- /engine/reference/commandline/update/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/container_wait.md b/content/engine/reference/commandline/container_wait.md index 760cdfa70..bb7f234ad 100644 --- a/content/engine/reference/commandline/container_wait.md +++ b/content/engine/reference/commandline/container_wait.md @@ -4,6 +4,7 @@ datafile: docker_container_wait title: docker container wait aliases: 
- /edge/engine/reference/commandline/container_wait/ +- /engine/reference/commandline/wait/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/cp.md b/content/engine/reference/commandline/cp.md deleted file mode 100644 index cc24a9339..000000000 --- a/content/engine/reference/commandline/cp.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_cp -title: docker cp -aliases: -- /edge/engine/reference/commandline/cp/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/create.md b/content/engine/reference/commandline/create.md deleted file mode 100644 index 05134539f..000000000 --- a/content/engine/reference/commandline/create.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_create -title: docker create -aliases: -- /edge/engine/reference/commandline/create/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/diff.md b/content/engine/reference/commandline/diff.md deleted file mode 100644 index a3d8caffb..000000000 --- a/content/engine/reference/commandline/diff.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_diff -title: docker diff -aliases: -- /edge/engine/reference/commandline/diff/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/events.md b/content/engine/reference/commandline/events.md deleted file mode 100644 index 87cee646b..000000000 --- a/content/engine/reference/commandline/events.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_events -title: docker events -aliases: -- /edge/engine/reference/commandline/events/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/exec.md b/content/engine/reference/commandline/exec.md deleted file mode 100644 index 6467d4d67..000000000 --- a/content/engine/reference/commandline/exec.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_exec -title: docker exec -aliases: -- /edge/engine/reference/commandline/exec/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/export.md b/content/engine/reference/commandline/export.md deleted file mode 100644 index 0311afa1f..000000000 --- a/content/engine/reference/commandline/export.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_export -title: docker export -aliases: -- /edge/engine/reference/commandline/export/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/history.md b/content/engine/reference/commandline/history.md deleted file mode 100644 index ead222e92..000000000 --- a/content/engine/reference/commandline/history.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_history -title: docker history -aliases: -- /edge/engine/reference/commandline/history/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/image_build.md b/content/engine/reference/commandline/image_build.md index b0931baef..50d17e5f9 100644 --- a/content/engine/reference/commandline/image_build.md +++ b/content/engine/reference/commandline/image_build.md @@ -1,9 +1,12 @@ --- 
datafolder: engine-cli datafile: docker_image_build +linkTitle: docker build title: docker image build aliases: - /edge/engine/reference/commandline/image_build/ +- /engine/reference/commandline/build/ +- /engine/reference/commandline/builder_build/ layout: cli --- @@ -13,4 +16,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_history.md b/content/engine/reference/commandline/image_history.md index 8005a3e06..cbdc8a005 100644 --- a/content/engine/reference/commandline/image_history.md +++ b/content/engine/reference/commandline/image_history.md @@ -4,6 +4,7 @@ datafile: docker_image_history title: docker image history aliases: - /edge/engine/reference/commandline/image_history/ +- /engine/reference/commandline/history/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_import.md b/content/engine/reference/commandline/image_import.md index 4675b4dbe..cc5fc4cef 100644 --- a/content/engine/reference/commandline/image_import.md +++ b/content/engine/reference/commandline/image_import.md @@ -4,6 +4,7 @@ datafile: docker_image_import title: docker image import aliases: - /edge/engine/reference/commandline/image_import/ +- /engine/reference/commandline/import/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_load.md b/content/engine/reference/commandline/image_load.md index 116c20a47..df77686c3 100644 --- a/content/engine/reference/commandline/image_load.md +++ b/content/engine/reference/commandline/image_load.md @@ -4,6 +4,7 @@ datafile: docker_image_load title: docker image load aliases: - /edge/engine/reference/commandline/image_load/ +- /engine/reference/commandline/load/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_ls.md b/content/engine/reference/commandline/image_ls.md index 8a32a287c..8596dac16 100644 --- a/content/engine/reference/commandline/image_ls.md +++ b/content/engine/reference/commandline/image_ls.md @@ -1,9 +1,11 @@ --- datafolder: engine-cli datafile: docker_image_ls +linkTitle: docker images title: docker image ls aliases: - /edge/engine/reference/commandline/image_ls/ +- /engine/reference/commandline/images/ layout: cli --- @@ -13,4 +15,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_pull.md b/content/engine/reference/commandline/image_pull.md index 23b0988d2..135fe6156 100644 --- a/content/engine/reference/commandline/image_pull.md +++ b/content/engine/reference/commandline/image_pull.md @@ -1,9 +1,11 @@ --- datafolder: engine-cli datafile: docker_image_pull +linkTitle: docker pull title: docker image pull aliases: - 
/edge/engine/reference/commandline/image_pull/ +- /engine/reference/commandline/pull/ layout: cli --- @@ -13,4 +15,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_push.md b/content/engine/reference/commandline/image_push.md index a7a3045a7..c61f1953f 100644 --- a/content/engine/reference/commandline/image_push.md +++ b/content/engine/reference/commandline/image_push.md @@ -1,9 +1,11 @@ --- datafolder: engine-cli datafile: docker_image_push +linkTitle: docker push title: docker image push aliases: - /edge/engine/reference/commandline/image_push/ +- /engine/reference/commandline/push/ layout: cli --- @@ -13,4 +15,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_rm.md b/content/engine/reference/commandline/image_rm.md index 3117c9c4b..758d8631e 100644 --- a/content/engine/reference/commandline/image_rm.md +++ b/content/engine/reference/commandline/image_rm.md @@ -4,6 +4,7 @@ datafile: docker_image_rm title: docker image rm aliases: - /edge/engine/reference/commandline/image_rm/ +- /engine/reference/commandline/rmi/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_save.md b/content/engine/reference/commandline/image_save.md index 9ea22d161..fe37e0664 100644 --- a/content/engine/reference/commandline/image_save.md +++ b/content/engine/reference/commandline/image_save.md @@ -4,6 +4,7 @@ datafile: docker_image_save title: docker image save aliases: - /edge/engine/reference/commandline/image_save/ +- /engine/reference/commandline/save/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/image_tag.md b/content/engine/reference/commandline/image_tag.md index f5a6a6a4f..a3c222634 100644 --- a/content/engine/reference/commandline/image_tag.md +++ b/content/engine/reference/commandline/image_tag.md @@ -4,6 +4,7 @@ datafile: docker_image_tag title: docker image tag aliases: - /edge/engine/reference/commandline/image_tag/ +- /engine/reference/commandline/tag/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/images.md b/content/engine/reference/commandline/images.md deleted file mode 100644 index 87afebbf0..000000000 --- a/content/engine/reference/commandline/images.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_images -title: docker images -aliases: -- /edge/engine/reference/commandline/images/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/import.md b/content/engine/reference/commandline/import.md deleted file mode 100644 index f60eef87e..000000000 --- 
a/content/engine/reference/commandline/import.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_import -title: docker import -aliases: -- /edge/engine/reference/commandline/import/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/info.md b/content/engine/reference/commandline/info.md deleted file mode 100644 index ea2fc28fa..000000000 --- a/content/engine/reference/commandline/info.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_info -title: docker info -aliases: -- /edge/engine/reference/commandline/info/ -layout: cli ---- - - - - -## Warnings about kernel support - -If your operating system does not enable certain capabilities, you may see -warnings such as one of the following, when you run `docker info`: - -```none -WARNING: Your kernel does not support swap limit capabilities. Limitation discarded. -``` - -```none -WARNING: No swap limit support -``` - -You can ignore these warnings unless you actually need the ability to -[limit these resources](../../../config/containers/resource_constraints.md), in which case you -should consult your operating system's documentation for enabling them. \ No newline at end of file diff --git a/content/engine/reference/commandline/kill.md b/content/engine/reference/commandline/kill.md deleted file mode 100644 index a25c02aa3..000000000 --- a/content/engine/reference/commandline/kill.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_kill -title: docker kill -aliases: -- /edge/engine/reference/commandline/kill/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/load.md b/content/engine/reference/commandline/load.md deleted file mode 100644 index 1416f6f70..000000000 --- a/content/engine/reference/commandline/load.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_load -title: docker load -aliases: -- /edge/engine/reference/commandline/load/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/logs.md b/content/engine/reference/commandline/logs.md deleted file mode 100644 index 977d637b1..000000000 --- a/content/engine/reference/commandline/logs.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_logs -title: docker logs -aliases: -- /edge/engine/reference/commandline/logs/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/pause.md b/content/engine/reference/commandline/pause.md deleted file mode 100644 index cb9634c94..000000000 --- a/content/engine/reference/commandline/pause.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_pause -title: docker pause -aliases: -- /edge/engine/reference/commandline/pause/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/port.md b/content/engine/reference/commandline/port.md deleted file mode 100644 index c80015207..000000000 --- a/content/engine/reference/commandline/port.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_port -title: docker port -aliases: -- /edge/engine/reference/commandline/port/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/ps.md b/content/engine/reference/commandline/ps.md deleted file mode 100644 index 448709550..000000000 --- a/content/engine/reference/commandline/ps.md +++ /dev/null @@ 
-1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_ps -title: docker ps -aliases: -- /edge/engine/reference/commandline/ps/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/pull.md b/content/engine/reference/commandline/pull.md deleted file mode 100644 index 8e85769af..000000000 --- a/content/engine/reference/commandline/pull.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_pull -title: docker pull -aliases: -- /edge/engine/reference/commandline/pull/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/push.md b/content/engine/reference/commandline/push.md deleted file mode 100644 index 1d61e207f..000000000 --- a/content/engine/reference/commandline/push.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_push -title: docker push -aliases: -- /edge/engine/reference/commandline/push/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/rename.md b/content/engine/reference/commandline/rename.md deleted file mode 100644 index 833f93ba1..000000000 --- a/content/engine/reference/commandline/rename.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_rename -title: docker rename -aliases: -- /edge/engine/reference/commandline/rename/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/restart.md b/content/engine/reference/commandline/restart.md deleted file mode 100644 index 5f5e7fa2f..000000000 --- a/content/engine/reference/commandline/restart.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_restart -title: docker restart -aliases: -- /edge/engine/reference/commandline/restart/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/rm.md b/content/engine/reference/commandline/rm.md deleted file mode 100644 index f67112d52..000000000 --- a/content/engine/reference/commandline/rm.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_rm -title: docker rm -aliases: -- /edge/engine/reference/commandline/rm/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/rmi.md b/content/engine/reference/commandline/rmi.md deleted file mode 100644 index 6f00403d7..000000000 --- a/content/engine/reference/commandline/rmi.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_rmi -title: docker rmi -aliases: -- /edge/engine/reference/commandline/rmi/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/run.md b/content/engine/reference/commandline/run.md deleted file mode 100644 index c8b611c38..000000000 --- a/content/engine/reference/commandline/run.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_run -title: docker run -description: Learn all there is to know about the docker run command and how to use - it in the Docker CLI. 
-aliases: -- /reference/run/ -- /edge/engine/reference/commandline/run/ -- /reference/commandline/run/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/save.md b/content/engine/reference/commandline/save.md deleted file mode 100644 index 97c3460ef..000000000 --- a/content/engine/reference/commandline/save.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_save -title: docker save -aliases: -- /edge/engine/reference/commandline/save/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/start.md b/content/engine/reference/commandline/start.md deleted file mode 100644 index 2d74b1c6c..000000000 --- a/content/engine/reference/commandline/start.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_start -title: docker start -aliases: -- /edge/engine/reference/commandline/start/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/stats.md b/content/engine/reference/commandline/stats.md deleted file mode 100644 index a88c28cf1..000000000 --- a/content/engine/reference/commandline/stats.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_stats -title: docker stats -aliases: -- /edge/engine/reference/commandline/stats/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/stop.md b/content/engine/reference/commandline/stop.md deleted file mode 100644 index 64179894d..000000000 --- a/content/engine/reference/commandline/stop.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_stop -title: docker stop -aliases: -- /edge/engine/reference/commandline/stop/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/system_events.md b/content/engine/reference/commandline/system_events.md index a82c5c811..68025aadf 100644 --- a/content/engine/reference/commandline/system_events.md +++ b/content/engine/reference/commandline/system_events.md @@ -4,6 +4,7 @@ datafile: docker_system_events title: docker system events aliases: - /edge/engine/reference/commandline/system_events/ +- /engine/reference/commandline/events/ layout: cli --- @@ -13,4 +14,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/system_info.md b/content/engine/reference/commandline/system_info.md index 69bf76d90..f29a954b3 100644 --- a/content/engine/reference/commandline/system_info.md +++ b/content/engine/reference/commandline/system_info.md @@ -1,9 +1,11 @@ --- datafolder: engine-cli datafile: docker_system_info +linkTitle: docker info title: docker system info aliases: - /edge/engine/reference/commandline/system_info/ +- /engine/reference/commandline/info/ layout: cli --- @@ -13,4 +15,4 @@ suggest a change to the text that appears here, open a ticket or pull request in the source repository on GitHub: https://github.com/docker/cli ---> \ No newline at end of file +--> diff --git a/content/engine/reference/commandline/tag.md b/content/engine/reference/commandline/tag.md deleted file mode 100644 index dbb6b0fe2..000000000 --- a/content/engine/reference/commandline/tag.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_tag -title: docker tag -aliases: -- 
/edge/engine/reference/commandline/tag/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/top.md b/content/engine/reference/commandline/top.md deleted file mode 100644 index 5c7565c86..000000000 --- a/content/engine/reference/commandline/top.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_top -title: docker top -aliases: -- /edge/engine/reference/commandline/top/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/unpause.md b/content/engine/reference/commandline/unpause.md deleted file mode 100644 index 62e70dc73..000000000 --- a/content/engine/reference/commandline/unpause.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_unpause -title: docker unpause -aliases: -- /edge/engine/reference/commandline/unpause/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/update.md b/content/engine/reference/commandline/update.md deleted file mode 100644 index 2c8a71449..000000000 --- a/content/engine/reference/commandline/update.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_update -title: docker update -aliases: -- /edge/engine/reference/commandline/update/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/reference/commandline/wait.md b/content/engine/reference/commandline/wait.md deleted file mode 100644 index 9ac4b23d4..000000000 --- a/content/engine/reference/commandline/wait.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -datafolder: engine-cli -datafile: docker_wait -title: docker wait -aliases: -- /edge/engine/reference/commandline/wait/ -layout: cli ---- - - \ No newline at end of file diff --git a/content/engine/release-notes/24.0.md b/content/engine/release-notes/24.0.md index 6d4c57c11..e5fa9af82 100644 --- a/content/engine/release-notes/24.0.md +++ b/content/engine/release-notes/24.0.md @@ -5,11 +5,6 @@ description: Learn about the new features, bug fixes, and breaking changes for D keywords: docker, docker engine, ce, whats new, release notes toc_min: 1 toc_max: 2 -aliases: -- /engine/release-notes/ -- /engine/release-notes/latest/ -- /release-notes/docker-ce/ -- /release-notes/docker-engine/ --- This page describes the latest changes, additions, known issues, and fixes for Docker Engine version 24.0. diff --git a/content/engine/release-notes/25.0.md b/content/engine/release-notes/25.0.md new file mode 100644 index 000000000..59de49232 --- /dev/null +++ b/content/engine/release-notes/25.0.md @@ -0,0 +1,115 @@ +--- +title: Docker Engine 25.0 release notes +description: Learn about the new features, bug fixes, and breaking changes for Docker Engine +keywords: docker, docker engine, ce, whats new, release notes +toc_min: 1 +toc_max: 2 +skip_read_time: true +aliases: +- /engine/release-notes/ +- /engine/release-notes/latest/ +- /release-notes/docker-ce/ +- /release-notes/docker-engine/ +--- + +This page describes the latest changes, additions, known issues, and fixes for Docker Engine version 25.0. + +For more information about: + +- Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). +- Changes to the Engine API, see [Engine API version history](../api/version-history.md). 
+ +## 25.0.0 + +{{< release-date date="2024-01-19" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 25.0.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A25.0.0) +- [moby/moby, 25.0.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A25.0.0) + +### New + +- Add OpenTelemetry tracing. [moby/moby#45652](https://github.com/moby/moby/pull/45652), [moby/moby#45579](https://github.com/moby/moby/pull/45579) +- Add support for CDI devices under Linux. [moby/moby#45134](https://github.com/moby/moby/pull/45134), [docker/cli#4510](https://github.com/docker/cli/pull/4510), [moby/moby#46004](https://github.com/moby/moby/pull/46004) +- Add an additional interval to be used by healthchecks during the container start period. [moby/moby#40894](https://github.com/moby/moby/pull/40894), [docker/cli#4405](https://github.com/docker/cli/pull/4405), [moby/moby#45965](https://github.com/moby/moby/pull/45965) +- Add a `--log-format` flag to `dockerd` to control the logging format: text (default) or JSON. [moby/moby#45737](https://github.com/moby/moby/pull/45737) +- Add support for recursive read-only mounts. [moby/moby#45278](https://github.com/moby/moby/pull/45278), [moby/moby#46037](https://github.com/moby/moby/pull/46037) +- Add support for filtering images based on timestamp with `docker image ls --filter=until=`. [moby/moby#46577](https://github.com/moby/moby/pull/46577) + +### Bug fixes and enhancements + +- API: Fix error message for invalid policies at `ValidateRestartPolicy`. [moby/moby#46352](https://github.com/moby/moby/pull/46352) +- API: Update `/info` endpoint to use singleflight. [moby/moby#45847](https://github.com/moby/moby/pull/45847) +- Add an error message for when specifying a Dockerfile filename with `-f`, and also using `stdin`. [docker/cli#4346](https://github.com/docker/cli/pull/4346) +- Add support for `mac-address` and `link-local-ip` fields in `--network` long format. [docker/cli#4419](https://github.com/docker/cli/pull/4419) +- Add support for specifying multiple `--network` flags with `docker container create` and `docker run`. [moby/moby#45906](https://github.com/moby/moby/pull/45906) +- Automatically enable IPv6 on a network when an IPv6 subnet is specified. [moby/moby#46455](https://github.com/moby/moby/pull/46455) +- Add support for overlay networks over IPv6 transport. [moby/moby#46790](https://github.com/moby/moby/pull/46790) +- Configuration reloading is now more robust: if there's an error during the configuration reload process, no configuration changes are applied. [moby/moby#43980](https://github.com/moby/moby/pull/43980) +- Live restore: Containers with auto remove (`docker run --rm`) are no longer forcibly removed on engine restart. [moby/moby#46857](https://github.com/moby/moby/pull/46857) +- Live restore: containers that are live-restored will now be given another health-check start period when the daemon restarts. [moby/moby#47051](https://github.com/moby/moby/pull/47051) +- Container health status is flushed to disk less frequently, reducing wear on flash storage. [moby/moby#47044](https://github.com/moby/moby/pull/47044) +- Ensure network names are unique. [moby/moby#46251](https://github.com/moby/moby/pull/46251) +- Ensure that overlay2 layer metadata is correct. [moby/moby#46471](https://github.com/moby/moby/pull/46471) +- Fix `Downloading` progress message on image pull. 
[moby/moby#46515](https://github.com/moby/moby/pull/46515) +- Fix `NetworkConnect` and `ContainerCreate` with improved data validation, and return all validation errors at once. [moby/moby#46183](https://github.com/moby/moby/pull/46183) +- Fix `com.docker.network.host_ipv4` option when IPv6 and ip6tables are enabled. [moby/moby#46446](https://github.com/moby/moby/pull/46446) +- Fix daemon's `cleanupContainer` if containerd is stopped. [moby/moby#46213](https://github.com/moby/moby/pull/46213) +- Fix returning incorrect HTTP status codes for libnetwork errors. [moby/moby#46146](https://github.com/moby/moby/pull/46146) +- Fix various issues with images/json API filters and image list. [moby/moby#46034](https://github.com/moby/moby/pull/46034) +- CIFS volumes now resolve FQDNs correctly. [moby/moby#46863](https://github.com/moby/moby/pull/46863) +- Improve validation of the `userland-proxy-path` daemon configuration option. Validation now happens during daemon startup, instead of producing an error when starting a container with port-mapping. [moby/moby#47000](https://github.com/moby/moby/pull/47000) +- Set the MAC address of container's interface when network mode is a short network ID. [moby/moby#46406](https://github.com/moby/moby/pull/46406) +- Sort unconsumed build arguments before display in build output. [moby/moby#45917](https://github.com/moby/moby/pull/45917) +- The `docker image save` tarball output is now OCI compliant. [moby/moby#44598](https://github.com/moby/moby/pull/44598) +- The daemon no longer appends `ACCEPT` rules to the end of the `INPUT` iptables chain for encrypted overlay networks. Depending on firewall configuration, a rule may be needed to permit incoming encrypted overlay network traffic. [moby/moby#45280](https://github.com/moby/moby/pull/45280) +- Unpacking layers with extended attributes onto an incompatible filesystem will now fail instead of silently discarding extended attributes. [moby/moby#45464](https://github.com/moby/moby/pull/45464) +- Update daemon MTU option to BridgeConfig and display warning on Windows. [moby/moby#45887](https://github.com/moby/moby/pull/45887) +- Validate IPAM config when creating a network. Automatically fix networks created prior to this release where `--ip-range` is larger than `--subnet`. [moby/moby#45759](https://github.com/moby/moby/pull/45759) +- containerd image store: Add image events for `push`, `pull`, and `save`. [moby/moby#46405](https://github.com/moby/moby/pull/46405) +- containerd image store: Add support for pulling legacy schema1 images. [moby/moby#46513](https://github.com/moby/moby/pull/46513) +- containerd image store: Add support for pushing all tags. [moby/moby#46485](https://github.com/moby/moby/pull/46485) +- containerd image store: Add support for registry token. [moby/moby#46475](https://github.com/moby/moby/pull/46475) +- containerd image store: Add support for showing the number of containers that use an image. [moby/moby#46511](https://github.com/moby/moby/pull/46511) +- containerd image store: Fix a bug related to the `ONBUILD`, `MAINTAINER`, and `HEALTHCHECK` Dockerfile instructions. [moby/moby#46313](https://github.com/moby/moby/pull/46313) +- containerd image store: Fix `Pulling from` progress message. [moby/moby#46494](https://github.com/moby/moby/pull/46494) +- containerd image store: Add support for referencing images via the truncated ID with `sha256:` prefix. 
[moby/moby#46435](https://github.com/moby/moby/pull/46435) +- containerd image store: Fix `docker images` showing intermediate layers by default. [moby/moby#46423](https://github.com/moby/moby/pull/46423) +- containerd image store: Fix checking if the specified platform exists when getting an image. [moby/moby#46495](https://github.com/moby/moby/pull/46495) +- containerd image store: Fix errors when multiple `ADD` or `COPY` instructions were used with the classic builder. [moby/moby#46383](https://github.com/moby/moby/pull/46383) +- containerd image store: Fix stack overflow errors when importing an image. [moby/moby#46418](https://github.com/moby/moby/pull/46418) +- containerd image store: Improve `docker pull` progress output. [moby/moby#46412](https://github.com/moby/moby/pull/46412) +- containerd image store: Print the tag, digest, and size after pushing an image. [moby/moby#46384](https://github.com/moby/moby/pull/46384) +- containerd image store: Remove panic from `UpdateConfig`. [moby/moby#46433](https://github.com/moby/moby/pull/46433) +- containerd image store: Return an error when an image tag resembles a digest. [moby/moby#46492](https://github.com/moby/moby/pull/46492) +- containerd image store: `docker image ls` now shows the correct image creation time and date. [moby/moby#46719](https://github.com/moby/moby/pull/46719) +- containerd image store: Fix an issue handling user namespace settings. [moby/moby#46375](https://github.com/moby/moby/pull/46375) +- containerd image store: Add support for pulling all tags (`docker pull -a`). [moby/moby#46618](https://github.com/moby/moby/pull/46618) +- containerd image store: Use the domain name in the image reference as the default registry authentication domain. [moby/moby#46779](https://github.com/moby/moby/pull/46779) + +### Packaging updates + +- Upgrade API to v1.44. [moby/moby#45468](https://github.com/moby/moby/pull/45468) +- Upgrade Compose to `2.24.1`. [docker/docker-ce-packaging#980](https://github.com/docker/docker-ce-packaging/pull/980) +- Upgrade containerd to v1.7.12 (static binaries only). [moby/moby#47070](https://github.com/moby/moby/pull/47070) +- Upgrade Go runtime to [1.21.6](https://go.dev/doc/devel/release#go1.21.minor). [moby/moby#47053](https://github.com/moby/moby/pull/47053) +- Upgrade runc to v1.1.11. [moby/moby#47007](https://github.com/moby/moby/pull/47007) +- Upgrade BuildKit to v0.12.4. [moby/moby#46882](https://github.com/moby/moby/pull/46882) +- Upgrade Buildx to v0.12.1. [docker/docker-ce-packaging#979](https://github.com/docker/docker-ce-packaging/pull/979) + +### Removed + +- API: Remove VirtualSize field for the `GET /images/json` and `GET /images/{id}/json` endpoints. [moby/moby#45469](https://github.com/moby/moby/pull/45469) +- Remove deprecated `devicemapper` storage driver. [moby/moby#43637](https://github.com/moby/moby/pull/43637) +- Remove deprecated orchestrator options. [docker/cli#4366](https://github.com/docker/cli/pull/4366) +- Remove support for Debian Upstart init system. [moby/moby#45548](https://github.com/moby/moby/pull/45548), [moby/moby#45551](https://github.com/moby/moby/pull/45551) +- Remove the `--oom-score-adjust` daemon option. [moby/moby#45484](https://github.com/moby/moby/pull/45484) +- Remove warning for deprecated `~/.dockercfg` file. [docker/cli#4281](https://github.com/docker/cli/pull/4281) +- Remove `logentries` logging driver. [moby/moby#46925](https://github.com/moby/moby/pull/46925) + +### Deprecated + +- Deprecate API versions older than 1.24. 
[Deprecation notice](../deprecated.md#deprecate-legacy-api-versions) +- Deprecate `IsAutomated` field and `is-automated` filter for `docker search`. [Deprecation notice](../deprecated.md#isautomated-field-and-is-automated-filter-on-docker-search) +- API: Deprecate `Container` and `ContainerConfig` properties for `/images/{id}/json` (`docker image inspect`). [moby/moby#46939](https://github.com/moby/moby/pull/46939) diff --git a/content/engine/security/rootless.md b/content/engine/security/rootless.md index cf2775556..57a534299 100644 --- a/content/engine/security/rootless.md +++ b/content/engine/security/rootless.md @@ -519,7 +519,33 @@ For more information, see [Limiting resources](#limiting-resources). ### Networking errors -**`docker run -p` fails with `cannot expose privileged port`** +This section provides troubleshooting tips for networking in rootless mode. + +Networking in rootless mode is supported via network and port drivers in +RootlessKit. Network performance and characteristics depend on the combination +of network and port driver you use. If you're experiencing unexpected behavior +or performance related to networking, review the following table which shows +the configurations supported by RootlessKit, and how they compare: + +| Network driver | Port driver | Net throughput | Port throughput | Source IP propagation | No SUID | Note | +| -------------- | -------------- | -------------- | --------------- | --------------------- | ------- | ---------------------------------------------------------------------------- | +| `slirp4netns` | `builtin` | Slow | Fast ✅ | ❌ | ✅ | Default in a typical setup | +| `vpnkit` | `builtin` | Slow | Fast ✅ | ❌ | ✅ | Default when `slirp4netns` isn't installed | +| `slirp4netns` | `slirp4netns` | Slow | Slow | ✅ | ✅ | | +| `pasta` | `implicit` | Slow | Fast ✅ | ✅ | ✅ | Experimental; Needs pasta version 2023_12_04 or later | +| `lxc-user-nic` | `builtin` | Fast ✅ | Fast ✅ | ❌ | ❌ | Experimental | +| `bypass4netns` | `bypass4netns` | Fast ✅ | Fast ✅ | ✅ | ✅ | **Note:** Not integrated to RootlessKit as it needs a custom seccomp profile | + +For information about troubleshooting specific networking issues, see: + +- [`docker run -p` fails with `cannot expose privileged port`](#docker-run--p-fails-with-cannot-expose-privileged-port) +- [Ping doesn't work](#ping-doesnt-work) +- [`IPAddress` shown in `docker inspect` is unreachable](#ipaddress-shown-in-docker-inspect-is-unreachable) +- [`--net=host` doesn't listen ports on the host network namespace](#--nethost-doesnt-listen-ports-on-the-host-network-namespace) +- [Network is slow](#network-is-slow) +- [`docker run -p` does not propagate source IP addresses](#docker-run--p-does-not-propagate-source-ip-addresses) + +#### `docker run -p` fails with `cannot expose privileged port` `docker run -p` fails with this error when a privileged port (< 1024) is specified as the host port. @@ -536,7 +562,7 @@ $ docker run -p 8080:80 nginx:alpine To allow exposing privileged ports, see [Exposing privileged ports](#exposing-privileged-ports). -**ping doesn't work** +#### Ping doesn't work Ping does not work when `/proc/sys/net/ipv4/ping_group_range` is set to `1 0`: @@ -547,23 +573,24 @@ $ cat /proc/sys/net/ipv4/ping_group_range For details, see [Routing ping packets](#routing-ping-packets). -**`IPAddress` shown in `docker inspect` is unreachable** +#### `IPAddress` shown in `docker inspect` is unreachable This is an expected behavior, as the daemon is namespaced inside RootlessKit's network namespace. 
Use `docker run -p` instead. -**`--net=host` doesn't listen ports on the host network namespace** +#### `--net=host` doesn't listen ports on the host network namespace This is an expected behavior, as the daemon is namespaced inside RootlessKit's network namespace. Use `docker run -p` instead. -**Network is slow** +#### Network is slow Docker with rootless mode uses [slirp4netns](https://github.com/rootless-containers/slirp4netns) as the default network stack if slirp4netns v0.4.0 or later is installed. If slirp4netns is not installed, Docker falls back to [VPNKit](https://github.com/moby/vpnkit). - Installing slirp4netns may improve the network throughput. -See [RootlessKit documentation](https://github.com/rootless-containers/rootlesskit/tree/v0.13.0#network-drivers) for the benchmark result. + +For more information about network drivers for RootlessKit, see +[RootlessKit documentation](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/network.md). Also, changing MTU value may improve the throughput. The MTU value can be specified by creating `~/.config/systemd/user/docker.service.d/override.conf` with the following content: @@ -579,25 +606,51 @@ $ systemctl --user daemon-reload $ systemctl --user restart docker ``` -**`docker run -p` does not propagate source IP addresses** +#### `docker run -p` does not propagate source IP addresses -This is because Docker with rootless mode uses RootlessKit's builtin port driver by default. +This is because Docker in rootless mode uses RootlessKit's `builtin` port +driver by default, which doesn't support source IP propagation. To enable +source IP propagation, you can: -The source IP addresses can be propagated by creating `~/.config/systemd/user/docker.service.d/override.conf` with the following content: +- Use the `slirp4netns` RootlessKit port driver +- Use the `pasta` RootlessKit network driver, with the `implicit` port driver -```systemd -[Service] -Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns" -``` +The `pasta` network driver is experimental, but provides improved throughput +performance compared to the `slirp4netns` port driver. The `pasta` driver +requires Docker Engine version 25.0 or later. -And then restart the daemon: -```console -$ systemctl --user daemon-reload -$ systemctl --user restart docker -``` +To change the RootlessKit networking configuration: + +1. Create a file at `~/.config/systemd/user/docker.service.d/override.conf`. +2. Add the following contents, depending on which configuration you would like to use: + + - `slirp4netns` + + ```systemd + [Service] + Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=slirp4netns" + Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns" + ``` + + - `pasta` network driver with `implicit` port driver + + ```systemd + [Service] + Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=pasta" + Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=implicit" + ``` + +3. Restart the daemon: + + ```console + $ systemctl --user daemon-reload + $ systemctl --user restart docker + ``` + +For more information about networking options for RootlessKit, see: -Note that this configuration decreases throughput. -See [RootlessKit documentation](https://github.com/rootless-containers/rootlesskit/tree/v0.13.0#port-drivers) for the benchmark result. 
+- [Network drivers](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/network.md)
+- [Port drivers](https://github.com/rootless-containers/rootlesskit/blob/v2.0.0/docs/port.md)
 
 ### Tips for debugging
 
 **Entering into `dockerd` namespaces**
 
diff --git a/content/get-started/resources.md b/content/get-started/resources.md
index ddda4726f..d0ae74455 100644
--- a/content/get-started/resources.md
+++ b/content/get-started/resources.md
@@ -7,6 +7,12 @@ description: Get started resources learning docker
 Docker and the broader community of Docker experts have put together many different ways to get further training and hands-on experience with Docker.
 Expand your understanding of Docker and Kubernetes with these additional free and paid resources.
 
+## Docker training
+
+Expand your knowledge on all things Docker with [basic to advanced trainings from Docker experts](https://www.docker.com/resources/trainings/).
+
+You can watch recorded content at your convenience, or register for a live session to participate in Q&A.
+
 ## Hosted labs
 
 These self-paced and hands-on workshops use a free, hosted environment ([Play with Kubernetes](https://labs.play-with-k8s.com/)) that doesn't require any installation. Follow along and learn more about Kubernetes.
diff --git a/content/guides/use-case/genai-pdf-bot/_index.md b/content/guides/use-case/genai-pdf-bot/_index.md
new file mode 100644
index 000000000..bb4a09326
--- /dev/null
+++ b/content/guides/use-case/genai-pdf-bot/_index.md
@@ -0,0 +1,16 @@
+---
+description: Containerize generative AI (GenAI) apps using Docker
+keywords: python, generative ai, genai, llm, neo4j, ollama, langchain
+title: Generative AI guide
+toc_min: 1
+toc_max: 2
+---
+
+The generative AI (GenAI) guide teaches you how to containerize an existing GenAI application using Docker. In this guide, you’ll learn how to:
+
+* Containerize and run a Python-based GenAI application
+* Set up a local environment to run the complete GenAI stack locally for development
+
+Start by containerizing an existing GenAI application.
+
+{{< button text="Containerize a GenAI app" url="containerize.md" >}}
diff --git a/content/guides/use-case/genai-pdf-bot/containerize.md b/content/guides/use-case/genai-pdf-bot/containerize.md
new file mode 100644
index 000000000..1298ddf43
--- /dev/null
+++ b/content/guides/use-case/genai-pdf-bot/containerize.md
@@ -0,0 +1,133 @@
+---
+title: Containerize a generative AI application
+keywords: python, generative ai, genai, llm, neo4j, ollama, containerize, initialize, langchain, openai
+description: Learn how to containerize a generative AI (GenAI) application.
+---
+
+## Prerequisites
+
+* You have installed the latest version of [Docker Desktop](../../../get-docker.md). Docker adds new features regularly and some parts of this guide may work only with the latest version of Docker Desktop.
+* You have a [git client](https://git-scm.com/downloads). The examples in this section use a command-line based git client, but you can use any client.
+
+## Overview
+
+This section walks you through containerizing a generative AI (GenAI) application using Docker Desktop.
+
+> **Note**
+>
+> You can see more samples of containerized GenAI applications in the [GenAI Stack](https://github.com/docker/genai-stack) demo applications.
+
+## Get the sample application
+
+The sample application used in this guide is a modified version of the PDF Reader application from the [GenAI Stack](https://github.com/docker/genai-stack) demo applications.
The application is a full stack Python application that lets you ask questions about a PDF file. + +The application uses [LangChain](https://www.langchain.com/) for orchestration, [Streamlit](https://streamlit.io/) for the UI, [Ollama](https://ollama.ai/) to run the LLM, and [Neo4j](https://neo4j.com/) to store vectors. + +Clone the sample application. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository: + +```console +$ git clone https://github.com/docker/docker-genai-sample +``` + +You should now have the following files in your `docker-genai-sample` directory. + +```text +├── docker-genai-sample/ +│ ├── .gitignore +│ ├── app.py +│ ├── chains.py +│ ├── env.example +│ ├── requirements.txt +│ ├── util.py +│ ├── LICENSE +│ └── README.md +``` + +## Initialize Docker assets + +Now that you have an application, you can use `docker init` to create the necessary Docker assets to containerize your application. Inside the `docker-genai-sample` directory, run the `docker init` command. `docker init` provides some default configuration, but you'll need to answer a few questions about your application. For example, this application uses Streamlit to run. Refer to the following `docker init` example and use the same answers for your prompts. + +```console +$ docker init +Welcome to the Docker Init CLI! + +This utility will walk you through creating the following files with sensible defaults for your project: + - .dockerignore + - Dockerfile + - compose.yaml + - README.Docker.md + +Let's get started! + +? What application platform does your project use? Python +? What version of Python do you want to use? 3.11.4 +? What port do you want your app to listen on? 8000 +? What is the command to run your app? streamlit run app.py --server.address=0.0.0.0 --server.port=8000 +``` + +You should now have the following contents in your `docker-genai-sample` +directory. + +```text +├── docker-genai-sample/ +│ ├── .dockerignore +│ ├── .gitignore +│ ├── app.py +│ ├── chains.py +│ ├── compose.yaml +│ ├── env.example +│ ├── requirements.txt +│ ├── util.py +│ ├── Dockerfile +│ ├── LICENSE +│ ├── README.Docker.md +│ └── README.md +``` + +To learn more about the files that `docker init` added, see the following: + - [Dockerfile](../../../engine/reference/builder.md) + - [.dockerignore](../../../engine/reference/builder.md#dockerignore-file) + - [compose.yaml](../../../compose/compose-file/_index.md) + + +## Run the application + +Inside the `docker-genai-sample` directory, run the following command in a +terminal. + +```console +$ docker compose up --build +``` + +Docker builds and runs your application. Depending on your network connection, it may take several minutes to download all the dependencies. You'll see a message like the following in the terminal when the application is running. + +```console +server-1 | You can now view your Streamlit app in your browser. +server-1 | +server-1 | URL: http://0.0.0.0:8000 +server-1 | +``` + +Open a browser and view the application at [http://localhost:8000](http://localhost:8000). You should see a simple Streamlit application. The application may take a few minutes to download the embedding model. While the download is in progress, **Running** appears in the top-right corner. + +The application requires a Neo4j database service and an LLM service to +function. If you have access to services that you ran outside of Docker, specify +the connection information and try it out. 
If you don't have the services +running, continue with this guide to learn how you can run some or all of these +services with Docker. + +In the terminal, press `ctrl`+`c` to stop the application. + +## Summary + +In this section, you learned how you can containerize and run your GenAI +application using Docker. + +Related information: + - [docker init CLI reference](../../../engine/reference/commandline/init.md) + +## Next steps + +In the next section, you'll learn how you can run your application, database, and LLM service all locally using Docker. + +{{< button text="Develop your application" url="develop.md" >}} \ No newline at end of file diff --git a/content/guides/use-case/genai-pdf-bot/develop.md b/content/guides/use-case/genai-pdf-bot/develop.md new file mode 100644 index 000000000..acac48053 --- /dev/null +++ b/content/guides/use-case/genai-pdf-bot/develop.md @@ -0,0 +1,247 @@ +--- +title: Use containers for generative AI development +keywords: python, local, development, generative ai, genai, llm, neo4j, ollama, langchain, openai +description: Learn how to develop your generative AI (GenAI) application locally. +--- + +## Prerequisites + +Complete [Containerize a generative AI application](containerize.md). + +## Overview + +In this section, you'll learn how to set up a development environment to access all the services that your generative AI (GenAI) application needs. This includes: + +- Adding a local database +- Adding a local or remote LLM service + +> **Note** +> +> You can see more samples of containerized GenAI applications in the [GenAI Stack](https://github.com/docker/genai-stack) demo applications. + +## Add a local database + +You can use containers to set up local services, like a database. In this section, you'll update the `compose.yaml` file to define a database service. In addition, you'll specify an environment variables file to load the database connection information rather than manually entering the information every time. + +To run the database service: + +1. In the cloned repository's directory, rename `env.example` file to `.env`. + This file contains the environment variables that the containers will use. +2. In the cloned repository's directory, open the `compose.yaml` file in an IDE or text editor. +3. In the `compose.yaml` file, add the following: + - Add instructions to run a Neo4j database + - Specify the environment file under the server service in order to pass in the environment variables for the connection + + The following is the updated `compose.yaml` file. All comments have been removed. + + ```yaml{hl_lines=["7-23"]} + services: + server: + build: + context: . + ports: + - 8000:8000 + env_file: + - .env + depends_on: + database: + condition: service_healthy + database: + image: neo4j:5.11 + ports: + - "7474:7474" + - "7687:7687" + environment: + - NEO4J_AUTH=${NEO4J_USERNAME}/${NEO4J_PASSWORD} + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider localhost:7474 || exit 1"] + interval: 5s + timeout: 3s + retries: 5 + ``` + + > **Note** + > + > To learn more about Neo4j, see the [Neo4j Official Docker Image](https://hub.docker.com/_/neo4j). + +4. Run the application. Inside the `docker-genai-sample` directory, +run the following command in a terminal. + + ```console + $ docker compose up --build + ``` + +5. Access the application. Open a browser and view the application at [http://localhost:8000](http://localhost:8000). You should see a simple Streamlit application. 
Note that asking questions to a PDF will cause the application to fail because the LLM service specified in the `.env` file isn't running yet.
+
+6. Stop the application. In the terminal, press `ctrl`+`c` to stop the application.
+
+## Add a local or remote LLM service
+
+The sample application supports both [Ollama](https://ollama.ai/) and [OpenAI](https://openai.com/). This guide provides instructions for the following scenarios:
+- Run Ollama in a container
+- Run Ollama outside of a container
+- Use OpenAI
+
+While all platforms can use any of the previous scenarios, the performance and
+GPU support may vary. You can use the following guidelines to help you choose the appropriate option:
+- Run Ollama in a container if you're on Linux or Windows 11, you
+  have a CUDA-supported GPU, and your system has at least 8 GB of RAM.
+- Run Ollama outside of a container if you're on an Apple silicon Mac.
+- Use OpenAI if the previous two scenarios don't apply to you.
+
+Choose one of the following options for your LLM service.
+
+{{< tabs >}}
+{{< tab name="Run Ollama in a container" >}}
+
+When running Ollama in a container, you should have a CUDA-supported GPU. While you can run Ollama in a container without a supported GPU, the performance may not be acceptable. Only Linux and Windows 11 support GPU access to containers.
+
+To run Ollama in a container and provide GPU access:
+1. Install the prerequisites.
+   - For Linux, install the [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-container-toolkit).
+   - For Windows 11, install the latest [NVIDIA driver](https://www.nvidia.com/Download/index.aspx).
+2. Add the Ollama service and a volume in your `compose.yaml`. The following is
+   the updated `compose.yaml`:
+
+   ```yaml {hl_lines=["24-38"]}
+   services:
+     server:
+       build:
+         context: .
+       ports:
+         - 8000:8000
+       env_file:
+         - .env
+       depends_on:
+         database:
+           condition: service_healthy
+     database:
+       image: neo4j:5.11
+       ports:
+         - "7474:7474"
+         - "7687:7687"
+       environment:
+         - NEO4J_AUTH=${NEO4J_USERNAME}/${NEO4J_PASSWORD}
+       healthcheck:
+         test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider localhost:7474 || exit 1"]
+         interval: 5s
+         timeout: 3s
+         retries: 5
+     ollama:
+       image: ollama/ollama:latest
+       ports:
+         - "11434:11434"
+       volumes:
+         - ollama_volume:/root/.ollama
+       deploy:
+         resources:
+           reservations:
+             devices:
+               - driver: nvidia
+                 count: all
+                 capabilities: [gpu]
+   volumes:
+     ollama_volume:
+   ```
+
+   > **Note**
+   >
+   > For more details about the Compose instructions, see [Turn on GPU access with Docker Compose](../../../compose/gpu-support.md).
+
+3. Add the ollama-pull service to your `compose.yaml` file. This service uses
+   the `docker/genai:ollama-pull` image, based on the GenAI Stack's
+   [pull_model.Dockerfile](https://github.com/docker/genai-stack/blob/main/pull_model.Dockerfile).
+   The service will automatically pull the model for your Ollama
+   container. The following is the updated section of the `compose.yaml` file:
+
+   ```yaml {hl_lines=["12-17"]}
+   services:
+     server:
+       build:
+         context: .
+       ports:
+         - 8000:8000
+       env_file:
+         - .env
+       depends_on:
+         database:
+           condition: service_healthy
+         ollama-pull:
+           condition: service_completed_successfully
+     ollama-pull:
+       image: docker/genai:ollama-pull
+       env_file:
+         - .env
+     # ...
+   ```
+
+{{< /tab >}}
+{{< tab name="Run Ollama outside of a container" >}}
+
+To run Ollama outside of a container:
+1. [Install](https://github.com/jmorganca/ollama) and run Ollama on your host
+   machine.
+2. Update the `OLLAMA_BASE_URL` value in your `.env` file to
+   `http://host.docker.internal:11434`.
+3. Pull the model to Ollama using the following command.
+   ```console
+   $ ollama pull llama2
+   ```
+
+{{< /tab >}}
+{{< tab name="Use OpenAI" >}}
+
+> **Important**
+>
+> Using OpenAI requires an [OpenAI account](https://platform.openai.com/login). OpenAI is a third-party hosted service and charges may apply.
+{ .important }
+
+1. Update the `LLM` value in your `.env` file to
+   `gpt-3.5`.
+2. Uncomment and update the `OPENAI_API_KEY` value in your `.env` file to
+   your [OpenAI API key](https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key).
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## Run your GenAI application
+
+At this point, you have the following services in your Compose file:
+- Server service for your main GenAI application
+- Database service to store vectors in a Neo4j database
+- (optional) Ollama service to run the LLM
+- (optional) Ollama-pull service to automatically pull the model for the Ollama
+  service
+
+To run all the services, run the following command in your `docker-genai-sample`
+directory:
+
+```console
+$ docker compose up --build
+```
+
+If your Compose file has the ollama-pull service, it may take several minutes for the ollama-pull service to pull the model. The ollama-pull service will continuously update the console with its status. After pulling the model, the ollama-pull service container will stop and you can access the application.
+
+Once the application is running, open a browser and access the application at [http://localhost:8000](http://localhost:8000).
+
+Upload a PDF file, for example the [Docker CLI Cheat Sheet](https://docs.docker.com/get-started/docker_cheatsheet.pdf), and ask a question about the PDF.
+
+Depending on your system and the LLM service that you chose, it may take several
+minutes to answer. If you are using Ollama and the performance isn't
+acceptable, try using OpenAI.
+
+## Summary
+
+In this section, you learned how to set up a development environment to provide
+access to all the services that your GenAI application needs.
+
+Related information:
+  - [Dockerfile reference](../../../engine/reference/builder.md)
+  - [Compose file reference](../../../compose/compose-file/_index.md)
+  - [Ollama Docker image](https://hub.docker.com/r/ollama/ollama)
+  - [Neo4j Official Docker Image](https://hub.docker.com/_/neo4j)
+  - [GenAI Stack demo applications](https://github.com/docker/genai-stack)
+
+## Next steps
+
+See samples of more GenAI applications in the [GenAI Stack demo applications](https://github.com/docker/genai-stack).
\ No newline at end of file diff --git a/content/guides/walkthroughs/access-local-folder.md b/content/guides/walkthroughs/access-local-folder.md index 00b5ba0b0..52a810bdb 100644 --- a/content/guides/walkthroughs/access-local-folder.md +++ b/content/guides/walkthroughs/access-local-folder.md @@ -82,7 +82,7 @@ Related information: - Deep dive into [bind mounts](../../storage/bind-mounts.md) - Learn about using bind mounts in Compose in the [Compose file reference](../../compose/compose-file/_index.md) -- Explore using bind mounts via the CLI in the [Docker run reference](/engine/reference/run/#volume-shared-filesystems) +- Explore using bind mounts via the CLI in the [Docker run reference](/engine/reference/commandline/container_run/#mount) ## Next steps diff --git a/content/guides/walkthroughs/publish-your-image.md b/content/guides/walkthroughs/publish-your-image.md index 644c309d1..3de04dc7c 100644 --- a/content/guides/walkthroughs/publish-your-image.md +++ b/content/guides/walkthroughs/publish-your-image.md @@ -55,7 +55,7 @@ In this walkthrough, you pushed and shared an image on Docker Hub. Related information: - Deep dive into the [Docker Hub manual](../../docker-hub/_index.md) -- Learn more about the [docker tag](../../engine/reference/commandline/tag.md) +- Learn more about the [docker tag](../../engine/reference/commandline/image_tag.md) command ## Next steps @@ -68,4 +68,4 @@ Continue to the language-specific guides to learn how you can use Docker to cont - [Node.js](../../language/nodejs/_index.md) - [PHP](../../language/php/_index.md) - [Python](../../language/python/_index.md) -- [Rust](../../language/rust/_index.md) \ No newline at end of file +- [Rust](../../language/rust/_index.md) diff --git a/content/language/rust/build-images.md b/content/language/rust/build-images.md index da6068784..6d646d545 100644 --- a/content/language/rust/build-images.md +++ b/content/language/rust/build-images.md @@ -174,7 +174,7 @@ Related information: - [Dockerfile reference](../../engine/reference/builder.md) - [.dockerignore file](../../engine/reference/builder.md#dockerignore-file) - [docker init CLI reference](../../engine/reference/commandline/init.md) - - [docker build CLI reference](../../engine/reference/commandline/build.md) + - [docker build CLI reference](../../engine/reference/commandline/image_build.md) ## Next steps diff --git a/content/language/rust/run-containers.md b/content/language/rust/run-containers.md index a45b390ef..285cf9869 100644 --- a/content/language/rust/run-containers.md +++ b/content/language/rust/run-containers.md @@ -192,7 +192,7 @@ That’s better! You can now easily identify your container based on the name. In this section, you took a look at running containers. You also took a look at managing containers by starting, stopping, and restarting them. And finally, you looked at naming your containers so they are more easily identifiable. Related information: - - [docker run CLI reference](../../engine/reference/commandline/run.md) + - [docker run CLI reference](../../engine/reference/commandline/container_run.md) ## Next steps diff --git a/content/network/_index.md b/content/network/_index.md index a1bbcf775..2af35ddd7 100644 --- a/content/network/_index.md +++ b/content/network/_index.md @@ -195,7 +195,7 @@ Your container will have lines in `/etc/hosts` which define the hostname of the container itself, as well as `localhost` and a few other common things. Custom hosts, defined in `/etc/hosts` on the host machine, aren't inherited by containers. 
To pass additional hosts into container, refer to [add entries to -container hosts file](../engine/reference/commandline/run.md#add-host) in the +container hosts file](../engine/reference/commandline/container_run.md#add-host) in the `docker run` reference documentation. ## Proxy server diff --git a/content/network/network-tutorial-overlay.md b/content/network/network-tutorial-overlay.md index a139dc902..0e83f3b5d 100644 --- a/content/network/network-tutorial-overlay.md +++ b/content/network/network-tutorial-overlay.md @@ -402,7 +402,7 @@ example also uses Linux hosts, but the same commands work on Windows. The two containers communicate with the overlay network connecting the two hosts. If you run another alpine container on `host2` that is _not detached_, you can ping `alpine1` from `host2` (and here we add the - [remove option](/engine/reference/run/#clean-up---rm) for automatic container cleanup): + [remove option](/engine/reference/commandline/container_run/#rm) for automatic container cleanup): ```sh $ docker run -it --rm --name alpine3 --network test-net alpine @@ -642,4 +642,4 @@ learn about user-defined bridge networks, continue to the - [Host networking tutorial](network-tutorial-host.md) - [Standalone networking tutorial](network-tutorial-standalone.md) -- [Macvlan networking tutorial](network-tutorial-macvlan.md) \ No newline at end of file +- [Macvlan networking tutorial](network-tutorial-macvlan.md) diff --git a/content/scout/policy/_index.md b/content/scout/policy/_index.md index 84a38b2d7..2f9f897d0 100644 --- a/content/scout/policy/_index.md +++ b/content/scout/policy/_index.md @@ -165,25 +165,17 @@ The **Supply chain attestations** policy requires that your artifacts have [provenance](../../build/attestations/slsa-provenance.md) attestations. This policy is unfulfilled if an artifact lacks either an SBOM attestation or a -provenance attestation, or if the provenance attestation lacks information -about the Git repository and base images being used. To ensure compliance, +provenance attestation with max mode. To ensure compliance, update your build command to attach these attestations at build-time: ```console $ docker buildx build --provenance=true --sbom=true -t --push . ``` -BuildKit automatically detects the Git repository and base images when this -information is available in the build context. For more information about +For more information about building with attestations, see [Attestations](../../build/attestations/_index.md). -> **Note** -> -> Docker Scout is currently unable to discern the difference between using -> `scratch` as a base image and having no base image provenance. As a result, -> images based on `scratch` always fail the Supply chain attestations policy. - ### Quality gates passed The Quality gates passed policy builds on the [SonarQube diff --git a/content/storage/bind-mounts.md b/content/storage/bind-mounts.md index 6e5bbc05c..f1a703d4e 100644 --- a/content/storage/bind-mounts.md +++ b/content/storage/bind-mounts.md @@ -266,6 +266,30 @@ $ docker container stop devtest $ docker container rm devtest ``` +## Recursive mounts + +When you bind mount a path that itself contains mounts, those submounts are +also included in the bind mount by default. This behavior is configurable, +using the `bind-recursive` option for `--mount`. This option is only supported +with the `--mount` flag, not with `-v` or `--volume`. + +If the bind mount is read-only, the Docker Engine makes a best-effort attempt +at making the submounts read-only as well. 
This is referred to as recursive +read-only mounts. Recursive read-only mounts require Linux kernel version 5.12 +or later. If you're running an older kernel version, submounts are +automatically mounted as read-write by default. Attempting to set submounts to +be read-only on a kernel version earlier than 5.12, using the +`bind-recursive=readonly` option, results in an error. + +Supported values for the `bind-recursive` option are: + +| Value | Description | +|:--------------------|:------------------------------------------------------------------------------------------------------------------| +| `enabled` (default) | Read-only mounts are made recursively read-only if kernel is v5.12 or later. Otherwise, submounts are read-write. | +| `disabled` | Submounts are ignored (not included in the bind mount). | +| `writable` | Submounts are read-write. | +| `readonly` | Submounts are read-only. Requires kernel v5.12 or later. | + ## Configure bind propagation Bind propagation defaults to `rprivate` for both bind mounts and volumes. It is @@ -396,4 +420,4 @@ and - Learn about [volumes](volumes.md). - Learn about [tmpfs mounts](tmpfs.md). -- Learn about [storage drivers](/storage/storagedriver/). \ No newline at end of file +- Learn about [storage drivers](/storage/storagedriver/). diff --git a/content/storage/storagedriver/btrfs-driver.md b/content/storage/storagedriver/btrfs-driver.md index d793bf67c..3f3c7dd3e 100644 --- a/content/storage/storagedriver/btrfs-driver.md +++ b/content/storage/storagedriver/btrfs-driver.md @@ -158,7 +158,7 @@ $ sudo btrfs filesystem balance /var/lib/docker ## How the `btrfs` storage driver works -The `btrfs` storage driver works differently from `devicemapper` or other +The `btrfs` storage driver works differently from other storage drivers in that your entire `/var/lib/docker/` directory is stored on a Btrfs volume. diff --git a/content/storage/storagedriver/device-mapper-driver.md b/content/storage/storagedriver/device-mapper-driver.md index bcae77604..aeba60443 100644 --- a/content/storage/storagedriver/device-mapper-driver.md +++ b/content/storage/storagedriver/device-mapper-driver.md @@ -9,7 +9,7 @@ aliases: > **Deprecated** > > The Device Mapper driver [has been deprecated](../../../engine/deprecated.md#device-mapper-storage-driver), -> and will be removed in Docker Engine v25.0. If you are using Device Mapper, +> and is removed in Docker Engine v25.0. If you are using Device Mapper, > you must migrate to a supported storage driver before upgrading to Docker > Engine v25.0. Read the [Docker storage drivers](select-storage-driver.md) > page for supported storage drivers. diff --git a/content/storage/storagedriver/select-storage-driver.md b/content/storage/storagedriver/select-storage-driver.md index a8174eeb6..2d2a845d7 100644 --- a/content/storage/storagedriver/select-storage-driver.md +++ b/content/storage/storagedriver/select-storage-driver.md @@ -1,7 +1,7 @@ --- title: Docker storage drivers description: Learn how to select the proper storage driver for your container. 
-keywords: container, storage, driver, btrfs, devicemapper, zfs, overlay, overlay2 +keywords: container, storage, driver, btrfs, zfs, overlay, overlay2 aliases: - /engine/userguide/storagedriver/ - /engine/userguide/storagedriver/selectadriver/ @@ -21,13 +21,12 @@ driver with the best overall performance and stability in the most usual scenari The Docker Engine provides the following storage drivers on Linux: -| Driver | Description | -| ----------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `overlay2` | `overlay2` is the preferred storage driver for all currently supported Linux distributions, and requires no extra configuration. | -| `fuse-overlayfs` | `fuse-overlayfs`is preferred only for running Rootless Docker on a host that does not provide support for rootless `overlay2`. On Ubuntu and Debian 10, the `fuse-overlayfs` driver does not need to be used, and `overlay2` works even in rootless mode. Refer to the [rootless mode documentation](../../engine/security/rootless.md) for details. | -| `btrfs` and `zfs` | The `btrfs` and `zfs` storage drivers allow for advanced options, such as creating "snapshots", but require more maintenance and setup. Each of these relies on the backing filesystem being configured correctly. | -| `vfs` | The `vfs` storage driver is intended for testing purposes, and for situations where no copy-on-write filesystem can be used. Performance of this storage driver is poor, and is not generally recommended for production use. | -| `devicemapper` ([deprecated](../../../engine/deprecated.md#device-mapper-storage-driver)) | The `devicemapper` storage driver requires `direct-lvm` for production environments, because `loopback-lvm`, while zero-configuration, has very poor performance. `devicemapper` was the recommended storage driver for CentOS and RHEL, as their kernel version did not support `overlay2`. However, current versions of CentOS and RHEL now have support for `overlay2`, which is now the recommended driver. | +| Driver | Description | +| :---------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `overlay2` | `overlay2` is the preferred storage driver for all currently supported Linux distributions, and requires no extra configuration. | +| `fuse-overlayfs` | `fuse-overlayfs`is preferred only for running Rootless Docker on a host that does not provide support for rootless `overlay2`. On Ubuntu and Debian 10, the `fuse-overlayfs` driver does not need to be used, and `overlay2` works even in rootless mode. Refer to the [rootless mode documentation](../../engine/security/rootless.md) for details. | +| `btrfs` and `zfs` | The `btrfs` and `zfs` storage drivers allow for advanced options, such as creating "snapshots", but require more maintenance and setup. Each of these relies on the backing filesystem being configured correctly. 
| +| `vfs` | The `vfs` storage driver is intended for testing purposes, and for situations where no copy-on-write filesystem can be used. Performance of this storage driver is poor, and is not generally recommended for production use. | @@ -64,18 +63,14 @@ example, `btrfs` is only supported if your system uses `btrfs` as storage. In general, the following configurations work on recent versions of the Linux distribution: -| Linux distribution | Recommended storage drivers | Alternative drivers | -| :----------------- | :-------------------------- | :---------------------------- | -| Ubuntu | `overlay2` | `devicemapper`¹, `zfs`, `vfs` | -| Debian | `overlay2` | `devicemapper`¹, `vfs` | -| CentOS | `overlay2` | `devicemapper`¹, `zfs`, `vfs` | -| Fedora | `overlay2` | `devicemapper`¹, `zfs`, `vfs` | -| SLES 15 | `overlay2` | `devicemapper`¹, `vfs` | -| RHEL | `overlay2` | `devicemapper`¹, `vfs` | - -¹) The `devicemapper` storage driver is deprecated, and will be removed in a future -release. It is recommended that users of the `devicemapper` storage driver migrate -to `overlay2`. +| Linux distribution | Recommended storage drivers | Alternative drivers | +| :------------------- | :--------------------------- | :------------------- | +| Ubuntu | `overlay2` | `zfs`, `vfs` | +| Debian | `overlay2` | `vfs` | +| CentOS | `overlay2` | `zfs`, `vfs` | +| Fedora | `overlay2` | `zfs`, `vfs` | +| SLES 15 | `overlay2` | `vfs` | +| RHEL | `overlay2` | `vfs` | When in doubt, the best all-around configuration is to use a modern Linux distribution with a kernel that supports the `overlay2` storage driver, and to @@ -111,7 +106,6 @@ backing filesystems. | :--------------- | :---------------------------- | | `overlay2` | `xfs` with ftype=1, `ext4` | | `fuse-overlayfs` | any filesystem | -| `devicemapper` | `direct-lvm` | | `btrfs` | `btrfs` | | `zfs` | `zfs` | | `vfs` | any filesystem | @@ -127,7 +121,7 @@ following generalizations: - `overlay2` operates at the file level rather than the block level. This uses memory more efficiently, but the container's writable layer may grow quite large in write-heavy workloads. -- Block-level storage drivers such as `devicemapper`, `btrfs`, and `zfs` perform +- Block-level storage drivers such as `btrfs`, and `zfs` perform better for write-heavy workloads (though not as well as Docker volumes). - `btrfs` and `zfs` require a lot of memory. - `zfs` is a good choice for high-density workloads such as PaaS. @@ -197,7 +191,6 @@ to physical or logical disks on the Docker host. ## Related information - [About images, containers, and storage drivers](index.md) -- [`devicemapper` storage driver in practice](device-mapper-driver.md) - [`overlay2` storage driver in practice](overlayfs-driver.md) - [`btrfs` storage driver in practice](btrfs-driver.md) - [`zfs` storage driver in practice](zfs-driver.md) diff --git a/data/buildx/docker_buildx_build.yaml b/data/buildx/docker_buildx_build.yaml index b9e032dcf..5b9789072 100644 --- a/data/buildx/docker_buildx_build.yaml +++ b/data/buildx/docker_buildx_build.yaml @@ -6,7 +6,7 @@ long: |- to the UI of `docker build` command and takes the same flags and arguments. For documentation on most of these flags, refer to the [`docker build` - documentation](/engine/reference/commandline/build/). + documentation](/engine/reference/commandline/image_build/). This page describes a subset of the new flags. 
usage: docker buildx build [OPTIONS] PATH | URL | - pname: docker buildx @@ -16,7 +16,7 @@ options: value_type: stringSlice default_value: '[]' description: 'Add a custom host-to-IP mapping (format: `host:ip`)' - details_url: /engine/reference/commandline/build/#add-host + details_url: /engine/reference/commandline/image_build/#add-host deprecated: false hidden: false experimental: false @@ -106,7 +106,7 @@ options: - option: cgroup-parent value_type: string description: Set the parent cgroup for the `RUN` instructions during build - details_url: /engine/reference/commandline/build/#cgroup-parent + details_url: /engine/reference/commandline/image_build/#cgroup-parent deprecated: false hidden: false experimental: false @@ -186,7 +186,7 @@ options: shorthand: f value_type: string description: 'Name of the Dockerfile (default: `PATH/Dockerfile`)' - details_url: /engine/reference/commandline/build/#file + details_url: /engine/reference/commandline/image_build/#file deprecated: false hidden: false experimental: false @@ -296,6 +296,7 @@ options: value_type: stringArray default_value: '[]' description: Do not cache specified stages + details_url: '#no-cache-filter' deprecated: false hidden: false experimental: false @@ -487,7 +488,7 @@ options: value_type: stringArray default_value: '[]' description: 'Name and optionally a tag (format: `name:tag`)' - details_url: /engine/reference/commandline/build/#tag + details_url: /engine/reference/commandline/image_build/#tag deprecated: false hidden: false experimental: false @@ -497,7 +498,7 @@ options: - option: target value_type: string description: Set the target build stage to build - details_url: /engine/reference/commandline/build/#target + details_url: /engine/reference/commandline/image_build/#target deprecated: false hidden: false experimental: false @@ -622,7 +623,7 @@ examples: |- ### Set build-time variables (--build-arg) {#build-arg} - Same as [`docker build` command](/engine/reference/commandline/build/#build-arg). + Same as [`docker build` command](/engine/reference/commandline/image_build/#build-arg). There are also useful built-in build arguments, such as: @@ -799,6 +800,61 @@ examples: |- } ``` + ### Ignore build cache for specific stages (--no-cache-filter) {#no-cache-filter} + + The `--no-cache-filter` lets you specify one or more stages of a multi-stage + Dockerfile for which build cache should be ignored. To specify multiple stages, + use a comma-separated syntax: + + ```console + $ docker buildx build --no-cache-filter stage1,stage2,stage3 . + ``` + + For example, the following Dockerfile contains four stages: + + - `base` + - `install` + - `test` + - `release` + + ```dockerfile + # syntax=docker/dockerfile:1 + + FROM oven/bun:1 as base + WORKDIR /app + + FROM base AS install + WORKDIR /temp/dev + RUN --mount=type=bind,source=package.json,target=package.json \ + --mount=type=bind,source=bun.lockb,target=bun.lockb \ + bun install --frozen-lockfile + + FROM base AS test + COPY --from=install /temp/dev/node_modules node_modules + COPY . . + RUN bun test + + FROM base AS release + ENV NODE_ENV=production + COPY --from=install /temp/dev/node_modules node_modules + COPY . . + ENTRYPOINT ["bun", "run", "index.js"] + ``` + + To ignore the cache for the `install` stage: + + ```console + $ docker buildx build --no-cache-filter install . + ``` + + To ignore the cache the `install` and `release` stages: + + ```console + $ docker buildx build --no-cache-filter install,release . 
+ ``` + + The arguments for the `--no-cache-filter` flag must be names of stages. + ### Set the export action for the build result (-o, --output) {#output} ```text diff --git a/data/buildx/docker_buildx_debug_build.yaml b/data/buildx/docker_buildx_debug_build.yaml index 0ef8996fa..d3127a306 100644 --- a/data/buildx/docker_buildx_debug_build.yaml +++ b/data/buildx/docker_buildx_debug_build.yaml @@ -10,7 +10,7 @@ options: value_type: stringSlice default_value: '[]' description: 'Add a custom host-to-IP mapping (format: `host:ip`)' - details_url: /engine/reference/commandline/build/#add-host + details_url: /engine/reference/commandline/image_build/#add-host deprecated: false hidden: false experimental: false @@ -93,7 +93,7 @@ options: - option: cgroup-parent value_type: string description: Set the parent cgroup for the `RUN` instructions during build - details_url: /engine/reference/commandline/build/#cgroup-parent + details_url: /engine/reference/commandline/image_build/#cgroup-parent deprecated: false hidden: false experimental: false @@ -173,7 +173,7 @@ options: shorthand: f value_type: string description: 'Name of the Dockerfile (default: `PATH/Dockerfile`)' - details_url: /engine/reference/commandline/build/#file + details_url: /engine/reference/commandline/image_build/#file deprecated: false hidden: false experimental: false @@ -463,7 +463,7 @@ options: value_type: stringArray default_value: '[]' description: 'Name and optionally a tag (format: `name:tag`)' - details_url: /engine/reference/commandline/build/#tag + details_url: /engine/reference/commandline/image_build/#tag deprecated: false hidden: false experimental: false @@ -473,7 +473,7 @@ options: - option: target value_type: string description: Set the target build stage to build - details_url: /engine/reference/commandline/build/#target + details_url: /engine/reference/commandline/image_build/#target deprecated: false hidden: false experimental: false diff --git a/data/buildx/docker_buildx_du.yaml b/data/buildx/docker_buildx_du.yaml index ad5c5ab2e..83b3d5313 100644 --- a/data/buildx/docker_buildx_du.yaml +++ b/data/buildx/docker_buildx_du.yaml @@ -18,6 +18,7 @@ options: value_type: bool default_value: "false" description: Provide a more verbose output + details_url: '#verbose' deprecated: false hidden: false experimental: false @@ -36,9 +37,102 @@ inherited_options: kubernetes: false swarm: false examples: |- + ### Show disk usage + + The `docker buildx du` command shows the disk usage for the currently selected + builder. + + ```console + $ docker buildx du + ID RECLAIMABLE SIZE LAST ACCESSED + 12wgll9os87pazzft8lt0yztp* true 1.704GB 13 days ago + iupsv3it5ubh92aweb7c1wojc* true 1.297GB 36 minutes ago + ek4ve8h4obyv5kld6vicmtqyn true 811.7MB 13 days ago + isovrfnmkelzhtdx942w9vjcb* true 811.7MB 13 days ago + 0jty7mjrndi1yo7xkv1baralh true 810.5MB 13 days ago + jyzkefmsysqiaakgwmjgxjpcz* true 810.5MB 13 days ago + z8w1y95jn93gvj92jtaj6uhwk true 318MB 2 weeks ago + rz2zgfcwlfxsxd7d41w2sz2tt true 8.224kB* 43 hours ago + n5bkzpewmk2eiu6hn9tzx18jd true 8.224kB* 43 hours ago + ao94g6vtbzdl6k5zgdmrmnwpt true 8.224kB* 43 hours ago + 2pyjep7njm0wh39vcingxb97i true 8.224kB* 43 hours ago + Shared: 115.5MB + Private: 10.25GB + Reclaimable: 10.36GB + Total: 10.36GB + ``` + + If `RECLAIMABLE` is false, the `docker buildx du prune` command won't delete + the record, even if you use `--all`. That's because the record is actively in + use by some component of the builder. 
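+
+  To reclaim the space that reclaimable records use, you can prune the build
+  cache of the selected builder. A minimal sketch (pass `--force` to skip the
+  confirmation prompt):
+
+  ```console
+  $ docker buildx prune
+  ```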
+ + The asterisks (\*) in the default output indicate the following: + + - An asterisk next to an ID (`zu7m6evdpebh5h8kfkpw9dlf2*`) indicates that the record + is mutable. The size of the record may change, or another build can take ownership of + it and change or commit to it. If you run the `du` command again, this item may + not be there anymore, or the size might be different. + - An asterisk next to a size (`8.288kB*`) indicates that the record is shared. + Storage of the record is shared with some other resource, typically an image. + If you prune such a record then you will lose build cache but only metadata + will be deleted as the image still needs to actual storage layers. + + ### Use verbose output (--verbose) {#verbose} + + The verbose output of the `docker buildx du` command is useful for inspecting + the disk usage records in more detail. The verbose output shows the mutable and + shared states more clearly, as well as additional information about the + corresponding layer. + + ```console + $ docker buildx du --verbose + ... + Last used: 2 days ago + Type: regular + + ID: 05d0elirb4mmvpmnzbrp3ssrg + Parent: e8sfdn4mygrg7msi9ak1dy6op + Created at: 2023-11-20 09:53:30.881558721 +0000 UTC + Mutable: false + Reclaimable: true + Shared: false + Size: 0B + Description: [gobase 3/3] WORKDIR /src + Usage count: 3 + Last used: 24 hours ago + Type: regular + + Reclaimable: 4.453GB + Total: 4.453GB + ``` + ### Override the configured builder instance (--builder) {#builder} - Same as [`buildx --builder`](buildx.md#builder). + Use the `--builder` flag to inspect the disk usage of a particular builder. + + ```console + $ docker buildx du --builder youthful_shtern + ID RECLAIMABLE SIZE LAST ACCESSED + g41agepgdczekxg2mtw0dujsv* true 1.312GB 47 hours ago + e6ycrsa0bn9akigqgzu0sc6kr true 318MB 47 hours ago + our9zg4ndly65ze1ccczdksiz true 204.9MB 45 hours ago + b7xv3xpxnwupc81tc9ya3mgq6* true 120.6MB 47 hours ago + zihgye15ss6vum3wmck9egdoy* true 79.81MB 2 days ago + aaydharssv1ug98yhuwclkfrh* true 79.81MB 2 days ago + ta1r4vmnjug5dhub76as4kkol* true 74.51MB 47 hours ago + murma9f83j9h8miifbq68udjf* true 74.51MB 47 hours ago + 47f961866a49g5y8myz80ixw1* true 74.51MB 47 hours ago + tzh99xtzlaf6txllh3cobag8t true 74.49MB 47 hours ago + ld6laoeuo1kwapysu6afwqybl* true 59.89MB 47 hours ago + yitxizi5kaplpyomqpos2cryp* true 59.83MB 47 hours ago + iy8aa4b7qjn0qmy9wiga9cj8w true 33.65MB 47 hours ago + mci7okeijyp8aqqk16j80dy09 true 19.86MB 47 hours ago + lqvj091he652slxdla4wom3pz true 14.08MB 47 hours ago + fkt31oiv793nd26h42llsjcw7* true 11.87MB 2 days ago + uj802yxtvkcjysnjb4kgwvn2v true 11.68MB 45 hours ago + Reclaimable: 2.627GB + Total: 2.627GB + ``` deprecated: false hidden: false experimental: false diff --git a/data/buildx/docker_buildx_ls.yaml b/data/buildx/docker_buildx_ls.yaml index 3656f30c6..045d5635a 100644 --- a/data/buildx/docker_buildx_ls.yaml +++ b/data/buildx/docker_buildx_ls.yaml @@ -1,16 +1,16 @@ command: docker buildx ls short: List builder instances long: |- - Lists all builder instances and the nodes for each instance + Lists all builder instances and the nodes for each instance. 
```console $ docker buildx ls - NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS - elated_tesla * docker-container - elated_tesla0 unix:///var/run/docker.sock running v0.10.3 linux/amd64 - elated_tesla1 ssh://ubuntu@1.2.3.4 running v0.10.3 linux/arm64*, linux/arm/v7, linux/arm/v6 - default docker - default default running v0.8.2 linux/amd64 + NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS + elated_tesla* docker-container + \_ elated_tesla0 \_ unix:///var/run/docker.sock running v0.10.3 linux/amd64 + \_ elated_tesla1 \_ ssh://ubuntu@1.2.3.4 running v0.10.3 linux/arm64*, linux/arm/v7, linux/arm/v6 + default docker + \_ default \_ default running v0.8.2 linux/amd64 ``` Each builder has one or more nodes associated with it. The current builder's @@ -28,6 +28,64 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: format + value_type: string + default_value: table + description: Format the output + details_url: '#format' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Format the output (--format) {#format} + + The formatting options (`--format`) pretty-prints builder instances output + using a Go template. + + Valid placeholders for the Go template are listed below: + + | Placeholder | Description | + |-------------------|---------------------------------------------| + | `.Name` | Builder or node name | + | `.DriverEndpoint` | Driver (for builder) or Endpoint (for node) | + | `.LastActivity` | Builder last activity | + | `.Status` | Builder or node status | + | `.Buildkit` | BuildKit version of the node | + | `.Platforms` | Available node's platforms | + | `.Error` | Error | + | `.Builder` | Builder object | + + When using the `--format` option, the `ls` command will either output the data + exactly as the template declares or, when using the `table` directive, includes + column headers as well. + + The following example uses a template without headers and outputs the + `Name` and `DriverEndpoint` entries separated by a colon (`:`): + + ```console + $ docker buildx ls --format "{{.Name}}: {{.DriverEndpoint}}" + elated_tesla: docker-container + elated_tesla0: unix:///var/run/docker.sock + elated_tesla1: ssh://ubuntu@1.2.3.4 + default: docker + default: default + ``` + + The `Builder` placeholder can be used to access the builder object and its + fields. For example, the following template outputs the builder's and + nodes' names with their respective endpoints: + + ```console + $ docker buildx ls --format "{{.Builder.Name}}: {{range .Builder.Nodes}}\n {{.Name}}: {{.Endpoint}}{{end}}" + elated_tesla: + elated_tesla0: unix:///var/run/docker.sock + elated_tesla1: ssh://ubuntu@1.2.3.4 + default: docker + default: default + ``` deprecated: false hidden: false experimental: false diff --git a/data/buildx/docker_buildx_rm.yaml b/data/buildx/docker_buildx_rm.yaml index e47d28798..233b1c8ad 100644 --- a/data/buildx/docker_buildx_rm.yaml +++ b/data/buildx/docker_buildx_rm.yaml @@ -1,9 +1,9 @@ command: docker buildx rm -short: Remove a builder instance +short: Remove one or more builder instances long: |- Removes the specified or current builder. It is a no-op attempting to remove the default builder. -usage: docker buildx rm [NAME] +usage: docker buildx rm [OPTIONS] [NAME] [NAME...] 
pname: docker buildx plink: docker_buildx.yaml options: diff --git a/data/engine-cli/docker_attach.yaml b/data/engine-cli/docker_attach.yaml index 73e97b793..35d52b28d 100644 --- a/data/engine-cli/docker_attach.yaml +++ b/data/engine-cli/docker_attach.yaml @@ -2,42 +2,8 @@ command: docker attach aliases: docker container attach, docker attach short: | Attach local standard input, output, and error streams to a running container -long: |- - Use `docker attach` to attach your terminal's standard input, output, and error - (or any combination of the three) to a running container using the container's - ID or name. This allows you to view its ongoing output or to control it - interactively, as though the commands were running directly in your terminal. - - > **Note:** - > The `attach` command will display the output of the `ENTRYPOINT/CMD` process. This - > can appear as if the attach command is hung when in fact the process may simply - > not be interacting with the terminal at that time. - - You can attach to the same contained process multiple times simultaneously, - from different sessions on the Docker host. - - To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the - container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to - the container. If the container was run with `-i` and `-t`, you can detach from - a container and leave it running using the `CTRL-p CTRL-q` key sequence. - - > **Note:** - > A process running as PID 1 inside a container is treated specially by - > Linux: it ignores any signal with the default action. So, the process - > will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do - > so. - - It is forbidden to redirect the standard input of a `docker attach` command - while attaching to a TTY-enabled container (using the `-i` and `-t` options). - - While a client is connected to container's `stdio` using `docker attach`, Docker - uses a ~1MB memory buffer to maximize the throughput of the application. - Once this buffer is full, the speed of the API connection is affected, and so - this impacts the output process' writing speed. This is similar to other - applications like SSH. Because of this, it is not recommended to run - performance critical applications that generate a lot of output in the - foreground over a slow client connection. Instead, users should use the - `docker logs` command to get access to the logs. +long: | + Attach local standard input, output, and error streams to a running container usage: docker attach [OPTIONS] CONTAINER pname: docker plink: docker.yaml @@ -45,7 +11,6 @@ options: - option: detach-keys value_type: string description: Override the key sequence for detaching a container - details_url: '#detach-keys' deprecated: false hidden: false experimental: false @@ -83,117 +48,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Attach to and detach from a running container - - The following example starts an ubuntu container running `top` in detached mode, - then attaches to the container; - - ```console - $ docker run -d --name topdemo ubuntu:22.04 /usr/bin/top -b - - $ docker attach topdemo - - top - 12:27:44 up 3 days, 21:54, 0 users, load average: 0.00, 0.00, 0.00 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.8 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st - MiB Mem : 3934.3 total, 770.1 free, 674.2 used, 2490.1 buff/cache - MiB Swap: 1024.0 total, 839.3 free, 184.7 used. 
2814.0 avail Mem - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 7180 2896 2568 R 0.0 0.1 0:00.02 top - ``` - - As the container was started without the `-i`, and `-t` options, signals are - forwarded to the attached process, which means that the default `CTRL-p CTRL-q` - detach key sequence produces no effect, but pressing `CTRL-c` terminates the - container: - - ```console - <...> - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 7180 2896 2568 R 0.0 0.1 0:00.02 top^P^Q - ^C - - $ docker ps -a --filter name=topdemo - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4cf0d0ebb079 ubuntu:22.04 "/usr/bin/top -b" About a minute ago Exited (0) About a minute ago topdemo - ``` - - Repeating the example above, but this time with the `-i` and `-t` options set; - - ```console - $ docker run -dit --name topdemo2 ubuntu:22.04 /usr/bin/top -b - ``` - - Now, when attaching to the container, and pressing the `CTRL-p CTRL-q` ("read - escape sequence"), the Docker CLI is handling the detach sequence, and the - `attach` command is detached from the container. Checking the container's status - with `docker ps` shows that the container is still running in the background: - - ```console - $ docker attach topdemo2 - - top - 12:44:32 up 3 days, 22:11, 0 users, load average: 0.00, 0.00, 0.00 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - %Cpu(s): 50.0 us, 0.0 sy, 0.0 ni, 50.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st - MiB Mem : 3934.3 total, 770.6 free, 672.4 used, 2491.4 buff/cache - MiB Swap: 1024.0 total, 839.3 free, 184.7 used. 2815.8 avail Mem - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 7180 2776 2452 R 0.0 0.1 0:00.02 topread escape sequence - - $ docker ps -a --filter name=topdemo2 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - b1661dce0fc2 ubuntu:22.04 "/usr/bin/top -b" 2 minutes ago Up 2 minutes topdemo2 - ``` - - ### Get the exit code of the container's command - - And in this second example, you can see the exit code returned by the `bash` - process is returned by the `docker attach` command to its caller too: - - ```console - $ docker run --name test -dit alpine - 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab - - $ docker attach test - /# exit 13 - - $ echo $? - 13 - - $ docker ps -a --filter name=test - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a2fe3fd886db alpine "/bin/sh" About a minute ago Exited (13) 40 seconds ago test - ``` - - ### Override the detach sequence (--detach-keys) {#detach-keys} - - Use the `--detach-keys` option to override the Docker key sequence for detach. - This is useful if the Docker default sequence conflicts with key sequence you - use for other applications. There are two ways to define your own detach key - sequence, as a per-container override or as a configuration property on your - entire configuration. - - To override the sequence for an individual container, use the - `--detach-keys=""` flag with the `docker attach` command. The format of - the `` is either a letter [a-Z], or the `ctrl-` combined with any of - the following: - - * `a-z` (a single lowercase alpha character ) - * `@` (at sign) - * `[` (left bracket) - * `\\` (two backward slashes) - * `_` (underscore) - * `^` (caret) - - These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key - sequences. To configure a different configuration default key sequence for all - containers, see [**Configuration file** section](cli.md#configuration-files). 
deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_build.yaml b/data/engine-cli/docker_build.yaml index cc33f274b..5a42a9b11 100644 --- a/data/engine-cli/docker_build.yaml +++ b/data/engine-cli/docker_build.yaml @@ -1,109 +1,7 @@ command: docker build aliases: docker image build, docker build, docker buildx build, docker builder build short: Build an image from a Dockerfile -long: |- - The `docker build` command builds Docker images from a Dockerfile and a - "context". A build's context is the set of files located in the specified - `PATH` or `URL`. The build process can refer to any of the files in the - context. For example, your build can use a [*COPY*](/engine/reference/builder/#copy) - instruction to reference a file in the context. - - The `URL` parameter can refer to three kinds of resources: Git repositories, - pre-packaged tarball contexts and plain text files. - - ### Git repositories - - When the `URL` parameter points to the location of a Git repository, the - repository acts as the build context. The system recursively fetches the - repository and its submodules. The commit history is not preserved. A - repository is first pulled into a temporary directory on your local host. After - that succeeds, the directory is sent to the Docker daemon as the context. - Local copy gives you the ability to access private repositories using local - user credentials, VPN's, and so forth. - - > **Note** - > - > If the `URL` parameter contains a fragment the system will recursively clone - > the repository and its submodules using a `git clone --recursive` command. - - Git URLs accept context configuration in their fragment section, separated by a - colon (`:`). The first part represents the reference that Git will check out, - and can be either a branch, a tag, or a remote reference. The second part - represents a subdirectory inside the repository that will be used as a build - context. - - For example, run this command to use a directory called `docker` in the branch - `container`: - - ```console - $ docker build https://github.com/docker/rootfs.git#container:docker - ``` - - The following table represents all the valid suffixes with their build - contexts: - - | Build Syntax Suffix | Commit Used | Build Context Used | - |--------------------------------|-----------------------|--------------------| - | `myrepo.git` | `refs/heads/master` | `/` | - | `myrepo.git#mytag` | `refs/tags/mytag` | `/` | - | `myrepo.git#mybranch` | `refs/heads/mybranch` | `/` | - | `myrepo.git#pull/42/head` | `refs/pull/42/head` | `/` | - | `myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` | - | `myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` | - | `myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` | - | `myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` | - - ### Tarball contexts - - If you pass an URL to a remote tarball, the URL itself is sent to the daemon: - - ```console - $ docker build http://server/context.tar.gz - ``` - - The download operation will be performed on the host the Docker daemon is - running on, which is not necessarily the same host from which the build command - is being issued. The Docker daemon will fetch `context.tar.gz` and use it as the - build context. Tarball contexts must be tar archives conforming to the standard - `tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2', - 'gzip' or 'identity' (no compression) formats. 
- - ### Text files - - Instead of specifying a context, you can pass a single `Dockerfile` in the - `URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: - - ```console - $ docker build - < Dockerfile - ``` - - With Powershell on Windows, you can run: - - ```powershell - Get-Content Dockerfile | docker build - - ``` - - If you use `STDIN` or specify a `URL` pointing to a plain text file, the system - places the contents into a file called `Dockerfile`, and any `-f`, `--file` - option is ignored. In this scenario, there is no context. - - By default the `docker build` command will look for a `Dockerfile` at the root - of the build context. The `-f`, `--file`, option lets you specify the path to - an alternative file to use instead. This is useful in cases where the same set - of files are used for multiple builds. The path must be to a file within the - build context. If a relative path is specified then it is interpreted as - relative to the root of the context. - - In most cases, it's best to put each Dockerfile in an empty directory. Then, - add to that directory only the files needed for building the Dockerfile. To - increase the build's performance, you can exclude files and directories by - adding a `.dockerignore` file to that directory as well. For information on - creating one, see the [.dockerignore file](/engine/reference/builder/#dockerignore-file). - - If the Docker client loses connection to the daemon, the build is canceled. - This happens if you interrupt the Docker client with `CTRL-c` or if the Docker - client is killed for any reason. If the build initiated a pull which is still - running at the time the build is cancelled, the pull is cancelled as well. +long: Build an image from a Dockerfile usage: docker build [OPTIONS] PATH | URL | - pname: docker plink: docker.yaml @@ -111,7 +9,6 @@ options: - option: add-host value_type: list description: Add a custom host-to-IP mapping (`host:ip`) - details_url: '#add-host' deprecated: false hidden: false experimental: false @@ -121,7 +18,6 @@ options: - option: build-arg value_type: list description: Set build-time variables - details_url: '#build-arg' deprecated: false hidden: false experimental: false @@ -132,7 +28,6 @@ options: value_type: stringSlice default_value: '[]' description: Images to consider as cache sources - details_url: '#cache-from' deprecated: false hidden: false experimental: false @@ -142,7 +37,6 @@ options: - option: cgroup-parent value_type: string description: Set the parent cgroup for the `RUN` instructions during build - details_url: '#cgroup-parent' deprecated: false hidden: false experimental: false @@ -222,7 +116,6 @@ options: shorthand: f value_type: string description: Name of the Dockerfile (Default is `PATH/Dockerfile`) - details_url: '#file' deprecated: false hidden: false experimental: false @@ -251,7 +144,6 @@ options: - option: isolation value_type: string description: Container isolation technology - details_url: '#isolation' deprecated: false hidden: false experimental: false @@ -354,7 +246,6 @@ options: value_type: stringSlice default_value: '[]' description: Security options - details_url: '#security-opt' deprecated: false hidden: false experimental: false @@ -375,7 +266,6 @@ options: value_type: bool default_value: "false" description: Squash newly built layers into a single new layer - details_url: '#squash' deprecated: false hidden: false min_api_version: "1.25" @@ -387,7 +277,6 @@ options: shorthand: t value_type: list description: Name and optionally a tag in the 
`name:tag` format - details_url: '#tag' deprecated: false hidden: false experimental: false @@ -397,7 +286,6 @@ options: - option: target value_type: string description: Set the target build stage to build. - details_url: '#target' deprecated: false hidden: false experimental: false @@ -408,7 +296,6 @@ options: value_type: ulimit default_value: '[]' description: Ulimit options - details_url: '#ulimit' deprecated: false hidden: false experimental: false @@ -426,596 +313,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Build with PATH - - ```console - $ docker build . - - Uploading context 10240 bytes - Step 1/3 : FROM busybox - Pulling repository busybox - ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ - Step 2/3 : RUN ls -lh / - ---> Running in 9c9e81692ae9 - total 24 - drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin - drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev - drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc - drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib - lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib - dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc - lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin - dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys - drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp - drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr - ---> b35f4035db3f - Step 3/3 : CMD echo Hello world - ---> Running in 02071fceb21b - ---> f52f38b7823e - Successfully built f52f38b7823e - Removing intermediate container 9c9e81692ae9 - Removing intermediate container 02071fceb21b - ``` - - This example specifies that the `PATH` is `.`, and so all the files in the - local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies - where to find the files for the "context" of the build on the Docker daemon. - Remember that the daemon could be running on a remote machine and that no - parsing of the Dockerfile happens at the client side (where you're running - `docker build`). That means that *all* the files at `PATH` get sent, not just - the ones listed to [*ADD*](/engine/reference/builder/#add) - in the Dockerfile. - - The transfer of context from the local machine to the Docker daemon is what the - `docker` client means when you see the "Sending build context" message. - - If you wish to keep the intermediate containers after the build is complete, - you must use `--rm=false`. This does not affect the build cache. - - ### Build with URL - - ```console - $ docker build github.com/creack/docker-firefox - ``` - - This will clone the GitHub repository and use the cloned repository as context. - The Dockerfile at the root of the repository is used as Dockerfile. You can - specify an arbitrary Git repository by using the `git://` or `git@` scheme. - - ```console - $ docker build -f ctx/Dockerfile http://server/ctx.tar.gz - - Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B - Step 1/3 : FROM busybox - ---> 8c2e06607696 - Step 2/3 : ADD ctx/container.cfg / - ---> e7829950cee3 - Removing intermediate container b35224abf821 - Step 3/3 : CMD /bin/ls - ---> Running in fbc63d321d73 - ---> 3286931702ad - Removing intermediate container fbc63d321d73 - Successfully built 377c409b35e4 - ``` - - This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which - downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` - parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used - to build the image. 
Any `ADD` commands in that `Dockerfile` that refers to local - paths must be relative to the root of the contents inside `ctx.tar.gz`. In the - example above, the tarball contains a directory `ctx/`, so the `ADD - ctx/container.cfg /` operation works as expected. - - ### Build with - - - ```console - $ docker build - < Dockerfile - ``` - - This will read a Dockerfile from `STDIN` without context. Due to the lack of a - context, no contents of any local directory will be sent to the Docker daemon. - Since there is no context, a Dockerfile `ADD` only works if it refers to a - remote URL. - - ```console - $ docker build - < context.tar.gz - ``` - - This will build an image for a compressed context read from `STDIN`. Supported - formats are: bzip2, gzip and xz. - - ### Use a .dockerignore file - - ```console - $ docker build . - - Uploading context 18.829 MB - Uploading context - Step 1/2 : FROM busybox - ---> 769b9341d937 - Step 2/2 : CMD echo Hello world - ---> Using cache - ---> 99cc1ad10469 - Successfully built 99cc1ad10469 - $ echo ".git" > .dockerignore - $ docker build . - Uploading context 6.76 MB - Uploading context - Step 1/2 : FROM busybox - ---> 769b9341d937 - Step 2/2 : CMD echo Hello world - ---> Using cache - ---> 99cc1ad10469 - Successfully built 99cc1ad10469 - ``` - - This example shows the use of the `.dockerignore` file to exclude the `.git` - directory from the context. Its effect can be seen in the changed size of the - uploaded context. The builder reference contains detailed information on - [creating a .dockerignore file](/engine/reference/builder/#dockerignore-file). - - When using the [BuildKit backend](/build/buildkit/), - `docker build` searches for a `.dockerignore` file relative to the Dockerfile - name. For example, running `docker build -f myapp.Dockerfile .` will first look - for an ignore file named `myapp.Dockerfile.dockerignore`. If such a file is not - found, the `.dockerignore` file is used if present. Using a Dockerfile based - `.dockerignore` is useful if a project contains multiple Dockerfiles that expect - to ignore different sets of files. - - - ### Tag an image (-t, --tag) {#tag} - - ```console - $ docker build -t vieux/apache:2.0 . - ``` - - This will build like the previous example, but it will then tag the resulting - image. The repository name will be `vieux/apache` and the tag will be `2.0`. - [Read more about valid tags](tag.md). - - You can apply multiple tags to an image. For example, you can apply the `latest` - tag to a newly built image and add another tag that references a specific - version. - For example, to tag an image both as `whenry/fedora-jboss:latest` and - `whenry/fedora-jboss:v2.1`, use the following: - - ```console - $ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . - ``` - - ### Specify a Dockerfile (-f, --file) {#file} - - ```console - $ docker build -f Dockerfile.debug . - ``` - - This will use a file called `Dockerfile.debug` for the build instructions - instead of `Dockerfile`. - - ```console - $ curl example.com/remote/Dockerfile | docker build -f - . - ``` - - The above command will use the current directory as the build context and read - a Dockerfile from stdin. - - ```console - $ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . - $ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . - ``` - - The above commands will build the current build context (as specified by the - `.`) twice, once using a debug version of a `Dockerfile` and once using a - production version. 
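As a small convenience sketch (the file and tag names follow the example above; the loop itself is only illustrative), the same two builds can also be driven from a shell loop:

```console
$ for variant in debug prod; do docker build -f "dockerfiles/Dockerfile.${variant}" -t "myapp_${variant}" .; done
```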
- - ```console - $ cd /home/me/myapp/some/dir/really/deep - $ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp - $ docker build -f ../../../../dockerfiles/debug /home/me/myapp - ``` - - These two `docker build` commands do the exact same thing. They both use the - contents of the `debug` file instead of looking for a `Dockerfile` and will use - `/home/me/myapp` as the root of the build context. Note that `debug` is in the - directory structure of the build context, regardless of how you refer to it on - the command line. - - > **Note** - > - > `docker build` returns a `no such file or directory` error if the - > file or directory does not exist in the uploaded context. This may - > happen if there is no context, or if you specify a file that is - > elsewhere on the Host system. The context is limited to the current - > directory (and its children) for security reasons, and to ensure - > repeatable builds on remote Docker hosts. This is also the reason why - > `ADD ../file` does not work. - - ### Use a custom parent cgroup (--cgroup-parent) {#cgroup-parent} - - When `docker build` is run with the `--cgroup-parent` option the containers - used in the build will be run with the [corresponding `docker run` flag](../run.md#specify-custom-cgroups). - - ### Set ulimits in container (--ulimit) {#ulimit} - - Using the `--ulimit` option with `docker build` will cause each build step's - container to be started using those [`--ulimit` flag values](run.md#ulimit). - - ### Set build-time variables (--build-arg) {#build-arg} - - You can use `ENV` instructions in a Dockerfile to define variable - values. These values persist in the built image. However, often - persistence is not what you want. Users want to specify variables differently - depending on which host they build an image on. - - A good example is `http_proxy` or source versions for pulling intermediate - files. The `ARG` instruction lets Dockerfile authors define values that users - can set at build-time using the `--build-arg` flag: - - ```console - $ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 --build-arg FTP_PROXY=http://40.50.60.5:4567 . - ``` - - This flag allows you to pass the build-time variables that are - accessed like regular environment variables in the `RUN` instruction of the - Dockerfile. Also, these values don't persist in the intermediate or final images - like `ENV` values do. You must add `--build-arg` for each build argument. - - Using this flag will not alter the output you see when the `ARG` lines from the - Dockerfile are echoed during the build process. - - For detailed information on using `ARG` and `ENV` instructions, see the - [Dockerfile reference](/engine/reference/builder/). - - You may also use the `--build-arg` flag without a value, in which case the value - from the local environment will be propagated into the Docker container being - built: - - ```console - $ export HTTP_PROXY=http://10.20.30.2:1234 - $ docker build --build-arg HTTP_PROXY . - ``` - - This is similar to how `docker run -e` works. Refer to the [`docker run` documentation](run.md#env) - for more information. - - ### Optional security options (--security-opt) {#security-opt} - - This flag is only supported on a daemon running on Windows, and only supports - the `credentialspec` option. The `credentialspec` must be in the format - `file://spec.txt` or `registry://keyname`. 
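A minimal sketch of that syntax, assuming a Windows daemon and a placeholder credential spec file named `spec.txt`:

```console
$ docker build --security-opt credentialspec=file://spec.txt .
```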
- - ### Specify isolation technology for container (--isolation) {#isolation} - - This option is useful in situations where you are running Docker containers on - Windows. The `--isolation=` option sets a container's isolation - technology. On Linux, the only supported is the `default` option which uses - Linux namespaces. On Microsoft Windows, you can specify these values: - - - | Value | Description | - |-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| - | `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | - | `process` | Namespace isolation only. | - | `hyperv` | Hyper-V hypervisor partition-based isolation. | - - Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - - ### Add entries to container hosts file (--add-host) {#add-host} - - You can add other hosts into a container's `/etc/hosts` file by using one or - more `--add-host` flags. This example adds a static address for a host named - `docker`: - - ```console - $ docker build --add-host docker:10.180.0.1 . - ``` - - If you need your build to connect to services running on the host, you can use - the special `host-gateway` value for `--add-host`. In the following example, - build containers resolve `host.docker.internal` to the host's gateway IP. - - ```console - $ docker build --add-host host.docker.internal:host-gateway . - ``` - - ### Specifying target build stage (--target) {#target} - - When building a Dockerfile with multiple build stages, `--target` can be used to - specify an intermediate build stage by name as a final stage for the resulting - image. Commands after the target stage will be skipped. - - ```dockerfile - FROM debian AS build-env - # ... - - FROM alpine AS production-env - # ... - ``` - - ```console - $ docker build -t mybuildimage --target build-env . - ``` - - ### Custom build outputs (--output) {#output} - - > **Note** - > - > This feature requires the BuildKit backend. You can either - > [enable BuildKit](/build/buildkit/#getting-started) or - > use the [buildx](https://github.com/docker/buildx) plugin which provides more - > output type options. - - By default, a local container image is created from the build result. The - `--output` (or `-o`) flag allows you to override this behavior, and a specify a - custom exporter. For example, custom exporters allow you to export the build - artifacts as files on the local filesystem instead of a Docker image, which can - be useful for generating local binaries, code generation etc. - - The value for `--output` is a CSV-formatted string defining the exporter type - and options. Currently, `local` and `tar` exporters are supported. The `local` - exporter writes the resulting build files to a directory on the client side. The - `tar` exporter is similar but writes the files as a single tarball (`.tar`). - - If no type is specified, the value defaults to the output directory of the local - exporter. Use a hyphen (`-`) to write the output tarball to standard output - (`STDOUT`). - - The following example builds an image using the current directory (`.`) as build - context, and exports the files to a directory named `out` in the current directory. 
- If the directory does not exist, Docker creates the directory automatically: - - ```console - $ docker build -o out . - ``` - - The example above uses the short-hand syntax, omitting the `type` options, and - thus uses the default (`local`) exporter. The example below shows the equivalent - using the long-hand CSV syntax, specifying both `type` and `dest` (destination - path): - - ```console - $ docker build --output type=local,dest=out . - ``` - - Use the `tar` type to export the files as a `.tar` archive: - - ```console - $ docker build --output type=tar,dest=out.tar . - ``` - - The example below shows the equivalent when using the short-hand syntax. In this - case, `-` is specified as destination, which automatically selects the `tar` type, - and writes the output tarball to standard output, which is then redirected to - the `out.tar` file: - - ```console - $ docker build -o - . > out.tar - ``` - - The `--output` option exports all files from the target stage. A common pattern - for exporting only specific files is to do multi-stage builds and to copy the - desired files to a new scratch stage with [`COPY --from`](/engine/reference/builder/#copy). - - The example `Dockerfile` below uses a separate stage to collect the - build-artifacts for exporting: - - ```dockerfile - FROM golang AS build-stage - RUN go get -u github.com/LK4D4/vndr - - FROM scratch AS export-stage - COPY --from=build-stage /go/bin/vndr / - ``` - - When building the Dockerfile with the `-o` option, only the files from the final - stage are exported to the `out` directory, in this case, the `vndr` binary: - - ```console - $ docker build -o out . - - [+] Building 2.3s (7/7) FINISHED - => [internal] load build definition from Dockerfile 0.1s - => => transferring dockerfile: 176B 0.0s - => [internal] load .dockerignore 0.0s - => => transferring context: 2B 0.0s - => [internal] load metadata for docker.io/library/golang:latest 1.6s - => [build-stage 1/2] FROM docker.io/library/golang@sha256:2df96417dca0561bf1027742dcc5b446a18957cd28eba6aa79269f23f1846d3f 0.0s - => => resolve docker.io/library/golang@sha256:2df96417dca0561bf1027742dcc5b446a18957cd28eba6aa79269f23f1846d3f 0.0s - => CACHED [build-stage 2/2] RUN go get -u github.com/LK4D4/vndr 0.0s - => [export-stage 1/1] COPY --from=build-stage /go/bin/vndr / 0.2s - => exporting to client 0.4s - => => copying files 10.30MB 0.3s - - $ ls ./out - vndr - ``` - - ### Specifying external cache sources (--cache-from) {#cache-from} - - > **Note** - > - > This feature requires the BuildKit backend. You can either - > [enable BuildKit](/build/buildkit/#getting-started) or - > use the [buildx](https://github.com/docker/buildx) plugin. The previous - > builder has limited support for reusing cache from pre-pulled images. - - In addition to local build cache, the builder can reuse the cache generated from - previous builds with the `--cache-from` flag pointing to an image in the registry. - - To use an image as a cache source, cache metadata needs to be written into the - image on creation. This can be done by setting `--build-arg BUILDKIT_INLINE_CACHE=1` - when building the image. After that, the built image can be used as a cache source - for subsequent builds. - - Upon importing the cache, the builder will only pull the JSON metadata from the - registry and determine possible cache hits based on that information. If there - is a cache hit, the matched layers are pulled into the local environment. 
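Because this requires the BuildKit backend, the classic `docker build` CLI must run with BuildKit enabled; one way to do that (a sketch, with `myname/myapp` standing in for an image built with inline cache metadata as shown below) is the `DOCKER_BUILDKIT` environment variable:

```console
$ DOCKER_BUILDKIT=1 docker build --cache-from myname/myapp .
```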
- - In addition to images, the cache can also be pulled from special cache manifests - generated by [`buildx`](https://github.com/docker/buildx) or the BuildKit CLI - (`buildctl`). These manifests (when built with the `type=registry` and `mode=max` - options) allow pulling layer data for intermediate stages in multi-stage builds. - - The following example builds an image with inline-cache metadata and pushes it - to a registry, then uses the image as a cache source on another machine: - - ```console - $ docker build -t myname/myapp --build-arg BUILDKIT_INLINE_CACHE=1 . - $ docker push myname/myapp - ``` - - After pushing the image, the image is used as cache source on another machine. - BuildKit automatically pulls the image from the registry if needed. - - On another machine: - - ```console - $ docker build --cache-from myname/myapp . - ``` - - ### Squash an image's layers (--squash) (experimental) {#squash} - - #### Overview - - Once the image is built, squash the new layers into a new image with a single - new layer. Squashing does not destroy any existing image, rather it creates a new - image with the content of the squashed layers. This effectively makes it look - like all `Dockerfile` commands were created with a single layer. The build - cache is preserved with this method. - - The `--squash` option is an experimental feature, and should not be considered - stable. - - - Squashing layers can be beneficial if your Dockerfile produces multiple layers - modifying the same files, for example, files that are created in one step, and - removed in another step. For other use-cases, squashing images may actually have - a negative impact on performance; when pulling an image consisting of multiple - layers, layers can be pulled in parallel, and allows sharing layers between - images (saving space). - - For most use cases, multi-stage builds are a better alternative, as they give more - fine-grained control over your build, and can take advantage of future - optimizations in the builder. Refer to the [use multi-stage builds](/develop/develop-images/multistage-build/) - section in the userguide for more information. - - - #### Known limitations - - The `--squash` option has a number of known limitations: - - - When squashing layers, the resulting image cannot take advantage of layer - sharing with other images, and may use significantly more space. Sharing the - base image is still supported. - - When using this option you may see significantly more space used due to - storing two copies of the image, one for the build cache with all the cache - layers intact, and one for the squashed version. - - While squashing layers may produce smaller images, it may have a negative - impact on performance, as a single layer takes longer to extract, and - downloading a single layer cannot be parallelized. - - When attempting to squash an image that does not make changes to the - filesystem (for example, the Dockerfile only contains `ENV` instructions), - the squash step will fail (see [issue #33823](https://github.com/moby/moby/issues/33823)). - - #### Prerequisites - - The example on this page is using experimental mode in Docker 23.03. - - Experimental mode can be enabled by using the `--experimental` flag when starting - the Docker daemon or setting `experimental: true` in the `daemon.json` configuration - file. - - By default, experimental mode is disabled. 
To see the current configuration of - the docker daemon, use the `docker version` command and check the `Experimental` - line in the `Engine` section: - - ```console - Client: Docker Engine - Community - Version: 23.0.3 - API version: 1.42 - Go version: go1.19.7 - Git commit: 3e7cbfd - Built: Tue Apr 4 22:05:41 2023 - OS/Arch: darwin/amd64 - Context: default - - Server: Docker Engine - Community - Engine: - Version: 23.0.3 - API version: 1.42 (minimum version 1.12) - Go version: go1.19.7 - Git commit: 59118bf - Built: Tue Apr 4 22:05:41 2023 - OS/Arch: linux/amd64 - Experimental: true - [...] - ``` - - To enable experimental mode, users need to restart the docker daemon with the - experimental flag enabled. - - #### Enable Docker experimental - - To enable experimental features, you need to start the Docker daemon with - `--experimental` flag. You can also enable the daemon flag via - `/etc/docker/daemon.json`, for example: - - ```json - { - "experimental": true - } - ``` - - Then make sure the experimental flag is enabled: - - ```console - $ docker version -f '{{.Server.Experimental}}' - true - ``` - - #### Build an image with `--squash` argument - - The following is an example of docker build with `--squash` argument - - ```dockerfile - FROM busybox - RUN echo hello > /hello - RUN echo world >> /hello - RUN touch remove_me /remove_me - ENV HELLO=world - RUN rm /remove_me - ``` - - An image named `test` is built with `--squash` argument. - - ```console - $ docker build --squash -t test . - - <...> - ``` - - If everything is right, the history looks like this: - - ```console - $ docker history test - - IMAGE CREATED CREATED BY SIZE COMMENT - 4e10cb5b4cac 3 seconds ago 12 B merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb - 5 minutes ago /bin/sh -c rm /remove_me 0 B - 5 minutes ago /bin/sh -c #(nop) ENV HELLO=world 0 B - 5 minutes ago /bin/sh -c touch remove_me /remove_me 0 B - 5 minutes ago /bin/sh -c echo world >> /hello 0 B - 6 minutes ago /bin/sh -c echo hello > /hello 0 B - 7 weeks ago /bin/sh -c #(nop) CMD ["sh"] 0 B - 7 weeks ago /bin/sh -c #(nop) ADD file:47ca6e777c36a4cfff 1.113 MB - ``` - - We could find that a layer's name is ``, and there is a new layer with - COMMENT `merge`. - - Test the image, check for `/remove_me` being gone, make sure `hello\nworld` is - in `/hello`, make sure the `HELLO` environment variable's value is `world`. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_checkpoint.yaml b/data/engine-cli/docker_checkpoint.yaml index f65050683..873db7667 100644 --- a/data/engine-cli/docker_checkpoint.yaml +++ b/data/engine-cli/docker_checkpoint.yaml @@ -2,7 +2,7 @@ command: docker checkpoint short: Manage checkpoints long: |- Checkpoint and Restore is an experimental feature that allows you to freeze a running - container by checkpointing it, which turns its state into a collection of files + container by specifying a checkpoint, which turns the container state into a collection of files on disk. Later, the container can be restored from the point it was frozen. This is accomplished using a tool called [CRIU](https://criu.org), which is an @@ -13,7 +13,7 @@ long: |- ### Installing CRIU If you use a Debian system, you can add the CRIU PPA and install with `apt-get` - [from the criu launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa). + [from the CRIU launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa). 
Alternatively, you can [build CRIU from source](https://criu.org/Installation). @@ -75,17 +75,17 @@ long: |- ``` This process just logs an incrementing counter to stdout. If you run `docker logs` - in between running/checkpoint/restoring you should see that the counter - increases while the process is running, stops while it's checkpointed, and + in-between running/checkpoint/restoring, you should see that the counter + increases while the process is running, stops while it's frozen, and resumes from the point it left off once you restore. ### Known limitations - seccomp is only supported by CRIU in very up to date kernels. + `seccomp` is only supported by CRIU in very up-to-date kernels. - External terminal (i.e. `docker run -t ..`) is not supported at the moment. + External terminals (i.e. `docker run -t ..`) aren't supported. If you try to create a checkpoint for a container with an external terminal, - it would fail: + it fails: ```console $ docker checkpoint create cr checkpoint1 diff --git a/data/engine-cli/docker_commit.yaml b/data/engine-cli/docker_commit.yaml index 0a40a255b..944497001 100644 --- a/data/engine-cli/docker_commit.yaml +++ b/data/engine-cli/docker_commit.yaml @@ -1,24 +1,7 @@ command: docker commit aliases: docker container commit, docker commit short: Create a new image from a container's changes -long: |- - It can be useful to commit a container's file changes or settings into a new - image. This allows you to debug a container by running an interactive shell, or to - export a working dataset to another server. Generally, it is better to use - Dockerfiles to manage your images in a documented and maintainable way. - [Read more about valid image names and tags](tag.md). - - The commit operation will not include any data contained in - volumes mounted inside the container. - - By default, the container being committed and its processes will be paused - while the image is committed. This reduces the likelihood of encountering data - corruption during the process of creating the commit. If this behavior is - undesired, set the `--pause` option to false. - - The `--change` option will apply `Dockerfile` instructions to the image that is - created. 
Supported `Dockerfile` instructions: - `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` +long: Create a new image from a container's changes usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] pname: docker plink: docker.yaml @@ -37,7 +20,6 @@ options: shorthand: c value_type: list description: Apply Dockerfile instruction to the created image - details_url: '#change' deprecated: false hidden: false experimental: false @@ -76,72 +58,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Commit a container - - ```console - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky - 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton - - $ docker commit c3f279d17e0a svendowideit/testimage:version3 - - f5283438590d - - $ docker images - - REPOSITORY TAG ID CREATED SIZE - svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB - ``` - - ### Commit a container with new configurations (--change) {#change} - - ```console - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky - 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton - - $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a - - [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] - - $ docker commit --change "ENV DEBUG=true" c3f279d17e0a svendowideit/testimage:version3 - - f5283438590d - - $ docker inspect -f "{{ .Config.Env }}" f5283438590d - - [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] - ``` - - ### Commit a container with new `CMD` and `EXPOSE` instructions - - ```console - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky - 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton - - $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 - - f5283438590d - - $ docker run -d svendowideit/testimage:version4 - - 89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 - - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp distracted_fermat - c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky - 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_config_create.yaml b/data/engine-cli/docker_config_create.yaml index 6d716986a..61b7dfadd 100644 --- a/data/engine-cli/docker_config_create.yaml +++ b/data/engine-cli/docker_config_create.yaml @@ -7,7 +7,7 @@ long: |- > **Note** > - > This is a cluster management command, and must be executed on a swarm + > This is a cluster management command, and must be executed on a Swarm > manager node. To learn about managers and workers, refer to the > [Swarm mode section](/engine/swarm/) in the > documentation. 
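For example (a minimal sketch; the config name and payload are arbitrary), a config can be created on a manager node by piping its contents from standard input:

```console
$ printf "example config payload" | docker config create my_config -
```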
diff --git a/data/engine-cli/docker_config_inspect.yaml b/data/engine-cli/docker_config_inspect.yaml index 72bf69b3c..0ec9fc0e9 100644 --- a/data/engine-cli/docker_config_inspect.yaml +++ b/data/engine-cli/docker_config_inspect.yaml @@ -13,7 +13,7 @@ long: |- > **Note** > - > This is a cluster management command, and must be executed on a swarm + > This is a cluster management command, and must be executed on a Swarm > manager node. To learn about managers and workers, refer to the > [Swarm mode section](/engine/swarm/) in the > documentation. diff --git a/data/engine-cli/docker_config_ls.yaml b/data/engine-cli/docker_config_ls.yaml index 7d4b3a89d..e5dfdb1dc 100644 --- a/data/engine-cli/docker_config_ls.yaml +++ b/data/engine-cli/docker_config_ls.yaml @@ -2,13 +2,13 @@ command: docker config ls aliases: docker config ls, docker config list short: List configs long: |- - Run this command on a manager node to list the configs in the swarm. + Run this command on a manager node to list the configs in the Swarm. For detailed information about using configs, refer to [store configuration data using Docker Configs](/engine/swarm/configs/). > **Note** > - > This is a cluster management command, and must be executed on a swarm + > This is a cluster management command, and must be executed on a Swarm > manager node. To learn about managers and workers, refer to the > [Swarm mode section](/engine/swarm/) in the > documentation. diff --git a/data/engine-cli/docker_config_rm.yaml b/data/engine-cli/docker_config_rm.yaml index db718561b..c74e404cc 100644 --- a/data/engine-cli/docker_config_rm.yaml +++ b/data/engine-cli/docker_config_rm.yaml @@ -2,13 +2,13 @@ command: docker config rm aliases: docker config rm, docker config remove short: Remove one or more configs long: |- - Removes the specified configs from the swarm. + Removes the specified configs from the Swarm. For detailed information about using configs, refer to [store configuration data using Docker Configs](/engine/swarm/configs/). > **Note** > - > This is a cluster management command, and must be executed on a swarm + > This is a cluster management command, and must be executed on a Swarm > manager node. To learn about managers and workers, refer to the > [Swarm mode section](/engine/swarm/) in the > documentation. @@ -36,8 +36,8 @@ examples: |- > **Warning** > - > Unlike `docker rm`, this command does not ask for confirmation before removing - > a config. + > This command doesn't ask for confirmation before removing a config. + { .warning } deprecated: false min_api_version: "1.30" experimental: false diff --git a/data/engine-cli/docker_container_attach.yaml b/data/engine-cli/docker_container_attach.yaml index eed522394..f26e6cd04 100644 --- a/data/engine-cli/docker_container_attach.yaml +++ b/data/engine-cli/docker_container_attach.yaml @@ -2,7 +2,43 @@ command: docker container attach aliases: docker container attach, docker attach short: | Attach local standard input, output, and error streams to a running container -long: See [docker attach](attach.md) for more information. +long: |- + Use `docker attach` to attach your terminal's standard input, output, and error + (or any combination of the three) to a running container using the container's + ID or name. This lets you view its output or control it interactively, as + though the commands were running directly in your terminal. + + > **Note** + > + > The `attach` command displays the output of the container's `ENTRYPOINT` and + > `CMD` process. 
This can appear as if the attach command is hung when in fact + > the process may simply not be writing any output at that time. + + You can attach to the same contained process multiple times simultaneously, + from different sessions on the Docker host. + + To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the + container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to + the container. If the container was run with `-i` and `-t`, you can detach from + a container and leave it running using the `CTRL-p CTRL-q` key sequence. + + > **Note** + > + > A process running as PID 1 inside a container is treated specially by + > Linux: it ignores any signal with the default action. So, the process + > doesn't terminate on `SIGINT` or `SIGTERM` unless it's coded to do so. + + You can't redirect the standard input of a `docker attach` command while + attaching to a TTY-enabled container (using the `-i` and `-t` options). + + While a client is connected to container's `stdio` using `docker attach`, + Docker uses a ~1MB memory buffer to maximize the throughput of the application. + Once this buffer is full, the speed of the API connection is affected, and so + this impacts the output process' writing speed. This is similar to other + applications like SSH. Because of this, it isn't recommended to run + performance-critical applications that generate a lot of output in the + foreground over a slow client connection. Instead, use the `docker logs` + command to get access to the logs. usage: docker container attach [OPTIONS] CONTAINER pname: docker container plink: docker_container.yaml @@ -10,6 +46,7 @@ options: - option: detach-keys value_type: string description: Override the key sequence for detaching a container + details_url: '#detach-keys' deprecated: false hidden: false experimental: false @@ -47,6 +84,113 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Attach to and detach from a running container + + The following example starts an Alpine container running `top` in detached mode, + then attaches to the container; + + ```console + $ docker run -d --name topdemo alpine top -b + + $ docker attach topdemo + + Mem: 2395856K used, 5638884K free, 2328K shrd, 61904K buff, 1524264K cached + CPU: 0% usr 0% sys 0% nic 99% idle 0% io 0% irq 0% sirq + Load average: 0.15 0.06 0.01 1/567 6 + PID PPID USER STAT VSZ %VSZ CPU %CPU COMMAND + 1 0 root R 1700 0% 3 0% top -b + ``` + + As the container was started without the `-i`, and `-t` options, signals are + forwarded to the attached process, which means that the default `CTRL-p CTRL-q` + detach key sequence produces no effect, but pressing `CTRL-c` terminates the + container: + + ```console + <...> + PID PPID USER STAT VSZ %VSZ CPU %CPU COMMAND + 1 0 root R 1700 0% 7 0% top -b + ^P^Q + ^C + + $ docker ps -a --filter name=topdemo + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 96254a235bd6 alpine "top -b" 44 seconds ago Exited (130) 8 seconds ago topdemo + ``` + + Repeating the example above, but this time with the `-i` and `-t` options set; + + ```console + $ docker run -dit --name topdemo2 ubuntu:22.04 /usr/bin/top -b + ``` + + Now, when attaching to the container, and pressing the `CTRL-p CTRL-q` ("read + escape sequence"), the Docker CLI is handling the detach sequence, and the + `attach` command is detached from the container. 
Checking the container's status + with `docker ps` shows that the container is still running in the background: + + ```console + $ docker attach topdemo2 + + Mem: 2405344K used, 5629396K free, 2512K shrd, 65100K buff, 1524952K cached + CPU: 0% usr 0% sys 0% nic 99% idle 0% io 0% irq 0% sirq + Load average: 0.12 0.12 0.05 1/594 6 + PID PPID USER STAT VSZ %VSZ CPU %CPU COMMAND + 1 0 root R 1700 0% 3 0% top -b + read escape sequence + + $ docker ps -a --filter name=topdemo2 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + fde88b83c2c2 alpine "top -b" 22 seconds ago Up 21 seconds topdemo2 + ``` + + ### Get the exit code of the container's command + + And in this second example, you can see the exit code returned by the `bash` + process is returned by the `docker attach` command to its caller too: + + ```console + $ docker run --name test -dit alpine + 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab + + $ docker attach test + /# exit 13 + + $ echo $? + 13 + + $ docker ps -a --filter name=test + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a2fe3fd886db alpine "/bin/sh" About a minute ago Exited (13) 40 seconds ago test + ``` + + ### Override the detach sequence (--detach-keys) {#detach-keys} + + Use the `--detach-keys` option to override the Docker key sequence for detach. + This is useful if the Docker default sequence conflicts with key sequence you + use for other applications. There are two ways to define your own detach key + sequence, as a per-container override or as a configuration property on your + entire configuration. + + To override the sequence for an individual container, use the + `--detach-keys=""` flag with the `docker attach` command. The format of + the `` is either a letter [a-Z], or the `ctrl-` combined with any of + the following: + + * `a-z` (a single lowercase alpha character ) + * `@` (at sign) + * `[` (left bracket) + * `\\` (two backward slashes) + * `_` (underscore) + * `^` (caret) + + These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key + sequences. To configure a different configuration default key sequence for all + containers, see [**Configuration file** section](cli.md#configuration-files). deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_commit.yaml b/data/engine-cli/docker_container_commit.yaml index 1d39b8ed5..c688291a4 100644 --- a/data/engine-cli/docker_container_commit.yaml +++ b/data/engine-cli/docker_container_commit.yaml @@ -1,7 +1,21 @@ command: docker container commit aliases: docker container commit, docker commit short: Create a new image from a container's changes -long: See [docker commit](commit.md) for more information. +long: |- + It can be useful to commit a container's file changes or settings into a new + image. This lets you debug a container by running an interactive shell, or + export a working dataset to another server. + + Commits do not include any data contained in mounted volumes. + + By default, the container being committed and its processes will be paused + while the image is committed. This reduces the likelihood of encountering data + corruption during the process of creating the commit. If this behavior is + undesired, set the `--pause` option to false. + + The `--change` option will apply `Dockerfile` instructions to the image that's + created. 
Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` usage: docker container commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] pname: docker container plink: docker_container.yaml @@ -20,6 +34,7 @@ options: shorthand: c value_type: list description: Apply Dockerfile instruction to the created image + details_url: '#change' deprecated: false hidden: false experimental: false @@ -58,6 +73,72 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Commit a container + + ```console + $ docker ps + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky + 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + + $ docker commit c3f279d17e0a svendowideit/testimage:version3 + + f5283438590d + + $ docker images + + REPOSITORY TAG ID CREATED SIZE + svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB + ``` + + ### Commit a container with new configurations (--change) {#change} + + ```console + $ docker ps + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky + 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + + $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a + + [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] + + $ docker commit --change "ENV DEBUG=true" c3f279d17e0a svendowideit/testimage:version3 + + f5283438590d + + $ docker inspect -f "{{ .Config.Env }}" f5283438590d + + [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] + ``` + + ### Commit a container with new `CMD` and `EXPOSE` instructions + + ```console + $ docker ps + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky + 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + + $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 + + f5283438590d + + $ docker run -d svendowideit/testimage:version4 + + 89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 + + $ docker ps + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp distracted_fermat + c3f279d17e0a ubuntu:22.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky + 197387f1b436 ubuntu:22.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_cp.yaml b/data/engine-cli/docker_container_cp.yaml index 9da8707c4..3b656da13 100644 --- a/data/engine-cli/docker_container_cp.yaml +++ b/data/engine-cli/docker_container_cp.yaml @@ -1,7 +1,70 @@ command: docker container cp aliases: docker container cp, docker cp short: Copy files/folders between a container and the local filesystem -long: See [docker cp](cp.md) for more information. +long: |- + The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. + You can copy from the container's file system to the local machine or the + reverse, from the local filesystem to the container. If `-` is specified for + either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from + `STDIN` or to `STDOUT`. 
The `CONTAINER` can be a running or stopped container. + The `SRC_PATH` or `DEST_PATH` can be a file or directory. + + The `docker cp` command assumes container paths are relative to the container's + `/` (root) directory. This means supplying the initial forward slash is optional; + The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and + `compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can + be an absolute or relative value. The command interprets a local machine's + relative paths as relative to the current working directory where `docker cp` is + run. + + The `cp` command behaves like the Unix `cp -a` command in that directories are + copied recursively with permissions preserved if possible. Ownership is set to + the user and primary group at the destination. For example, files copied to a + container are created with `UID:GID` of the root user. Files copied to the local + machine are created with the `UID:GID` of the user which invoked the `docker cp` + command. However, if you specify the `-a` option, `docker cp` sets the ownership + to the user and primary group at the source. + If you specify the `-L` option, `docker cp` follows any symbolic link + in the `SRC_PATH`. `docker cp` doesn't create parent directories for + `DEST_PATH` if they don't exist. + + Assuming a path separator of `/`, a first argument of `SRC_PATH` and second + argument of `DEST_PATH`, the behavior is as follows: + + - `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` + - `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) + - the *content* of the source directory is copied into this + directory + + The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above + rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not + the target, is copied by default. To copy the link target and not the link, specify + the `-L` option. + + A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can + also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local + machine, for example `file:name.txt`. 
If you use a `:` in a local machine path, + you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` usage: |- docker container cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH @@ -53,6 +116,45 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + Copy a local file into container + + ```console + $ docker cp ./some_file CONTAINER:/work + ``` + + Copy files from container to local path + + ```console + $ docker cp CONTAINER:/var/logs/ /tmp/app_logs + ``` + + Copy a file from container to stdout. Please note `cp` command produces a tar stream + + ```console + $ docker cp CONTAINER:/var/logs/app.log - | tar x -O | grep "ERROR" + ``` + + ### Corner cases + + It isn't possible to copy certain system files such as resources under + `/proc`, `/sys`, `/dev`, [tmpfs](run.md#tmpfs), and mounts created by + the user in the container. However, you can still copy such files by manually + running `tar` in `docker exec`. Both of the following examples do the same thing + in different ways (consider `SRC_PATH` and `DEST_PATH` are directories): + + ```console + $ docker exec CONTAINER tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + ``` + + ```console + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i CONTAINER tar Cxf DEST_PATH - + ``` + + Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. + The command extracts the content of the tar to the `DEST_PATH` in container's + filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as + the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_create.yaml b/data/engine-cli/docker_container_create.yaml index 2d596133b..03fdf7924 100644 --- a/data/engine-cli/docker_container_create.yaml +++ b/data/engine-cli/docker_container_create.yaml @@ -1,7 +1,24 @@ command: docker container create aliases: docker container create, docker create short: Create a new container -long: See [docker create](create.md) for more information. +long: |- + The `docker container create` (or shorthand: `docker create`) command creates a + new container from the specified image, without starting it. + + When creating a container, the Docker daemon creates a writeable container layer + over the specified image and prepares it for running the specified command. The + container ID is then printed to `STDOUT`. This is similar to `docker run -d` + except the container is never started. You can then use the `docker container start` + (or shorthand: `docker start`) command to start the container at any point. + + This is useful when you want to set up a container configuration ahead of time + so that it's ready to start when you need it. The initial status of the + new container is `created`. + + The `docker create` command shares most of its options with the `docker run` + command (which performs a `docker create` before starting it). Refer to the + [`docker run` command](run.md) section and the [Docker run reference](../run.md) + for details on the available flags and options. usage: docker container create [OPTIONS] IMAGE [COMMAND] [ARG...] 
pname: docker container plink: docker_container.yaml @@ -410,6 +427,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: health-start-interval + value_type: duration + default_value: 0s + description: | + Time between running the check during the start period (ms|s|m|h) (default 0s) + deprecated: false + hidden: false + min_api_version: "1.44" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: health-start-period value_type: duration default_value: 0s @@ -1007,6 +1036,68 @@ options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Create and start a container + + The following example creates an interactive container with a pseudo-TTY attached, + then starts the container and attaches to it: + + ```console + $ docker container create -i -t --name mycontainer alpine + 6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 + + $ docker container start --attach -i mycontainer + / # echo hello world + hello world + ``` + + The above is the equivalent of a `docker run`: + + ```console + $ docker run -it --name mycontainer2 alpine + / # echo hello world + hello world + ``` + + ### Initialize volumes + + Container volumes are initialized during the `docker create` phase + (i.e., `docker run` too). For example, this allows you to `create` the `data` + volume container, and then use it from another container: + + ```console + $ docker create -v /data --name data ubuntu + + 240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 + + $ docker run --rm --volumes-from data ubuntu ls -la /data + + total 8 + drwxr-xr-x 2 root root 4096 Dec 5 04:10 . + drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. + ``` + + Similarly, `create` a host directory bind mounted volume container, which can + then be used from the subsequent container: + + ```console + $ docker create -v /home/docker:/docker --name docker ubuntu + + 9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 + + $ docker run --rm --volumes-from docker ubuntu ls -la /docker + + total 20 + drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . + drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. + -rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history + -rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc + -rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig + drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local + -rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile + drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh + drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_diff.yaml b/data/engine-cli/docker_container_diff.yaml index 80d309637..39716f874 100644 --- a/data/engine-cli/docker_container_diff.yaml +++ b/data/engine-cli/docker_container_diff.yaml @@ -1,7 +1,18 @@ command: docker container diff aliases: docker container diff, docker diff short: Inspect changes to files or directories on a container's filesystem -long: See [docker diff](diff.md) for more information. +long: |- + List the changed files and directories in a container᾿s filesystem since the + container was created. Three different types of change are tracked: + + | Symbol | Description | + |--------|---------------------------------| + | `A` | A file or directory was added | + | `D` | A file or directory was deleted | + | `C` | A file or directory was changed | + + You can use the full or shortened container ID or the container name set using + `docker run --name` option. 
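As a small sketch building on the change types above (the container name `mycontainer` is an assumption), the output can be narrowed to a single change type with standard shell tools:

```console
$ docker diff mycontainer | grep '^A'
```

This lists only the files and directories added since the container was created.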
usage: docker container diff CONTAINER pname: docker container plink: docker_container.yaml @@ -16,6 +27,32 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + Inspect the changes to an `nginx` container: + + ```console + $ docker diff 1fdfd1f54c1b + + C /dev + C /dev/console + C /dev/core + C /dev/stdout + C /dev/fd + C /dev/ptmx + C /dev/stderr + C /dev/stdin + C /run + A /run/nginx.pid + C /var/lib/nginx/tmp + A /var/lib/nginx/tmp/client_body + A /var/lib/nginx/tmp/fastcgi + A /var/lib/nginx/tmp/proxy + A /var/lib/nginx/tmp/scgi + A /var/lib/nginx/tmp/uwsgi + C /var/log/nginx + A /var/log/nginx/access.log + A /var/log/nginx/error.log + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_exec.yaml b/data/engine-cli/docker_container_exec.yaml index 88b049f20..79db1e6cc 100644 --- a/data/engine-cli/docker_container_exec.yaml +++ b/data/engine-cli/docker_container_exec.yaml @@ -1,7 +1,19 @@ command: docker container exec aliases: docker container exec, docker exec short: Execute a command in a running container -long: See [docker exec](exec.md) for more information. +long: |- + The `docker exec` command runs a new command in a running container. + + The command you specify with `docker exec` only runs while the container's + primary process (`PID 1`) is running, and it isn't restarted if the container + is restarted. + + The command runs in the default working directory of the container. + + The command must be an executable. A chained or a quoted command doesn't work. + + - This works: `docker exec -it my_container sh -c "echo a && echo b"` + - This doesn't work: `docker exec -it my_container "echo a && echo b"` usage: docker container exec [OPTIONS] CONTAINER COMMAND [ARG...] pname: docker container plink: docker_container.yaml @@ -30,6 +42,7 @@ options: shorthand: e value_type: list description: Set environment variables + details_url: '#env' deprecated: false hidden: false min_api_version: "1.25" @@ -93,6 +106,7 @@ options: shorthand: w value_type: string description: Working directory inside the container + details_url: '#workdir' deprecated: false hidden: false min_api_version: "1.35" @@ -111,6 +125,100 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Run `docker exec` on a running container + + First, start a container. + + ```console + $ docker run --name mycontainer -d -i -t alpine /bin/sh + ``` + + This creates and starts a container named `mycontainer` from an `alpine` image + with an `sh` shell as its main process. The `-d` option (shorthand for `--detach`) + sets the container to run in the background, in detached mode, with a pseudo-TTY + attached (`-t`). The `-i` option is set to keep `STDIN` attached (`-i`), which + prevents the `sh` process from exiting immediately. + + Next, execute a command on the container. + + ```console + $ docker exec -d mycontainer touch /tmp/execWorks + ``` + + This creates a new file `/tmp/execWorks` inside the running container + `mycontainer`, in the background. + + Next, execute an interactive `sh` shell on the container. + + ```console + $ docker exec -it mycontainer sh + ``` + + This starts a new shell session in the container `mycontainer`. + + ### Set environment variables for the exec process (--env, -e) {#env} + + Next, set environment variables in the current bash session. + + The `docker exec` command inherits the environment variables that are set at the + time the container is created. 
Use the `--env` (or the `-e` shorthand) to + override global environment variables, or to set additional environment + variables for the process started by `docker exec`. + + The following example creates a new shell session in the container `mycontainer`, + with environment variables `$VAR_A` set to `1`, and `$VAR_B` set to `2`. + These environment variables are only valid for the `sh` process started by that + `docker exec` command, and aren't available to other processes running inside + the container. + + ```console + $ docker exec -e VAR_A=1 -e VAR_B=2 mycontainer env + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + HOSTNAME=f64a4851eb71 + VAR_A=1 + VAR_B=2 + HOME=/root + ``` + + ### Set the working directory for the exec process (--workdir, -w) {#workdir} + + By default `docker exec` command runs in the same working directory set when + the container was created. + + ```console + $ docker exec -it mycontainer pwd + / + ``` + + You can specify an alternative working directory for the command to execute + using the `--workdir` option (or the `-w` shorthand): + + ```console + $ docker exec -it -w /root mycontainer pwd + /root + ``` + + ### Try to run `docker exec` on a paused container + + If the container is paused, then the `docker exec` command fails with an error: + + ```console + $ docker pause mycontainer + mycontainer + + $ docker ps + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 482efdf39fac alpine "/bin/sh" 17 seconds ago Up 16 seconds (Paused) mycontainer + + $ docker exec mycontainer sh + + Error response from daemon: Container mycontainer is paused, unpause the container before exec + + $ echo $? + 1 + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_export.yaml b/data/engine-cli/docker_container_export.yaml index c2a7a52e5..d0643692f 100644 --- a/data/engine-cli/docker_container_export.yaml +++ b/data/engine-cli/docker_container_export.yaml @@ -1,7 +1,14 @@ command: docker container export aliases: docker container export, docker export short: Export a container's filesystem as a tar archive -long: See [docker export](export.md) for more information. +long: |- + The `docker export` command doesn't export the contents of volumes associated + with the container. If a volume is mounted on top of an existing directory in + the container, `docker export` exports the contents of the underlying + directory, not the contents of the volume. + + Refer to [Backup, restore, or migrate data volumes](/storage/volumes/#back-up-restore-or-migrate-data-volumes) + in the user guide for examples on exporting data in a volume. usage: docker container export [OPTIONS] CONTAINER pname: docker container plink: docker_container.yaml @@ -27,6 +34,16 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + The following commands produce the same result. + + ```console + $ docker export red_panda > latest.tar + ``` + + ```console + $ docker export --output="latest.tar" red_panda + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_kill.yaml b/data/engine-cli/docker_container_kill.yaml index f069f0227..8d6bceeed 100644 --- a/data/engine-cli/docker_container_kill.yaml +++ b/data/engine-cli/docker_container_kill.yaml @@ -1,7 +1,27 @@ command: docker container kill aliases: docker container kill, docker kill short: Kill one or more running containers -long: See [docker kill](kill.md) for more information. 
+long: |- + The `docker kill` subcommand kills one or more containers. The main process + inside the container is sent the `SIGKILL` signal (default), or the signal that is + specified with the `--signal` option. You can reference a container by its + ID, ID-prefix, or name. + + The `--signal` flag sets the system call signal that is sent to the container. + This signal can be a signal name in the format `SIG<NAME>`, for instance `SIGINT`, + or an unsigned number that matches a position in the kernel's syscall table, + for instance `2`. + + While the default (`SIGKILL`) signal will terminate the container, the signal + set through `--signal` may be non-terminal, depending on the container's main + process. For example, the `SIGHUP` signal in most cases will be non-terminal, + and the container will continue running after receiving the signal. + + > **Note** + > + > `ENTRYPOINT` and `CMD` in the *shell* form run as a child process of + > `/bin/sh -c`, which does not pass signals. This means that the executable is + > not the container’s PID 1 and does not receive Unix signals. usage: docker container kill [OPTIONS] CONTAINER [CONTAINER...] pname: docker container plink: docker_container.yaml @@ -10,6 +30,7 @@ options: - option: signal shorthand: s value_type: string description: Signal to send to the container + details_url: '#signal' deprecated: false hidden: false experimental: false @@ -27,6 +48,37 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Send a KILL signal to a container + + The following example sends the default `SIGKILL` signal to the container named + `my_container`: + + ```console + $ docker kill my_container + ``` + + ### Send a custom signal to a container (--signal) {#signal} + + The following example sends a `SIGHUP` signal to the container named + `my_container`: + + ```console + $ docker kill --signal=SIGHUP my_container + ``` + + + You can specify a custom signal either by _name_, or _number_. The `SIG` prefix + is optional, so the following examples are equivalent: + + ```console + $ docker kill --signal=SIGHUP my_container + $ docker kill --signal=HUP my_container + $ docker kill --signal=1 my_container + ``` + + Refer to the [`signal(7)`](https://man7.org/linux/man-pages/man7/signal.7.html) + man-page for a list of standard Linux signals. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_logs.yaml b/data/engine-cli/docker_container_logs.yaml index b21f45a4b..50fbe4bb6 100644 --- a/data/engine-cli/docker_container_logs.yaml +++ b/data/engine-cli/docker_container_logs.yaml @@ -1,7 +1,39 @@ command: docker container logs aliases: docker container logs, docker logs short: Fetch the logs of a container -long: See [docker logs](logs.md) for more information. +long: |- + The `docker logs` command batch-retrieves logs present at the time of execution. + + For more information about selecting and configuring logging drivers, refer to + [Configure logging drivers](/config/containers/logging/configure/). + + The `docker logs --follow` command will continue streaming the new output from + the container's `STDOUT` and `STDERR`. + + Passing a negative number or a non-integer to `--tail` is invalid and the + value is set to `all` in that case. + + The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://pkg.go.dev/time#RFC3339Nano), for example `2014-09-16T06:17:46.000000000Z`, to each + log entry.
To ensure that the timestamps are aligned the + nano-second part of the timestamp will be padded with zero when necessary. + + The `docker logs --details` command will add on extra attributes, such as + environment variables and labels, provided to `--log-opt` when creating the + container. + + The `--since` option shows only the container logs generated after + a given date. You can specify the date as an RFC 3339 date, a UNIX + timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date + format you may also use RFC3339Nano, `2006-01-02T15:04:05`, + `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local + timezone on the client will be used if you do not provide either a `Z` or a + `+-00:00` timezone offset at the end of the timestamp. When providing Unix + timestamps enter seconds[.nanoseconds], where seconds is the number of seconds + that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap + seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a + fraction of a second no more than nine digits long. You can combine the + `--since` option with either or both of the `--follow` or `--tail` options. usage: docker container logs [OPTIONS] CONTAINER pname: docker container plink: docker_container.yaml @@ -63,6 +95,7 @@ options: value_type: string description: | Show logs before a timestamp (e.g. `2013-01-02T13:23:37Z`) or relative (e.g. `42m` for 42 minutes) + details_url: '#until' deprecated: false hidden: false min_api_version: "1.35" @@ -81,6 +114,20 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Retrieve logs until a specific point in time (--until) {#until} + + In order to retrieve logs before a specific point in time, run: + + ```console + $ docker run --name test -d busybox sh -c "while true; do $(echo date); sleep 1; done" + $ date + Tue 14 Nov 2017 16:40:00 CET + $ docker logs -f --until=2s test + Tue 14 Nov 2017 16:40:00 CET + Tue 14 Nov 2017 16:40:01 CET + Tue 14 Nov 2017 16:40:02 CET + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_ls.yaml b/data/engine-cli/docker_container_ls.yaml index 4a53f4b90..8e059d80c 100644 --- a/data/engine-cli/docker_container_ls.yaml +++ b/data/engine-cli/docker_container_ls.yaml @@ -1,7 +1,7 @@ command: docker container ls aliases: docker container ls, docker container list, docker container ps, docker ps short: List containers -long: See [docker ps](ps.md) for more information. +long: List containers usage: docker container ls [OPTIONS] pname: docker container plink: docker_container.yaml @@ -11,6 +11,7 @@ options: value_type: bool default_value: "false" description: Show all containers (default shows just running) + details_url: '#all' deprecated: false hidden: false experimental: false @@ -21,6 +22,7 @@ options: shorthand: f value_type: filter description: Filter output based on conditions provided + details_url: '#filter' deprecated: false hidden: false experimental: false @@ -36,6 +38,7 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. 
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + details_url: '#format' deprecated: false hidden: false experimental: false @@ -68,6 +71,7 @@ options: value_type: bool default_value: "false" description: Don't truncate output + details_url: '#no-trunc' deprecated: false hidden: false experimental: false @@ -90,6 +94,7 @@ options: value_type: bool default_value: "false" description: Display total file sizes + details_url: '#size' deprecated: false hidden: false experimental: false @@ -107,6 +112,428 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Do not truncate output (--no-trunc) {#no-trunc} + + Running `docker ps --no-trunc` showing 2 linked containers. + + ```console + $ docker ps --no-trunc + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ca5534a51dd04bbcebe9b23ba05f389466cf0c190f1f8f182d7eea92a9671d00 ubuntu:22.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp + 9ca9747b233100676a48cc7806131586213fa5dab86dd1972d6a8732e3a84a4d crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db + ``` + + ### Show both running and stopped containers (-a, --all) {#all} + + The `docker ps` command only shows running containers by default. To see all + containers, use the `--all` (or `-a`) flag: + + ```console + $ docker ps -a + ``` + + `docker ps` groups exposed ports into a single range if possible. E.g., a + container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in + the `PORTS` column. + + ### Show disk usage by container (--size) {#size} + + The `docker ps --size` (or `-s`) command displays two different on-disk-sizes for each container: + + ```console + $ docker ps --size + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE + e90b8831a4b8 nginx "/bin/bash -c 'mkdir " 11 weeks ago Up 4 hours my_nginx 35.58 kB (virtual 109.2 MB) + 00c6131c5e30 telegraf:1.5 "/entrypoint.sh" 11 weeks ago Up 11 weeks my_telegraf 0 B (virtual 209.5 MB) + ``` + * The "size" information shows the amount of data (on disk) that is used for the _writable_ layer of each container + * The "virtual size" is the total amount of disk-space used for the read-only _image_ data used by the container and the writable layer. + + For more information, refer to the [container size on disk](/storage/storagedriver/#container-size-on-disk) section. + + + ### Filtering (--filter) {#filter} + + The `--filter` (or `-f`) flag format is a `key=value` pair. If there is more + than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). + + The currently supported filters are: + + | Filter | Description | + |:----------------------|:-------------------------------------------------------------------------------------------------------------------------------------| + | `id` | Container's ID | + | `name` | Container's name | + | `label` | An arbitrary string representing either a key or a key-value pair. Expressed as `<key>` or `<key>=<value>` | + | `exited` | An integer representing the container's exit code. Only useful with `--all`. | + | `status` | One of `created`, `restarting`, `running`, `removing`, `paused`, `exited`, or `dead` | + | `ancestor` | Filters containers which share a given image as an ancestor. Expressed as `<image-name>[:<tag>]`, `<image id>`, or `<image@digest>` | + | `before` or `since` | Filters containers created before or after a given container ID or name | + | `volume` | Filters running containers which have mounted a given volume or bind mount.
| + | `network` | Filters running containers connected to a given network. | + | `publish` or `expose` | Filters containers which publish or expose a given port. Expressed as `<port>[/<proto>]` or `<startport-endport>/[<proto>]` | + | `health` | Filters containers based on their healthcheck status. One of `starting`, `healthy`, `unhealthy` or `none`. | + | `isolation` | Windows daemon only. One of `default`, `process`, or `hyperv`. | + | `is-task` | Filters containers that are a "task" for a service. Boolean option (`true` or `false`) | + + + #### label + + The `label` filter matches containers based on the presence of a `label` alone or a `label` and a + value. + + The following filter matches containers with the `color` label regardless of its value. + + ```console + $ docker ps --filter "label=color" + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley + d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani + ``` + + The following filter matches containers with the `color` label with the `blue` value. + + ```console + $ docker ps --filter "label=color=blue" + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani + ``` + + #### name + + The `name` filter matches on all or part of a container's name. + + The following filter matches all containers with a name containing the `nostalgic_stallman` string. + + ```console + $ docker ps --filter "name=nostalgic_stallman" + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman + ``` + + You can also filter for a substring in a name as this shows: + + ```console + $ docker ps --filter "name=nostalgic" + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic + 9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman + 673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley + ``` + + #### exited + + The `exited` filter matches containers by exit status code. For example, to + filter for containers that have exited successfully: + + ```console + $ docker ps -a --filter 'exited=0' + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey + 106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani + 48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds + ``` + + #### Filter by exit signal + + You can use a filter to locate containers that exited with status of `137` + meaning a `SIGKILL(9)` killed them. + + ```console + $ docker ps -a --filter 'exited=137' + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski + a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande + ``` + + Any of these events result in a `137` status: + + * the `init` process of the container is killed manually + * `docker kill` kills the container + * Docker daemon restarts which kills all running containers + + #### status + + The `status` filter matches containers by status.
The possible values for the container status are: + + | Status | Description | + | :----------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | `created` | A container that has never been started. | + | `running` | A running container, started by either `docker start` or `docker run`. | + | `paused` | A paused container. See `docker pause`. | + | `restarting` | A container which is starting due to the designated restart policy for that container. | + | `exited` | A container which is no longer running. For example, the process inside the container completed or the container was stopped using the `docker stop` command. | + | `removing` | A container which is in the process of being removed. See `docker rm`. | + | `dead` | A "defunct" container; for example, a container that was only partially removed because resources were kept busy by an external process. `dead` containers cannot be (re)started, only removed. | + + For example, to filter for `running` containers: + + ```console + $ docker ps --filter status=running + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic + d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top + 9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman + ``` + + To filter for `paused` containers: + + ```console + $ docker ps --filter status=paused + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley + ``` + + #### ancestor + + The `ancestor` filter matches containers based on its image or a descendant of + it. The filter supports the following image representation: + + - `image` + - `image:tag` + - `image:tag@digest` + - `short-id` + - `full-id` + + If you don't specify a `tag`, the `latest` tag is used. For example, to filter + for containers that use the latest `ubuntu` image: + + ```console + $ docker ps --filter ancestor=ubuntu + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace + 5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet + 82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose + bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath + ``` + + Match containers based on the `ubuntu-c1` image which, in this case, is a child + of `ubuntu`: + + ```console + $ docker ps --filter ancestor=ubuntu-c1 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace + ``` + + Match containers based on the `ubuntu` version `22.04` image: + + ```console + $ docker ps --filter ancestor=ubuntu:22.04 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 82a598284012 ubuntu:22.04 "top" 3 minutes ago Up 3 minutes sleepy_bose + ``` + + The following matches containers based on the layer `d0e008c6cf02` or an image + that have this layer in its layer stack. + + ```console + $ docker ps --filter ancestor=d0e008c6cf02 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 82a598284012 ubuntu:22.04 "top" 3 minutes ago Up 3 minutes sleepy_bose + ``` + + #### Create time + + ##### before + + The `before` filter shows only containers created before the container with + a given ID or name. 
For example, having these containers created: + + ```console + $ docker ps + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky + 4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton + 6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat + ``` + + Filtering with `before` would give: + + ```console + $ docker ps -f before=9c3527ed70ce + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton + 6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat + ``` + + ##### since + + The `since` filter shows only containers created since the container with a given + ID or name. For example, with the same containers as in `before` filter: + + ```console + $ docker ps -f since=6e63f6ff38b0 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky + 4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton + ``` + + #### volume + + The `volume` filter shows only containers that mount a specific volume or have + a volume mounted in a specific path: + + ```console + $ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" + + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + + $ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" + + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + ``` + + #### network + + The `network` filter shows only containers that are connected to a network with + a given name or ID. + + The following filter matches all containers that are connected to a network + with a name containing `net1`. + + ```console + $ docker run -d --net=net1 --name=test1 ubuntu top + $ docker run -d --net=net2 --name=test2 ubuntu top + + $ docker ps --filter network=net1 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 + ``` + + The network filter matches on both the network's name and ID. The following + example shows all containers that are attached to the `net1` network, using + the network ID as a filter: + + ```console + $ docker network inspect --format "{{.ID}}" net1 + + 8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + + $ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 + ``` + + #### publish and expose + + The `publish` and `expose` filters show only containers that have published or exposed port with a given port + number, port range, and/or protocol. The default protocol is `tcp` when not specified. 
+ + The following filter matches all containers that have published port of 80: + + ```console + $ docker run -d --publish=80 busybox top + $ docker run -d --expose=8080 busybox top + + $ docker ps -a + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9833437217a5 busybox "top" 5 seconds ago Up 4 seconds 8080/tcp dreamy_mccarthy + fc7e477723b7 busybox "top" 50 seconds ago Up 50 seconds 0.0.0.0:32768->80/tcp admiring_roentgen + + $ docker ps --filter publish=80 + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + fc7e477723b7 busybox "top" About a minute ago Up About a minute 0.0.0.0:32768->80/tcp admiring_roentgen + ``` + + The following filter matches all containers that have exposed TCP port in the range of `8000-8080`: + + ```console + $ docker ps --filter expose=8000-8080/tcp + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9833437217a5 busybox "top" 21 seconds ago Up 19 seconds 8080/tcp dreamy_mccarthy + ``` + + The following filter matches all containers that have exposed UDP port `80`: + + ```console + $ docker ps --filter publish=80/udp + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ``` + + ### Format the output (--format) {#format} + + The formatting option (`--format`) pretty-prints container output using a Go + template. + + Valid placeholders for the Go template are listed below: + + | Placeholder | Description | + |:--------------|:------------------------------------------------------------------------------------------------| + | `.ID` | Container ID | + | `.Image` | Image ID | + | `.Command` | Quoted command | + | `.CreatedAt` | Time when the container was created. | + | `.RunningFor` | Elapsed time since the container was started. | + | `.Ports` | Exposed ports. | + | `.State` | Container status (for example; "created", "running", "exited"). | + | `.Status` | Container status with details about duration and health-status. | + | `.Size` | Container disk size. | + | `.Names` | Container names. | + | `.Labels` | All labels assigned to the container. | + | `.Label` | Value of a specific label for this container. For example `'{{.Label "com.docker.swarm.cpu"}}'` | + | `.Mounts` | Names of the volumes mounted in this container. | + | `.Networks` | Names of the networks attached to this container. | + + When using the `--format` option, the `ps` command will either output the data + exactly as the template declares or, when using the `table` directive, includes + column headers as well. 
+ + The following example uses a template without headers and outputs the `ID` and + `Command` entries separated by a colon (`:`) for all running containers: + + ```console + $ docker ps --format "{{.ID}}: {{.Command}}" + + a87ecb4f327c: /bin/sh -c #(nop) MA + 01946d9d34d8: /bin/sh -c #(nop) MA + c1d3b0166030: /bin/sh -c yum -y up + 41d50ecd2f57: /bin/sh -c #(nop) MA + ``` + + To list all running containers with their labels in a table format you can use: + + ```console + $ docker ps --format "table {{.ID}}\t{{.Labels}}" + + CONTAINER ID LABELS + a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd + 01946d9d34d8 + c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 + 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd + ``` + + To list all running containers in JSON format, use the `json` directive: + + ```console + $ docker ps --format json + {"Command":"\"/docker-entrypoint.…\"","CreatedAt":"2021-03-10 00:15:05 +0100 CET","ID":"a762a2b37a1d","Image":"nginx","Labels":"maintainer=NGINX Docker Maintainers \u003cdocker-maint@nginx.com\u003e","LocalVolumes":"0","Mounts":"","Names":"boring_keldysh","Networks":"bridge","Ports":"80/tcp","RunningFor":"4 seconds ago","Size":"0B","State":"running","Status":"Up 3 seconds"} + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_pause.yaml b/data/engine-cli/docker_container_pause.yaml index 2840a2448..a9a47ed40 100644 --- a/data/engine-cli/docker_container_pause.yaml +++ b/data/engine-cli/docker_container_pause.yaml @@ -1,7 +1,17 @@ command: docker container pause aliases: docker container pause, docker pause short: Pause all processes within one or more containers -long: See [docker pause](pause.md) for more information. +long: |- + The `docker pause` command suspends all processes in the specified containers. + On Linux, this uses the freezer cgroup. Traditionally, when suspending a process + the `SIGSTOP` signal is used, which is observable by the process being suspended. + With the freezer cgroup the process is unaware, and unable to capture, + that it is being suspended, and subsequently resumed. On Windows, only Hyper-V + containers can be paused. + + See the + [freezer cgroup documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) + for further details. usage: docker container pause CONTAINER [CONTAINER...] pname: docker container plink: docker_container.yaml @@ -16,6 +26,10 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ```console + $ docker pause my_container + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_port.yaml b/data/engine-cli/docker_container_port.yaml index 03c567fa2..89976d8f9 100644 --- a/data/engine-cli/docker_container_port.yaml +++ b/data/engine-cli/docker_container_port.yaml @@ -1,7 +1,7 @@ command: docker container port aliases: docker container port, docker port short: List port mappings or a specific mapping for the container -long: See [docker port](port.md) for more information. 
+long: List port mappings or a specific mapping for the container usage: docker container port CONTAINER [PRIVATE_PORT[/PROTO]] pname: docker container plink: docker_container.yaml @@ -16,6 +16,35 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Show all mapped ports + + You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or + just a specific mapping: + + ```console + $ docker ps + + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + + $ docker port test + + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + + $ docker port test 7890/tcp + + 0.0.0.0:4321 + + $ docker port test 7890/udp + + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + + $ docker port test 7890 + + 0.0.0.0:4321 + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_prune.yaml b/data/engine-cli/docker_container_prune.yaml index 881963710..c9d1bfe20 100644 --- a/data/engine-cli/docker_container_prune.yaml +++ b/data/engine-cli/docker_container_prune.yaml @@ -67,7 +67,7 @@ examples: |- formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the daemon will be used if you do not provide either a `Z` or a - `+-00:00` timezone offset at the end of the timestamp. When providing Unix + `+-00:00` timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a diff --git a/data/engine-cli/docker_container_rename.yaml b/data/engine-cli/docker_container_rename.yaml index c6a144fde..a337b180f 100644 --- a/data/engine-cli/docker_container_rename.yaml +++ b/data/engine-cli/docker_container_rename.yaml @@ -1,7 +1,7 @@ command: docker container rename aliases: docker container rename, docker rename short: Rename a container -long: See [docker rename](rename.md) for more information. +long: The `docker rename` command renames a container. usage: docker container rename CONTAINER NEW_NAME pname: docker container plink: docker_container.yaml @@ -16,6 +16,10 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ```console + $ docker rename my_container my_new_container + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_restart.yaml b/data/engine-cli/docker_container_restart.yaml index a63b75b3f..6a0eaf466 100644 --- a/data/engine-cli/docker_container_restart.yaml +++ b/data/engine-cli/docker_container_restart.yaml @@ -1,7 +1,7 @@ command: docker container restart aliases: docker container restart, docker restart short: Restart one or more containers -long: See [docker restart](restart.md) for more information. +long: Restart one or more containers usage: docker container restart [OPTIONS] CONTAINER [CONTAINER...] 
pname: docker container plink: docker_container.yaml @@ -38,6 +38,10 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ```console + $ docker restart my_container + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_rm.yaml b/data/engine-cli/docker_container_rm.yaml index 933c3a580..8f8982b20 100644 --- a/data/engine-cli/docker_container_rm.yaml +++ b/data/engine-cli/docker_container_rm.yaml @@ -1,7 +1,7 @@ command: docker container rm aliases: docker container rm, docker container remove, docker rm short: Remove one or more containers -long: See [docker rm](rm.md) for more information. +long: Remove one or more containers usage: docker container rm [OPTIONS] CONTAINER [CONTAINER...] pname: docker container plink: docker_container.yaml @@ -11,6 +11,7 @@ options: value_type: bool default_value: "false" description: Force the removal of a running container (uses SIGKILL) + details_url: '#force' deprecated: false hidden: false experimental: false @@ -22,6 +23,7 @@ options: value_type: bool default_value: "false" description: Remove the specified link + details_url: '#link' deprecated: false hidden: false experimental: false @@ -33,6 +35,7 @@ options: value_type: bool default_value: "false" description: Remove anonymous volumes associated with the container + details_url: '#volumes' deprecated: false hidden: false experimental: false @@ -50,6 +53,95 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Remove a container + + This removes the container referenced under the link `/redis`. + + ```console + $ docker rm /redis + + /redis + ``` + + ### Remove a link specified with `--link` on the default bridge network (--link) {#link} + + This removes the underlying link between `/webapp` and the `/redis` + containers on the default bridge network, removing all network communication + between the two containers. This does not apply when `--link` is used with + user-specified networks. + + ```console + $ docker rm --link /webapp/redis + + /webapp/redis + ``` + + ### Force-remove a running container (--force) {#force} + + This command force-removes a running container. + + ```console + $ docker rm --force redis + + redis + ``` + + The main process inside the container referenced under the link `redis` will receive + `SIGKILL`, then the container will be removed. + + ### Remove all stopped containers + + Use the [`docker container prune`](container_prune.md) command to remove all + stopped containers, or refer to the [`docker system prune`](system_prune.md) + command to remove unused containers in addition to other Docker resources, such + as (unused) images and networks. + + Alternatively, you can use the `docker ps` with the `-q` / `--quiet` option to + generate a list of container IDs to remove, and use that list as argument for + the `docker rm` command. + + Combining commands can be more flexible, but is less portable as it depends + on features provided by the shell, and the exact syntax may differ depending on + what shell is used. To use this approach on Windows, consider using PowerShell + or Bash. 
+ + The example below uses `docker ps -q` to print the IDs of all containers that + have exited (`--filter status=exited`), and removes those containers with + the `docker rm` command: + + ```console + $ docker rm $(docker ps --filter status=exited -q) + ``` + + Or, using the `xargs` Linux utility: + + ```console + $ docker ps --filter status=exited -q | xargs docker rm + ``` + + ### Remove a container and its volumes (-v, --volumes) {#volumes} + + ```console + $ docker rm --volumes redis + redis + ``` + + This command removes the container and any volumes associated with it. + Note that if a volume was specified with a name, it will not be removed. + + ### Remove a container and selectively remove volumes + + ```console + $ docker create -v awesome:/foo -v /bar --name hello redis + hello + + $ docker rm -v hello + ``` + + In this example, the volume for `/foo` remains intact, but the volume for + `/bar` is removed. The same behavior holds for volumes inherited with + `--volumes-from`. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_run.yaml b/data/engine-cli/docker_container_run.yaml index d7ef6c549..7a37ec729 100644 --- a/data/engine-cli/docker_container_run.yaml +++ b/data/engine-cli/docker_container_run.yaml @@ -1,7 +1,11 @@ command: docker container run aliases: docker container run, docker run short: Create and run a new container from an image -long: See [docker run](run.md) for more information. +long: |- + The `docker run` command runs a command in a new container, pulling the image if needed and starting the container. + + You can restart a stopped container with all its previous changes intact using `docker start`. + Use `docker ps -a` to view a list of all containers, including those that are stopped. usage: docker container run [OPTIONS] IMAGE [COMMAND] [ARG...] 
pname: docker container plink: docker_container.yaml @@ -9,6 +13,7 @@ options: - option: add-host value_type: list description: Add a custom host-to-IP mapping (host:ip) + details_url: '#add-host' deprecated: false hidden: false experimental: false @@ -31,6 +36,7 @@ options: shorthand: a value_type: list description: Attach to STDIN, STDOUT or STDERR + details_url: '#attach' deprecated: false hidden: false experimental: false @@ -79,6 +85,7 @@ options: - option: cgroup-parent value_type: string description: Optional parent cgroup for the container + details_url: '#cgroup-parent' deprecated: false hidden: false experimental: false @@ -103,6 +110,7 @@ options: - option: cidfile value_type: string description: Write the container ID to the file + details_url: '#cidfile' deprecated: false hidden: false experimental: false @@ -217,6 +225,7 @@ options: value_type: bool default_value: "false" description: Run container in background and print container ID + details_url: '#detach' deprecated: false hidden: false experimental: false @@ -226,6 +235,7 @@ options: - option: detach-keys value_type: string description: Override the key sequence for detaching a container + details_url: '#detach-keys' deprecated: false hidden: false experimental: false @@ -235,6 +245,7 @@ options: - option: device value_type: list description: Add a host device to the container + details_url: '#device' deprecated: false hidden: false experimental: false @@ -244,6 +255,7 @@ options: - option: device-cgroup-rule value_type: list description: Add a rule to the cgroup allowed devices list + details_url: '#device-cgroup-rule' deprecated: false hidden: false experimental: false @@ -358,6 +370,7 @@ options: shorthand: e value_type: list description: Set environment variables + details_url: '#env' deprecated: false hidden: false experimental: false @@ -385,6 +398,7 @@ options: - option: gpus value_type: gpu-request description: GPU devices to add to the container ('all' to pass all GPUs) + details_url: '#gpus' deprecated: false hidden: false min_api_version: "1.40" @@ -430,6 +444,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: health-start-interval + value_type: duration + default_value: 0s + description: | + Time between running the check during the start period (ms|s|m|h) (default 0s) + deprecated: false + hidden: false + min_api_version: "1.44" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: health-start-period value_type: duration default_value: 0s @@ -477,6 +503,7 @@ options: default_value: "false" description: | Run an init inside the container that forwards signals and reaps processes + details_url: '#init' deprecated: false hidden: false min_api_version: "1.25" @@ -489,6 +516,7 @@ options: value_type: bool default_value: "false" description: Keep STDIN open even if not attached + details_url: '#interactive' deprecated: false hidden: false experimental: false @@ -538,6 +566,7 @@ options: - option: ipc value_type: string description: IPC mode to use + details_url: '#ipc' deprecated: false hidden: false experimental: false @@ -547,6 +576,7 @@ options: - option: isolation value_type: string description: Container isolation technology + details_url: '#isolation' deprecated: false hidden: false experimental: false @@ -567,6 +597,7 @@ options: shorthand: l value_type: list description: Set meta data on a container + details_url: '#label' deprecated: false hidden: false experimental: false @@ -603,6 +634,7 @@ options: - option: log-driver value_type: 
string description: Logging driver for the container + details_url: '#log-driver' deprecated: false hidden: false experimental: false @@ -632,6 +664,7 @@ options: value_type: bytes default_value: "0" description: Memory limit + details_url: '#memory' deprecated: false hidden: false experimental: false @@ -672,6 +705,7 @@ options: - option: mount value_type: mount description: Attach a filesystem mount to the container + details_url: '#mount' deprecated: false hidden: false experimental: false @@ -681,6 +715,7 @@ options: - option: name value_type: string description: Assign a name to the container + details_url: '#name' deprecated: false hidden: false experimental: false @@ -708,6 +743,7 @@ options: - option: network value_type: network description: Connect a container to a network + details_url: '#network' deprecated: false hidden: false experimental: false @@ -756,6 +792,7 @@ options: - option: pid value_type: string description: PID namespace to use + details_url: '#pid' deprecated: false hidden: false experimental: false @@ -786,6 +823,7 @@ options: value_type: bool default_value: "false" description: Give extended privileges to this container + details_url: '#privileged' deprecated: false hidden: false experimental: false @@ -796,6 +834,7 @@ options: shorthand: p value_type: list description: Publish a container's port(s) to the host + details_url: '#publish' deprecated: false hidden: false experimental: false @@ -807,6 +846,7 @@ options: value_type: bool default_value: "false" description: Publish all exposed ports to random ports + details_url: '#publish-all' deprecated: false hidden: false experimental: false @@ -817,6 +857,7 @@ options: value_type: string default_value: missing description: Pull image before running (`always`, `missing`, `never`) + details_url: '#pull' deprecated: false hidden: false experimental: false @@ -838,6 +879,7 @@ options: value_type: bool default_value: "false" description: Mount the container's root filesystem as read only + details_url: '#read-only' deprecated: false hidden: false experimental: false @@ -848,6 +890,7 @@ options: value_type: string default_value: "no" description: Restart policy to apply when a container exits + details_url: '#restart' deprecated: false hidden: false experimental: false @@ -858,6 +901,7 @@ options: value_type: bool default_value: "false" description: Automatically remove the container when it exits + details_url: '#rm' deprecated: false hidden: false experimental: false @@ -876,6 +920,7 @@ options: - option: security-opt value_type: list description: Security Options + details_url: '#security-opt' deprecated: false hidden: false experimental: false @@ -905,6 +950,7 @@ options: - option: stop-signal value_type: string description: Signal to stop the container + details_url: '#stop-signal' deprecated: false hidden: false experimental: false @@ -915,6 +961,7 @@ options: value_type: int default_value: "0" description: Timeout (in seconds) to stop a container + details_url: '#stop-timeout' deprecated: false hidden: false min_api_version: "1.25" @@ -925,6 +972,7 @@ options: - option: storage-opt value_type: list description: Storage driver options for the container + details_url: '#storage-opt' deprecated: false hidden: false experimental: false @@ -935,6 +983,7 @@ options: value_type: map default_value: map[] description: Sysctl options + details_url: '#sysctl' deprecated: false hidden: false experimental: false @@ -944,6 +993,7 @@ options: - option: tmpfs value_type: list description: Mount a tmpfs directory + details_url: 
'#tmpfs' deprecated: false hidden: false experimental: false @@ -955,6 +1005,7 @@ options: value_type: bool default_value: "false" description: Allocate a pseudo-TTY + details_url: '#tty' deprecated: false hidden: false experimental: false @@ -965,6 +1016,7 @@ options: value_type: ulimit default_value: '[]' description: Ulimit options + details_url: '#ulimit' deprecated: false hidden: false experimental: false @@ -993,6 +1045,7 @@ options: - option: uts value_type: string description: UTS namespace to use + details_url: '#uts' deprecated: false hidden: false experimental: false @@ -1003,6 +1056,7 @@ options: shorthand: v value_type: list description: Bind mount a volume + details_url: '#volume' deprecated: false hidden: false experimental: false @@ -1021,6 +1075,7 @@ options: - option: volumes-from value_type: list description: Mount volumes from the specified container(s) + details_url: '#volumes-from' deprecated: false hidden: false experimental: false @@ -1031,12 +1086,1380 @@ options: shorthand: w value_type: string description: Working directory inside the container + details_url: '#workdir' deprecated: false hidden: false experimental: false experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Assign name (--name) {#name} + + The `--name` flag lets you specify a custom identifier for a container. The + following example runs a container named `test` using the `nginx:alpine` image + in [detached mode](#detach). + + ```console + $ docker run --name test -d nginx:alpine + 4bed76d3ad428b889c56c1ecc2bf2ed95cb08256db22dc5ef5863e1d03252a19 + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 4bed76d3ad42 nginx:alpine "/docker-entrypoint.…" 1 second ago Up Less than a second 80/tcp test + ``` + + You can reference the container by name with other commands. For example, the + following commands stop and remove a container named `test`: + + ```console + $ docker stop test + test + $ docker rm test + test + ``` + + If you don't specify a custom name using the `--name` flag, the daemon assigns + a randomly generated name, such as `vibrant_cannon`, to the container. Using a + custom-defined name provides the benefit of having an easy-to-remember ID for a + container. + + Moreover, if you connect the container to a user-defined bridge network, other + containers on the same network can refer to the container by name via DNS. + + ```console + $ docker network create mynet + cb79f45948d87e389e12013fa4d969689ed2c3316985dd832a43aaec9a0fe394 + $ docker run --name test --net mynet -d nginx:alpine + 58df6ecfbc2ad7c42d088ed028d367f9e22a5f834d7c74c66c0ab0485626c32a + $ docker run --net mynet busybox:latest ping test + PING test (172.18.0.2): 56 data bytes + 64 bytes from 172.18.0.2: seq=0 ttl=64 time=0.073 ms + 64 bytes from 172.18.0.2: seq=1 ttl=64 time=0.411 ms + 64 bytes from 172.18.0.2: seq=2 ttl=64 time=0.319 ms + 64 bytes from 172.18.0.2: seq=3 ttl=64 time=0.383 ms + ... + ``` + + ### Capture container ID (--cidfile) {#cidfile} + + To help with automation, you can have Docker write the container ID out to a + file of your choosing. This is similar to how some programs might write out + their process ID to a file (you might've seen them as PID files): + + ```console + $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" + ``` + + This creates a container and prints `test` to the console. The `cidfile` + flag makes Docker attempt to create a new file and write the container ID to it. + If the file exists already, Docker returns an error. 
Docker closes this + file when `docker run` exits. + + ### PID settings (--pid) {#pid} + + ```text + --pid="" : Set the PID (Process) Namespace mode for the container, + 'container:<name|id>': joins another container's PID namespace + 'host': use the host's PID namespace inside the container + ``` + + By default, all containers have the PID namespace enabled. + + PID namespace provides separation of processes. The PID Namespace removes the + view of the system processes, and allows process ids to be reused including + PID 1. + + In certain cases you want your container to share the host's process namespace, + allowing processes within the container to see all of the processes on the + system. For example, you could build a container with debugging tools like + `strace` or `gdb`, but want to use these tools when debugging processes within + the container. + + #### Example: run htop inside a container + + To run `htop` in a container that shares the process namespace of the host: + + 1. Run an alpine container with the `--pid=host` option: + + ```console + $ docker run --rm -it --pid=host alpine + ``` + + 2. Install `htop` in the container: + + ```console + / # apk add htop + fetch https://dl-cdn.alpinelinux.org/alpine/v3.18/main/aarch64/APKINDEX.tar.gz + fetch https://dl-cdn.alpinelinux.org/alpine/v3.18/community/aarch64/APKINDEX.tar.gz + (1/3) Installing ncurses-terminfo-base (6.4_p20230506-r0) + (2/3) Installing libncursesw (6.4_p20230506-r0) + (3/3) Installing htop (3.2.2-r1) + Executing busybox-1.36.1-r2.trigger + OK: 9 MiB in 18 packages + ``` + + 3. Invoke the `htop` command. + + ```console + / # htop + ``` + + #### Example: join another container's PID namespace + + Joining another container's PID namespace can be useful for debugging that + container. + + 1. Start a container running an nginx server: + + ```console + $ docker run --rm --name my-nginx -d nginx:alpine + ``` + + 2. Run an Alpine container that attaches the `--pid` namespace to the + `my-nginx` container: + + ```console + $ docker run --rm -it --pid=container:my-nginx \ + --cap-add SYS_PTRACE \ + --security-opt seccomp=unconfined \ + alpine + ``` + + 3. Install `strace` in the Alpine container: + + ```console + / # apk add strace + ``` + + 4. Attach to process 1, the process ID of the `my-nginx` container: + + ```console + / # strace -p 1 + strace: Process 1 attached + ``` + + ### UTS settings (--uts) {#uts} + + ```text + --uts="" : Set the UTS namespace mode for the container + 'host': use the host's UTS namespace inside the container + ``` + + The UTS namespace is for setting the hostname and the domain that's visible to + running processes in that namespace. By default, all containers, including + those with `--network=host`, have their own UTS namespace. Setting `--uts` to + `host` results in the container using the same UTS namespace as the host. + + > **Note** + > + > Docker disallows combining the `--hostname` and `--domainname` flags with + > `--uts=host`. This is to prevent containers running in the host's UTS + > namespace from attempting to change the hosts' configuration. + + You may wish to share the UTS namespace with the host if you would like the + hostname of the container to change as the hostname of the host changes. A more + advanced use case would be changing the host's hostname from a container.
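+
+ The following is a minimal sketch of that behavior (using the `alpine` image purely as an
+ example): because the container joins the host's UTS namespace, running `hostname` inside
+ the container reports the host's hostname rather than a generated container hostname.
+
+ ```console
+ $ docker run --rm --uts=host alpine hostname
+ ```
+
+ Running the same command without `--uts=host` would instead print the container's own
+ (randomly generated) hostname.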
+ + ### IPC settings (--ipc) {#ipc} + + ```text + --ipc="MODE" : Set the IPC mode for the container + ``` + + The `--ipc` flag accepts the following values: + + | Value | Description | + |:---------------------------|:----------------------------------------------------------------------------------| + | "" | Use daemon's default. | + | "none" | Own private IPC namespace, with /dev/shm not mounted. | + | "private" | Own private IPC namespace. | + | "shareable" | Own private IPC namespace, with a possibility to share it with other containers. | + | "container:<_name-or-ID_>" | Join another ("shareable") container's IPC namespace. | + | "host" | Use the host system's IPC namespace. | + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on the daemon version and configuration. + + [System V interprocess communication (IPC)](https://linux.die.net/man/5/ipc) + namespaces provide separation of named shared memory segments, semaphores and + message queues. + + Shared memory segments are used to accelerate inter-process communication at + memory speed, rather than through pipes or through the network stack. Shared + memory is commonly used by databases and custom-built (typically C/OpenMPI, + C++/using boost libraries) high performance applications for scientific + computing and financial services industries. If these types of applications + are broken into multiple containers, you might need to share the IPC mechanisms + of the containers, using `"shareable"` mode for the main (i.e. "donor") + container, and `"container:"` for other containers. + + ### Full container capabilities (--privileged) {#privileged} + + The following example doesn't work, because by default, Docker drops most + potentially dangerous kernel capabilities, including `CAP_SYS_ADMIN ` (which is + required to mount filesystems). + + ```console + $ docker run -t -i --rm ubuntu bash + root@bc338942ef20:/# mount -t tmpfs none /mnt + mount: permission denied + ``` + + It works when you add the `--privileged` flag: + + ```console + $ docker run -t -i --privileged ubuntu bash + root@50e3f57e16e6:/# mount -t tmpfs none /mnt + root@50e3f57e16e6:/# df -h + Filesystem Size Used Avail Use% Mounted on + none 1.9G 0 1.9G 0% /mnt + ``` + + The `--privileged` flag gives all capabilities to the container, and it also + lifts all the limitations enforced by the `device` cgroup controller. In other + words, the container can then do almost everything that the host can do. This + flag exists to allow special use-cases, like running Docker within Docker. + + ### Set working directory (-w, --workdir) {#workdir} + + ```console + $ docker run -w /path/to/dir/ -i -t ubuntu pwd + ``` + + The `-w` option runs the command executed inside the directory specified, in this example, + `/path/to/dir/`. If the path doesn't exist, Docker creates it inside the container. + + ### Set storage driver options per container (--storage-opt) {#storage-opt} + + ```console + $ docker run -it --storage-opt size=120G fedora /bin/bash + ``` + + This (size) constraints the container filesystem size to 120G at creation time. + This option is only available for the `btrfs`, `overlay2`, `windowsfilter`, + and `zfs` storage drivers. + + For the `overlay2` storage driver, the size option is only available if the + backing filesystem is `xfs` and mounted with the `pquota` mount option. + Under these conditions, you can pass any size less than the backing filesystem size. 
+ + For the `windowsfilter`, `btrfs`, and `zfs` storage drivers, you cannot pass a + size less than the Default BaseFS Size. + + ### Mount tmpfs (--tmpfs) {#tmpfs} + + The `--tmpfs` flag lets you create a `tmpfs` mount. + + The options that you can pass to `--tmpfs` are identical to the Linux `mount -t + tmpfs -o` command. The following example mounts an empty `tmpfs` into the + container with the `rw`, `noexec`, `nosuid`, `size=65536k` options. + + ```console + $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image + ``` + + For more information, see [tmpfs mounts](/storage/tmpfs/). + + ### Mount volume (-v) {#volume} + + ```console + $ docker run -v $(pwd):$(pwd) -w $(pwd) -i -t ubuntu pwd + ``` + + The example above mounts the current directory into the container at the same path + using the `-v` flag, sets it as the working directory, and then runs the `pwd` command inside the container. + + As of Docker Engine version 23, you can use relative paths on the host. + + ```console + $ docker run -v ./content:/content -w /content -i -t ubuntu pwd + ``` + + The example above mounts the `content` directory in the current directory into the container at the + `/content` path using the `-v` flag, sets it as the working directory, and then + runs the `pwd` command inside the container. + + ```console + $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash + ``` + + When the host directory of a bind-mounted volume doesn't exist, Docker + automatically creates this directory on the host for you. In the + example above, Docker creates the `/doesnt/exist` + folder before starting your container. + + ### Mount volume read-only (--read-only) {#read-only} + + ```console + $ docker run --read-only -v /icanwrite busybox touch /icanwrite/here + ``` + + You can use volumes in combination with the `--read-only` flag to control where + a container writes files. The `--read-only` flag mounts the container's root + filesystem as read only prohibiting writes to locations other than the + specified volumes for the container. + + ```console + $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh + ``` + + By bind-mounting the Docker Unix socket and statically linked Docker + binary (refer to [get the Linux binary](/engine/install/binaries/#install-static-binaries)), + you give the container the full access to create and manipulate the host's + Docker daemon. + + On Windows, you must specify the paths using Windows-style path semantics. + + ```powershell + PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt + Contents of file + + PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt + Contents of file + ``` + + The following examples fails when using Windows-based containers, as the + destination of a volume or bind mount inside the container must be one of: + a non-existing or empty directory; or a drive other than `C:`. Further, the source + of a bind mount must be a local directory, not a file. + + ```powershell + net use z: \\remotemachine\share + docker run -v z:\foo:c:\dest ... + docker run -v \\uncpath\to\directory:c:\dest ... + docker run -v c:\foo\somefile.txt:c:\dest ... + docker run -v c:\foo:c: ... + docker run -v c:\foo:c:\existing-directory-with-contents ... 
+ ``` + + For in-depth information about volumes, refer to [manage data in containers](/storage/volumes/) + + ### Add bind mounts or volumes using the --mount flag {#mount} + + The `--mount` flag allows you to mount volumes, host-directories, and `tmpfs` + mounts in a container. + + The `--mount` flag supports most options supported by the `-v` or the + `--volume` flag, but uses a different syntax. For in-depth information on the + `--mount` flag, and a comparison between `--volume` and `--mount`, refer to + [Bind mounts](/storage/bind-mounts/). + + Even though there is no plan to deprecate `--volume`, usage of `--mount` is recommended. + + Examples: + + ```console + $ docker run --read-only --mount type=volume,target=/icanwrite busybox touch /icanwrite/here + ``` + + ```console + $ docker run -t -i --mount type=bind,src=/data,dst=/data busybox sh + ``` + + ### Publish or expose port (-p, --expose) {#publish} + + ```console + $ docker run -p 127.0.0.1:80:8080/tcp nginx:alpine + ``` + + This binds port `8080` of the container to TCP port `80` on `127.0.0.1` of the + host. You can also specify `udp` and `sctp` ports. The [Networking overview + page](/network/) explains in detail how to publish ports + with Docker. + + > **Note** + > + > If you don't specify an IP address (i.e., `-p 80:80` instead of `-p + > 127.0.0.1:80:80`) when publishing a container's ports, Docker publishes the + > port on all interfaces (address `0.0.0.0`) by default. These ports are + > externally accessible. This also applies if you configured UFW to block this + > specific port, as Docker manages its own iptables rules. [Read + > more](/network/packet-filtering-firewalls/) + + ```console + $ docker run --expose 80 nginx:alpine + ``` + + This exposes port `80` of the container without publishing the port to the host + system's interfaces. + + ### Publish all exposed ports (-P, --publish-all) {#publish-all} + + ```console + $ docker run -P nginx:alpine + ``` + + The `-P`, or `--publish-all`, flag publishes all the exposed ports to the host. + Docker binds each exposed port to a random port on the host. + + The `-P` flag only publishes port numbers that are explicitly flagged as + exposed, either using the Dockerfile `EXPOSE` instruction or the `--expose` + flag for the `docker run` command. + + The range of ports are within an *ephemeral port range* defined by + `/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to explicitly map a + single port or range of ports. + + ### Set the pull policy (--pull) {#pull} + + Use the `--pull` flag to set the image pull policy when creating (and running) + the container. + + The `--pull` flag can take one of these values: + + | Value | Description | + |:--------------------|:------------------------------------------------------------------------------------------------------------------| + | `missing` (default) | Pull the image if it was not found in the image cache, or use the cached image otherwise. | + | `never` | Do not pull the image, even if it's missing, and produce an error if the image does not exist in the image cache. | + | `always` | Always perform a pull before creating the container. | + + When creating (and running) a container from an image, the daemon checks if the + image exists in the local image cache. If the image is missing, an error is + returned to the CLI, allowing it to initiate a pull. + + The default (`missing`) is to only pull the image if it's not present in the + daemon's image cache. 
This default allows you to run images that only exist
+ locally (for example, images you built from a Dockerfile, but that have not
+ been pushed to a registry), and reduces networking.
+
+ The `always` option always initiates a pull before creating the container. This
+ option makes sure the image is up-to-date, and prevents you from using outdated
+ images, but may not be suitable in situations where you want to test a locally
+ built image before pushing (as pulling the image overwrites the existing image
+ in the image cache).
+
+ The `never` option disables (implicit) pulling images when creating containers,
+ and only uses images that are available in the image cache. If the specified
+ image is not found, an error is produced, and the container is not created.
+ This option is useful in situations where networking is not available, or to
+ prevent images from being pulled implicitly when creating containers.
+
+ The following example shows `docker run` with the `--pull=never` option set,
+ which produces an error as the image is missing from the image cache:
+
+ ```console
+ $ docker run --pull=never hello-world
+ docker: Error response from daemon: No such image: hello-world:latest.
+ ```
+
+ ### Set environment variables (-e, --env, --env-file) {#env}
+
+ ```console
+ $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
+ ```
+
+ Use the `-e`, `--env`, and `--env-file` flags to set simple (non-array)
+ environment variables in the container you're running, or overwrite variables
+ defined in the Dockerfile of the image you're running.
+
+ You can define the variable and its value when running the container:
+
+ ```console
+ $ docker run --env VAR1=value1 --env VAR2=value2 ubuntu env | grep VAR
+ VAR1=value1
+ VAR2=value2
+ ```
+
+ You can also use variables exported to your local environment:
+
+ ```console
+ export VAR1=value1
+ export VAR2=value2
+
+ $ docker run --env VAR1 --env VAR2 ubuntu env | grep VAR
+ VAR1=value1
+ VAR2=value2
+ ```
+
+ When running the command, the Docker CLI client checks the value the variable
+ has in your local environment and passes it to the container.
+ If no `=` is provided and that variable isn't exported in your local
+ environment, the variable is unset in the container.
+
+ You can also load the environment variables from a file. This file should use
+ the syntax `<variable>=value` (which sets the variable to the given value) or
+ `<variable>` (which takes the value from the local environment), and `#` for
+ comments. Lines beginning with `#` are treated as line comments and are
+ ignored, whereas a `#` appearing anywhere else in a line is treated as part of
+ the variable value.
+
+ ```console
+ $ cat env.list
+ # This is a comment
+ VAR1=value1
+ VAR2=value2
+ USER
+
+ $ docker run --env-file env.list ubuntu env | grep -E 'VAR|USER'
+ VAR1=value1
+ VAR2=value2
+ USER=jonzeolla
+ ```
+
+ ### Set metadata on container (-l, --label, --label-file) {#label}
+
+ A label is a `key=value` pair that applies metadata to a container. To label a container with two labels:
+
+ ```console
+ $ docker run -l my-label --label com.example.foo=bar ubuntu bash
+ ```
+
+ The `my-label` key doesn't specify a value so the label defaults to an empty
+ string (`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
+
+ The `key=value` must be unique to avoid overwriting the label value. If you
+ specify labels with identical keys but different values, each subsequent value
+ overwrites the previous. Docker uses the last `key=value` you supply.
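+
+ As a brief sketch of this behavior (the container name and label values are
+ placeholders), you can confirm which value wins using `docker inspect`:
+
+ ```console
+ $ docker run -d --name labeled --label foo=first --label foo=second busybox top
+ $ docker inspect --format '{{ index .Config.Labels "foo" }}' labeled
+ second
+ ```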
+ + Use the `--label-file` flag to load multiple labels from a file. Delimit each + label in the file with an EOL mark. The example below loads labels from a + labels file in the current directory: + + ```console + $ docker run --label-file ./labels ubuntu bash + ``` + + The label-file format is similar to the format for loading environment + variables. (Unlike environment variables, labels are not visible to processes + running inside a container.) The following example shows a label-file + format: + + ```console + com.example.label1="a label" + + # this is a comment + com.example.label2=another\ label + com.example.label3 + ``` + + You can load multiple label-files by supplying multiple `--label-file` flags. + + For additional information on working with labels, see + [Labels](/config/labels-custom-metadata/). + + ### Connect a container to a network (--network) {#network} + + To start a container and connect it to a network, use the `--network` option. + + The following commands create a network named `my-net` and adds a `busybox` container + to the `my-net` network. + + ```console + $ docker network create my-net + $ docker run -itd --network=my-net busybox + ``` + + You can also choose the IP addresses for the container with `--ip` and `--ip6` + flags when you start the container on a user-defined network. To assign a + static IP to containers, you must specify subnet block for the network. + + ```console + $ docker network create --subnet 192.0.2.0/24 my-net + $ docker run -itd --network=my-net --ip=192.0.2.69 busybox + ``` + + If you want to add a running container to a network use the `docker network connect` subcommand. + + You can connect multiple containers to the same network. Once connected, the + containers can communicate using only another container's IP address + or name. For `overlay` networks or custom plugins that support multi-host + connectivity, containers connected to the same multi-host network but launched + from different Engines can also communicate in this way. + + > **Note** + > + > The default bridge network only allow containers to communicate with each other using + > internal IP addresses. User-created bridge networks provide DNS resolution between + > containers using container names. + + You can disconnect a container from a network using the `docker network + disconnect` command. + + For more information on connecting a container to a network when using the `run` command, see the ["*Docker network overview*"](/network/). + + ### Mount volumes from container (--volumes-from) {#volumes-from} + + ```console + $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd + ``` + + The `--volumes-from` flag mounts all the defined volumes from the referenced + containers. You can specify more than one container by repetitions of the `--volumes-from` + argument. The container ID may be optionally suffixed with `:ro` or `:rw` to + mount the volumes in read-only or read-write mode, respectively. By default, + Docker mounts the volumes in the same mode (read write or read only) as + the reference container. + + Labeling systems like SELinux require placing proper labels on volume + content mounted into a container. Without a label, the security system might + prevent the processes running inside the container from using the content. By + default, Docker does not change the labels set by the OS. + + To change the label in the container context, you can add either of two suffixes + `:z` or `:Z` to the volume mount. 
These suffixes tell Docker to relabel file + objects on the shared volumes. The `z` option tells Docker that two containers + share the volume content. As a result, Docker labels the content with a shared + content label. Shared volume labels allow all containers to read/write content. + The `Z` option tells Docker to label the content with a private unshared label. + Only the current container can use a private volume. + + ### Detached mode (-d, --detach) {#detach} + + The `--detach` (or `-d`) flag starts a container as a background process that + doesn't occupy your terminal window. By design, containers started in detached + mode exit when the root process used to run the container exits, unless you + also specify the `--rm` option. If you use `-d` with `--rm`, the container is + removed when it exits or when the daemon exits, whichever happens first. + + Don't pass a `service x start` command to a detached container. For example, + this command attempts to start the `nginx` service. + + ```console + $ docker run -d -p 80:80 my_image service nginx start + ``` + + This succeeds in starting the `nginx` service inside the container. However, it + fails the detached container paradigm in that, the root process (`service nginx + start`) returns and the detached container stops as designed. As a result, the + `nginx` service starts but can't be used. Instead, to start a process such as + the `nginx` web server do the following: + + ```console + $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' + ``` + + To do input/output with a detached container use network connections or shared + volumes. These are required because the container is no longer listening to the + command line where `docker run` was run. + + ### Override the detach sequence (--detach-keys) {#detach-keys} + + Use the `--detach-keys` option to override the Docker key sequence for detach. + This is useful if the Docker default sequence conflicts with key sequence you + use for other applications. There are two ways to define your own detach key + sequence, as a per-container override or as a configuration property on your + entire configuration. + + To override the sequence for an individual container, use the + `--detach-keys=""` flag with the `docker attach` command. The format of + the `` is either a letter [a-Z], or the `ctrl-` combined with any of + the following: + + * `a-z` (a single lowercase alpha character ) + * `@` (at sign) + * `[` (left bracket) + * `\\` (two backward slashes) + * `_` (underscore) + * `^` (caret) + + These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key + sequences. To configure a different configuration default key sequence for all + containers, see [**Configuration file** section](cli.md#configuration-files). + + ### Add host device to container (--device) {#device} + + ```console + $ docker run -it --rm \ + --device=/dev/sdc:/dev/xvdc \ + --device=/dev/sdd \ + --device=/dev/zero:/dev/foobar \ + ubuntu ls -l /dev/{xvdc,sdd,foobar} + + brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc + brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd + crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/foobar + ``` + + It's often necessary to directly expose devices to a container. The `--device` + option enables that. For example, adding a specific block storage device or loop + device or audio device to an otherwise unprivileged container + (without the `--privileged` flag) and have the application directly access it. 
+
+ By default, the container is able to `read`, `write` and `mknod` these devices.
+ This can be overridden using a third `:rwm` set of options to each `--device`
+ flag. If the container is running in privileged mode, then Docker ignores the
+ specified permissions.
+
+ ```console
+ $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc
+
+ Command (m for help): q
+ $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc
+ You will not be able to write the partition table.
+
+ Command (m for help): q
+
+ $ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc
+
+ Command (m for help): q
+
+ $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc
+ fdisk: unable to open /dev/xvdc: Operation not permitted
+ ```
+
+ > **Note**
+ >
+ > The `--device` option cannot be safely used with ephemeral devices. You shouldn't
+ > add block devices that may be removed to untrusted containers with `--device`.
+
+ For Windows, the format of the string passed to the `--device` option is in
+ the form of `--device=<IdType>/<Id>`. Beginning with Windows Server 2019
+ and Windows 10 October 2018 Update, Windows only supports an IdType of
+ `class` and the Id as a [device interface class
+ GUID](https://docs.microsoft.com/en-us/windows-hardware/drivers/install/overview-of-device-interface-classes).
+ Refer to the table defined in the [Windows container
+ docs](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/hardware-devices-in-containers)
+ for a list of container-supported device interface class GUIDs.
+
+ If you specify this option for a process-isolated Windows container, Docker makes
+ _all_ devices that implement the requested device interface class GUID
+ available in the container. For example, the command below makes all COM
+ ports on the host visible in the container.
+
+ ```powershell
+ PS C:\> docker run --device=class/86E0D1E0-8089-11D0-9CE4-08003E301F73 mcr.microsoft.com/windows/servercore:ltsc2019
+ ```
+
+ > **Note**
+ >
+ > The `--device` option is only supported on process-isolated Windows containers,
+ > and produces an error if the container isolation is `hyperv`.
+
+ #### CDI devices
+
+ > **Note**
+ >
+ > This is an experimental feature and as such doesn't represent a stable API.
+
+ Container Device Interface (CDI) is a
+ [standardized](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md)
+ mechanism for container runtimes to create containers which are able to
+ interact with third-party devices.
+
+ With CDI, device configurations are defined using a JSON file. In addition to
+ enabling the container to interact with the device node, it also lets you
+ specify additional configuration for the device, such as kernel modules, host
+ libraries, and environment variables. A minimal sketch of such a file is shown
+ at the end of this section.
+
+ You can reference a CDI device with the `--device` flag using the
+ fully-qualified name of the device, as shown in the following example:
+
+ ```console
+ $ docker run --device=vendor.com/class=device-name --rm -it ubuntu
+ ```
+
+ This starts an `ubuntu` container with access to the specified CDI device,
+ `vendor.com/class=device-name`, assuming that:
+
+ - A valid CDI specification (JSON file) for the requested device is available
+ on the system running the daemon, in one of the configured CDI specification
+ directories.
+ - The CDI feature has been enabled on the daemon side, see [Enable CDI
+ devices](dockerd.md#enable-cdi-devices).
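+
+ For illustration only, a CDI specification for the device referenced above
+ might look roughly like the following. The field names follow the CDI
+ specification linked above; the file name, device node path, and environment
+ variable are made-up placeholders, and `/etc/cdi` is one of the default
+ specification directories:
+
+ ```console
+ $ cat /etc/cdi/vendor.com-class.json
+ {
+   "cdiVersion": "0.6.0",
+   "kind": "vendor.com/class",
+   "devices": [
+     {
+       "name": "device-name",
+       "containerEdits": {
+         "deviceNodes": [{ "path": "/dev/vendor-device0" }],
+         "env": ["VENDOR_DEVICE=0"]
+       }
+     }
+   ]
+ }
+ ```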
+ + ### Attach to STDIN/STDOUT/STDERR (-a, --attach) {#attach} + + The `--attach` (or `-a`) flag tells `docker run` to bind to the container's + `STDIN`, `STDOUT` or `STDERR`. This makes it possible to manipulate the output + and input as needed. You can specify to which of the three standard streams + (`STDIN`, `STDOUT`, `STDERR`) you'd like to connect instead, as in: + + ```console + $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + ``` + + The following example pipes data into a container and prints the container's ID + by attaching only to the container's `STDIN`. + + ```console + $ echo "test" | docker run -i -a stdin ubuntu cat - + ``` + + The following example doesn't print anything to the console unless there's an + error because output is only attached to the `STDERR` of the container. The + container's logs still store what's written to `STDERR` and `STDOUT`. + + ```console + $ docker run -a stderr ubuntu echo test + ``` + + The following example shows a way of using `--attach` to pipe a file into a + container. The command prints the container's ID after the build completes and + you can retrieve the build logs using `docker logs`. This is useful if you need + to pipe a file or something else into a container and retrieve the container's + ID once the container has finished running. + + ```console + $ cat somefile | docker run -i -a stdin mybuilder dobuild + ``` + + > **Note** + > + > A process running as PID 1 inside a container is treated specially by + > Linux: it ignores any signal with the default action. So, the process + > doesn't terminate on `SIGINT` or `SIGTERM` unless it's coded to do so. + + See also [the `docker cp` command](container_cp.md). + + ### Keep STDIN open (-i, --interactive) {#interactive} + + The `--interactive` (or `-i`) flag keeps the container's `STDIN` open, and lets + you send input to the container through standard input. + + ```console + $ echo hello | docker run --rm -i busybox cat + hello + ``` + + The `-i` flag is most often used together with the `--tty` flag to bind the I/O + streams of the container to a pseudo terminal, creating an interactive terminal + session for the container. See [Allocate a pseudo-TTY](#tty) for more examples. + + ```console + $ docker run -it debian + root@10a3e71492b0:/# factor 90 + 90: 2 3 3 5 + root@10a3e71492b0:/# exit + exit + ``` + + Using the `-i` flag on its own allows for composition, such as piping input to + containers: + + ```console + $ docker run --rm -i busybox echo "foo bar baz" \ + | docker run --rm -i busybox awk '{ print $2 }' \ + | docker run --rm -i busybox rev + rab + ``` + + ### Specify an init process {#init} + + You can use the `--init` flag to indicate that an init process should be used as + the PID 1 in the container. Specifying an init process ensures the usual + responsibilities of an init system, such as reaping zombie processes, are + performed inside the created container. + + The default init process used is the first `docker-init` executable found in the + system path of the Docker daemon process. This `docker-init` binary, included in + the default installation, is backed by [tini](https://github.com/krallin/tini). + + ### Allocate a pseudo-TTY (-t, --tty) {#tty} + + The `--tty` (or `-t`) flag attaches a pseudo-TTY to the container, connecting + your terminal to the I/O streams of the container. Allocating a pseudo-TTY to + the container means that you get access to input and output feature that TTY + devices provide. 
+
+ For example, the following command runs the `passwd` command in a `debian`
+ container, to set a new password for the `root` user.
+
+ ```console
+ $ docker run -i debian passwd root
+ New password: karjalanpiirakka9
+ Retype new password: karjalanpiirakka9
+ passwd: password updated successfully
+ ```
+
+ If you run this command with only the `-i` flag (which lets you send text to
+ `STDIN` of the container), the `passwd` prompt displays the password in plain
+ text. However, if you try the same thing but also add the `-t` flag, the
+ password is hidden:
+
+ ```console
+ $ docker run -i -t debian passwd root
+ New password:
+ Retype new password:
+ passwd: password updated successfully
+ ```
+
+ This is because `passwd` can suppress the output of characters to the terminal
+ using the echo-off TTY feature.
+
+ You can use the `-t` flag without the `-i` flag. This still allocates a pseudo-TTY
+ to the container, but with no way of writing to `STDIN`. The only time this
+ might be useful is if the output of the container requires a TTY environment.
+
+ ### Specify custom cgroups {#cgroup-parent}
+
+ Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a
+ container in. This allows you to create and manage cgroups on your own. You can
+ define custom resources for those cgroups and put containers under a common
+ parent group.
+
+ ### Using dynamically created devices (--device-cgroup-rule) {#device-cgroup-rule}
+
+ Docker assigns devices available to a container at creation time. The
+ assigned devices are added to the cgroup.allow file and
+ created into the container when it runs. This poses a problem when
+ you need to add a new device to a running container.
+
+ One solution is to add a more permissive rule to a container
+ allowing it access to a wider range of devices. For example, supposing
+ the container needs access to a character device with major `42` and
+ any number of minor numbers (added as new devices appear), add the
+ following rule:
+
+ ```console
+ $ docker run -d --device-cgroup-rule='c 42:* rmw' --name my-container my-image
+ ```
+
+ Then, a user could ask `udev` to execute a script that runs `docker exec my-container mknod newDevX c 42 <minor>`
+ to create the required device when it is added.
+
+ > **Note**: You still need to explicitly add initially present devices to the
+ > `docker run` / `docker create` command.
+
+ ### Access an NVIDIA GPU {#gpus}
+
+ The `--gpus` flag allows you to access NVIDIA GPU resources. First you need to
+ install the [nvidia-container-runtime](https://nvidia.github.io/nvidia-container-runtime/).
+
+ > **Note**
+ >
+ > You can also specify a GPU as a CDI device with the `--device` flag, see
+ > [CDI devices](#cdi-devices).
+
+ Read [Specify a container's resources](/config/containers/resource_constraints/)
+ for more information.
+
+ To use `--gpus`, specify which GPUs (or all) to use. If you provide no value, Docker uses all
+ available GPUs. The example below exposes all available GPUs.
+
+ ```console
+ $ docker run -it --rm --gpus all ubuntu nvidia-smi
+ ```
+
+ Use the `device` option to specify GPUs. The example below exposes a specific
+ GPU.
+
+ ```console
+ $ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi
+ ```
+
+ The example below exposes the first and third GPUs.
+
+ ```console
+ $ docker run -it --rm --gpus '"device=0,2"' ubuntu nvidia-smi
+ ```
+
+ ### Restart policies (--restart) {#restart}
+
+ Use the `--restart` flag to specify a container's *restart policy*.
A restart + policy controls whether the Docker daemon restarts a container after exit. + Docker supports the following restart policies: + + | Policy | Result | + |:---------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | `no` | Do not automatically restart the container when it exits. This is the default. | + | `on-failure[:max-retries]` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. | + | `unless-stopped` | Restart the container unless it's explicitly stopped or Docker itself is stopped or restarted. | + | `always` | Always restart the container regardless of the exit status. When you specify always, the Docker daemon tries to restart the container indefinitely. The container always starts on daemon startup, regardless of the current state of the container. | + + ```console + $ docker run --restart=always redis + ``` + + This runs the `redis` container with a restart policy of **always**. + If the container exits, Docker restarts it. + + When a restart policy is active on a container, it shows as either `Up` or + `Restarting` in [`docker ps`](container_ls.md). It can also be useful to use + [`docker events`](system_events.md) to see the restart policy in effect. + + An increasing delay (double the previous delay, starting at 100 milliseconds) + is added before each restart to prevent flooding the server. This means the + daemon waits for 100 ms, then 200 ms, 400, 800, 1600, and so on until either + the `on-failure` limit, the maximum delay of 1 minute is hit, or when you + `docker stop` or `docker rm -f` the container. + + If a container is successfully restarted (the container is started and runs + for at least 10 seconds), the delay is reset to its default value of 100 ms. + + #### Specify a limit for restart attempts + + You can specify the maximum amount of times Docker attempts to restart the + container when using the **on-failure** policy. By default, Docker never stops + attempting to restart the container. + + The following example runs the `redis` container with a restart policy of + **on-failure** and a maximum restart count of 10. + + ```console + $ docker run --restart=on-failure:10 redis + ``` + + If the `redis` container exits with a non-zero exit status more than 10 times + in a row, Docker stops trying to restart the container. Providing a maximum + restart limit is only valid for the **on-failure** policy. + + #### Inspect container restarts + + The number of (attempted) restarts for a container can be obtained using the + [`docker inspect`](inspect.md) command. For example, to get the number of + restarts for container "my-container"; + + ```console + $ docker inspect -f "{{ .RestartCount }}" my-container + 2 + ``` + + Or, to get the last time the container was (re)started; + + ```console + $ docker inspect -f "{{ .State.StartedAt }}" my-container + 2015-03-04T23:47:07.691840179Z + ``` + + Combining `--restart` (restart policy) with the `--rm` (clean up) flag results + in an error. On container restart, attached clients are disconnected. + + ### Clean up (--rm) {#rm} + + By default, a container's file system persists even after the container exits. 
+ This makes debugging a lot easier, since you can inspect the container's final + state and you retain all your data. + + If you are running short-term **foreground** processes, these container file + systems can start to pile up. If you'd like Docker to automatically clean up + the container and remove the file system when the container exits, use the + `--rm` flag: + + ```text + --rm=false: Automatically remove the container when it exits + ``` + + > **Note** + > + > If you set the `--rm` flag, Docker also removes the anonymous volumes + > associated with the container when the container is removed. This is similar + > to running `docker rm -v my-container`. Only volumes that are specified + > without a name are removed. For example, when running the following command, + > volume `/foo` is removed, but not `/bar`: + > + > ```console + > $ docker run --rm -v /foo -v awesome:/bar busybox top + > ``` + > + > Volumes inherited via `--volumes-from` are removed with the same logic: + > if the original volume was specified with a name it isn't removed. + + ### Add entries to container hosts file (--add-host) {#add-host} + + You can add other hosts into a container's `/etc/hosts` file by using one or + more `--add-host` flags. This example adds a static address for a host named + `my-hostname`: + + ```console + $ docker run --add-host=my-hostname=8.8.8.8 --rm -it alpine + + / # ping my-hostname + PING my-hostname (8.8.8.8): 56 data bytes + 64 bytes from 8.8.8.8: seq=0 ttl=37 time=93.052 ms + 64 bytes from 8.8.8.8: seq=1 ttl=37 time=92.467 ms + 64 bytes from 8.8.8.8: seq=2 ttl=37 time=92.252 ms + ^C + --- my-hostname ping statistics --- + 4 packets transmitted, 4 packets received, 0% packet loss + round-trip min/avg/max = 92.209/92.495/93.052 ms + ``` + + You can wrap an IPv6 address in square brackets: + + ```console + $ docker run --add-host my-hostname=[2001:db8::33] --rm -it alpine + ``` + + The `--add-host` flag supports a special `host-gateway` value that resolves to + the internal IP address of the host. This is useful when you want containers to + connect to services running on the host machine. + + It's conventional to use `host.docker.internal` as the hostname referring to + `host-gateway`. Docker Desktop automatically resolves this hostname, see + [Explore networking features](/desktop/networking/#i-want-to-connect-from-a-container-to-a-service-on-the-host). + + The following example shows how the special `host-gateway` value works. The + example runs an HTTP server that serves a file from host to container over the + `host.docker.internal` hostname, which resolves to the host's internal IP. + + ```console + $ echo "hello from host!" > ./hello + $ python3 -m http.server 8000 + Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ... + $ docker run \ + --add-host host.docker.internal=host-gateway \ + curlimages/curl -s host.docker.internal:8000/hello + hello from host! + ``` + + The `--add-host` flag also accepts a `:` separator, for example: + + ```console + $ docker run --add-host=my-hostname:8.8.8.8 --rm -it alpine + ``` + + ### Logging drivers (--log-driver) {#log-driver} + + The container can have a different logging driver than the Docker daemon. Use + the `--log-driver=` with the `docker run` command to configure the + container's logging driver. + + To learn about the supported logging drivers and how to use them, refer to + [Configure logging drivers](/config/containers/logging/configure/). 
+
+ To disable logging for a container, set the `--log-driver` flag to `none`:
+
+ ```console
+ $ docker run --log-driver=none -d nginx:alpine
+ 5101d3b7fe931c27c2ba0e65fd989654d297393ad65ae238f20b97a020e7295b
+ $ docker logs 5101d3b
+ Error response from daemon: configured logging driver does not support reading
+ ```
+
+ ### Set ulimits in container (--ulimit) {#ulimit}
+
+ Since adjusting `ulimit` settings in a container requires extra privileges not
+ available in the default container, you can set these using the `--ulimit` flag.
+ Specify `--ulimit` with a soft and hard limit in the format
+ `<type>=<soft limit>[:<hard limit>]`. For example:
+
+ ```console
+ $ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n"
+ 1024
+ ```
+
+ > **Note**
+ >
+ > If you don't provide a hard limit value, Docker uses the soft limit value
+ > for both values. If you don't provide any values, they are inherited from
+ > the default `ulimits` set on the daemon.
+
+ > **Note**
+ >
+ > The `as` option is deprecated.
+ > In other words, the following script is not supported:
+ >
+ > ```console
+ > $ docker run -it --ulimit as=1024 fedora /bin/bash
+ > ```
+
+ Docker sends the values to the appropriate OS `syscall` and doesn't perform any byte conversion.
+ Take this into account when setting the values.
+
+ #### For `nproc` usage
+
+ Be careful setting `nproc` with the `ulimit` flag as Linux uses `nproc` to set the
+ maximum number of processes available to a user, not to a container. For example, start four
+ containers with the `daemon` user:
+
+ ```console
+ $ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+ $ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+ $ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+ $ docker run -d -u daemon --ulimit nproc=3 busybox top
+ ```
+
+ The 4th container fails and reports a "[8] System error: resource temporarily unavailable" error.
+ This fails because the caller set `nproc=3`, resulting in the first three containers using up
+ the three-process quota set for the `daemon` user.
+
+ ### Stop container with signal (--stop-signal) {#stop-signal}
+
+ The `--stop-signal` flag sets the signal that's sent to the
+ container to exit. This signal can be a signal name in the format `SIG<NAME>`,
+ for instance `SIGKILL`, or an unsigned signal number, for instance `9`.
+
+ The default value is defined by [`STOPSIGNAL`](/engine/reference/builder/#stopsignal)
+ in the image, or `SIGTERM` if the image has no `STOPSIGNAL` defined.
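+
+ For example, the following sketch overrides the stop signal so that `docker
+ stop` sends `SIGQUIT`, which `nginx` treats as a graceful shutdown, instead of
+ the default `SIGTERM`:
+
+ ```console
+ $ docker run -d --stop-signal SIGQUIT nginx:alpine
+ ```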
+ + ### Optional security options (--security-opt) {#security-opt} + + | Option | Description | + |:------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | `--security-opt="label=user:USER"` | Set the label user for the container | + | `--security-opt="label=role:ROLE"` | Set the label role for the container | + | `--security-opt="label=type:TYPE"` | Set the label type for the container | + | `--security-opt="label=level:LEVEL"` | Set the label level for the container | + | `--security-opt="label=disable"` | Turn off label confinement for the container | + | `--security-opt="apparmor=PROFILE"` | Set the apparmor profile to be applied to the container | + | `--security-opt="no-new-privileges=true"` | Disable container processes from gaining new privileges | + | `--security-opt="seccomp=unconfined"` | Turn off seccomp confinement for the container | + | `--security-opt="seccomp=builtin"` | Use the default (built-in) seccomp profile for the container. This can be used to enable seccomp for a container running on a daemon with a custom default profile set, or with seccomp disabled ("unconfined"). | + | `--security-opt="seccomp=profile.json"` | White-listed syscalls seccomp Json file to be used as a seccomp filter | + + The `--security-opt` flag lets you override the default labeling scheme for a + container. Specifying the level in the following command allows you to share + the same content between containers. + + ```console + $ docker run --security-opt label=level:s0:c100,c200 -it fedora bash + ``` + + > **Note** + > + > Automatic translation of MLS labels isn't supported. + + To disable the security labeling for a container entirely, you can use + `label=disable`: + + ```console + $ docker run --security-opt label=disable -it ubuntu bash + ``` + + If you want a tighter security policy on the processes within a container, you + can specify a custom `type` label. The following example runs a container + that's only allowed to listen on Apache ports: + + ```console + $ docker run --security-opt label=type:svirt_apache_t -it ubuntu bash + ``` + + > **Note** + > + > You would have to write policy defining a `svirt_apache_t` type. + + To prevent your container processes from gaining additional privileges, you can + use the following command: + + ```console + $ docker run --security-opt no-new-privileges -it ubuntu bash + ``` + + This means that commands that raise privileges such as `su` or `sudo` no longer work. + It also causes any seccomp filters to be applied later, after privileges have been dropped + which may mean you can have a more restrictive set of filters. + For more details, see the [kernel documentation](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt). + + On Windows, you can use the `--security-opt` flag to specify the `credentialspec` option. + The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`. + + ### Stop container with timeout (--stop-timeout) {#stop-timeout} + + The `--stop-timeout` flag sets the number of seconds to wait for the container + to stop after sending the pre-defined (see `--stop-signal`) system call signal. + If the container does not exit after the timeout elapses, it's forcibly killed + with a `SIGKILL` signal. 
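+
+ As a brief sketch (the image is a placeholder), the following gives a
+ container up to 120 seconds to shut down cleanly before the daemon falls back
+ to `SIGKILL`:
+
+ ```console
+ $ docker run -d --stop-timeout 120 redis
+ ```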
+ + If you set `--stop-timeout` to `-1`, no timeout is applied, and the daemon + waits indefinitely for the container to exit. + + The Daemon determines the default, and is 10 seconds for Linux containers, + and 30 seconds for Windows containers. + + ### Specify isolation technology for container (--isolation) {#isolation} + + This option is useful in situations where you are running Docker containers on + Windows. The `--isolation=` option sets a container's isolation technology. + On Linux, the only supported is the `default` option which uses Linux namespaces. + These two commands are equivalent on Linux: + + ```console + $ docker run -d busybox top + $ docker run -d --isolation default busybox top + ``` + + On Windows, `--isolation` can take one of these values: + + | Value | Description | + |:----------|:-------------------------------------------------------------------------------------------| + | `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below). | + | `process` | Shared-kernel namespace isolation. | + | `hyperv` | Hyper-V hypervisor partition-based isolation. | + + The default isolation on Windows server operating systems is `process`, and `hyperv` + on Windows client operating systems, such as Windows 10. Process isolation has better + performance, but requires that the image and host use the same kernel version. + + On Windows server, assuming the default configuration, these commands are equivalent + and result in `process` isolation: + + ```powershell + PS C:\> docker run -d microsoft/nanoserver powershell echo process + PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process + PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process + ``` + + If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, or + are running against a Windows client-based daemon, these commands are equivalent and + result in `hyperv` isolation: + + ```powershell + PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv + PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv + PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv + ``` + + ### Specify hard limits on memory available to containers (-m, --memory) {#memory} + + These parameters always set an upper limit on the memory available to the container. Linux sets this + on the cgroup and applications in a container can query it at `/sys/fs/cgroup/memory/memory.limit_in_bytes`. + + On Windows, this affects containers differently depending on what type of isolation you use. + + - With `process` isolation, Windows reports the full memory of the host system, not the limit to applications running inside the container + + ```powershell + PS C:\> docker run -it -m 2GB --isolation=process microsoft/nanoserver powershell Get-ComputerInfo *memory* + + CsTotalPhysicalMemory : 17064509440 + CsPhyicallyInstalledMemory : 16777216 + OsTotalVisibleMemorySize : 16664560 + OsFreePhysicalMemory : 14646720 + OsTotalVirtualMemorySize : 19154928 + OsFreeVirtualMemory : 17197440 + OsInUseVirtualMemory : 1957488 + OsMaxProcessMemorySize : 137438953344 + ``` + + - With `hyperv` isolation, Windows creates a utility VM that is big enough to hold the memory limit, plus the minimal OS needed to host the container. That size is reported as "Total Physical Memory." 
+ + ```powershell + PS C:\> docker run -it -m 2GB --isolation=hyperv microsoft/nanoserver powershell Get-ComputerInfo *memory* + + CsTotalPhysicalMemory : 2683355136 + CsPhyicallyInstalledMemory : + OsTotalVisibleMemorySize : 2620464 + OsFreePhysicalMemory : 2306552 + OsTotalVirtualMemorySize : 2620464 + OsFreeVirtualMemory : 2356692 + OsInUseVirtualMemory : 263772 + OsMaxProcessMemorySize : 137438953344 + ``` + + ### Configure namespaced kernel parameters (sysctls) at runtime (--sysctl) {#sysctl} + + The `--sysctl` sets namespaced kernel parameters (sysctls) in the + container. For example, to turn on IP forwarding in the containers + network namespace, run this command: + + ```console + $ docker run --sysctl net.ipv4.ip_forward=1 someimage + ``` + + > **Note** + > + > Not all sysctls are namespaced. Docker does not support changing sysctls + > inside of a container that also modify the host system. As the kernel + > evolves we expect to see more sysctls become namespaced. + + + #### Currently supported sysctls + + IPC Namespace: + + - `kernel.msgmax`, `kernel.msgmnb`, `kernel.msgmni`, `kernel.sem`, + `kernel.shmall`, `kernel.shmmax`, `kernel.shmmni`, `kernel.shm_rmid_forced`. + - Sysctls beginning with `fs.mqueue.*` + - If you use the `--ipc=host` option these sysctls are not allowed. + + Network Namespace: + + - Sysctls beginning with `net.*` + - If you use the `--network=host` option using these sysctls are not allowed. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_start.yaml b/data/engine-cli/docker_container_start.yaml index e01f2b861..d336a9ece 100644 --- a/data/engine-cli/docker_container_start.yaml +++ b/data/engine-cli/docker_container_start.yaml @@ -1,7 +1,7 @@ command: docker container start aliases: docker container start, docker start short: Start one or more stopped containers -long: See [docker start](start.md) for more information. +long: Start one or more stopped containers usage: docker container start [OPTIONS] CONTAINER [CONTAINER...] pname: docker container plink: docker_container.yaml @@ -68,6 +68,10 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ```console + $ docker start my_container + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_stats.yaml b/data/engine-cli/docker_container_stats.yaml index c0311eaee..0d989fe9c 100644 --- a/data/engine-cli/docker_container_stats.yaml +++ b/data/engine-cli/docker_container_stats.yaml @@ -1,7 +1,34 @@ command: docker container stats aliases: docker container stats, docker stats short: Display a live stream of container(s) resource usage statistics -long: See [docker stats](stats.md) for more information. +long: |- + The `docker stats` command returns a live data stream for running containers. To + limit data to one or more specific containers, specify a list of container names + or ids separated by a space. You can specify a stopped container but stopped + containers do not return any data. + + If you need more detailed information about a container's resource usage, use + the `/containers/(id)/stats` API endpoint. + + > **Note** + > + > On Linux, the Docker CLI reports memory usage by subtracting cache usage from + > the total memory usage. The API does not perform such a calculation but rather + > provides the total memory usage and the amount from the cache so that clients + > can use the data as needed. 
The cache usage is defined as the value of + > `total_inactive_file` field in the `memory.stat` file on cgroup v1 hosts. + > + > On Docker 19.03 and older, the cache usage was defined as the value of `cache` + > field. On cgroup v2 hosts, the cache usage is defined as the value of + > `inactive_file` field. + + > **Note** + > + > The `PIDS` column contains the number of processes and kernel threads created + > by that container. Threads is the term used by Linux kernel. Other equivalent + > terms are "lightweight process" or "kernel task", etc. A large number in the + > `PIDS` column combined with a small number of processes (as reported by `ps` + > or `top`) may indicate that something in the container is creating many threads. usage: docker container stats [OPTIONS] [CONTAINER...] pname: docker container plink: docker_container.yaml @@ -26,6 +53,7 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + details_url: '#format' deprecated: false hidden: false experimental: false @@ -63,6 +91,142 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + Running `docker stats` on all running containers against a Linux daemon. + + ```console + $ docker stats + + CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS + b95a83497c91 awesome_brattain 0.28% 5.629MiB / 1.952GiB 0.28% 916B / 0B 147kB / 0B 9 + 67b2525d8ad1 foobar 0.00% 1.727MiB / 1.952GiB 0.09% 2.48kB / 0B 4.11MB / 0B 2 + e5c383697914 test-1951.1.kay7x1lh1twk9c0oig50sd5tr 0.00% 196KiB / 1.952GiB 0.01% 71.2kB / 0B 770kB / 0B 1 + 4bda148efbc0 random.1.vnc8on831idyr42slu578u3cr 0.00% 1.672MiB / 1.952GiB 0.08% 110kB / 0B 578kB / 0B 2 + ``` + + If you don't [specify a format string using `--format`](#format), the + following columns are shown. + + | Column name | Description | + |---------------------------|-----------------------------------------------------------------------------------------------| + | `CONTAINER ID` and `Name` | the ID and name of the container | + | `CPU %` and `MEM %` | the percentage of the host's CPU and memory the container is using | + | `MEM USAGE / LIMIT` | the total memory the container is using, and the total amount of memory it is allowed to use | + | `NET I/O` | The amount of data the container has received and sent over its network interface | + | `BLOCK I/O` | The amount of data the container has written to and read from block devices on the host | + | `PIDs` | the number of processes or threads the container has created | + + Running `docker stats` on multiple containers by name and id against a Linux daemon. + + ```console + $ docker stats awesome_brattain 67b2525d8ad1 + + CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS + b95a83497c91 awesome_brattain 0.28% 5.629MiB / 1.952GiB 0.28% 916B / 0B 147kB / 0B 9 + 67b2525d8ad1 foobar 0.00% 1.727MiB / 1.952GiB 0.09% 2.48kB / 0B 4.11MB / 0B 2 + ``` + + Running `docker stats` on container with name `nginx` and getting output in `json` format. + + ```console + $ docker stats nginx --no-stream --format "{{ json . }}" + {"BlockIO":"0B / 13.3kB","CPUPerc":"0.03%","Container":"nginx","ID":"ed37317fbf42","MemPerc":"0.24%","MemUsage":"2.352MiB / 982.5MiB","Name":"nginx","NetIO":"539kB / 606kB","PIDs":"2"} + ``` + + Running `docker stats` with customized format on all (running and stopped) containers. 
+ + ```console + $ docker stats --all --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" fervent_panini 5acfcb1b4fd1 humble_visvesvaraya big_heisenberg + + CONTAINER CPU % MEM USAGE / LIMIT + fervent_panini 0.00% 56KiB / 15.57GiB + 5acfcb1b4fd1 0.07% 32.86MiB / 15.57GiB + humble_visvesvaraya 0.00% 0B / 0B + big_heisenberg 0.00% 0B / 0B + ``` + + `humble_visvesvaraya` and `big_heisenberg` are stopped containers in the above example. + + Running `docker stats` on all running containers against a Windows daemon. + + ```powershell + PS E:\> docker stats + CONTAINER ID CPU % PRIV WORKING SET NET I/O BLOCK I/O + 09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB + 9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB + 3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB + ``` + + Running `docker stats` on multiple containers by name and id against a Windows daemon. + + ```powershell + PS E:\> docker ps -a + CONTAINER ID NAME IMAGE COMMAND CREATED STATUS PORTS NAMES + 3f214c61ad1d awesome_brattain nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky + 9db7aa4d986d mad_wilson windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson + 09d3bb5b1604 fervent_panini windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley + + PS E:\> docker stats 3f214c61ad1d mad_wilson + CONTAINER ID NAME CPU % PRIV WORKING SET NET I/O BLOCK I/O + 3f214c61ad1d awesome_brattain 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB + 9db7aa4d986d mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB + ``` + + ### Format the output (--format) {#format} + + The formatting option (`--format`) pretty prints container output + using a Go template. + + Valid placeholders for the Go template are listed below: + + | Placeholder | Description | + |--------------|----------------------------------------------| + | `.Container` | Container name or ID (user input) | + | `.Name` | Container name | + | `.ID` | Container ID | + | `.CPUPerc` | CPU percentage | + | `.MemUsage` | Memory usage | + | `.NetIO` | Network IO | + | `.BlockIO` | Block IO | + | `.MemPerc` | Memory percentage (Not available on Windows) | + | `.PIDs` | Number of PIDs (Not available on Windows) | + + When using the `--format` option, the `stats` command either + outputs the data exactly as the template declares or, when using the + `table` directive, includes column headers as well. 
+ + The following example uses a template without headers and outputs the + `Container` and `CPUPerc` entries separated by a colon (`:`) for all images: + + ```console + $ docker stats --format "{{.Container}}: {{.CPUPerc}}" + + 09d3bb5b1604: 6.61% + 9db7aa4d986d: 9.19% + 3f214c61ad1d: 0.00% + ``` + + To list all containers statistics with their name, CPU percentage and memory + usage in a table format you can use: + + ```console + $ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" + + CONTAINER CPU % PRIV WORKING SET + 1285939c1fd3 0.07% 796 KiB / 64 MiB + 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB + d1ea048f04e4 0.03% 4.583 MiB / 64 MiB + ``` + + The default format is as follows: + + On Linux: + + "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" + + On Windows: + + "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_stop.yaml b/data/engine-cli/docker_container_stop.yaml index 5310d0df2..4b7327d61 100644 --- a/data/engine-cli/docker_container_stop.yaml +++ b/data/engine-cli/docker_container_stop.yaml @@ -1,7 +1,11 @@ command: docker container stop aliases: docker container stop, docker stop short: Stop one or more running containers -long: See [docker stop](stop.md) for more information. +long: |- + The main process inside the container will receive `SIGTERM`, and after a grace + period, `SIGKILL`. The first signal can be changed with the `STOPSIGNAL` + instruction in the container's Dockerfile, or the `--stop-signal` option to + `docker run`. usage: docker container stop [OPTIONS] CONTAINER [CONTAINER...] pname: docker container plink: docker_container.yaml @@ -38,6 +42,10 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ```console + $ docker stop my_container + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_top.yaml b/data/engine-cli/docker_container_top.yaml index 59477cc37..d519c3154 100644 --- a/data/engine-cli/docker_container_top.yaml +++ b/data/engine-cli/docker_container_top.yaml @@ -1,7 +1,7 @@ command: docker container top aliases: docker container top, docker top short: Display the running processes of a container -long: See [docker top](top.md) for more information. +long: Display the running processes of a container usage: docker container top CONTAINER [ps OPTIONS] pname: docker container plink: docker_container.yaml diff --git a/data/engine-cli/docker_container_unpause.yaml b/data/engine-cli/docker_container_unpause.yaml index 0c0fdf5dc..2901cb241 100644 --- a/data/engine-cli/docker_container_unpause.yaml +++ b/data/engine-cli/docker_container_unpause.yaml @@ -1,7 +1,13 @@ command: docker container unpause aliases: docker container unpause, docker unpause short: Unpause all processes within one or more containers -long: See [docker unpause](unpause.md) for more information. +long: |- + The `docker unpause` command un-suspends all processes in the specified containers. + On Linux, it does this using the freezer cgroup. + + See the + [freezer cgroup documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) + for further details. usage: docker container unpause CONTAINER [CONTAINER...] 
pname: docker container plink: docker_container.yaml @@ -16,6 +22,11 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ```console + $ docker unpause my_container + my_container + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_container_update.yaml b/data/engine-cli/docker_container_update.yaml index 9bf0e6788..e06ed2516 100644 --- a/data/engine-cli/docker_container_update.yaml +++ b/data/engine-cli/docker_container_update.yaml @@ -1,7 +1,23 @@ command: docker container update aliases: docker container update, docker update short: Update configuration of one or more containers -long: See [docker update](update.md) for more information. +long: |- + The `docker update` command dynamically updates container configuration. + You can use this command to prevent containers from consuming too many + resources from their Docker host. With a single command, you can place + limits on a single container or on many. To specify more than one container, + provide space-separated list of container names or IDs. + + With the exception of the `--kernel-memory` option, you can specify these + options on a running or a stopped container. On kernel version older than + 4.6, you can only update `--kernel-memory` on a stopped container or on + a running container with kernel memory initialized. + + > **Warning** + > + > The `docker update` and `docker container update` commands are not supported + > for Windows containers. + { .warning } usage: docker container update [OPTIONS] CONTAINER [CONTAINER...] pname: docker container plink: docker_container.yaml @@ -64,6 +80,7 @@ options: value_type: int64 default_value: "0" description: CPU shares (relative weight) + details_url: '#cpu-shares' deprecated: false hidden: false experimental: false @@ -102,6 +119,7 @@ options: value_type: bytes default_value: "0" description: Kernel memory limit (deprecated) + details_url: '#kernel-memory' deprecated: true hidden: true experimental: false @@ -113,6 +131,7 @@ options: value_type: bytes default_value: "0" description: Memory limit + details_url: '#memory' deprecated: false hidden: false experimental: false @@ -153,6 +172,7 @@ options: - option: restart value_type: string description: Restart policy to apply when a container exits + details_url: '#restart' deprecated: false hidden: false experimental: false @@ -170,6 +190,79 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + The following sections illustrate ways to use this command. + + ### Update a container's cpu-shares (--cpu-shares) {#cpu-shares} + + To limit a container's cpu-shares to 512, first identify the container + name or ID. You can use `docker ps` to find these values. You can also + use the ID returned from the `docker run` command. Then, do the following: + + ```console + $ docker update --cpu-shares 512 abebf7571666 + ``` + + ### Update a container with cpu-shares and memory (-m, --memory) {#memory} + + To update multiple resource configurations for multiple containers: + + ```console + $ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse + ``` + + ### Update a container's kernel memory constraints (--kernel-memory) {#kernel-memory} + + You can update a container's kernel memory limit using the `--kernel-memory` + option. On kernel version older than 4.6, this option can be updated on a + running container only if the container was started with `--kernel-memory`. 
+  If the container was started without `--kernel-memory`, you need to stop
+  the container before updating kernel memory.
+
+  > **Note**
+  >
+  > The `--kernel-memory` option has been deprecated since Docker 20.10.
+
+  For example, if you started a container with this command:
+
+  ```console
+  $ docker run -dit --name test --kernel-memory 50M ubuntu bash
+  ```
+
+  You can update kernel memory while the container is running:
+
+  ```console
+  $ docker update --kernel-memory 80M test
+  ```
+
+  If you started a container without kernel memory initialized:
+
+  ```console
+  $ docker run -dit --name test2 --memory 300M ubuntu bash
+  ```
+
+  Updating kernel memory of the running container `test2` will fail. You need to stop
+  the container before updating the `--kernel-memory` setting. The next time you
+  start it, the container uses the new value.
+
+  Kernel versions 4.6 and newer don't have this limitation; you
+  can use `--kernel-memory` the same way as any other option.
+
+  ### Update a container's restart policy (--restart) {#restart}
+
+  You can change a container's restart policy while the container is running. The new
+  restart policy takes effect instantly after you run `docker update` on a
+  container.
+
+  To update the restart policy for one or more containers:
+
+  ```console
+  $ docker update --restart=on-failure:3 abebf7571666 hopeful_morse
+  ```
+
+  Note that if the container was started with the `--rm` flag, you can't update the restart
+  policy for it. `AutoRemove` and `RestartPolicy` are mutually exclusive for the
+  container.
 deprecated: false
 experimental: false
 experimentalcli: false
diff --git a/data/engine-cli/docker_container_wait.yaml b/data/engine-cli/docker_container_wait.yaml
index 5ea9ee5a8..c507f3671 100644
--- a/data/engine-cli/docker_container_wait.yaml
+++ b/data/engine-cli/docker_container_wait.yaml
@@ -1,7 +1,7 @@
 command: docker container wait
 aliases: docker container wait, docker wait
 short: Block until one or more containers stop, then print their exit codes
-long: See [docker wait](wait.md) for more information.
+long: Block until one or more containers stop, then print their exit codes
 usage: docker container wait CONTAINER [CONTAINER...]
 pname: docker container
 plink: docker_container.yaml
@@ -16,6 +16,34 @@ inherited_options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+examples: |-
+  Start a container in the background.
+
+  ```console
+  $ docker run -dit --name=my_container ubuntu bash
+  ```
+
+  Run `docker wait`, which should block until the container exits.
+
+  ```console
+  $ docker wait my_container
+  ```
+
+  In another terminal, stop the first container. The `docker wait` command above
+  returns the exit code.
+
+  ```console
+  $ docker stop my_container
+  ```
+
+  This is the same `docker wait` command from above, but it now exits, returning
+  `0`.
+
+  ```console
+  $ docker wait my_container
+
+  0
+  ```
 deprecated: false
 experimental: false
 experimentalcli: false
diff --git a/data/engine-cli/docker_context_create.yaml b/data/engine-cli/docker_context_create.yaml
index 91fa82779..d6112eaeb 100644
--- a/data/engine-cli/docker_context_create.yaml
+++ b/data/engine-cli/docker_context_create.yaml
@@ -1,22 +1,12 @@
 command: docker context create
 short: Create a context
 long: |-
-  Creates a new `context`. This allows you to quickly switch the cli
-  configuration to connect to different clusters or single nodes.
+  Creates a new `context`. This lets you switch the daemon your `docker` CLI
+  connects to.
usage: docker context create [OPTIONS] CONTEXT pname: docker context plink: docker_context.yaml options: - - option: default-stack-orchestrator - value_type: string - description: | - Default orchestrator for stack operations to use with this context (`swarm`, `kubernetes`, `all`) - deprecated: true - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - option: description value_type: string description: Description of the context @@ -47,16 +37,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: kubernetes - value_type: stringToString - default_value: '[]' - description: set the kubernetes endpoint - deprecated: true - hidden: true - experimental: false - experimentalcli: false - kubernetes: true - swarm: false inherited_options: - option: help value_type: bool @@ -69,11 +49,11 @@ inherited_options: kubernetes: false swarm: false examples: |- - ### Create a context with a docker endpoint (--docker) {#docker} + ### Create a context with a Docker endpoint (--docker) {#docker} - To create a context from scratch provide the docker and, if required, - kubernetes options. The example below creates the context `my-context` - with a docker endpoint of `/var/run/docker.sock`: + Use the `--docker` flag to create a context with a custom endpoint. The + following example creates a context named `my-context` with a docker endpoint + of `/var/run/docker.sock`: ```console $ docker context create \ @@ -91,7 +71,7 @@ examples: |- $ docker context create --from existing-context my-context ``` - If the `--from` option is not set, the `context` is created from the current context: + If the `--from` option isn't set, the `context` is created from the current context: ```console $ docker context create my-context diff --git a/data/engine-cli/docker_context_export.yaml b/data/engine-cli/docker_context_export.yaml index 4971b08ae..f5c75fd74 100644 --- a/data/engine-cli/docker_context_export.yaml +++ b/data/engine-cli/docker_context_export.yaml @@ -12,17 +12,6 @@ long: |- usage: docker context export [OPTIONS] CONTEXT [FILE|-] pname: docker context plink: docker_context.yaml -options: - - option: kubeconfig - value_type: bool - default_value: "false" - description: Export as a kubeconfig file - deprecated: true - hidden: true - experimental: false - experimentalcli: false - kubernetes: true - swarm: false inherited_options: - option: help value_type: bool diff --git a/data/engine-cli/docker_context_update.yaml b/data/engine-cli/docker_context_update.yaml index 336257cf1..169653038 100644 --- a/data/engine-cli/docker_context_update.yaml +++ b/data/engine-cli/docker_context_update.yaml @@ -7,16 +7,6 @@ usage: docker context update [OPTIONS] CONTEXT pname: docker context plink: docker_context.yaml options: - - option: default-stack-orchestrator - value_type: string - description: | - Default orchestrator for stack operations to use with this context (swarm|kubernetes|all) - deprecated: true - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - option: description value_type: string description: Description of the context @@ -36,16 +26,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: kubernetes - value_type: stringToString - default_value: '[]' - description: set the kubernetes endpoint - deprecated: true - hidden: true - experimental: false - experimentalcli: false - kubernetes: true - swarm: false inherited_options: - option: help value_type: bool diff --git 
a/data/engine-cli/docker_context_use.yaml b/data/engine-cli/docker_context_use.yaml index f682c9260..1b4500552 100644 --- a/data/engine-cli/docker_context_use.yaml +++ b/data/engine-cli/docker_context_use.yaml @@ -2,7 +2,7 @@ command: docker context use short: Set the current docker context long: |- Set the default context to use, when `DOCKER_HOST`, `DOCKER_CONTEXT` environment - variables and `--host`, `--context` global options are not set. + variables and `--host`, `--context` global options aren't set. To disable usage of contexts, you can use the special `default` context. usage: docker context use CONTEXT pname: docker context diff --git a/data/engine-cli/docker_cp.yaml b/data/engine-cli/docker_cp.yaml index df0e65b37..bd91b8f62 100644 --- a/data/engine-cli/docker_cp.yaml +++ b/data/engine-cli/docker_cp.yaml @@ -2,69 +2,12 @@ command: docker cp aliases: docker container cp, docker cp short: Copy files/folders between a container and the local filesystem long: |- - The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. - You can copy from the container's file system to the local machine or the - reverse, from the local filesystem to the container. If `-` is specified for - either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from - `STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. - The `SRC_PATH` or `DEST_PATH` can be a file or directory. + Copy files/folders between a container and the local filesystem - The `docker cp` command assumes container paths are relative to the container's - `/` (root) directory. This means supplying the initial forward slash is optional; - The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and - `compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can - be an absolute or relative value. The command interprets a local machine's - relative paths as relative to the current working directory where `docker cp` is - run. - - The `cp` command behaves like the Unix `cp -a` command in that directories are - copied recursively with permissions preserved if possible. Ownership is set to - the user and primary group at the destination. For example, files copied to a - container are created with `UID:GID` of the root user. Files copied to the local - machine are created with the `UID:GID` of the user which invoked the `docker cp` - command. However, if you specify the `-a` option, `docker cp` sets the ownership - to the user and primary group at the source. - If you specify the `-L` option, `docker cp` follows any symbolic link - in the `SRC_PATH`. `docker cp` does *not* create parent directories for - `DEST_PATH` if they do not exist. - - Assuming a path separator of `/`, a first argument of `SRC_PATH` and second - argument of `DEST_PATH`, the behavior is as follows: - - - `SRC_PATH` specifies a file - - `DEST_PATH` does not exist - - the file is saved to a file created at `DEST_PATH` - - `DEST_PATH` does not exist and ends with `/` - - Error condition: the destination directory must exist. 
- - `DEST_PATH` exists and is a file - - the destination is overwritten with the source file's contents - - `DEST_PATH` exists and is a directory - - the file is copied into this directory using the basename from - `SRC_PATH` - - `SRC_PATH` specifies a directory - - `DEST_PATH` does not exist - - `DEST_PATH` is created as a directory and the *contents* of the source - directory are copied into this directory - - `DEST_PATH` exists and is a file - - Error condition: cannot copy a directory to a file - - `DEST_PATH` exists and is a directory - - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) - - the source directory is copied into this directory - - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) - - the *content* of the source directory is copied into this - directory - - The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above - rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not - the target, is copied by default. To copy the link target and not the link, specify - the `-L` option. - - A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can - also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local - machine, for example `file:name.txt`. If you use a `:` in a local machine path, - you must be explicit with a relative or absolute path, for example: - - `/path/to/file:name.txt` or `./file:name.txt` + Use '-' as the source to read a tar archive from stdin + and extract it to a directory destination in a container. + Use '-' as the destination to stream a tar archive of a + container source to stdout. usage: |- docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH @@ -116,45 +59,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - Copy a local file into container - - ```console - $ docker cp ./some_file CONTAINER:/work - ``` - - Copy files from container to local path - - ```console - $ docker cp CONTAINER:/var/logs/ /tmp/app_logs - ``` - - Copy a file from container to stdout. Please note `cp` command produces a tar stream - - ```console - $ docker cp CONTAINER:/var/logs/app.log - | tar x -O | grep "ERROR" - ``` - - ### Corner cases - - It is not possible to copy certain system files such as resources under - `/proc`, `/sys`, `/dev`, [tmpfs](run.md#tmpfs), and mounts created by - the user in the container. However, you can still copy such files by manually - running `tar` in `docker exec`. Both of the following examples do the same thing - in different ways (consider `SRC_PATH` and `DEST_PATH` are directories): - - ```console - $ docker exec CONTAINER tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - - ``` - - ```console - $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i CONTAINER tar Cxf DEST_PATH - - ``` - - Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. - The command extracts the content of the tar to the `DEST_PATH` in container's - filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as - the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. 
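+examples: |-
+  A minimal sketch of the `-` streaming described above, assuming a container
+  named `mycontainer` exists: stream a directory out of the container as a tar
+  archive on `STDOUT`, and redirect it to a file on the host.
+
+  ```console
+  $ docker cp mycontainer:/var/log - > logs.tar
+  ```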
deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_create.yaml b/data/engine-cli/docker_create.yaml index 6fe20317b..1a2b4f5af 100644 --- a/data/engine-cli/docker_create.yaml +++ b/data/engine-cli/docker_create.yaml @@ -1,24 +1,7 @@ command: docker create aliases: docker container create, docker create short: Create a new container -long: |- - The `docker container create` (or shorthand: `docker create`) command creates a - new container from the specified image, without starting it. - - When creating a container, the docker daemon creates a writeable container layer - over the specified image and prepares it for running the specified command. The - container ID is then printed to `STDOUT`. This is similar to `docker run -d` - except the container is never started. You can then use the `docker container start` - (or shorthand: `docker start`) command to start the container at any point. - - This is useful when you want to set up a container configuration ahead of time - so that it is ready to start when you need it. The initial status of the - new container is `created`. - - The `docker create` command shares most of its options with the `docker run` - command (which performs a `docker create` before starting it). Refer to the - [`docker run` command](run.md) section and the [Docker run reference](../run.md) - for details on the available flags and options. +long: Create a new container usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] pname: docker plink: docker.yaml @@ -427,6 +410,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: health-start-interval + value_type: duration + default_value: 0s + description: | + Time between running the check during the start period (ms|s|m|h) (default 0s) + deprecated: false + hidden: false + min_api_version: "1.44" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: health-start-period value_type: duration default_value: 0s @@ -1024,68 +1019,6 @@ options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Create and start a container - - The following example creates an interactive container with a pseudo-TTY attached, - then starts the container and attaches to it: - - ```console - $ docker container create -i -t --name mycontainer alpine - 6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 - - $ docker container start --attach -i mycontainer - / # echo hello world - hello world - ``` - - The above is the equivalent of a `docker run`: - - ```console - $ docker run -it --name mycontainer2 alpine - / # echo hello world - hello world - ``` - - ### Initialize volumes - - Container volumes are initialized during the `docker create` phase - (i.e., `docker run` too). For example, this allows you to `create` the `data` - volume container, and then use it from another container: - - ```console - $ docker create -v /data --name data ubuntu - - 240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 - - $ docker run --rm --volumes-from data ubuntu ls -la /data - - total 8 - drwxr-xr-x 2 root root 4096 Dec 5 04:10 . - drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. 
- ``` - - Similarly, `create` a host directory bind mounted volume container, which can - then be used from the subsequent container: - - ```console - $ docker create -v /home/docker:/docker --name docker ubuntu - - 9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 - - $ docker run --rm --volumes-from docker ubuntu ls -la /docker - - total 20 - drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . - drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. - -rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history - -rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc - -rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig - drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local - -rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile - drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh - drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_diff.yaml b/data/engine-cli/docker_diff.yaml index 1ad5f1c47..91a0f3b50 100644 --- a/data/engine-cli/docker_diff.yaml +++ b/data/engine-cli/docker_diff.yaml @@ -1,18 +1,7 @@ command: docker diff aliases: docker container diff, docker diff short: Inspect changes to files or directories on a container's filesystem -long: |- - List the changed files and directories in a container᾿s filesystem since the - container was created. Three different types of change are tracked: - - | Symbol | Description | - |--------|---------------------------------| - | `A` | A file or directory was added | - | `D` | A file or directory was deleted | - | `C` | A file or directory was changed | - - You can use the full or shortened container ID or the container name set using - `docker run --name` option. +long: Inspect changes to files or directories on a container's filesystem usage: docker diff CONTAINER pname: docker plink: docker.yaml @@ -27,32 +16,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - Inspect the changes to an `nginx` container: - - ```console - $ docker diff 1fdfd1f54c1b - - C /dev - C /dev/console - C /dev/core - C /dev/stdout - C /dev/fd - C /dev/ptmx - C /dev/stderr - C /dev/stdin - C /run - A /run/nginx.pid - C /var/lib/nginx/tmp - A /var/lib/nginx/tmp/client_body - A /var/lib/nginx/tmp/fastcgi - A /var/lib/nginx/tmp/proxy - A /var/lib/nginx/tmp/scgi - A /var/lib/nginx/tmp/uwsgi - C /var/log/nginx - A /var/log/nginx/access.log - A /var/log/nginx/error.log - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_events.yaml b/data/engine-cli/docker_events.yaml index 492cbd94e..cd0c772bc 100644 --- a/data/engine-cli/docker_events.yaml +++ b/data/engine-cli/docker_events.yaml @@ -1,185 +1,7 @@ command: docker events aliases: docker system events, docker events short: Get real time events from the server -long: |- - Use `docker events` to get real-time events from the server. These events differ - per Docker object type. Different event types have different scopes. Local - scoped events are only seen on the node they take place on, and swarm scoped - events are seen on all managers. - - Only the last 1000 log events are returned. You can use filters to further limit - the number of events returned. 
- - ### Object types - - #### Containers - - Docker containers report the following events: - - - `attach` - - `commit` - - `copy` - - `create` - - `destroy` - - `detach` - - `die` - - `exec_create` - - `exec_detach` - - `exec_die` - - `exec_start` - - `export` - - `health_status` - - `kill` - - `oom` - - `pause` - - `rename` - - `resize` - - `restart` - - `start` - - `stop` - - `top` - - `unpause` - - `update` - - #### Images - - Docker images report the following events: - - - `delete` - - `import` - - `load` - - `pull` - - `push` - - `save` - - `tag` - - `untag` - - #### Plugins - - Docker plugins report the following events: - - - `enable` - - `disable` - - `install` - - `remove` - - #### Volumes - - Docker volumes report the following events: - - - `create` - - `destroy` - - `mount` - - `unmount` - - #### Networks - - Docker networks report the following events: - - - `create` - - `connect` - - `destroy` - - `disconnect` - - `remove` - - #### Daemons - - Docker daemons report the following events: - - - `reload` - - #### Services - - Docker services report the following events: - - - `create` - - `remove` - - `update` - - #### Nodes - - Docker nodes report the following events: - - - `create` - - `remove` - - `update` - - #### Secrets - - Docker secrets report the following events: - - - `create` - - `remove` - - `update` - - #### Configs - - Docker configs report the following events: - - - `create` - - `remove` - - `update` - - ### Limiting, filtering, and formatting the output - - #### Limit events by time (--since, --until) {#since} - - The `--since` and `--until` parameters can be Unix timestamps, date formatted - timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed - relative to the client machine’s time. If you do not provide the `--since` option, - the command returns only new and/or live events. Supported formats for date - formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, - `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local - timezone on the client will be used if you do not provide either a `Z` or a - `+-00:00` timezone offset at the end of the timestamp. When providing Unix - timestamps enter seconds[.nanoseconds], where seconds is the number of seconds - that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap - seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a - fraction of a second no more than nine digits long. - - Only the last 1000 log events are returned. You can use filters to further limit - the number of events returned. - - #### Filtering (--filter) {#filter} - - The filtering flag (`-f` or `--filter`) format is of "key=value". 
If you would - like to use multiple filters, pass multiple flags (e.g., - `--filter "foo=bar" --filter "bif=baz"`) - - Using the same filter multiple times will be handled as a *OR*; for example - `--filter container=588a23dac085 --filter container=a8f7720b8c22` will display - events for container 588a23dac085 *OR* container a8f7720b8c22 - - Using multiple filters will be handled as a *AND*; for example - `--filter container=588a23dac085 --filter event=start` will display events for - container container 588a23dac085 *AND* the event type is *start* - - The currently supported filters are: - - * config (`config=`) - * container (`container=`) - * daemon (`daemon=`) - * event (`event=`) - * image (`image=`) - * label (`label=` or `label==`) - * network (`network=`) - * node (`node=`) - * plugin (`plugin=`) - * scope (`scope=`) - * secret (`secret=`) - * service (`service=`) - * type (`type=`) - * volume (`volume=`) - - #### Format the output (--format) {#format} - - If a format (`--format`) is specified, the given template will be executed - instead of the default - format. Go's [text/template](https://pkg.go.dev/text/template) package - describes all the details of the format. - - If a format is set to `{{json .}}`, the events are streamed as valid JSON - Lines. For information about JSON Lines, please refer to https://jsonlines.org/. +long: Get real time events from the server usage: docker events [OPTIONS] pname: docker plink: docker.yaml @@ -236,220 +58,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Basic example - - You'll need two shells for this example. - - **Shell 1: Listening for events:** - - ```console - $ docker events - ``` - - **Shell 2: Start and Stop containers:** - - ```console - $ docker create --name test alpine:latest top - $ docker start test - $ docker stop test - ``` - - **Shell 1: (Again .. now showing events):** - - ```console - 2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test) - 2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) - 2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) - 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - ``` - - To exit the `docker events` command, use `CTRL+C`. 
- - ### Filter events by time - - You can filter the output by an absolute timestamp or relative time on the host - machine, using the following different time syntaxes: - - ```console - $ docker events --since 1483283804 - 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) - 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) - 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) - 2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) - 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - - $ docker events --since '2017-01-05' - 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) - 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) - 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) - 2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) - 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - - $ docker events --since '2013-09-03T15:49:29' - 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) - 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) - 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) - 2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) - 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - - $ docker events --since '10m' - 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) - 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) - 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) - 2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) - 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 
2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - - $ docker events --since '2017-01-05T00:35:30' --until '2017-01-05T00:36:05' - 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) - 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) - 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) - 2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - ``` - - ### Filter events by criteria - - The following commands show several different ways to filter the `docker event` - output. - - ```console - $ docker events --filter 'event=stop' - - 2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain) - - $ docker events --filter 'image=alpine' - - 2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner) - 2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) - 2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15) - 2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9) - 2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner) - 2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner) - - $ docker events --filter 'container=test' - - 2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) - 2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) - 2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - - $ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8' - - 2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) - 2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) - 2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9) - 2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test) - 2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test) - - $ docker events --filter 'container=test' --filter 'event=stop' - - 2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test) - - $ docker events --filter 'type=volume' - - 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) - 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate) - 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local) - 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - - $ docker events --filter 'type=network' - - 2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge) - 2015-12-23T21:38:25.119625123Z network 
connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge) - - $ docker events --filter 'container=container_1' --filter 'container=container_2' - - 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu:22.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu:22.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'type=volume' - - 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) - 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) - 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) - 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - - $ docker events --filter 'type=network' - - 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) - 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) - - $ docker events --filter 'type=plugin' - - 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - - $ docker events -f type=service - - 2017-07-12T06:34:07.999446625Z service create wj64st89fzgchxnhiqpn8p4oj (name=reverent_albattani) - 2017-07-12T06:34:21.405496207Z service remove wj64st89fzgchxnhiqpn8p4oj (name=reverent_albattani) - - $ docker events -f type=node - - 2017-07-12T06:21:51.951586759Z node update 3xyz5ttp1a253q74z1thwywk9 (name=ip-172-31-23-42, state.new=ready, state.old=unknown) - - $ docker events -f type=secret - - 2017-07-12T06:32:13.915704367Z secret create s8o6tmlnndrgzbmdilyy5ymju (name=new_secret) - 2017-07-12T06:32:37.052647783Z secret remove s8o6tmlnndrgzbmdilyy5ymju (name=new_secret) - - $ docker events -f type=config - 2017-07-12T06:44:13.349037127Z config create u96zlvzdfsyb9sg4mhyxfh3rl (name=abc) - 2017-07-12T06:44:36.327694184Z config remove u96zlvzdfsyb9sg4mhyxfh3rl (name=abc) - - $ docker events --filter 'scope=swarm' - - 2017-07-10T07:46:50.250024503Z service create m8qcxu8081woyof7w3jaax6gk (name=affectionate_wilson) - 2017-07-10T07:47:31.093797134Z secret create 6g5pufzsv438p9tbvl9j94od4 (name=new_secret) - ``` - - ### Format the output - - ```console - $ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' - - Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=die 
ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - ``` - - #### Format as JSON - - To list events in JSON format, use the `json` directive, which is the equivalent - of `--format '{{ json . }}`. - - ```console - $ docker events --format json - - {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. - {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. - {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - ``` - - . deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_exec.yaml b/data/engine-cli/docker_exec.yaml index dd3d030cd..43e3319da 100644 --- a/data/engine-cli/docker_exec.yaml +++ b/data/engine-cli/docker_exec.yaml @@ -1,20 +1,7 @@ command: docker exec aliases: docker container exec, docker exec short: Execute a command in a running container -long: |- - The `docker exec` command runs a new command in a running container. - - The command started using `docker exec` only runs while the container's primary - process (`PID 1`) is running, and it is not restarted if the container is - restarted. - - COMMAND runs in the default directory of the container. If the underlying image - has a custom directory specified with the WORKDIR directive in its Dockerfile, - this directory is used instead. - - COMMAND must be an executable. A chained or a quoted command does not work. - For example, `docker exec -it my_container sh -c "echo a && echo b"` does - work, but `docker exec -it my_container "echo a && echo b"` does not. +long: Execute a command in a running container usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] pname: docker plink: docker.yaml @@ -43,7 +30,6 @@ options: shorthand: e value_type: list description: Set environment variables - details_url: '#env' deprecated: false hidden: false min_api_version: "1.25" @@ -107,7 +93,6 @@ options: shorthand: w value_type: string description: Working directory inside the container - details_url: '#workdir' deprecated: false hidden: false min_api_version: "1.35" @@ -126,101 +111,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Run `docker exec` on a running container - - First, start a container. - - ```console - $ docker run --name mycontainer -d -i -t alpine /bin/sh - ``` - - This creates and starts a container named `mycontainer` from an `alpine` image - with an `sh` shell as its main process. The `-d` option (shorthand for `--detach`) - sets the container to run in the background, in detached mode, with a pseudo-TTY - attached (`-t`). The `-i` option is set to keep `STDIN` attached (`-i`), which - prevents the `sh` process from exiting immediately. - - Next, execute a command on the container. - - ```console - $ docker exec -d mycontainer touch /tmp/execWorks - ``` - - This creates a new file `/tmp/execWorks` inside the running container - `mycontainer`, in the background. - - Next, execute an interactive `sh` shell on the container. - - ```console - $ docker exec -it mycontainer sh - ``` - - This starts a new shell session in the container `mycontainer`. - - ### Set environment variables for the exec process (--env, -e) {#env} - - Next, set environment variables in the current bash session. 
- - The `docker exec` command inherits the environment variables that are set at the - time the container is created. Use the `--env` (or the `-e` shorthand) to - override global environment variables, or to set additional environment - variables for the process started by `docker exec`. - - The example below creates a new shell session in the container `mycontainer` with - environment variables `$VAR_A` and `$VAR_B` set to "1" and "2" respectively. - These environment variables are only valid for the `sh` process started by that - `docker exec` command, and are not available to other processes running inside - the container. - - ```console - $ docker exec -e VAR_A=1 -e VAR_B=2 mycontainer env - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - HOSTNAME=f64a4851eb71 - VAR_A=1 - VAR_B=2 - HOME=/root - ``` - - ### Set the working directory for the exec process (--workdir, -w) {#workdir} - - By default `docker exec` command runs in the same working directory set when - the container was created. - - ```console - $ docker exec -it mycontainer pwd - / - ``` - - You can specify an alternative working directory for the command to execute - using the `--workdir` option (or the `-w` shorthand): - - ```console - $ docker exec -it -w /root mycontainer pwd - /root - ``` - - - ### Try to run `docker exec` on a paused container - - If the container is paused, then the `docker exec` command fails with an error: - - ```console - $ docker pause mycontainer - mycontainer - - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 482efdf39fac alpine "/bin/sh" 17 seconds ago Up 16 seconds (Paused) mycontainer - - $ docker exec mycontainer sh - - Error response from daemon: Container mycontainer is paused, unpause the container before exec - - $ echo $? - 1 - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_export.yaml b/data/engine-cli/docker_export.yaml index 1ba184642..4063c4f27 100644 --- a/data/engine-cli/docker_export.yaml +++ b/data/engine-cli/docker_export.yaml @@ -1,14 +1,7 @@ command: docker export aliases: docker container export, docker export short: Export a container's filesystem as a tar archive -long: |- - The `docker export` command does not export the contents of volumes associated - with the container. If a volume is mounted on top of an existing directory in - the container, `docker export` will export the contents of the *underlying* - directory, not the contents of the volume. - - Refer to [Backup, restore, or migrate data volumes](/storage/volumes/#back-up-restore-or-migrate-data-volumes) - in the user guide for examples on exporting data in a volume. +long: Export a container's filesystem as a tar archive usage: docker export [OPTIONS] CONTAINER pname: docker plink: docker.yaml @@ -34,16 +27,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - Each of these commands has the same result. - - ```console - $ docker export red_panda > latest.tar - ``` - - ```console - $ docker export --output="latest.tar" red_panda - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_history.yaml b/data/engine-cli/docker_history.yaml index e41d3b2a6..494375b05 100644 --- a/data/engine-cli/docker_history.yaml +++ b/data/engine-cli/docker_history.yaml @@ -15,7 +15,6 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. 
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates - details_url: '#format' deprecated: false hidden: false experimental: false @@ -65,62 +64,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - To see how the `docker:latest` image was built: - - ```console - $ docker history docker - - IMAGE CREATED CREATED BY SIZE COMMENT - 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B - 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB - be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB - 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB - 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi : 4 weeks ago - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_build.yaml b/data/engine-cli/docker_image_build.yaml index bc6e4be25..f0bc32a06 100644 --- a/data/engine-cli/docker_image_build.yaml +++ b/data/engine-cli/docker_image_build.yaml @@ -1,7 +1,110 @@ command: docker image build aliases: docker image build, docker build, docker buildx build, docker builder build short: Build an image from a Dockerfile -long: See [docker build](build.md) for more information. +long: |- + The `docker build` command builds Docker images from a Dockerfile and a + "context". A build's context is the set of files located in the specified + `PATH` or `URL`. The build process can refer to any of the files in the + context. For example, your build can use a [*COPY*](/engine/reference/builder/#copy) + instruction to reference a file in the context. + + The `URL` parameter can refer to three kinds of resources: Git repositories, + pre-packaged tarball contexts, and plain text files. + + ### Git repositories + + When the `URL` parameter points to the location of a Git repository, the + repository acts as the build context. The system recursively fetches the + repository and its submodules. The commit history isn't preserved. A + repository is first pulled into a temporary directory on your local host. After + that succeeds, the command sends the directory to the Docker daemon as the context. + Local copy gives you the ability to access private repositories using local + user credentials, VPNs, and so forth. + + > **Note** + > + > If the `URL` parameter contains a fragment the system recursively clones + > the repository and its submodules using a `git clone --recursive` command. + + Git URLs accept context configuration in their fragment section, separated by a + colon (`:`). The first part represents the reference that Git checks out, + and can be either a branch, a tag, or a remote reference. The second part + represents a subdirectory inside the repository used as a build + context. 
+ + For example, run this command to use a directory called `docker` in the branch + `container`: + + ```console + $ docker build https://github.com/docker/rootfs.git#container:docker + ``` + + The following table represents all the valid suffixes with their build + contexts: + + | Build Syntax Suffix | Commit Used | Build Context Used | + |--------------------------------|-----------------------|--------------------| + | `myrepo.git` | `refs/heads/master` | `/` | + | `myrepo.git#mytag` | `refs/tags/mytag` | `/` | + | `myrepo.git#mybranch` | `refs/heads/mybranch` | `/` | + | `myrepo.git#pull/42/head` | `refs/pull/42/head` | `/` | + | `myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` | + | `myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` | + | `myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` | + | `myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` | + + ### Tarball contexts + + If you pass a URL to a remote tarball, the command sends the URL itself to the + daemon: + + ```console + $ docker build http://server/context.tar.gz + ``` + + The host running the Docker daemon performs the download operation, + which isn't necessarily the same host that issued the build command. + The Docker daemon fetches `context.tar.gz` and uses it as the + build context. Tarball contexts must be tar archives conforming to the standard + `tar` Unix format and can be compressed with any one of the `xz`, `bzip2`, + `gzip` or `identity` (no compression) formats. + + ### Text files + + Instead of specifying a context, you can pass a single `Dockerfile` in the + `URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: + + ```console + $ docker build - < Dockerfile + ``` + + With PowerShell on Windows, you run: + + ```powershell + Get-Content Dockerfile | docker build - + ``` + + If you use `STDIN` or specify a `URL` pointing to a plain text file, the daemon + places the contents into a `Dockerfile`, and ignores any `-f`, `--file` + option. In this scenario, there is no context. + + By default the `docker build` command looks for a `Dockerfile` at the root + of the build context. The `-f`, `--file`, option lets you specify the path to + an alternative file to use instead. This is useful in cases that use the same + set of files for multiple builds. The path must be to a file within the + build context. Relative path are interpreted as relative to the root of the + context. + + In most cases, it's best to put each Dockerfile in an empty directory. Then, + add to that directory only the files needed for building the Dockerfile. To + increase the build's performance, you can exclude files and directories by + adding a `.dockerignore` file to that directory as well. For information on + creating one, see the [.dockerignore file](/engine/reference/builder/#dockerignore-file). + + If the Docker client loses connection to the daemon, it cancels the build. + This happens if you interrupt the Docker client with `CTRL-c` or if the Docker + client is killed for any reason. If the build initiated a pull which is still + running at the time the build is cancelled, the client also cancels the pull. 
usage: docker image build [OPTIONS] PATH | URL | - pname: docker image plink: docker_image.yaml @@ -9,6 +112,7 @@ options: - option: add-host value_type: list description: Add a custom host-to-IP mapping (`host:ip`) + details_url: '#add-host' deprecated: false hidden: false experimental: false @@ -18,6 +122,7 @@ options: - option: build-arg value_type: list description: Set build-time variables + details_url: '#build-arg' deprecated: false hidden: false experimental: false @@ -28,6 +133,7 @@ options: value_type: stringSlice default_value: '[]' description: Images to consider as cache sources + details_url: '#cache-from' deprecated: false hidden: false experimental: false @@ -37,6 +143,7 @@ options: - option: cgroup-parent value_type: string description: Set the parent cgroup for the `RUN` instructions during build + details_url: '#cgroup-parent' deprecated: false hidden: false experimental: false @@ -116,6 +223,7 @@ options: shorthand: f value_type: string description: Name of the Dockerfile (Default is `PATH/Dockerfile`) + details_url: '#file' deprecated: false hidden: false experimental: false @@ -144,6 +252,7 @@ options: - option: isolation value_type: string description: Container isolation technology + details_url: '#isolation' deprecated: false hidden: false experimental: false @@ -184,6 +293,7 @@ options: value_type: string default_value: default description: Set the networking mode for the RUN instructions during build + details_url: '#network' deprecated: false hidden: false min_api_version: "1.25" @@ -246,6 +356,7 @@ options: value_type: stringSlice default_value: '[]' description: Security options + details_url: '#security-opt' deprecated: false hidden: false experimental: false @@ -266,6 +377,7 @@ options: value_type: bool default_value: "false" description: Squash newly built layers into a single new layer + details_url: '#squash' deprecated: false hidden: false min_api_version: "1.25" @@ -277,6 +389,7 @@ options: shorthand: t value_type: list description: Name and optionally a tag in the `name:tag` format + details_url: '#tag' deprecated: false hidden: false experimental: false @@ -286,6 +399,7 @@ options: - option: target value_type: string description: Set the target build stage to build. + details_url: '#target' deprecated: false hidden: false experimental: false @@ -296,6 +410,7 @@ options: value_type: ulimit default_value: '[]' description: Ulimit options + details_url: '#ulimit' deprecated: false hidden: false experimental: false @@ -313,6 +428,591 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Build with PATH + + ```console + $ docker build . 
+ + Uploading context 10240 bytes + Step 1/3 : FROM busybox + Pulling repository busybox + ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ + Step 2/3 : RUN ls -lh / + ---> Running in 9c9e81692ae9 + total 24 + drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin + drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev + drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc + drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib + lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib + dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc + lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin + dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys + drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp + drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr + ---> b35f4035db3f + Step 3/3 : CMD echo Hello world + ---> Running in 02071fceb21b + ---> f52f38b7823e + Successfully built f52f38b7823e + Removing intermediate container 9c9e81692ae9 + Removing intermediate container 02071fceb21b + ``` + + This example specifies that the `PATH` is `.`, and so `tar`s all the files in the + local directory and sends them to the Docker daemon. The `PATH` specifies + where to find the files for the "context" of the build on the Docker daemon. + Remember that the daemon could be running on a remote machine and that no + parsing of the Dockerfile happens at the client side (where you're running + `docker build`). That means that all the files at `PATH` are sent, not just + the ones listed to [`ADD`](/engine/reference/builder/#add) + in the Dockerfile. + + The transfer of context from the local machine to the Docker daemon is what the + `docker` client means when you see the "Sending build context" message. + + If you wish to keep the intermediate containers after the build is complete, + you must use `--rm=false`. This doesn't affect the build cache. + + ### Build with URL + + ```console + $ docker build github.com/creack/docker-firefox + ``` + + This clones the GitHub repository, using the cloned repository as context, + and the Dockerfile at the root of the repository. You can + specify an arbitrary Git repository by using the `git://` or `git@` scheme. + + ```console + $ docker build -f ctx/Dockerfile http://server/ctx.tar.gz + + Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B + Step 1/3 : FROM busybox + ---> 8c2e06607696 + Step 2/3 : ADD ctx/container.cfg / + ---> e7829950cee3 + Removing intermediate container b35224abf821 + Step 3/3 : CMD /bin/ls + ---> Running in fbc63d321d73 + ---> 3286931702ad + Removing intermediate container fbc63d321d73 + Successfully built 377c409b35e4 + ``` + + This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which + downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` + parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` used + to build the image. Any `ADD` commands in that `Dockerfile` that refer to local + paths must be relative to the root of the contents inside `ctx.tar.gz`. In the + example above, the tarball contains a directory `ctx/`, so the `ADD + ctx/container.cfg /` operation works as expected. + + ### Build with `-` + + ```console + $ docker build - < Dockerfile + ``` + + This example reads a Dockerfile from `STDIN` without context. Due to the lack of a + context, the command doesn't send contents of any local directory to the Docker daemon. + Since there is no context, a Dockerfile `ADD` only works if it refers to a + remote URL. 
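+
+  As a minimal sketch (the URL here is a placeholder), such a Dockerfile piped
+  through `STDIN` could fetch its content with a remote `ADD` source:
+
+  ```console
+  $ docker build - <<EOF
+  FROM busybox
+  ADD https://example.com/archive.tar.gz /archive.tar.gz
+  EOF
+  ```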
+ + ```console + $ docker build - < context.tar.gz + ``` + + This example builds an image for a compressed context read from `STDIN`. + Supported formats are: `bzip2`, `gzip` and `xz`. + + ### Use a .dockerignore file + + ```console + $ docker build . + + Uploading context 18.829 MB + Uploading context + Step 1/2 : FROM busybox + ---> 769b9341d937 + Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 + Successfully built 99cc1ad10469 + $ echo ".git" > .dockerignore + $ docker build . + Uploading context 6.76 MB + Uploading context + Step 1/2 : FROM busybox + ---> 769b9341d937 + Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 + Successfully built 99cc1ad10469 + ``` + + This example shows the use of the `.dockerignore` file to exclude the `.git` + directory from the context. You can see its effect in the changed size of the + uploaded context. The builder reference contains detailed information on + [creating a .dockerignore file](/engine/reference/builder/#dockerignore-file). + + When using the [BuildKit backend](/build/buildkit/), + `docker build` searches for a `.dockerignore` file relative to the Dockerfile + name. For example, running `docker build -f myapp.Dockerfile .` first looks + for an ignore file named `myapp.Dockerfile.dockerignore`. If it can't find such a file, + if present, it uses the `.dockerignore` file. Using a Dockerfile based + `.dockerignore` is useful if a project contains multiple Dockerfiles that expect + to ignore different sets of files. + + ### Tag an image (-t, --tag) {#tag} + + ```console + $ docker build -t vieux/apache:2.0 . + ``` + + This examples builds in the same way as the previous example, but it then tags the resulting + image. The repository name will be `vieux/apache` and the tag `2.0`. + + [Read more about valid tags](image_tag.md). + + You can apply multiple tags to an image. For example, you can apply the `latest` + tag to a newly built image and add another tag that references a specific + version. + + For example, to tag an image both as `whenry/fedora-jboss:latest` and + `whenry/fedora-jboss:v2.1`, use the following: + + ```console + $ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . + ``` + + ### Specify a Dockerfile (-f, --file) {#file} + + ```console + $ docker build -f Dockerfile.debug . + ``` + + This uses a file called `Dockerfile.debug` for the build instructions + instead of `Dockerfile`. + + ```console + $ curl example.com/remote/Dockerfile | docker build -f - . + ``` + + The above command uses the current directory as the build context and reads + a Dockerfile from stdin. + + ```console + $ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . + $ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . + ``` + + The above commands build the current build context (as specified by the + `.`) twice. Once using a debug version of a `Dockerfile` and once using a + production version. + + ```console + $ cd /home/me/myapp/some/dir/really/deep + $ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp + $ docker build -f ../../../../dockerfiles/debug /home/me/myapp + ``` + + These two `docker build` commands do the exact same thing. They both use the + contents of the `debug` file instead of looking for a `Dockerfile` and use + `/home/me/myapp` as the root of the build context. Note that `debug` is in the + directory structure of the build context, regardless of how you refer to it on + the command line. 
+
+    > **Note**
+    >
+    > `docker build` returns a `no such file or directory` error if the
+    > file or directory doesn't exist in the uploaded context. This may
+    > happen if there is no context, or if you specify a file that's
+    > elsewhere on the host system. The context is limited to the current
+    > directory (and its children) for security reasons, and to ensure
+    > repeatable builds on remote Docker hosts. This is also the reason why
+    > `ADD ../file` doesn't work.
+
+    ### Use a custom parent cgroup (--cgroup-parent) {#cgroup-parent}
+
+    When you run `docker build` with the `--cgroup-parent` option, the daemon runs the containers
+    used in the build with the [corresponding `docker run` flag](../run.md#specify-custom-cgroups).
+
+    ### Set ulimits in container (--ulimit) {#ulimit}
+
+    Using the `--ulimit` option with `docker build` causes the daemon to start each build step's
+    container using those [`--ulimit` flag values](run.md#ulimit).
+
+    ### Set build-time variables (--build-arg) {#build-arg}
+
+    You can use `ENV` instructions in a Dockerfile to define variable values. These
+    values persist in the built image. Often persistence isn't what you want. Users
+    want to specify variables differently depending on which host they build an
+    image on.
+
+    A good example is `http_proxy` or source versions for pulling intermediate
+    files. The `ARG` instruction lets Dockerfile authors define values that users
+    can set at build-time using the `--build-arg` flag:
+
+    ```console
+    $ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 --build-arg FTP_PROXY=http://40.50.60.5:4567 .
+    ```
+
+    This flag allows you to pass the build-time variables that are
+    accessed like regular environment variables in the `RUN` instruction of the
+    Dockerfile. These values don't persist in the intermediate or final images
+    like `ENV` values do. You must add `--build-arg` for each build argument.
+
+    Using this flag doesn't alter the output you see when the build process echoes the `ARG` lines from the
+    Dockerfile.
+
+    For detailed information on using `ARG` and `ENV` instructions, see the
+    [Dockerfile reference](/engine/reference/builder/).
+
+    You can also use the `--build-arg` flag without a value, in which case the daemon
+    propagates the value from the local environment into the Docker container it's building:
+
+    ```console
+    $ export HTTP_PROXY=http://10.20.30.2:1234
+    $ docker build --build-arg HTTP_PROXY .
+    ```
+
+    This example is similar to how `docker run -e` works. Refer to the [`docker run` documentation](run.md#env)
+    for more information.
+
+    ### Optional security options (--security-opt) {#security-opt}
+
+    This flag is only supported on a daemon running on Windows, and only supports
+    the `credentialspec` option. The `credentialspec` must be in the format
+    `file://spec.txt` or `registry://keyname`.
+
+    ### Specify isolation technology for container (--isolation) {#isolation}
+
+    This option is useful in situations where you are running Docker containers on
+    Windows. The `--isolation=<value>` option sets a container's isolation
+    technology. On Linux, the only supported option is `default`, which uses
+    Linux namespaces. On Microsoft Windows, you can specify these values:
+
+    | Value     | Description                                                  |
+    |-----------|--------------------------------------------------------------|
+    | `default` | Use the value specified by the Docker daemon's `--exec-opt`. 
If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | + | `process` | Namespace isolation only. | + | `hyperv` | Hyper-V hypervisor partition-based isolation. | + + Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + + ### Add entries to container hosts file (--add-host) {#add-host} + + You can add other hosts into a build container's `/etc/hosts` file by using one + or more `--add-host` flags. This example adds static addresses for hosts named + `my-hostname` and `my_hostname_v6`: + + ```console + $ docker build --add-host my_hostname=8.8.8.8 --add-host my_hostname_v6=2001:4860:4860::8888 . + ``` + + If you need your build to connect to services running on the host, you can use + the special `host-gateway` value for `--add-host`. In the following example, + build containers resolve `host.docker.internal` to the host's gateway IP. + + ```console + $ docker build --add-host host.docker.internal=host-gateway . + ``` + + You can wrap an IPv6 address in square brackets. + `=` and `:` are both valid separators. + Both formats in the following example are valid: + + ```console + $ docker build --add-host my-hostname:10.180.0.1 --add-host my-hostname_v6=[2001:4860:4860::8888] . + ``` + + ### Specifying target build stage (--target) {#target} + + When building a Dockerfile with multiple build stages, you can use the `--target` + option to specify an intermediate build stage by name as a final stage for the + resulting image. The daemon skips commands after the target stage. + + ```dockerfile + FROM debian AS build-env + # ... + + FROM alpine AS production-env + # ... + ``` + + ```console + $ docker build -t mybuildimage --target build-env . + ``` + + ### Custom build outputs (--output) {#output} + + > **Note** + > + > This feature requires the BuildKit backend. You can either + > [enable BuildKit](/build/buildkit/#getting-started) or + > use the [buildx](https://github.com/docker/buildx) plugin which provides more + > output type options. + + By default, a local container image is created from the build result. The + `--output` (or `-o`) flag allows you to override this behavior, and specify a + custom exporter. Custom exporters allow you to export the build + artifacts as files on the local filesystem instead of a Docker image, which can + be useful for generating local binaries, code generation etc. + + The value for `--output` is a CSV-formatted string defining the exporter type + and options that supports `local` and `tar` exporters. + + The `local` exporter writes the resulting build files to a directory on the client side. The + `tar` exporter is similar but writes the files as a single tarball (`.tar`). + + If you specify no type, the value defaults to the output directory of the local + exporter. Use a hyphen (`-`) to write the output tarball to standard output + (`STDOUT`). + + The following example builds an image using the current directory (`.`) as a build + context, and exports the files to a directory named `out` in the current directory. + If the directory does not exist, Docker creates the directory automatically: + + ```console + $ docker build -o out . + ``` + + The example above uses the short-hand syntax, omitting the `type` options, and + thus uses the default (`local`) exporter. The example below shows the equivalent + using the long-hand CSV syntax, specifying both `type` and `dest` (destination + path): + + ```console + $ docker build --output type=local,dest=out . 
+ ``` + + Use the `tar` type to export the files as a `.tar` archive: + + ```console + $ docker build --output type=tar,dest=out.tar . + ``` + + The example below shows the equivalent when using the short-hand syntax. In this + case, `-` is specified as destination, which automatically selects the `tar` type, + and writes the output tarball to standard output, which is then redirected to + the `out.tar` file: + + ```console + $ docker build -o - . > out.tar + ``` + + The `--output` option exports all files from the target stage. A common pattern + for exporting only specific files is to do multi-stage builds and to copy the + desired files to a new scratch stage with [`COPY --from`](/engine/reference/builder/#copy). + + The example, the `Dockerfile` below uses a separate stage to collect the + build artifacts for exporting: + + ```dockerfile + FROM golang AS build-stage + RUN go get -u github.com/LK4D4/vndr + + FROM scratch AS export-stage + COPY --from=build-stage /go/bin/vndr / + ``` + + When building the Dockerfile with the `-o` option, the command only exports the files from the final + stage to the `out` directory, in this case, the `vndr` binary: + + ```console + $ docker build -o out . + + [+] Building 2.3s (7/7) FINISHED + => [internal] load build definition from Dockerfile 0.1s + => => transferring dockerfile: 176B 0.0s + => [internal] load .dockerignore 0.0s + => => transferring context: 2B 0.0s + => [internal] load metadata for docker.io/library/golang:latest 1.6s + => [build-stage 1/2] FROM docker.io/library/golang@sha256:2df96417dca0561bf1027742dcc5b446a18957cd28eba6aa79269f23f1846d3f 0.0s + => => resolve docker.io/library/golang@sha256:2df96417dca0561bf1027742dcc5b446a18957cd28eba6aa79269f23f1846d3f 0.0s + => CACHED [build-stage 2/2] RUN go get -u github.com/LK4D4/vndr 0.0s + => [export-stage 1/1] COPY --from=build-stage /go/bin/vndr / 0.2s + => exporting to client 0.4s + => => copying files 10.30MB 0.3s + + $ ls ./out + vndr + ``` + + ### Specifying external cache sources (--cache-from) {#cache-from} + + > **Note** + > + > This feature requires the BuildKit backend. You can either + > [enable BuildKit](/build/buildkit/#getting-started) or + > use the [buildx](https://github.com/docker/buildx) plugin. The previous + > builder has limited support for reusing cache from pre-pulled images. + + In addition to local build cache, the builder can reuse the cache generated from + previous builds with the `--cache-from` flag pointing to an image in the registry. + + To use an image as a cache source, cache metadata needs to be written into the + image on creation. You can do this by setting `--build-arg BUILDKIT_INLINE_CACHE=1` + when building the image. After that, you can use the built image as a cache source + for subsequent builds. + + Upon importing the cache, the builder only pulls the JSON metadata from the + registry and determine possible cache hits based on that information. If there + is a cache hit, the builder pulls the matched layers into the local environment. + + In addition to images, the cache can also be pulled from special cache manifests + generated by [`buildx`](https://github.com/docker/buildx) or the BuildKit CLI + (`buildctl`). These manifests (when built with the `type=registry` and `mode=max` + options) allow pulling layer data for intermediate stages in multi-stage builds. 
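+
+    As an illustration, the `buildx` plugin can both write and consume such a
+    cache manifest; the registry reference used here is only a placeholder:
+
+    ```console
+    $ docker buildx build \
+      --cache-to type=registry,ref=registry.example.com/myapp:buildcache,mode=max \
+      --cache-from type=registry,ref=registry.example.com/myapp:buildcache \
+      -t myapp .
+    ```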
+ + The following example builds an image with inline-cache metadata and pushes it + to a registry, then uses the image as a cache source on another machine: + + ```console + $ docker build -t myname/myapp --build-arg BUILDKIT_INLINE_CACHE=1 . + $ docker push myname/myapp + ``` + + After pushing the image, the image is used as cache source on another machine. + BuildKit automatically pulls the image from the registry if needed. + + On another machine: + + ```console + $ docker build --cache-from myname/myapp . + ``` + + ### Set the networking mode for the RUN instructions during build (--network) {#network} + + #### Overview + + Available options for the networking mode are: + + - `default` (default): Run in the default network. + - `none`: Run with no network access. + - `host`: Run in the host’s network environment. + + Find more details in the [Dockerfile documentation](/engine/reference/builder/#run---network). + + ### Squash an image's layers (--squash) (experimental) {#squash} + + #### Overview + + > **Note** + > The `--squash` option is an experimental feature, and should not be considered + > stable. + + Once the image is built, this flag squashes the new layers into a new image with + a single new layer. Squashing doesn't destroy any existing image, rather it + creates a new image with the content of the squashed layers. This effectively + makes it look like all `Dockerfile` commands were created with a single layer. + The `--squash` flag preserves the build cache. + + Squashing layers can be beneficial if your Dockerfile produces multiple layers + modifying the same files. For example, files created in one step and + removed in another step. For other use-cases, squashing images may actually have + a negative impact on performance. When pulling an image consisting of multiple + layers, the daemon can pull layers in parallel and allows sharing layers between + images (saving space). + + For most use cases, multi-stage builds are a better alternative, as they give more + fine-grained control over your build, and can take advantage of future + optimizations in the builder. Refer to the [Multi-stage builds](/build/building/multi-stage/) + section for more information. + + #### Known limitations + + The `--squash` option has a number of known limitations: + + - When squashing layers, the resulting image can't take advantage of layer + sharing with other images, and may use significantly more space. Sharing the + base image is still supported. + - When using this option you may see significantly more space used due to + storing two copies of the image, one for the build cache with all the cache + layers intact, and one for the squashed version. + - While squashing layers may produce smaller images, it may have a negative + impact on performance, as a single layer takes longer to extract, and + you can't parallelize downloading a single layer. + - When attempting to squash an image that doesn't make changes to the + filesystem (for example, the Dockerfile only contains `ENV` instructions), + the squash step will fail (see [issue #33823](https://github.com/moby/moby/issues/33823)). + + #### Prerequisites + + The example on this page is using experimental mode in Docker 23.03. + + You can enable experimental mode by using the `--experimental` flag when starting + the Docker daemon or setting `experimental: true` in the `daemon.json` configuration + file. + + By default, experimental mode is disabled. 
To see the current configuration of
+    the Docker daemon, use the `docker version` command and check the `Experimental`
+    line in the `Engine` section:
+
+    ```console
+    Client: Docker Engine - Community
+     Version:           23.0.3
+     API version:       1.42
+     Go version:        go1.19.7
+     Git commit:        3e7cbfd
+     Built:             Tue Apr  4 22:05:41 2023
+     OS/Arch:           darwin/amd64
+     Context:           default
+
+    Server: Docker Engine - Community
+     Engine:
+      Version:          23.0.3
+      API version:      1.42 (minimum version 1.12)
+      Go version:       go1.19.7
+      Git commit:       59118bf
+      Built:            Tue Apr  4 22:05:41 2023
+      OS/Arch:          linux/amd64
+      Experimental:     true
+     [...]
+    ```
+
+    #### Build an image with the `--squash` flag
+
+    The following is an example of a build with the `--squash` flag. Below is the
+    `Dockerfile`:
+
+    ```dockerfile
+    FROM busybox
+    RUN echo hello > /hello
+    RUN echo world >> /hello
+    RUN touch remove_me /remove_me
+    ENV HELLO=world
+    RUN rm /remove_me
+    ```
+
+    Next, build an image named `test` using the `--squash` flag.
+
+    ```console
+    $ docker build --squash -t test .
+    ```
+
+    After the build completes, the history looks like the output below. The history
+    shows `<missing>` for the squashed layers' names, and there is a new layer with COMMENT `merge`.
+
+    ```console
+    $ docker history test
+
+    IMAGE               CREATED             CREATED BY                                      SIZE        COMMENT
+    4e10cb5b4cac        3 seconds ago                                                       12 B        merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb
+    <missing>           5 minutes ago       /bin/sh -c rm /remove_me                        0 B
+    <missing>           5 minutes ago       /bin/sh -c #(nop) ENV HELLO=world               0 B
+    <missing>           5 minutes ago       /bin/sh -c touch remove_me /remove_me           0 B
+    <missing>           5 minutes ago       /bin/sh -c echo world >> /hello                 0 B
+    <missing>           6 minutes ago       /bin/sh -c echo hello > /hello                  0 B
+    <missing>           7 weeks ago         /bin/sh -c #(nop) CMD ["sh"]                    0 B
+    <missing>           7 weeks ago         /bin/sh -c #(nop) ADD file:47ca6e777c36a4cfff   1.113 MB
+    ```
+
+    Test the image: check that `/remove_me` is gone, that `/hello` contains
+    `hello\nworld`, and that the `HELLO` environment variable's value is `world`.
 deprecated: false
 experimental: false
 experimentalcli: false
diff --git a/data/engine-cli/docker_image_history.yaml b/data/engine-cli/docker_image_history.yaml
index 8296b3a78..f5df90f11 100644
--- a/data/engine-cli/docker_image_history.yaml
+++ b/data/engine-cli/docker_image_history.yaml
@@ -1,7 +1,7 @@
 command: docker image history
 aliases: docker image history, docker history
 short: Show the history of an image
-long: See [docker history](history.md) for more information.
+long: Show the history of an image
 usage: docker image history [OPTIONS] IMAGE
 pname: docker image
 plink: docker_image.yaml
@@ -15,6 +15,7 @@ options:
         'json': Print in JSON format
         'TEMPLATE': Print output using the given Go template. 
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + details_url: '#format' deprecated: false hidden: false experimental: false @@ -64,6 +65,62 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + To see how the `docker:latest` image was built: + + ```console + $ docker history docker + + IMAGE CREATED CREATED BY SIZE COMMENT + 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B + 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB + be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB + 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB + 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi : 4 weeks ago + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_import.yaml b/data/engine-cli/docker_image_import.yaml index e394a81ac..bd8b7e463 100644 --- a/data/engine-cli/docker_image_import.yaml +++ b/data/engine-cli/docker_image_import.yaml @@ -1,7 +1,18 @@ command: docker image import aliases: docker image import, docker import short: Import the contents from a tarball to create a filesystem image -long: See [docker import](import.md) for more information. +long: |- + You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The + `URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) + containing a filesystem or to an individual file on the Docker host. If you + specify an archive, Docker untars it in the container relative to the `/` + (root). If you specify an individual file, you must specify the full path within + the host. To import from a remote location, specify a `URI` that begins with the + `http://` or `https://` protocol. + + The `--change` option applies `Dockerfile` instructions to the image that is + created. Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` usage: docker image import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] pname: docker image plink: docker_image.yaml @@ -47,6 +58,51 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Import from a remote location + + This creates a new untagged image. + + ```console + $ docker import https://example.com/exampleimage.tgz + ``` + + ### Import from a local file + + Import to docker via pipe and `STDIN`. + + ```console + $ cat exampleimage.tgz | docker import - exampleimagelocal:new + ``` + + Import with a commit message. + + ```console + $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + ``` + + Import to docker from a local archive. + + ```console + $ docker import /path/to/exampleimage.tgz + ``` + + ### Import from a local directory + + ```console + $ sudo tar -c . | docker import - exampleimagedir + ``` + + ### Import from a local directory with new configurations + + ```console + $ sudo tar -c . | docker import --change "ENV DEBUG=true" - exampleimagedir + ``` + + Note the `sudo` in this example – you must preserve + the ownership of the files (especially root ownership) during the + archiving with tar. If you are not root (or the sudo command) when you + tar, then the ownerships might not get preserved. 
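+
+    You can repeat `--change` to apply several instructions at once; the values
+    below are only illustrative:
+
+    ```console
+    $ sudo tar -c . | docker import --change "ENV DEBUG=true" --change 'CMD ["/bin/sh"]' - exampleimagedir
+    ```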
deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_load.yaml b/data/engine-cli/docker_image_load.yaml index a279c8d61..cbf39a300 100644 --- a/data/engine-cli/docker_image_load.yaml +++ b/data/engine-cli/docker_image_load.yaml @@ -1,7 +1,9 @@ command: docker image load aliases: docker image load, docker load short: Load an image from a tar archive or STDIN -long: See [docker load](load.md) for more information. +long: |- + Load an image or repository from a tar archive (even if compressed with gzip, + bzip2, xz or zstd) from a file or STDIN. It restores both images and tags. usage: docker image load [OPTIONS] pname: docker image plink: docker_image.yaml @@ -10,6 +12,7 @@ options: shorthand: i value_type: string description: Read from tar archive file, instead of STDIN + details_url: '#input' deprecated: false hidden: false experimental: false @@ -38,6 +41,41 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ```console + $ docker image ls + + REPOSITORY TAG IMAGE ID CREATED SIZE + ``` + + ### Load images from STDIN + + ```console + $ docker load < busybox.tar.gz + + Loaded image: busybox:latest + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + ``` + + ### Load images from a file (--input) {#input} + + ```console + $ docker load --input fedora.tar + + Loaded image: fedora:rawhide + Loaded image: fedora:20 + + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_ls.yaml b/data/engine-cli/docker_image_ls.yaml index d9abc7268..abeb968f2 100644 --- a/data/engine-cli/docker_image_ls.yaml +++ b/data/engine-cli/docker_image_ls.yaml @@ -1,7 +1,22 @@ command: docker image ls aliases: docker image ls, docker image list, docker images short: List images -long: See [docker images](images.md) for more information. +long: |- + The default `docker images` will show all top level + images, their repository and tags, and their size. + + Docker images have intermediate layers that increase reusability, + decrease disk usage, and speed up `docker build` by + allowing each step to be cached. These intermediate layers are not shown + by default. + + The `SIZE` is the cumulative space taken up by the image and all + its parent images. This is also the disk space used by the contents of the + Tar file created when you `docker save` an image. + + An image will be listed more than once if it has multiple repository names + or tags. This single image (identifiable by its matching `IMAGE ID`) + uses up the `SIZE` listed only once. usage: docker image ls [OPTIONS] [REPOSITORY[:TAG]] pname: docker image plink: docker_image.yaml @@ -21,6 +36,7 @@ options: value_type: bool default_value: "false" description: Show digests + details_url: '#digests' deprecated: false hidden: false experimental: false @@ -31,6 +47,7 @@ options: shorthand: f value_type: filter description: Filter output based on conditions provided + details_url: '#filter' deprecated: false hidden: false experimental: false @@ -46,6 +63,7 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. 
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + details_url: '#format' deprecated: false hidden: false experimental: false @@ -56,6 +74,7 @@ options: value_type: bool default_value: "false" description: Don't truncate output + details_url: '#no-trunc' deprecated: false hidden: false experimental: false @@ -84,6 +103,310 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### List the most recently created images + + ```console + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + 77af4d6b9913 19 hours ago 1.089 GB + committ latest b6fa739cedf5 19 hours ago 1.089 GB + 78a85c484f71 19 hours ago 1.089 GB + docker latest 30557a29d5ab 20 hours ago 1.089 GB + 5ed6274db6ce 24 hours ago 1.089 GB + postgres 9 746b819f315e 4 days ago 213.4 MB + postgres 9.3 746b819f315e 4 days ago 213.4 MB + postgres 9.3.5 746b819f315e 4 days ago 213.4 MB + postgres latest 746b819f315e 4 days ago 213.4 MB + ``` + + ### List images by name and tag + + The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument + that restricts the list to images that match the argument. If you specify + `REPOSITORY`but no `TAG`, the `docker images` command lists all images in the + given repository. + + For example, to list all images in the `java` repository, run the following command: + + ```console + $ docker images java + + REPOSITORY TAG IMAGE ID CREATED SIZE + java 8 308e519aac60 6 days ago 824.5 MB + java 7 493d82594c15 3 months ago 656.3 MB + java latest 2711b1d6f3aa 5 months ago 603.9 MB + ``` + + The `[REPOSITORY[:TAG]]` value must be an exact match. This means that, for example, + `docker images jav` does not match the image `java`. + + If both `REPOSITORY` and `TAG` are provided, only images matching that + repository and tag are listed. To find all local images in the `java` + repository with tag `8` you can use: + + ```console + $ docker images java:8 + + REPOSITORY TAG IMAGE ID CREATED SIZE + java 8 308e519aac60 6 days ago 824.5 MB + ``` + + If nothing matches `REPOSITORY[:TAG]`, the list is empty. + + ```console + $ docker images java:0 + + REPOSITORY TAG IMAGE ID CREATED SIZE + ``` + + ### List the full length image IDs (--no-trunc) {#no-trunc} + + ```console + $ docker images --no-trunc + + REPOSITORY TAG IMAGE ID CREATED SIZE + sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB + committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB + sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB + docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB + sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB + sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB + sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB + tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB + sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB + ``` + + ### List image digests (--digests) {#digests} + + Images that use the v2 or later format have a content-addressable identifier + called a `digest`. As long as the input used to generate the image is + unchanged, the digest value is predictable. 
To list image digest values, use
+    the `--digests` flag:
+
+    ```console
+    $ docker images --digests
+
+    REPOSITORY                     TAG       DIGEST                                                                    IMAGE ID       CREATED       SIZE
+    localhost:5000/test/busybox    <none>    sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536   9 weeks ago   2.43 MB
+    ```
+
+    When pushing or pulling to a 2.0 registry, the `push` or `pull` command
+    output includes the image digest. You can `pull` using a digest value. You can
+    also reference by digest in `create`, `run`, and `rmi` commands, as well as the
+    `FROM` image reference in a Dockerfile.
+
+    ### Filtering (--filter) {#filter}
+
+    The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
+    than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+    The currently supported filters are:
+
+    * dangling (boolean - true or false)
+    * label (`label=<key>` or `label=<key>=<value>`)
+    * before (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created before given id or references
+    * since (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created since given id or references
+    * reference (pattern of an image reference) - filter images whose reference matches the specified pattern
+
+    #### Show untagged images (dangling)
+
+    ```console
+    $ docker images --filter "dangling=true"
+
+    REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+    <none>              <none>              8abc22fbb042        4 weeks ago         0 B
+    <none>              <none>              48e5f45168b9        4 weeks ago         2.489 MB
+    <none>              <none>              bf747efa0e2f        4 weeks ago         0 B
+    <none>              <none>              980fe10e5736        12 weeks ago        101.4 MB
+    <none>              <none>              dea752e4e117        12 weeks ago        101.4 MB
+    <none>              <none>              511136ea3c5a        8 months ago        0 B
+    ```
+
+    This will display untagged images that are the leaves of the images tree (not
+    intermediary layers). These images occur when a new build of an image takes the
+    `repo:tag` away from the image ID, leaving it as `<none>:<none>` or untagged.
+    A warning is issued if you try to remove an image while a container is currently
+    using it. This flag is useful for batch cleanup.
+
+    You can use this in conjunction with `docker rmi`:
+
+    ```console
+    $ docker rmi $(docker images -f "dangling=true" -q)
+
+    8abc22fbb042
+    48e5f45168b9
+    bf747efa0e2f
+    980fe10e5736
+    dea752e4e117
+    511136ea3c5a
+    ```
+
+    Docker warns you if any containers exist that are using these untagged images.
+
+    #### Show images with a given label
+
+    The `label` filter matches images based on the presence of a `label` alone or a `label` and a
+    value.
+
+    The following filter matches images with the `com.example.version` label regardless of its value.
+
+    ```console
+    $ docker images --filter "label=com.example.version"
+
+    REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+    match-me-1          latest              eeae25ada2aa        About a minute ago   188.3 MB
+    match-me-2          latest              dea752e4e117        About a minute ago   188.3 MB
+    ```
+
+    The following filter matches images with the `com.example.version` label with the `1.0` value.
+
+    ```console
+    $ docker images --filter "label=com.example.version=1.0"
+
+    REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+    match-me            latest              511136ea3c5a        About a minute ago   188.3 MB
+    ```
+
+    In this example, with the `0.1` value, it returns an empty set because no matches were found.
+
+    ```console
+    $ docker images --filter "label=com.example.version=0.1"
+    REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+    ```
+
+    #### Filter images by time
+
+    The `before` filter shows only images created before the image with
+    a given ID or reference. 
For example, having these images: + + ```console + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + ``` + + Filtering with `before` would give: + + ```console + $ docker images --filter "before=image1" + + REPOSITORY TAG IMAGE ID CREATED SIZE + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + ``` + + Filtering with `since` would give: + + ```console + $ docker images --filter "since=image3" + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + ``` + + #### Filter images by reference + + The `reference` filter shows only images whose reference matches + the specified pattern. + + ```console + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest e02e811dd08f 5 weeks ago 1.09 MB + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox musl 733eb3059dce 5 weeks ago 1.21 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB + ``` + + Filtering with `reference` would give: + + ```console + $ docker images --filter=reference='busy*:*libc' + + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB + ``` + + Filtering with multiple `reference` would give, either match A or B: + + ```console + $ docker images --filter=reference='busy*:uclibc' --filter=reference='busy*:glibc' + + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB + ``` + + ### Format the output (--format) {#format} + + The formatting option (`--format`) will pretty print container output + using a Go template. + + Valid placeholders for the Go template are listed below: + + | Placeholder | Description | + |-----------------|------------------------------------------| + | `.ID` | Image ID | + | `.Repository` | Image repository | + | `.Tag` | Image tag | + | `.Digest` | Image digest | + | `.CreatedSince` | Elapsed time since the image was created | + | `.CreatedAt` | Time when the image was created | + | `.Size` | Image disk size | + + When using the `--format` option, the `image` command will either + output the data exactly as the template declares or, when using the + `table` directive, will include column headers as well. 
+ + The following example uses a template without headers and outputs the + `ID` and `Repository` entries separated by a colon (`:`) for all images: + + ```console + $ docker images --format "{{.ID}}: {{.Repository}}" + + 77af4d6b9913: + b6fa739cedf5: committ + 78a85c484f71: + 30557a29d5ab: docker + 5ed6274db6ce: + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + ``` + + To list all images with their repository and tag in a table format you + can use: + + ```console + $ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + + IMAGE ID REPOSITORY TAG + 77af4d6b9913 + b6fa739cedf5 committ latest + 78a85c484f71 + 30557a29d5ab docker latest + 5ed6274db6ce + 746b819f315e postgres 9 + 746b819f315e postgres 9.3 + 746b819f315e postgres 9.3.5 + 746b819f315e postgres latest + ``` + + To list all images in JSON format, use the `json` directive: + + ```console + $ docker images --format json + {"Containers":"N/A","CreatedAt":"2021-03-04 03:24:42 +0100 CET","CreatedSince":"5 days ago","Digest":"\u003cnone\u003e","ID":"4dd97cefde62","Repository":"ubuntu","SharedSize":"N/A","Size":"72.9MB","Tag":"latest","UniqueSize":"N/A","VirtualSize":"72.9MB"} + {"Containers":"N/A","CreatedAt":"2021-02-17 22:19:54 +0100 CET","CreatedSince":"2 weeks ago","Digest":"\u003cnone\u003e","ID":"28f6e2705743","Repository":"alpine","SharedSize":"N/A","Size":"5.61MB","Tag":"latest","UniqueSize":"N/A","VirtualSize":"5.613MB"} + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_prune.yaml b/data/engine-cli/docker_image_prune.yaml index 536c61015..4f78217b6 100644 --- a/data/engine-cli/docker_image_prune.yaml +++ b/data/engine-cli/docker_image_prune.yaml @@ -1,7 +1,7 @@ command: docker image prune short: Remove unused images long: | - Remove all dangling images. If `-a` is specified, will also remove all images not referenced by any container. + Remove all dangling images. If `-a` is specified, also remove all images not referenced by any container. usage: docker image prune [OPTIONS] pname: docker image plink: docker_image.yaml @@ -119,10 +119,10 @@ examples: |- > same filtering syntax to see which images match your filter. > > However, if you are using negative filtering (testing for the absence of a - > label or that a label does *not* have a specific value), this type of filter - > does not work with `docker image ls` so you cannot easily predict which images + > label or that a label doesn't have a specific value), this type of filter + > doesn't work with `docker image ls` so you cannot easily predict which images > will be removed. In addition, the confirmation prompt for `docker image prune` - > always warns that *all* dangling images will be removed, even if you are using + > always warns that all dangling images will be removed, even if you are using > `--filter`. The following removes images created before `2017-01-04T00:00:00`: @@ -220,7 +220,7 @@ examples: |- > > You are prompted for confirmation before the `prune` removes > anything, but you are not shown a list of what will potentially be removed. - > In addition, `docker image ls` does not support negative filtering, so it + > In addition, `docker image ls` doesn't support negative filtering, so it > difficult to predict what images will actually be removed. 
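+
+    For example, a prune that relies on a negative label filter could look like
+    the following (the `keep` label is only an illustration):
+
+    ```console
+    $ docker image prune --all --filter "label!=keep"
+    ```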
deprecated: false min_api_version: "1.25" diff --git a/data/engine-cli/docker_image_pull.yaml b/data/engine-cli/docker_image_pull.yaml index a76ef5129..8640edf4b 100644 --- a/data/engine-cli/docker_image_pull.yaml +++ b/data/engine-cli/docker_image_pull.yaml @@ -1,7 +1,29 @@ command: docker image pull aliases: docker image pull, docker pull short: Download an image from a registry -long: See [docker pull](pull.md) for more information. +long: |- + Most of your images will be created on top of a base image from the + [Docker Hub](https://hub.docker.com) registry. + + [Docker Hub](https://hub.docker.com) contains many pre-built images that you + can `pull` and try without needing to define and configure your own. + + To download a particular image, or set of images (i.e., a repository), + use `docker pull`. + + ### Proxy configuration + + If you are behind an HTTP proxy server, for example in corporate settings, + before open a connect to registry, you may need to configure the Docker + daemon's proxy settings, refer to the [dockerd command-line reference](dockerd.md#proxy-configuration) + for details. + + ### Concurrent downloads + + By default the Docker daemon will pull three layers of an image at a time. + If you are on a low bandwidth connection this may cause timeout issues and you may want to lower + this via the `--max-concurrent-downloads` daemon option. See the + [daemon documentation](dockerd.md) for more details. usage: docker image pull [OPTIONS] NAME[:TAG|@DIGEST] pname: docker image plink: docker_image.yaml @@ -11,6 +33,7 @@ options: value_type: bool default_value: "false" description: Download all tagged images in the repository + details_url: '#all-tags' deprecated: false hidden: false experimental: false @@ -59,6 +82,203 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Pull an image from Docker Hub + + To download a particular image, or set of images (i.e., a repository), use + `docker image pull` (or the `docker pull` shorthand). If no tag is provided, + Docker Engine uses the `:latest` tag as a default. This example pulls the + `debian:latest` image: + + ```console + $ docker image pull debian + + Using default tag: latest + latest: Pulling from library/debian + e756f3fdd6a3: Pull complete + Digest: sha256:3f1d6c17773a45c97bd8f158d665c9709d7b29ed7917ac934086ad96f92e4510 + Status: Downloaded newer image for debian:latest + docker.io/library/debian:latest + ``` + + Docker images can consist of multiple layers. In the example above, the image + consists of a single layer; `e756f3fdd6a3`. + + Layers can be reused by images. For example, the `debian:bookworm` image shares + its layer with the `debian:latest`. Pulling the `debian:bookworm` image therefore + only pulls its metadata, but not its layers, because the layer is already present + locally: + + ```console + $ docker image pull debian:bookworm + + bookworm: Pulling from library/debian + Digest: sha256:3f1d6c17773a45c97bd8f158d665c9709d7b29ed7917ac934086ad96f92e4510 + Status: Downloaded newer image for debian:bookworm + docker.io/library/debian:bookworm + ``` + + To see which images are present locally, use the [`docker images`](image_ls.md) + command: + + ```console + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + debian bookworm 4eacea30377a 8 days ago 124MB + debian latest 4eacea30377a 8 days ago 124MB + ``` + + Docker uses a content-addressable image store, and the image ID is a SHA256 + digest covering the image's configuration and layers. 
In the example above, + `debian:bookworm` and `debian:latest` have the same image ID because they are + the same image tagged with different names. Because they are the same image, + their layers are stored only once and do not consume extra disk space. + + For more information about images, layers, and the content-addressable store, + refer to [understand images, containers, and storage drivers](/storage/storagedriver/). + + + ### Pull an image by digest (immutable identifier) + + So far, you've pulled images by their name (and "tag"). Using names and tags is + a convenient way to work with images. When using tags, you can `docker pull` an + image again to make sure you have the most up-to-date version of that image. + For example, `docker pull ubuntu:22.04` pulls the latest version of the Ubuntu + 22.04 image. + + In some cases you don't want images to be updated to newer versions, but prefer + to use a fixed version of an image. Docker enables you to pull an image by its + digest. When pulling an image by digest, you specify exactly which version + of an image to pull. Doing so, allows you to "pin" an image to that version, + and guarantee that the image you're using is always the same. + + To know the digest of an image, pull the image first. Let's pull the latest + `ubuntu:22.04` image from Docker Hub: + + ```console + $ docker pull ubuntu:22.04 + + 22.04: Pulling from library/ubuntu + 125a6e411906: Pull complete + Digest: sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d + Status: Downloaded newer image for ubuntu:22.04 + docker.io/library/ubuntu:22.04 + ``` + + Docker prints the digest of the image after the pull has finished. In the example + above, the digest of the image is: + + ```console + sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d + ``` + + Docker also prints the digest of an image when pushing to a registry. This + may be useful if you want to pin to a version of the image you just pushed. + + A digest takes the place of the tag when pulling an image, for example, to + pull the above image by digest, run the following command: + + ```console + $ docker pull ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d + + docker.io/library/ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d: Pulling from library/ubuntu + Digest: sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d + Status: Image is up to date for ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d + docker.io/library/ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d + ``` + + Digest can also be used in the `FROM` of a Dockerfile, for example: + + ```dockerfile + FROM ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d + LABEL org.opencontainers.image.authors="some maintainer " + ``` + + > **Note** + > + > Using this feature "pins" an image to a specific version in time. + > Docker does therefore not pull updated versions of an image, which may include + > security updates. If you want to pull an updated image, you need to change the + > digest accordingly. + + + ### Pull from a different registry + + By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to + manually specify the path of a registry to pull from. For example, if you have + set up a local registry, you can specify its path to pull from it. 
A registry + path is similar to a URL, but does not contain a protocol specifier (`https://`). + + The following command pulls the `testing/test-image` image from a local registry + listening on port 5000 (`myregistry.local:5000`): + + ```console + $ docker image pull myregistry.local:5000/testing/test-image + ``` + + Registry credentials are managed by [docker login](login.md). + + Docker uses the `https://` protocol to communicate with a registry, unless the + registry is allowed to be accessed over an insecure connection. Refer to the + [insecure registries](dockerd.md#insecure-registries) section for more information. + + + ### Pull a repository with multiple images (-a, --all-tags) {#all-tags} + + By default, `docker pull` pulls a single image from the registry. A repository + can contain multiple images. To pull all images from a repository, provide the + `-a` (or `--all-tags`) option when using `docker pull`. + + This command pulls all images from the `ubuntu` repository: + + ```console + $ docker image pull --all-tags ubuntu + + Pulling repository ubuntu + ad57ef8d78d7: Download complete + 105182bb5e8b: Download complete + 511136ea3c5a: Download complete + 73bd853d2ea5: Download complete + .... + + Status: Downloaded newer image for ubuntu + ``` + + After the pull has completed use the `docker image ls` command (or the `docker images` + shorthand) to see the images that were pulled. The example below shows all the + `ubuntu` images that are present locally: + + ```console + $ docker image ls --filter reference=ubuntu + REPOSITORY TAG IMAGE ID CREATED SIZE + ubuntu 18.04 c6ad7e71ba7d 5 weeks ago 63.2MB + ubuntu bionic c6ad7e71ba7d 5 weeks ago 63.2MB + ubuntu 22.04 5ccefbfc0416 2 months ago 78MB + ubuntu focal ff0fea8310f3 2 months ago 72.8MB + ubuntu latest ff0fea8310f3 2 months ago 72.8MB + ubuntu jammy 41ba606c8ab9 3 months ago 79MB + ubuntu 20.04 ba6acccedd29 7 months ago 72.8MB + ``` + + ### Cancel a pull + + Killing the `docker pull` process, for example by pressing `CTRL-c` while it is + running in a terminal, will terminate the pull operation. + + ```console + $ docker pull ubuntu + + Using default tag: latest + latest: Pulling from library/ubuntu + a3ed95caeb02: Pulling fs layer + 236608c7b546: Pulling fs layer + ^C + ``` + + The Engine terminates a pull operation when the connection between the daemon + and the client (initiating the pull) is cut or lost for any reason or the + command is manually terminated. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_push.yaml b/data/engine-cli/docker_image_push.yaml index 9408c2ec8..df2d23165 100644 --- a/data/engine-cli/docker_image_push.yaml +++ b/data/engine-cli/docker_image_push.yaml @@ -1,7 +1,28 @@ command: docker image push aliases: docker image push, docker push short: Upload an image to a registry -long: See [docker push](push.md) for more information. +long: |- + Use `docker image push` to share your images to the [Docker Hub](https://hub.docker.com) + registry or to a self-hosted one. + + Refer to the [`docker image tag`](image_tag.md) reference for more information + about valid image and tag names. + + Killing the `docker image push` process, for example by pressing `CTRL-c` while it is + running in a terminal, terminates the push operation. + + Progress bars are shown during docker push, which show the uncompressed size. + The actual amount of data that's pushed will be compressed before sending, so + the uploaded size will not be reflected by the progress bar. 
+ + Registry credentials are managed by [docker login](login.md). + + ### Concurrent uploads + + By default the Docker daemon will push five layers of an image at a time. + If you are on a low bandwidth connection this may cause timeout issues and you may want to lower + this via the `--max-concurrent-uploads` daemon option. See the + [daemon documentation](dockerd.md) for more details. usage: docker image push [OPTIONS] NAME[:TAG] pname: docker image plink: docker_image.yaml @@ -11,6 +32,7 @@ options: value_type: bool default_value: "false" description: Push all tags of an image to the repository + details_url: '#all-tags' deprecated: false hidden: false experimental: false @@ -49,6 +71,82 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Push a new image to a registry + + First save the new image by finding the container ID (using [`docker container + ls`](container_ls.md)) and then committing it to a new image name. Note that + only `a-z0-9-_.` are allowed when naming images: + + ```console + $ docker container commit c16378f943fe rhel-httpd:latest + ``` + + Now, push the image to the registry using the image ID. In this example the + registry is on host named `registry-host` and listening on port `5000`. To do + this, tag the image with the host name or IP address, and the port of the + registry: + + ```console + $ docker image tag rhel-httpd:latest registry-host:5000/myadmin/rhel-httpd:latest + + $ docker image push registry-host:5000/myadmin/rhel-httpd:latest + ``` + + Check that this worked by running: + + ```console + $ docker image ls + ``` + + You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` + listed. + + ### Push all tags of an image (-a, --all-tags) {#all-tags} + + Use the `-a` (or `--all-tags`) option to push all tags of a local image. + + The following example creates multiple tags for an image, and pushes all those + tags to Docker Hub. 
+ + + ```console + $ docker image tag myimage registry-host:5000/myname/myimage:latest + $ docker image tag myimage registry-host:5000/myname/myimage:v1.0.1 + $ docker image tag myimage registry-host:5000/myname/myimage:v1.0 + $ docker image tag myimage registry-host:5000/myname/myimage:v1 + ``` + + The image is now tagged under multiple names: + + ```console + $ docker image ls + + REPOSITORY TAG IMAGE ID CREATED SIZE + myimage latest 6d5fcfe5ff17 2 hours ago 1.22MB + registry-host:5000/myname/myimage latest 6d5fcfe5ff17 2 hours ago 1.22MB + registry-host:5000/myname/myimage v1 6d5fcfe5ff17 2 hours ago 1.22MB + registry-host:5000/myname/myimage v1.0 6d5fcfe5ff17 2 hours ago 1.22MB + registry-host:5000/myname/myimage v1.0.1 6d5fcfe5ff17 2 hours ago 1.22MB + ``` + + When pushing with the `--all-tags` option, all tags of the `registry-host:5000/myname/myimage` + image are pushed: + + + ```console + $ docker image push --all-tags registry-host:5000/myname/myimage + + The push refers to repository [registry-host:5000/myname/myimage] + 195be5f8be1d: Pushed + latest: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 + 195be5f8be1d: Layer already exists + v1: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 + 195be5f8be1d: Layer already exists + v1.0: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 + 195be5f8be1d: Layer already exists + v1.0.1: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_rm.yaml b/data/engine-cli/docker_image_rm.yaml index 26af7d8e5..6d69d8688 100644 --- a/data/engine-cli/docker_image_rm.yaml +++ b/data/engine-cli/docker_image_rm.yaml @@ -1,7 +1,15 @@ command: docker image rm aliases: docker image rm, docker image remove, docker rmi short: Remove one or more images -long: See [docker rmi](rmi.md) for more information. +long: |- + Removes (and un-tags) one or more images from the host node. If an image has + multiple tags, using this command with the tag as a parameter only removes the + tag. If the tag is the only one for the image, both the image and the tag are + removed. + + This does not remove images from a registry. You cannot remove an image of a + running container unless you use the `-f` option. To see all images on a host + use the [`docker image ls`](image_ls.md) command. usage: docker image rm [OPTIONS] IMAGE [IMAGE...] pname: docker image plink: docker_image.yaml @@ -38,6 +46,82 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + You can remove an image using its short or long ID, its tag, or its digest. If + an image has one or more tags referencing it, you must remove all of them before + the image is removed. Digest references are removed automatically when an image + is removed by tag. 
+ + ```console + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi fd484f19954f + + Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force + 2013/12/11 05:47:16 Error: failed to remove one or more images + + $ docker rmi test1:latest + + Untagged: test1:latest + + $ docker rmi test2:latest + + Untagged: test2:latest + + + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi test:latest + + Untagged: test:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + ``` + + If you use the `-f` flag and specify the image's short or long ID, then this + command untags and removes all images that match the specified ID. + + ```console + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi -f fd484f19954f + + Untagged: test1:latest + Untagged: test:latest + Untagged: test2:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + ``` + + An image pulled by digest has no tag associated with it: + + ```console + $ docker images --digests + + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE + localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB + ``` + + To remove an image using its digest: + + ```console + $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 + Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 + Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_save.yaml b/data/engine-cli/docker_image_save.yaml index bfc498816..a702db9d7 100644 --- a/data/engine-cli/docker_image_save.yaml +++ b/data/engine-cli/docker_image_save.yaml @@ -1,7 +1,10 @@ command: docker image save aliases: docker image save, docker save short: Save one or more images to a tar archive (streamed to STDOUT by default) -long: See [docker save](save.md) for more information. +long: |- + Produces a tarred repository to the standard output stream. + Contains all parent layers, and all tags + versions, or specified `repo:tag`, for + each argument provided. usage: docker image save [OPTIONS] IMAGE [IMAGE...] pname: docker image plink: docker_image.yaml @@ -27,6 +30,42 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Create a backup that can then be used with `docker load`. 
+ + ```console + $ docker save busybox > busybox.tar + + $ ls -sh busybox.tar + + 2.7M busybox.tar + + $ docker save --output busybox.tar busybox + + $ ls -sh busybox.tar + + 2.7M busybox.tar + + $ docker save -o fedora-all.tar fedora + + $ docker save -o fedora-latest.tar fedora:latest + ``` + + ### Save an image to a tar.gz file using gzip + + You can use gzip to save the image file and make the backup smaller. + + ```console + $ docker save myimage:latest | gzip > myimage_latest.tar.gz + ``` + + ### Cherry-pick particular tags + + You can even cherry-pick particular tags of an image repository. + + ```console + $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_image_tag.yaml b/data/engine-cli/docker_image_tag.yaml index fa8091a52..3d7b55431 100644 --- a/data/engine-cli/docker_image_tag.yaml +++ b/data/engine-cli/docker_image_tag.yaml @@ -1,7 +1,40 @@ command: docker image tag aliases: docker image tag, docker tag short: Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE -long: See [docker tag](tag.md) for more information. +long: |- + A full image name has the following format and components: + + `[HOST[:PORT_NUMBER]/]PATH` + + - `HOST`: The optional registry hostname specifies where the image is located. + The hostname must comply with standard DNS rules, but may not contain + underscores. If you don't specify a hostname, the command uses Docker's public + registry at `registry-1.docker.io` by default. Note that `docker.io` is the + canonical reference for Docker's public registry. + - `PORT_NUMBER`: If a hostname is present, it may optionally be followed by a + registry port number in the format `:8080`. + - `PATH`: The path consists of slash-separated components. Each + component may contain lowercase letters, digits and separators. A separator is + defined as a period, one or two underscores, or one or more hyphens. A component + may not start or end with a separator. While the + [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec) + supports more than two slash-separated components, most registries only support + two slash-separated components. For Docker's public registry, the path format is + as follows: + - `[NAMESPACE/]REPOSITORY`: The first, optional component is typically a + user's or an organization's namespace. The second, mandatory component is the + repository name. When the namespace is not present, Docker uses `library` + as the default namespace. + + After the image name, the optional `TAG` is a custom, human-readable manifest + identifier that's typically a specific version or variant of an image. The tag + must be valid ASCII and can contain lowercase and uppercase letters, digits, + underscores, periods, and hyphens. It can't start with a period or hyphen and + must be no longer than 128 characters. If you don't specify a tag, the command uses `latest` by default. + + You can group your images together using names and tags, and then + [push](/engine/reference/commandline/push) them to a + registry. 
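+
+ For example, assuming a private registry at the illustrative address
+ `registry.example.com:5000`, the following command tags a local image
+ `myimage` using every component of the name format described above
+ (the registry host, namespace, repository, and tag shown here are
+ placeholders, not real endpoints):
+
+ ```console
+ # registry.example.com:5000, myorg, myimage, and v1.2.3 are illustrative names
+ $ docker image tag myimage registry.example.com:5000/myorg/myimage:v1.2.3
+ ```
+
+ Here `registry.example.com` is the `HOST`, `5000` is the `PORT_NUMBER`,
+ `myorg/myimage` is the `NAMESPACE/REPOSITORY` path, and `v1.2.3` is the `TAG`.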
usage: docker image tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] pname: docker image plink: docker_image.yaml @@ -16,6 +49,44 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Tag an image referenced by ID + + To tag a local image with ID `0e5574283393` as `fedora/httpd` with the tag + `version1.0`: + + ```console + $ docker tag 0e5574283393 fedora/httpd:version1.0 + ``` + + ### Tag an image referenced by Name + + To tag a local image `httpd` as `fedora/httpd` with the tag `version1.0`: + + ```console + $ docker tag httpd fedora/httpd:version1.0 + ``` + + Note that since the tag name isn't specified, the alias is created for an + existing local version `httpd:latest`. + + ### Tag an image referenced by Name and Tag + + To tag a local image with the name `httpd` and the tag `test` as `fedora/httpd` + with the tag `version1.0.test`: + + ```console + $ docker tag httpd:test fedora/httpd:version1.0.test + ``` + + ### Tag an image for a private registry + + To push an image to a private registry and not the public Docker registry you + must include the registry hostname and port (if needed). + + ```console + $ docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_images.yaml b/data/engine-cli/docker_images.yaml index ecc60e853..5d1df2a12 100644 --- a/data/engine-cli/docker_images.yaml +++ b/data/engine-cli/docker_images.yaml @@ -1,22 +1,7 @@ command: docker images aliases: docker image ls, docker image list, docker images short: List images -long: |- - The default `docker images` will show all top level - images, their repository and tags, and their size. - - Docker images have intermediate layers that increase reusability, - decrease disk usage, and speed up `docker build` by - allowing each step to be cached. These intermediate layers are not shown - by default. - - The `SIZE` is the cumulative space taken up by the image and all - its parent images. This is also the disk space used by the contents of the - Tar file created when you `docker save` an image. - - An image will be listed more than once if it has multiple repository names - or tags. This single image (identifiable by its matching `IMAGE ID`) - uses up the `SIZE` listed only once. +long: List images usage: docker images [OPTIONS] [REPOSITORY[:TAG]] pname: docker plink: docker.yaml @@ -36,7 +21,6 @@ options: value_type: bool default_value: "false" description: Show digests - details_url: '#digests' deprecated: false hidden: false experimental: false @@ -47,7 +31,6 @@ options: shorthand: f value_type: filter description: Filter output based on conditions provided - details_url: '#filter' deprecated: false hidden: false experimental: false @@ -63,7 +46,6 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. 
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates - details_url: '#format' deprecated: false hidden: false experimental: false @@ -74,7 +56,6 @@ options: value_type: bool default_value: "false" description: Don't truncate output - details_url: '#no-trunc' deprecated: false hidden: false experimental: false @@ -103,310 +84,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### List the most recently created images - - ```console - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - 77af4d6b9913 19 hours ago 1.089 GB - committ latest b6fa739cedf5 19 hours ago 1.089 GB - 78a85c484f71 19 hours ago 1.089 GB - docker latest 30557a29d5ab 20 hours ago 1.089 GB - 5ed6274db6ce 24 hours ago 1.089 GB - postgres 9 746b819f315e 4 days ago 213.4 MB - postgres 9.3 746b819f315e 4 days ago 213.4 MB - postgres 9.3.5 746b819f315e 4 days ago 213.4 MB - postgres latest 746b819f315e 4 days ago 213.4 MB - ``` - - ### List images by name and tag - - The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument - that restricts the list to images that match the argument. If you specify - `REPOSITORY`but no `TAG`, the `docker images` command lists all images in the - given repository. - - For example, to list all images in the "java" repository, run this command : - - ```console - $ docker images java - - REPOSITORY TAG IMAGE ID CREATED SIZE - java 8 308e519aac60 6 days ago 824.5 MB - java 7 493d82594c15 3 months ago 656.3 MB - java latest 2711b1d6f3aa 5 months ago 603.9 MB - ``` - - The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, - `docker images jav` does not match the image `java`. - - If both `REPOSITORY` and `TAG` are provided, only images matching that - repository and tag are listed. To find all local images in the "java" - repository with tag "8" you can use: - - ```console - $ docker images java:8 - - REPOSITORY TAG IMAGE ID CREATED SIZE - java 8 308e519aac60 6 days ago 824.5 MB - ``` - - If nothing matches `REPOSITORY[:TAG]`, the list is empty. - - ```console - $ docker images java:0 - - REPOSITORY TAG IMAGE ID CREATED SIZE - ``` - - ### List the full length image IDs (--no-trunc) {#no-trunc} - - ```console - $ docker images --no-trunc - - REPOSITORY TAG IMAGE ID CREATED SIZE - sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB - committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB - sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB - docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB - sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB - sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB - sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB - tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB - sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB - ``` - - ### List image digests (--digests) {#digests} - - Images that use the v2 or later format have a content-addressable identifier - called a `digest`. As long as the input used to generate the image is - unchanged, the digest value is predictable. 
To list image digest values, use - the `--digests` flag: - - ```console - $ docker images --digests - REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE - localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB - ``` - - When pushing or pulling to a 2.0 registry, the `push` or `pull` command - output includes the image digest. You can `pull` using a digest value. You can - also reference by digest in `create`, `run`, and `rmi` commands, as well as the - `FROM` image reference in a Dockerfile. - - ### Filtering (--filter) {#filter} - - The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more - than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - - The currently supported filters are: - - * dangling (boolean - true or false) - * label (`label=` or `label==`) - * before (`[:]`, `` or ``) - filter images created before given id or references - * since (`[:]`, `` or ``) - filter images created since given id or references - * reference (pattern of an image reference) - filter images whose reference matches the specified pattern - - #### Show untagged images (dangling) - - ```console - $ docker images --filter "dangling=true" - - REPOSITORY TAG IMAGE ID CREATED SIZE - 8abc22fbb042 4 weeks ago 0 B - 48e5f45168b9 4 weeks ago 2.489 MB - bf747efa0e2f 4 weeks ago 0 B - 980fe10e5736 12 weeks ago 101.4 MB - dea752e4e117 12 weeks ago 101.4 MB - 511136ea3c5a 8 months ago 0 B - ``` - - This will display untagged images that are the leaves of the images tree (not - intermediary layers). These images occur when a new build of an image takes the - `repo:tag` away from the image ID, leaving it as `:` or untagged. - A warning will be issued if trying to remove an image when a container is presently - using it. By having this flag it allows for batch cleanup. - - You can use this in conjunction with `docker rmi ...`: - - ```console - $ docker rmi $(docker images -f "dangling=true" -q) - - 8abc22fbb042 - 48e5f45168b9 - bf747efa0e2f - 980fe10e5736 - dea752e4e117 - 511136ea3c5a - ``` - - Docker warns you if any containers exist that are using these untagged images. - - - #### Show images with a given label - - The `label` filter matches images based on the presence of a `label` alone or a `label` and a - value. - - The following filter matches images with the `com.example.version` label regardless of its value. - - ```console - $ docker images --filter "label=com.example.version" - - REPOSITORY TAG IMAGE ID CREATED SIZE - match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB - match-me-2 latest dea752e4e117 About a minute ago 188.3 MB - ``` - - The following filter matches images with the `com.example.version` label with the `1.0` value. - - ```console - $ docker images --filter "label=com.example.version=1.0" - - REPOSITORY TAG IMAGE ID CREATED SIZE - match-me latest 511136ea3c5a About a minute ago 188.3 MB - ``` - - In this example, with the `0.1` value, it returns an empty set because no matches were found. - - ```console - $ docker images --filter "label=com.example.version=0.1" - REPOSITORY TAG IMAGE ID CREATED SIZE - ``` - - #### Filter images by time - - The `before` filter shows only images created before the image with - given id or reference. 
For example, having these images: - - ```console - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - image1 latest eeae25ada2aa 4 minutes ago 188.3 MB - image2 latest dea752e4e117 9 minutes ago 188.3 MB - image3 latest 511136ea3c5a 25 minutes ago 188.3 MB - ``` - - Filtering with `before` would give: - - ```console - $ docker images --filter "before=image1" - - REPOSITORY TAG IMAGE ID CREATED SIZE - image2 latest dea752e4e117 9 minutes ago 188.3 MB - image3 latest 511136ea3c5a 25 minutes ago 188.3 MB - ``` - - Filtering with `since` would give: - - ```console - $ docker images --filter "since=image3" - REPOSITORY TAG IMAGE ID CREATED SIZE - image1 latest eeae25ada2aa 4 minutes ago 188.3 MB - image2 latest dea752e4e117 9 minutes ago 188.3 MB - ``` - - #### Filter images by reference - - The `reference` filter shows only images whose reference matches - the specified pattern. - - ```console - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest e02e811dd08f 5 weeks ago 1.09 MB - busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB - busybox musl 733eb3059dce 5 weeks ago 1.21 MB - busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB - ``` - - Filtering with `reference` would give: - - ```console - $ docker images --filter=reference='busy*:*libc' - - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB - busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB - ``` - - Filtering with multiple `reference` would give, either match A or B: - - ```console - $ docker images --filter=reference='busy*:uclibc' --filter=reference='busy*:glibc' - - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB - busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB - ``` - - ### Format the output (--format) {#format} - - The formatting option (`--format`) will pretty print container output - using a Go template. - - Valid placeholders for the Go template are listed below: - - | Placeholder | Description | - |-----------------|------------------------------------------| - | `.ID` | Image ID | - | `.Repository` | Image repository | - | `.Tag` | Image tag | - | `.Digest` | Image digest | - | `.CreatedSince` | Elapsed time since the image was created | - | `.CreatedAt` | Time when the image was created | - | `.Size` | Image disk size | - - When using the `--format` option, the `image` command will either - output the data exactly as the template declares or, when using the - `table` directive, will include column headers as well. 
- - The following example uses a template without headers and outputs the - `ID` and `Repository` entries separated by a colon (`:`) for all images: - - ```console - $ docker images --format "{{.ID}}: {{.Repository}}" - - 77af4d6b9913: - b6fa739cedf5: committ - 78a85c484f71: - 30557a29d5ab: docker - 5ed6274db6ce: - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - ``` - - To list all images with their repository and tag in a table format you - can use: - - ```console - $ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" - - IMAGE ID REPOSITORY TAG - 77af4d6b9913 - b6fa739cedf5 committ latest - 78a85c484f71 - 30557a29d5ab docker latest - 5ed6274db6ce - 746b819f315e postgres 9 - 746b819f315e postgres 9.3 - 746b819f315e postgres 9.3.5 - 746b819f315e postgres latest - ``` - - To list all images in JSON format, use the `json` directive: - - ```console - $ docker images --format json - {"Containers":"N/A","CreatedAt":"2021-03-04 03:24:42 +0100 CET","CreatedSince":"5 days ago","Digest":"\u003cnone\u003e","ID":"4dd97cefde62","Repository":"ubuntu","SharedSize":"N/A","Size":"72.9MB","Tag":"latest","UniqueSize":"N/A","VirtualSize":"72.9MB"} - {"Containers":"N/A","CreatedAt":"2021-02-17 22:19:54 +0100 CET","CreatedSince":"2 weeks ago","Digest":"\u003cnone\u003e","ID":"28f6e2705743","Repository":"alpine","SharedSize":"N/A","Size":"5.61MB","Tag":"latest","UniqueSize":"N/A","VirtualSize":"5.613MB"} - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_import.yaml b/data/engine-cli/docker_import.yaml index 80a14c4f9..f4b4e7e2e 100644 --- a/data/engine-cli/docker_import.yaml +++ b/data/engine-cli/docker_import.yaml @@ -1,18 +1,7 @@ command: docker import aliases: docker image import, docker import short: Import the contents from a tarball to create a filesystem image -long: |- - You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The - `URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) - containing a filesystem or to an individual file on the Docker host. If you - specify an archive, Docker untars it in the container relative to the `/` - (root). If you specify an individual file, you must specify the full path within - the host. To import from a remote location, specify a `URI` that begins with the - `http://` or `https://` protocol. - - The `--change` option applies `Dockerfile` instructions to the image that is - created. Supported `Dockerfile` instructions: - `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` +long: Import the contents from a tarball to create a filesystem image usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] pname: docker plink: docker.yaml @@ -58,51 +47,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Import from a remote location - - This creates a new untagged image. - - ```console - $ docker import https://example.com/exampleimage.tgz - ``` - - ### Import from a local file - - Import to docker via pipe and `STDIN`. - - ```console - $ cat exampleimage.tgz | docker import - exampleimagelocal:new - ``` - - Import with a commit message. - - ```console - $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new - ``` - - Import to docker from a local archive. - - ```console - $ docker import /path/to/exampleimage.tgz - ``` - - ### Import from a local directory - - ```console - $ sudo tar -c . 
| docker import - exampleimagedir - ``` - - ### Import from a local directory with new configurations - - ```console - $ sudo tar -c . | docker import --change "ENV DEBUG=true" - exampleimagedir - ``` - - Note the `sudo` in this example – you must preserve - the ownership of the files (especially root ownership) during the - archiving with tar. If you are not root (or the sudo command) when you - tar, then the ownerships might not get preserved. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_info.yaml b/data/engine-cli/docker_info.yaml index 9bcbf60f0..4bb27a74e 100644 --- a/data/engine-cli/docker_info.yaml +++ b/data/engine-cli/docker_info.yaml @@ -1,24 +1,7 @@ command: docker info aliases: docker system info, docker info short: Display system-wide information -long: |- - This command displays system wide information regarding the Docker installation. - Information displayed includes the kernel version, number of containers and images. - The number of images shown is the number of unique images. The same image tagged - under different names is counted only once. - - If a format is specified, the given template will be executed instead of the - default format. Go's [text/template](https://pkg.go.dev/text/template) package - describes all the details of the format. - - Depending on the storage driver in use, additional information can be shown, such - as pool name, data file, metadata file, data space used, total data space, metadata - space used, and total metadata space. - - The data file is where the images are stored and the metadata file is where the - meta data regarding those images are stored. When run for the first time Docker - allocates a certain amount of data space and meta data space from the space - available on the volume where `/var/lib/docker` is mounted. +long: Display system-wide information usage: docker info [OPTIONS] pname: docker plink: docker.yaml @@ -31,7 +14,6 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates - details_url: '#format' deprecated: false hidden: false experimental: false @@ -49,141 +31,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Show output - - The example below shows the output for a daemon running on Ubuntu Linux, - using the `overlay2` storage driver. As can be seen in the output, additional - information about the `overlay2` storage driver is shown: - - ```console - $ docker info - - Client: Docker Engine - Community - Version: 24.0.0 - Context: default - Debug Mode: false - Plugins: - buildx: Docker Buildx (Docker Inc.) - Version: v0.10.4 - Path: /usr/libexec/docker/cli-plugins/docker-buildx - compose: Docker Compose (Docker Inc.) 
- Version: v2.17.2 - Path: /usr/libexec/docker/cli-plugins/docker-compose - - Server: - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Images: 52 - Server Version: 23.0.3 - Storage Driver: overlay2 - Backing Filesystem: extfs - Supports d_type: true - Using metacopy: false - Native Overlay Diff: true - userxattr: false - Logging Driver: json-file - Cgroup Driver: systemd - Cgroup Version: 2 - Plugins: - Volume: local - Network: bridge host ipvlan macvlan null overlay - Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog - Swarm: inactive - Runtimes: io.containerd.runc.v2 runc - Default Runtime: runc - Init Binary: docker-init - containerd version: 2806fc1057397dbaeefbea0e4e17bddfbd388f38 - runc version: v1.1.5-0-gf19387a - init version: de40ad0 - Security Options: - apparmor - seccomp - Profile: builtin - cgroupns - Kernel Version: 5.15.0-25-generic - Operating System: Ubuntu 22.04 LTS - OSType: linux - Architecture: x86_64 - CPUs: 1 - Total Memory: 991.7 MiB - Name: ip-172-30-0-91.ec2.internal - ID: 4cee4408-10d2-4e17-891c-a41736ac4536 - Docker Root Dir: /var/lib/docker - Debug Mode: false - Username: gordontheturtle - Experimental: false - Insecure Registries: - myinsecurehost:5000 - 127.0.0.0/8 - Live Restore Enabled: false - ``` - - ### Format the output (--format) {#format} - - You can also specify the output format: - - ```console - $ docker info --format '{{json .}}' - - {"ID":"4cee4408-10d2-4e17-891c-a41736ac4536","Containers":14, ...} - ``` - - ### Run `docker info` on Windows - - Here is a sample output for a daemon running on Windows Server: - - ```console - C:\> docker info - - Client: Docker Engine - Community - Version: 24.0.0 - Context: default - Debug Mode: false - Plugins: - buildx: Docker Buildx (Docker Inc.) - Version: v0.10.4 - Path: C:\Program Files\Docker\cli-plugins\docker-buildx.exe - compose: Docker Compose (Docker Inc.) - Version: v2.17.2 - Path: C:\Program Files\Docker\cli-plugins\docker-compose.exe - - Server: - Containers: 1 - Running: 0 - Paused: 0 - Stopped: 1 - Images: 17 - Server Version: 23.0.3 - Storage Driver: windowsfilter - Logging Driver: json-file - Plugins: - Volume: local - Network: ics internal l2bridge l2tunnel nat null overlay private transparent - Log: awslogs etwlogs fluentd gcplogs gelf json-file local logentries splunk syslog - Swarm: inactive - Default Isolation: process - Kernel Version: 10.0 20348 (20348.1.amd64fre.fe_release.210507-1500) - Operating System: Microsoft Windows Server Version 21H2 (OS Build 20348.707) - OSType: windows - Architecture: x86_64 - CPUs: 8 - Total Memory: 3.999 GiB - Name: WIN-V0V70C0LU5P - ID: 2880d38d-464e-4d01-91bd-c76f33ba3981 - Docker Root Dir: C:\ProgramData\docker - Debug Mode: false - Experimental: true - Insecure Registries: - myregistry:5000 - 127.0.0.0/8 - Registry Mirrors: - http://192.168.1.2/ - http://registry-mirror.example.com:5000/ - Live Restore Enabled: false - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_inspect.yaml b/data/engine-cli/docker_inspect.yaml index 2d83abe65..4201d9864 100644 --- a/data/engine-cli/docker_inspect.yaml +++ b/data/engine-cli/docker_inspect.yaml @@ -23,7 +23,7 @@ long: |- To restrict `docker inspect` to a specific type of object, use the `--type` option. - The following example inspects a _volume_ named "myvolume" + The following example inspects a volume named `myvolume`. 
```console $ docker inspect --type=volume myvolume @@ -149,7 +149,7 @@ examples: |- section contains a map of the internal port mappings to a list of external address/port objects. To grab just the numeric public port, you use `index` to find the specific port map, and then `index` 0 contains the first object inside - of that. Then we ask for the `HostPort` field to get the public address. + of that. Then, specify the `HostPort` field to get the public address. ```console $ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID diff --git a/data/engine-cli/docker_kill.yaml b/data/engine-cli/docker_kill.yaml index 627d1cfda..2a419df38 100644 --- a/data/engine-cli/docker_kill.yaml +++ b/data/engine-cli/docker_kill.yaml @@ -1,27 +1,7 @@ command: docker kill aliases: docker container kill, docker kill short: Kill one or more running containers -long: |- - The `docker kill` subcommand kills one or more containers. The main process - inside the container is sent `SIGKILL` signal (default), or the signal that is - specified with the `--signal` option. You can reference a container by its - ID, ID-prefix, or name. - - The `--signal` flag sets the system call signal that is sent to the container. - This signal can be a signal name in the format `SIG`, for instance `SIGINT`, - or an unsigned number that matches a position in the kernel's syscall table, - for instance `2`. - - While the default (`SIGKILL`) signal will terminate the container, the signal - set through `--signal` may be non-terminal, depending on the container's main - process. For example, the `SIGHUP` signal in most cases will be non-terminal, - and the container will continue running after receiving the signal. - - > **Note** - > - > `ENTRYPOINT` and `CMD` in the *shell* form run as a child process of - > `/bin/sh -c`, which does not pass signals. This means that the executable is - > not the container’s PID 1 and does not receive Unix signals. +long: Kill one or more running containers usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] pname: docker plink: docker.yaml @@ -30,7 +10,6 @@ options: shorthand: s value_type: string description: Signal to send to the container - details_url: '#signal' deprecated: false hidden: false experimental: false @@ -48,37 +27,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Send a KILL signal to a container - - The following example sends the default `SIGKILL` signal to the container named - `my_container`: - - ```console - $ docker kill my_container - ``` - - ### Send a custom signal to a container (--signal) {#signal} - - The following example sends a `SIGHUP` signal to the container named - `my_container`: - - ```console - $ docker kill --signal=SIGHUP my_container - ``` - - - You can specify a custom signal either by _name_, or _number_. The `SIG` prefix - is optional, so the following examples are equivalent: - - ```console - $ docker kill --signal=SIGHUP my_container - $ docker kill --signal=HUP my_container - $ docker kill --signal=1 my_container - ``` - - Refer to the [`signal(7)`](https://man7.org/linux/man-pages/man7/signal.7.html) - man-page for a list of standard Linux signals. 
deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_load.yaml b/data/engine-cli/docker_load.yaml index b308f69a9..ccea457f7 100644 --- a/data/engine-cli/docker_load.yaml +++ b/data/engine-cli/docker_load.yaml @@ -1,9 +1,7 @@ command: docker load aliases: docker image load, docker load short: Load an image from a tar archive or STDIN -long: |- - Load an image or repository from a tar archive (even if compressed with gzip, - bzip2, xz or zstd) from a file or STDIN. It restores both images and tags. +long: Load an image from a tar archive or STDIN usage: docker load [OPTIONS] pname: docker plink: docker.yaml @@ -12,7 +10,6 @@ options: shorthand: i value_type: string description: Read from tar archive file, instead of STDIN - details_url: '#input' deprecated: false hidden: false experimental: false @@ -41,41 +38,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ```console - $ docker image ls - - REPOSITORY TAG IMAGE ID CREATED SIZE - ``` - - ### Load images from STDIN - - ```console - $ docker load < busybox.tar.gz - - Loaded image: busybox:latest - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - ``` - - ### Load images from a file (--input) {#input} - - ```console - $ docker load --input fedora.tar - - Loaded image: fedora:rawhide - Loaded image: fedora:20 - - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - fedora rawhide 0d20aec6529d 7 weeks ago 387 MB - fedora 20 58394af37342 7 weeks ago 385.5 MB - fedora heisenbug 58394af37342 7 weeks ago 385.5 MB - fedora latest 58394af37342 7 weeks ago 385.5 MB - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_login.yaml b/data/engine-cli/docker_login.yaml index 225b0d2dc..037078dba 100644 --- a/data/engine-cli/docker_login.yaml +++ b/data/engine-cli/docker_login.yaml @@ -1,6 +1,6 @@ command: docker login short: Log in to a registry -long: Login to a registry. +long: Log in to a registry. usage: docker login [OPTIONS] [SERVER] pname: docker plink: docker.yaml @@ -50,7 +50,7 @@ inherited_options: examples: |- ### Login to a self-hosted registry - If you want to login to a self-hosted registry you can specify this by + If you want to log in to a self-hosted registry you can specify this by adding the server name. ```console @@ -73,12 +73,12 @@ examples: |- ### Privileged user requirement - `docker login` requires user to use `sudo` or be `root`, except when: + `docker login` requires you to use `sudo` or be `root`, except when: - 1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. - 2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](/engine/security/#docker-daemon-attack-surface) for details. + - Connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. + - The user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](/engine/security/#docker-daemon-attack-surface) for details. - You can log into any public or private repository for which you have + You can log in to any public or private repository for which you have credentials. 
When you log in, the command stores credentials in `$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows, via the procedure described below. @@ -105,7 +105,7 @@ examples: |- #### Configure the credential store You need to specify the credential store in `$HOME/.docker/config.json` - to tell the docker engine to use it. The value of the config property should be + to tell the Docker Engine to use it. The value of the config property should be the suffix of the program to use (i.e. everything after `docker-credential-`). For example, to use `docker-credential-osxkeychain`: @@ -150,11 +150,11 @@ examples: |- If the secret being stored is an identity token, the Username should be set to ``. - The `store` command can write error messages to `STDOUT` that the docker engine + The `store` command can write error messages to `STDOUT` that the Docker Engine will show if there was an issue. The `get` command takes a string payload from the standard input. That payload carries - the server address that the docker engine needs credentials for. This is + the server address that the Docker Engine needs credentials for. This is an example of that payload: `https://index.docker.io/v1`. The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name @@ -168,16 +168,16 @@ examples: |- ``` The `erase` command takes a string payload from `STDIN`. That payload carries - the server address that the docker engine wants to remove credentials for. This is + the server address that the Docker Engine wants to remove credentials for. This is an example of that payload: `https://index.docker.io/v1`. - The `erase` command can write error messages to `STDOUT` that the docker engine + The `erase` command can write error messages to `STDOUT` that the Docker Engine will show if there was an issue. ### Credential helpers Credential helpers are similar to the credential store above, but act as the - designated programs to handle credentials for *specific registries*. The default + designated programs to handle credentials for specific registries. The default credential store (`credsStore` or the config file itself) will not be used for operations concerning credentials of the specified registries. diff --git a/data/engine-cli/docker_logs.yaml b/data/engine-cli/docker_logs.yaml index 282c3f03f..4d0d4cc6a 100644 --- a/data/engine-cli/docker_logs.yaml +++ b/data/engine-cli/docker_logs.yaml @@ -1,39 +1,7 @@ command: docker logs aliases: docker container logs, docker logs short: Fetch the logs of a container -long: |- - The `docker logs` command batch-retrieves logs present at the time of execution. - - For more information about selecting and configuring logging drivers, refer to - [Configure logging drivers](/config/containers/logging/configure/). - - The `docker logs --follow` command will continue streaming the new output from - the container's `STDOUT` and `STDERR`. - - Passing a negative number or a non-integer to `--tail` is invalid and the - value is set to `all` in that case. - - The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://pkg.go.dev/time#RFC3339Nano) - , for example `2014-09-16T06:17:46.000000000Z`, to each - log entry. To ensure that the timestamps are aligned the - nano-second part of the timestamp will be padded with zero when necessary. - - The `docker logs --details` command will add on extra attributes, such as - environment variables and labels, provided to `--log-opt` when creating the - container. 
- - The `--since` option shows only the container logs generated after - a given date. You can specify the date as an RFC 3339 date, a UNIX - timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date - format you may also use RFC3339Nano, `2006-01-02T15:04:05`, - `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local - timezone on the client will be used if you do not provide either a `Z` or a - `+-00:00` timezone offset at the end of the timestamp. When providing Unix - timestamps enter seconds[.nanoseconds], where seconds is the number of seconds - that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap - seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a - fraction of a second no more than nine digits long. You can combine the - `--since` option with either or both of the `--follow` or `--tail` options. +long: Fetch the logs of a container usage: docker logs [OPTIONS] CONTAINER pname: docker plink: docker.yaml @@ -95,7 +63,6 @@ options: value_type: string description: | Show logs before a timestamp (e.g. `2013-01-02T13:23:37Z`) or relative (e.g. `42m` for 42 minutes) - details_url: '#until' deprecated: false hidden: false min_api_version: "1.35" @@ -114,20 +81,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Retrieve logs until a specific point in time (--until) {#until} - - In order to retrieve logs before a specific point in time, run: - - ```console - $ docker run --name test -d busybox sh -c "while true; do $(echo date); sleep 1; done" - $ date - Tue 14 Nov 2017 16:40:00 CET - $ docker logs -f --until=2s test - Tue 14 Nov 2017 16:40:00 CET - Tue 14 Nov 2017 16:40:01 CET - Tue 14 Nov 2017 16:40:02 CET - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_manifest.yaml b/data/engine-cli/docker_manifest.yaml index 63b34707c..d50877df7 100644 --- a/data/engine-cli/docker_manifest.yaml +++ b/data/engine-cli/docker_manifest.yaml @@ -4,9 +4,9 @@ long: |- The `docker manifest` command by itself performs no action. In order to operate on a manifest or manifest list, one of the subcommands must be used. - A single manifest is information about an image, such as layers, size, and digest. - The docker manifest command also gives users additional information such as the os - and architecture an image was built for. + A single manifest is information about an image, such as layers, size, and + digest. The `docker manifest` command also gives you additional information, + such as the OS and architecture an image was built for. A manifest list is a list of image layers that is created by specifying one or more (ideally more than one) image names. It can then be used in the same way as @@ -15,7 +15,7 @@ long: |- Ideally a manifest list is created from images that are identical in function for different os/arch combinations. For this reason, manifest lists are often referred to as "multi-arch images". However, a user could create a manifest list that points - to two images -- one for windows on amd64, and one for darwin on amd64. + to two images -- one for Windows on AMD64, and one for Darwin on AMD64. ### manifest inspect @@ -138,10 +138,10 @@ examples: |- ### Inspect an image's manifest and get the os/arch info - The `docker manifest inspect` command takes an optional `--verbose` flag - that gives you the image's name (Ref), and architecture and os (Platform). 
+ The `docker manifest inspect` command takes an optional `--verbose` flag that + gives you the image's name (Ref), as well as the architecture and OS (Platform). - Just as with other docker commands that take image names, you can refer to an image with or + Just as with other Docker commands that take image names, you can refer to an image with or without a tag, or by digest (e.g. `hello-world@sha256:f3b3b28a45160805bb16542c9531888519430e9e6d6ffc09d72261b0d26ff74f`). Here is an example of inspecting an image's manifest with the `--verbose` flag: diff --git a/data/engine-cli/docker_network_connect.yaml b/data/engine-cli/docker_network_connect.yaml index dddc3e32e..3c75def2f 100644 --- a/data/engine-cli/docker_network_connect.yaml +++ b/data/engine-cli/docker_network_connect.yaml @@ -105,7 +105,7 @@ examples: |- ### Use the legacy `--link` option (--link) {#link} - You can use `--link` option to link another container with a preferred alias + You can use `--link` option to link another container with a preferred alias. ```console $ docker network connect --link container1:c1 multi-host-network container2 diff --git a/data/engine-cli/docker_network_create.yaml b/data/engine-cli/docker_network_create.yaml index 78645655c..64e6db7fd 100644 --- a/data/engine-cli/docker_network_create.yaml +++ b/data/engine-cli/docker_network_create.yaml @@ -6,7 +6,7 @@ long: |- network driver you can specify that `DRIVER` here also. If you don't specify the `--driver` option, the command automatically creates a `bridge` network for you. When you install Docker Engine it creates a `bridge` network automatically. This - network corresponds to the `docker0` bridge that Engine has traditionally relied + network corresponds to the `docker0` bridge that Docker Engine has traditionally relied on. When you launch a new container with `docker run` it automatically connects to this bridge network. You cannot remove this default bridge network, but you can create new ones using the `network create` command. @@ -15,8 +15,8 @@ long: |- $ docker network create -d bridge my-bridge-network ``` - Bridge networks are isolated networks on a single Engine installation. If you - want to create a network that spans multiple Docker hosts each running an + Bridge networks are isolated networks on a single Docker Engine installation. If you + want to create a network that spans multiple Docker hosts each running Docker Engine, you must enable Swarm mode, and create an `overlay` network. To read more about overlay networks with Swarm mode, see ["*use overlay networks*"](/network/overlay/). @@ -248,17 +248,17 @@ examples: |- containers can communicate using only another container's IP address or name. For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different - Engines can also communicate in this way. + daemons can also communicate in this way. You can disconnect a container from a network using the `docker network disconnect` command. ### Specify advanced options - When you create a network, Engine creates a non-overlapping subnetwork for the - network by default. This subnetwork is not a subdivision of an existing network. - It is purely for ip-addressing purposes. You can override this default and - specify subnetwork values directly using the `--subnet` option. On a + When you create a network, Docker Engine creates a non-overlapping subnetwork + for the network by default. 
This subnetwork is not a subdivision of an existing + network. It is purely for ip-addressing purposes. You can override this default + and specify subnetwork values directly using the `--subnet` option. On a `bridge` network you can only create a single subnet: ```console @@ -277,8 +277,8 @@ examples: |- br0 ``` - If you omit the `--gateway` flag the Engine selects one for you from inside a - preferred pool. For `overlay` networks and for network driver plugins that + If you omit the `--gateway` flag, Docker Engine selects one for you from inside + a preferred pool. For `overlay` networks and for network driver plugins that support it you can create multiple subnetworks. This example uses two `/25` subnet mask to adhere to the current guidance of not having more than 256 IPs in a single overlay network. Each of the subnetworks has 126 usable addresses. @@ -295,13 +295,13 @@ examples: |- ``` Be sure that your subnetworks do not overlap. If they do, the network create - fails and Engine returns an error. + fails and Docker Engine returns an error. ### Bridge driver options When creating a custom network, the default network driver (i.e. `bridge`) has additional options that can be passed. The following are those options and the - equivalent docker daemon flags used for docker0 bridge: + equivalent Docker daemon flags used for docker0 bridge: | Option | Equivalent | Description | |--------------------------------------------------|-------------|-------------------------------------------------------| @@ -358,7 +358,7 @@ examples: |- ### Run services on predefined networks - You can create services on the predefined docker networks `bridge` and `host`. + You can create services on the predefined Docker networks `bridge` and `host`. ```console $ docker service create --name my-service \ diff --git a/data/engine-cli/docker_node_ps.yaml b/data/engine-cli/docker_node_ps.yaml index de4d45756..f66993b64 100644 --- a/data/engine-cli/docker_node_ps.yaml +++ b/data/engine-cli/docker_node_ps.yaml @@ -92,8 +92,9 @@ examples: |- ### Filtering (--filter) {#filter} - The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more - than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + The filtering flag (`-f` or `--filter`) format is of "key=value". If there is + more than one filter, then pass multiple flags (e.g., `--filter "foo=bar" + --filter "bif=baz"`). The currently supported filters are: diff --git a/data/engine-cli/docker_node_rm.yaml b/data/engine-cli/docker_node_rm.yaml index c8b37173f..818207dc8 100644 --- a/data/engine-cli/docker_node_rm.yaml +++ b/data/engine-cli/docker_node_rm.yaml @@ -51,7 +51,7 @@ examples: |- Removes the specified nodes from the swarm, but only if the nodes are in the down state. If you attempt to remove an active node you will receive an error: - ```non + ```console $ docker node rm swarm-node-03 Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not diff --git a/data/engine-cli/docker_pause.yaml b/data/engine-cli/docker_pause.yaml index 1cc28c593..ee4b9b912 100644 --- a/data/engine-cli/docker_pause.yaml +++ b/data/engine-cli/docker_pause.yaml @@ -1,17 +1,7 @@ command: docker pause aliases: docker container pause, docker pause short: Pause all processes within one or more containers -long: |- - The `docker pause` command suspends all processes in the specified containers. - On Linux, this uses the freezer cgroup. 
Traditionally, when suspending a process - the `SIGSTOP` signal is used, which is observable by the process being suspended. - With the freezer cgroup the process is unaware, and unable to capture, - that it is being suspended, and subsequently resumed. On Windows, only Hyper-V - containers can be paused. - - See the - [freezer cgroup documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) - for further details. +long: Pause all processes within one or more containers usage: docker pause CONTAINER [CONTAINER...] pname: docker plink: docker.yaml @@ -26,10 +16,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ```console - $ docker pause my_container - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_plugin_create.yaml b/data/engine-cli/docker_plugin_create.yaml index e83466b6e..ed5b20903 100644 --- a/data/engine-cli/docker_plugin_create.yaml +++ b/data/engine-cli/docker_plugin_create.yaml @@ -2,8 +2,8 @@ command: docker plugin create short: | Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. long: |- - Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as - [the config.json](../../extend/config.md) + Creates a plugin. Before creating the plugin, prepare the plugin's root + filesystem as well as the [config.json](../../extend/config.md). usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR pname: docker plugin plink: docker_plugin.yaml diff --git a/data/engine-cli/docker_plugin_install.yaml b/data/engine-cli/docker_plugin_install.yaml index 15e28c346..aaf1c1e52 100644 --- a/data/engine-cli/docker_plugin_install.yaml +++ b/data/engine-cli/docker_plugin_install.yaml @@ -4,7 +4,7 @@ long: |- Installs and enables a plugin. Docker looks first for the plugin on your Docker host. If the plugin does not exist locally, then the plugin is pulled from the registry. Note that the minimum required registry version to distribute - plugins is 2.3.0 + plugins is 2.3.0. usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] pname: docker plugin plink: docker_plugin.yaml diff --git a/data/engine-cli/docker_plugin_ls.yaml b/data/engine-cli/docker_plugin_ls.yaml index 09f89a502..5793557f8 100644 --- a/data/engine-cli/docker_plugin_ls.yaml +++ b/data/engine-cli/docker_plugin_ls.yaml @@ -80,7 +80,7 @@ examples: |- ### Filtering (--filter) {#filter} The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more - than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`). The currently supported filters are: diff --git a/data/engine-cli/docker_plugin_rm.yaml b/data/engine-cli/docker_plugin_rm.yaml index a6ccb25d2..f5c050e02 100644 --- a/data/engine-cli/docker_plugin_rm.yaml +++ b/data/engine-cli/docker_plugin_rm.yaml @@ -4,8 +4,8 @@ short: Remove one or more plugins long: |- Removes a plugin. You cannot remove a plugin if it is enabled, you must disable a plugin using the [`docker plugin disable`](plugin_disable.md) before removing - it (or use --force, use of force is not recommended, since it can affect - functioning of running containers using the plugin). + it, or use `--force`. Use of `--force` is not recommended, since it can affect + functioning of running containers using the plugin. 
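+
+ For example, to remove a plugin named `sample/example-plugin` (an
+ illustrative name), first disable it and then remove it:
+
+ ```console
+ # sample/example-plugin is an illustrative plugin name
+ $ docker plugin disable sample/example-plugin
+ $ docker plugin rm sample/example-plugin
+ ```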
usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] pname: docker plugin plink: docker_plugin.yaml diff --git a/data/engine-cli/docker_plugin_set.yaml b/data/engine-cli/docker_plugin_set.yaml index 999c7f6e9..68bedf39c 100644 --- a/data/engine-cli/docker_plugin_set.yaml +++ b/data/engine-cli/docker_plugin_set.yaml @@ -76,6 +76,7 @@ examples: |- ``` > **Note** + > > Since only `path` is settable in `mydevice`, > `docker plugins set mydevice=/dev/bar myplugin` would work too. diff --git a/data/engine-cli/docker_port.yaml b/data/engine-cli/docker_port.yaml index b48013ee9..1e256f9ff 100644 --- a/data/engine-cli/docker_port.yaml +++ b/data/engine-cli/docker_port.yaml @@ -16,35 +16,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Show all mapped ports - - You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or - just a specific mapping: - - ```console - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test - - $ docker port test - - 7890/tcp -> 0.0.0.0:4321 - 9876/tcp -> 0.0.0.0:1234 - - $ docker port test 7890/tcp - - 0.0.0.0:4321 - - $ docker port test 7890/udp - - 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test - - $ docker port test 7890 - - 0.0.0.0:4321 - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_ps.yaml b/data/engine-cli/docker_ps.yaml index 29a9b343e..220f71ac6 100644 --- a/data/engine-cli/docker_ps.yaml +++ b/data/engine-cli/docker_ps.yaml @@ -11,7 +11,6 @@ options: value_type: bool default_value: "false" description: Show all containers (default shows just running) - details_url: '#all' deprecated: false hidden: false experimental: false @@ -22,7 +21,6 @@ options: shorthand: f value_type: filter description: Filter output based on conditions provided - details_url: '#filter' deprecated: false hidden: false experimental: false @@ -38,7 +36,6 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates - details_url: '#format' deprecated: false hidden: false experimental: false @@ -71,7 +68,6 @@ options: value_type: bool default_value: "false" description: Don't truncate output - details_url: '#no-trunc' deprecated: false hidden: false experimental: false @@ -94,7 +90,6 @@ options: value_type: bool default_value: "false" description: Display total file sizes - details_url: '#size' deprecated: false hidden: false experimental: false @@ -112,428 +107,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Do not truncate output (--no-trunc) {#no-trunc} - - Running `docker ps --no-trunc` showing 2 linked containers. - - ```console - $ docker ps --no-trunc - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - ca5534a51dd04bbcebe9b23ba05f389466cf0c190f1f8f182d7eea92a9671d00 ubuntu:22.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp - 9ca9747b233100676a48cc7806131586213fa5dab86dd1972d6a8732e3a84a4d crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db - ``` - - ### Show both running and stopped containers (-a, --all) {#all} - - The `docker ps` command only shows running containers by default. 
To see all - containers, use the `--all` (or `-a`) flag: - - ```console - $ docker ps -a - ``` - - `docker ps` groups exposed ports into a single range if possible. E.g., a - container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in - the `PORTS` column. - - ### Show disk usage by container (--size) {#size} - - The `docker ps --size` (or `-s`) command displays two different on-disk-sizes for each container: - - ```console - $ docker ps --size - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE - e90b8831a4b8 nginx "/bin/bash -c 'mkdir " 11 weeks ago Up 4 hours my_nginx 35.58 kB (virtual 109.2 MB) - 00c6131c5e30 telegraf:1.5 "/entrypoint.sh" 11 weeks ago Up 11 weeks my_telegraf 0 B (virtual 209.5 MB) - ``` - * The "size" information shows the amount of data (on disk) that is used for the _writable_ layer of each container - * The "virtual size" is the total amount of disk-space used for the read-only _image_ data used by the container and the writable layer. - - For more information, refer to the [container size on disk](/storage/storagedriver/#container-size-on-disk) section. - - - ### Filtering (--filter) {#filter} - - The `--filter` (or `-f`) flag format is a `key=value` pair. If there is more - than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) - - The currently supported filters are: - - | Filter | Description | - |:----------------------|:-------------------------------------------------------------------------------------------------------------------------------------| - | `id` | Container's ID | - | `name` | Container's name | - | `label` | An arbitrary string representing either a key or a key-value pair. Expressed as `` or `=` | - | `exited` | An integer representing the container's exit code. Only useful with `--all`. | - | `status` | One of `created`, `restarting`, `running`, `removing`, `paused`, `exited`, or `dead` | - | `ancestor` | Filters containers which share a given image as an ancestor. Expressed as `[:]`, ``, or `` | - | `before` or `since` | Filters containers created before or after a given container ID or name | - | `volume` | Filters running containers which have mounted a given volume or bind mount. | - | `network` | Filters running containers connected to a given network. | - | `publish` or `expose` | Filters containers which publish or expose a given port. Expressed as `[/]` or `/[]` | - | `health` | Filters containers based on their healthcheck status. One of `starting`, `healthy`, `unhealthy` or `none`. | - | `isolation` | Windows daemon only. One of `default`, `process`, or `hyperv`. | - | `is-task` | Filters containers that are a "task" for a service. Boolean option (`true` or `false`) | - - - #### label - - The `label` filter matches containers based on the presence of a `label` alone or a `label` and a - value. - - The following filter matches containers with the `color` label regardless of its value. - - ```console - $ docker ps --filter "label=color" - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley - d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani - ``` - - The following filter matches containers with the `color` label with the `blue` value. 
- - ```console - $ docker ps --filter "label=color=blue" - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani - ``` - - #### name - - The `name` filter matches on all or part of a container's name. - - The following filter matches all containers with a name containing the `nostalgic_stallman` string. - - ```console - $ docker ps --filter "name=nostalgic_stallman" - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman - ``` - - You can also filter for a substring in a name as this shows: - - ```console - $ docker ps --filter "name=nostalgic" - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic - 9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman - 673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley - ``` - - #### exited - - The `exited` filter matches containers by exist status code. For example, to - filter for containers that have exited successfully: - - ```console - $ docker ps -a --filter 'exited=0' - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey - 106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani - 48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds - ``` - - #### Filter by exit signal - - You can use a filter to locate containers that exited with status of `137` - meaning a `SIGKILL(9)` killed them. - - ```console - $ docker ps -a --filter 'exited=137' - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski - a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande - ``` - - Any of these events result in a `137` status: - - * the `init` process of the container is killed manually - * `docker kill` kills the container - * Docker daemon restarts which kills all running containers - - #### status - - The `status` filter matches containers by status. The possible values for the container status are: - - | Status | Description | - | :----------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - | `created` | A container that has never been started. | - | `running` | A running container, started by either `docker start` or `docker run`. | - | `paused` | A paused container. See `docker pause`. | - | `restarting` | A container which is starting due to the designated restart policy for that container. | - | `exited` | A container which is no longer running. For example, the process inside the container completed or the container was stopped using the `docker stop` command. | - | `removing` | A container which is in the process of being removed. See `docker rm`. | - | `dead` | A "defunct" container; for example, a container that was only partially removed because resources were kept busy by an external process. `dead` containers cannot be (re)started, only removed. 
| - - For example, to filter for `running` containers: - - ```console - $ docker ps --filter status=running - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic - d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top - 9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman - ``` - - To filter for `paused` containers: - - ```console - $ docker ps --filter status=paused - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley - ``` - - #### ancestor - - The `ancestor` filter matches containers based on its image or a descendant of - it. The filter supports the following image representation: - - - `image` - - `image:tag` - - `image:tag@digest` - - `short-id` - - `full-id` - - If you don't specify a `tag`, the `latest` tag is used. For example, to filter - for containers that use the latest `ubuntu` image: - - ```console - $ docker ps --filter ancestor=ubuntu - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace - 5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet - 82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose - bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath - ``` - - Match containers based on the `ubuntu-c1` image which, in this case, is a child - of `ubuntu`: - - ```console - $ docker ps --filter ancestor=ubuntu-c1 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace - ``` - - Match containers based on the `ubuntu` version `22.04` image: - - ```console - $ docker ps --filter ancestor=ubuntu:22.04 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 82a598284012 ubuntu:22.04 "top" 3 minutes ago Up 3 minutes sleepy_bose - ``` - - The following matches containers based on the layer `d0e008c6cf02` or an image - that have this layer in its layer stack. - - ```console - $ docker ps --filter ancestor=d0e008c6cf02 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 82a598284012 ubuntu:22.04 "top" 3 minutes ago Up 3 minutes sleepy_bose - ``` - - #### Create time - - ##### before - - The `before` filter shows only containers created before the container with - given id or name. For example, having these containers created: - - ```console - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky - 4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton - 6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat - ``` - - Filtering with `before` would give: - - ```console - $ docker ps -f before=9c3527ed70ce - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton - 6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat - ``` - - ##### since - - The `since` filter shows only containers created since the container with given - id or name. 
For example, with the same containers as in `before` filter: - - ```console - $ docker ps -f since=6e63f6ff38b0 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky - 4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton - ``` - - #### volume - - The `volume` filter shows only containers that mount a specific volume or have - a volume mounted in a specific path: - - ```console - $ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" - - CONTAINER ID MOUNTS - 9c3527ed70ce remote-volume - - $ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" - - CONTAINER ID MOUNTS - 9c3527ed70ce remote-volume - ``` - - #### network - - The `network` filter shows only containers that are connected to a network with - a given name or id. - - The following filter matches all containers that are connected to a network - with a name containing `net1`. - - ```console - $ docker run -d --net=net1 --name=test1 ubuntu top - $ docker run -d --net=net2 --name=test2 ubuntu top - - $ docker ps --filter network=net1 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 - ``` - - The network filter matches on both the network's name and id. The following - example shows all containers that are attached to the `net1` network, using - the network id as a filter; - - ```console - $ docker network inspect --format "{{.ID}}" net1 - - 8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 - - $ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 - ``` - - #### publish and expose - - The `publish` and `expose` filters show only containers that have published or exposed port with a given port - number, port range, and/or protocol. The default protocol is `tcp` when not specified. - - The following filter matches all containers that have published port of 80: - - ```console - $ docker run -d --publish=80 busybox top - $ docker run -d --expose=8080 busybox top - - $ docker ps -a - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9833437217a5 busybox "top" 5 seconds ago Up 4 seconds 8080/tcp dreamy_mccarthy - fc7e477723b7 busybox "top" 50 seconds ago Up 50 seconds 0.0.0.0:32768->80/tcp admiring_roentgen - - $ docker ps --filter publish=80 - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - fc7e477723b7 busybox "top" About a minute ago Up About a minute 0.0.0.0:32768->80/tcp admiring_roentgen - ``` - - The following filter matches all containers that have exposed TCP port in the range of `8000-8080`: - - ```console - $ docker ps --filter expose=8000-8080/tcp - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9833437217a5 busybox "top" 21 seconds ago Up 19 seconds 8080/tcp dreamy_mccarthy - ``` - - The following filter matches all containers that have exposed UDP port `80`: - - ```console - $ docker ps --filter publish=80/udp - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - ``` - - ### Format the output (--format) {#format} - - The formatting option (`--format`) pretty-prints container output using a Go - template. 
- - Valid placeholders for the Go template are listed below: - - | Placeholder | Description | - |:--------------|:------------------------------------------------------------------------------------------------| - | `.ID` | Container ID | - | `.Image` | Image ID | - | `.Command` | Quoted command | - | `.CreatedAt` | Time when the container was created. | - | `.RunningFor` | Elapsed time since the container was started. | - | `.Ports` | Exposed ports. | - | `.State` | Container status (for example, "created", "running", "exited"). | - | `.Status` | Container status with details about duration and health-status. | - | `.Size` | Container disk size. | - | `.Names` | Container names. | - | `.Labels` | All labels assigned to the container. | - | `.Label` | Value of a specific label for this container. For example `'{{.Label "com.docker.swarm.cpu"}}'` | - | `.Mounts` | Names of the volumes mounted in this container. | - | `.Networks` | Names of the networks attached to this container. | - - When using the `--format` option, the `ps` command either outputs the data - exactly as the template declares or, when using the `table` directive, includes - column headers as well. - - The following example uses a template without headers and outputs the `ID` and - `Command` entries separated by a colon (`:`) for all running containers: - - ```console - $ docker ps --format "{{.ID}}: {{.Command}}" - - a87ecb4f327c: /bin/sh -c #(nop) MA - 01946d9d34d8: /bin/sh -c #(nop) MA - c1d3b0166030: /bin/sh -c yum -y up - 41d50ecd2f57: /bin/sh -c #(nop) MA - ``` - - To list all running containers with their labels in a table format, you can use: - - ```console - $ docker ps --format "table {{.ID}}\t{{.Labels}}" - - CONTAINER ID LABELS - a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd - 01946d9d34d8 - c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 - 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd - ``` - - To list all running containers in JSON format, use the `json` directive: - - ```console - $ docker ps --format json - {"Command":"\"/docker-entrypoint.…\"","CreatedAt":"2021-03-10 00:15:05 +0100 CET","ID":"a762a2b37a1d","Image":"nginx","Labels":"maintainer=NGINX Docker Maintainers \u003cdocker-maint@nginx.com\u003e","LocalVolumes":"0","Mounts":"","Names":"boring_keldysh","Networks":"bridge","Ports":"80/tcp","RunningFor":"4 seconds ago","Size":"0B","State":"running","Status":"Up 3 seconds"} - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_pull.yaml b/data/engine-cli/docker_pull.yaml index 266b96be6..9eadf4621 100644 --- a/data/engine-cli/docker_pull.yaml +++ b/data/engine-cli/docker_pull.yaml @@ -1,29 +1,7 @@ command: docker pull aliases: docker image pull, docker pull short: Download an image from a registry -long: |- - Most of your images will be created on top of a base image from the - [Docker Hub](https://hub.docker.com) registry. - - [Docker Hub](https://hub.docker.com) contains many pre-built images that you - can `pull` and try without needing to define and configure your own. - - To download a particular image, or set of images (i.e., a repository), - use `docker pull`. - - ### Proxy configuration - - If you are behind an HTTP proxy server, for example in corporate settings, - you may need to configure the Docker daemon's proxy settings before it can - open a connection to the registry. Refer to the [dockerd command-line reference](dockerd.md#proxy-configuration) - for details.
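-
- As a minimal, illustrative sketch (not verbatim from the dockerd reference): assuming the daemon is configured through `/etc/docker/daemon.json` and supports the `proxies` configuration block, the settings might look like this, with placeholder proxy addresses:
-
- ```console
- $ cat /etc/docker/daemon.json
- {
-   "proxies": {
-     "http-proxy": "http://proxy.example.com:3128",
-     "https-proxy": "http://proxy.example.com:3128",
-     "no-proxy": "localhost,127.0.0.1"
-   }
- }
- ```
-
- Restart the daemon after editing this file so that later pulls go through the proxy.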
- - ### Concurrent downloads - - By default the Docker daemon will pull three layers of an image at a time. - If you are on a low bandwidth connection this may cause timeout issues and you may want to lower - this via the `--max-concurrent-downloads` daemon option. See the - [daemon documentation](dockerd.md) for more details. +long: Download an image from a registry usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST] pname: docker plink: docker.yaml @@ -33,7 +11,6 @@ options: value_type: bool default_value: "false" description: Download all tagged images in the repository - details_url: '#all-tags' deprecated: false hidden: false experimental: false @@ -82,203 +59,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Pull an image from Docker Hub - - To download a particular image, or set of images (i.e., a repository), use - `docker image pull` (or the `docker pull` shorthand). If no tag is provided, - Docker Engine uses the `:latest` tag as a default. This example pulls the - `debian:latest` image: - - ```console - $ docker image pull debian - - Using default tag: latest - latest: Pulling from library/debian - e756f3fdd6a3: Pull complete - Digest: sha256:3f1d6c17773a45c97bd8f158d665c9709d7b29ed7917ac934086ad96f92e4510 - Status: Downloaded newer image for debian:latest - docker.io/library/debian:latest - ``` - - Docker images can consist of multiple layers. In the example above, the image - consists of a single layer; `e756f3fdd6a3`. - - Layers can be reused by images. For example, the `debian:bookworm` image shares - its layer with the `debian:latest`. Pulling the `debian:bookworm` image therefore - only pulls its metadata, but not its layers, because the layer is already present - locally: - - ```console - $ docker image pull debian:bookworm - - bookworm: Pulling from library/debian - Digest: sha256:3f1d6c17773a45c97bd8f158d665c9709d7b29ed7917ac934086ad96f92e4510 - Status: Downloaded newer image for debian:bookworm - docker.io/library/debian:bookworm - ``` - - To see which images are present locally, use the [`docker images`](images.md) - command: - - ```console - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - debian bookworm 4eacea30377a 8 days ago 124MB - debian latest 4eacea30377a 8 days ago 124MB - ``` - - Docker uses a content-addressable image store, and the image ID is a SHA256 - digest covering the image's configuration and layers. In the example above, - `debian:bookworm` and `debian:latest` have the same image ID because they are - the *same* image tagged with different names. Because they are the same image, - their layers are stored only once and do not consume extra disk space. - - For more information about images, layers, and the content-addressable store, - refer to [understand images, containers, and storage drivers](/storage/storagedriver/). - - - ### Pull an image by digest (immutable identifier) - - So far, you've pulled images by their name (and "tag"). Using names and tags is - a convenient way to work with images. When using tags, you can `docker pull` an - image again to make sure you have the most up-to-date version of that image. - For example, `docker pull ubuntu:22.04` pulls the latest version of the Ubuntu - 22.04 image. - - In some cases you don't want images to be updated to newer versions, but prefer - to use a fixed version of an image. Docker enables you to pull an image by its - *digest*. When pulling an image by digest, you specify *exactly* which version - of an image to pull. 
Doing so, allows you to "pin" an image to that version, - and guarantee that the image you're using is always the same. - - To know the digest of an image, pull the image first. Let's pull the latest - `ubuntu:22.04` image from Docker Hub: - - ```console - $ docker pull ubuntu:22.04 - - 22.04: Pulling from library/ubuntu - 125a6e411906: Pull complete - Digest: sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d - Status: Downloaded newer image for ubuntu:22.04 - docker.io/library/ubuntu:22.04 - ``` - - Docker prints the digest of the image after the pull has finished. In the example - above, the digest of the image is: - - ```console - sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d - ``` - - Docker also prints the digest of an image when *pushing* to a registry. This - may be useful if you want to pin to a version of the image you just pushed. - - A digest takes the place of the tag when pulling an image, for example, to - pull the above image by digest, run the following command: - - ```console - $ docker pull ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d - - docker.io/library/ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d: Pulling from library/ubuntu - Digest: sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d - Status: Image is up to date for ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d - docker.io/library/ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d - ``` - - Digest can also be used in the `FROM` of a Dockerfile, for example: - - ```dockerfile - FROM ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d - LABEL org.opencontainers.image.authors="some maintainer " - ``` - - > **Note** - > - > Using this feature "pins" an image to a specific version in time. - > Docker does therefore not pull updated versions of an image, which may include - > security updates. If you want to pull an updated image, you need to change the - > digest accordingly. - - - ### Pull from a different registry - - By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to - manually specify the path of a registry to pull from. For example, if you have - set up a local registry, you can specify its path to pull from it. A registry - path is similar to a URL, but does not contain a protocol specifier (`https://`). - - The following command pulls the `testing/test-image` image from a local registry - listening on port 5000 (`myregistry.local:5000`): - - ```console - $ docker image pull myregistry.local:5000/testing/test-image - ``` - - Registry credentials are managed by [docker login](login.md). - - Docker uses the `https://` protocol to communicate with a registry, unless the - registry is allowed to be accessed over an insecure connection. Refer to the - [insecure registries](dockerd.md#insecure-registries) section for more information. - - - ### Pull a repository with multiple images (-a, --all-tags) {#all-tags} - - By default, `docker pull` pulls a *single* image from the registry. A repository - can contain multiple images. To pull all images from a repository, provide the - `-a` (or `--all-tags`) option when using `docker pull`. 
- - This command pulls all images from the `ubuntu` repository: - - ```console - $ docker image pull --all-tags ubuntu - - Pulling repository ubuntu - ad57ef8d78d7: Download complete - 105182bb5e8b: Download complete - 511136ea3c5a: Download complete - 73bd853d2ea5: Download complete - .... - - Status: Downloaded newer image for ubuntu - ``` - - After the pull has completed use the `docker image ls` command (or the `docker images` - shorthand) to see the images that were pulled. The example below shows all the - `ubuntu` images that are present locally: - - ```console - $ docker image ls --filter reference=ubuntu - REPOSITORY TAG IMAGE ID CREATED SIZE - ubuntu 18.04 c6ad7e71ba7d 5 weeks ago 63.2MB - ubuntu bionic c6ad7e71ba7d 5 weeks ago 63.2MB - ubuntu 22.04 5ccefbfc0416 2 months ago 78MB - ubuntu focal ff0fea8310f3 2 months ago 72.8MB - ubuntu latest ff0fea8310f3 2 months ago 72.8MB - ubuntu jammy 41ba606c8ab9 3 months ago 79MB - ubuntu 20.04 ba6acccedd29 7 months ago 72.8MB - ``` - - ### Cancel a pull - - Killing the `docker pull` process, for example by pressing `CTRL-c` while it is - running in a terminal, will terminate the pull operation. - - ```console - $ docker pull ubuntu - - Using default tag: latest - latest: Pulling from library/ubuntu - a3ed95caeb02: Pulling fs layer - 236608c7b546: Pulling fs layer - ^C - ``` - - The Engine terminates a pull operation when the connection between the daemon - and the client (initiating the pull) is cut or lost for any reason or the - command is manually terminated. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_push.yaml b/data/engine-cli/docker_push.yaml index dff261b42..837922c5c 100644 --- a/data/engine-cli/docker_push.yaml +++ b/data/engine-cli/docker_push.yaml @@ -1,28 +1,7 @@ command: docker push aliases: docker image push, docker push short: Upload an image to a registry -long: |- - Use `docker image push` to share your images to the [Docker Hub](https://hub.docker.com) - registry or to a self-hosted one. - - Refer to the [`docker image tag`](tag.md) reference for more information about valid - image and tag names. - - Killing the `docker image push` process, for example by pressing `CTRL-c` while it is - running in a terminal, terminates the push operation. - - Progress bars are shown during docker push, which show the uncompressed size. - The actual amount of data that's pushed will be compressed before sending, so - the uploaded size will not be reflected by the progress bar. - - Registry credentials are managed by [docker login](login.md). - - ### Concurrent uploads - - By default the Docker daemon will push five layers of an image at a time. - If you are on a low bandwidth connection this may cause timeout issues and you may want to lower - this via the `--max-concurrent-uploads` daemon option. See the - [daemon documentation](dockerd.md) for more details. +long: Upload an image to a registry usage: docker push [OPTIONS] NAME[:TAG] pname: docker plink: docker.yaml @@ -32,7 +11,6 @@ options: value_type: bool default_value: "false" description: Push all tags of an image to the repository - details_url: '#all-tags' deprecated: false hidden: false experimental: false @@ -71,82 +49,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Push a new image to a registry - - First save the new image by finding the container ID (using [`docker container ls`](ps.md)) - and then committing it to a new image name. 
Note that only `a-z0-9-_.` are - allowed when naming images: - - ```console - $ docker container commit c16378f943fe rhel-httpd:latest - ``` - - Now, push the image to the registry using the image ID. In this example the - registry is on host named `registry-host` and listening on port `5000`. To do - this, tag the image with the host name or IP address, and the port of the - registry: - - ```console - $ docker image tag rhel-httpd:latest registry-host:5000/myadmin/rhel-httpd:latest - - $ docker image push registry-host:5000/myadmin/rhel-httpd:latest - ``` - - Check that this worked by running: - - ```console - $ docker image ls - ``` - - You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` - listed. - - ### Push all tags of an image (-a, --all-tags) {#all-tags} - - Use the `-a` (or `--all-tags`) option to push all tags of a local image. - - The following example creates multiple tags for an image, and pushes all those - tags to Docker Hub. - - - ```console - $ docker image tag myimage registry-host:5000/myname/myimage:latest - $ docker image tag myimage registry-host:5000/myname/myimage:v1.0.1 - $ docker image tag myimage registry-host:5000/myname/myimage:v1.0 - $ docker image tag myimage registry-host:5000/myname/myimage:v1 - ``` - - The image is now tagged under multiple names: - - ```console - $ docker image ls - - REPOSITORY TAG IMAGE ID CREATED SIZE - myimage latest 6d5fcfe5ff17 2 hours ago 1.22MB - registry-host:5000/myname/myimage latest 6d5fcfe5ff17 2 hours ago 1.22MB - registry-host:5000/myname/myimage v1 6d5fcfe5ff17 2 hours ago 1.22MB - registry-host:5000/myname/myimage v1.0 6d5fcfe5ff17 2 hours ago 1.22MB - registry-host:5000/myname/myimage v1.0.1 6d5fcfe5ff17 2 hours ago 1.22MB - ``` - - When pushing with the `--all-tags` option, all tags of the `registry-host:5000/myname/myimage` - image are pushed: - - - ```console - $ docker image push --all-tags registry-host:5000/myname/myimage - - The push refers to repository [registry-host:5000/myname/myimage] - 195be5f8be1d: Pushed - latest: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 - 195be5f8be1d: Layer already exists - v1: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 - 195be5f8be1d: Layer already exists - v1.0: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 - 195be5f8be1d: Layer already exists - v1.0.1: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527 - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_rename.yaml b/data/engine-cli/docker_rename.yaml index 06577118b..03769a701 100644 --- a/data/engine-cli/docker_rename.yaml +++ b/data/engine-cli/docker_rename.yaml @@ -1,7 +1,7 @@ command: docker rename aliases: docker container rename, docker rename short: Rename a container -long: The `docker rename` command renames a container. 
+long: Rename a container usage: docker rename CONTAINER NEW_NAME pname: docker plink: docker.yaml @@ -16,10 +16,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ```console - $ docker rename my_container my_new_container - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_restart.yaml b/data/engine-cli/docker_restart.yaml index 3c97eec4a..f7d4c823f 100644 --- a/data/engine-cli/docker_restart.yaml +++ b/data/engine-cli/docker_restart.yaml @@ -38,10 +38,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ```console - $ docker restart my_container - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_rm.yaml b/data/engine-cli/docker_rm.yaml index 95e6ac203..99cc15c2e 100644 --- a/data/engine-cli/docker_rm.yaml +++ b/data/engine-cli/docker_rm.yaml @@ -11,7 +11,6 @@ options: value_type: bool default_value: "false" description: Force the removal of a running container (uses SIGKILL) - details_url: '#force' deprecated: false hidden: false experimental: false @@ -23,7 +22,6 @@ options: value_type: bool default_value: "false" description: Remove the specified link - details_url: '#link' deprecated: false hidden: false experimental: false @@ -35,7 +33,6 @@ options: value_type: bool default_value: "false" description: Remove anonymous volumes associated with the container - details_url: '#volumes' deprecated: false hidden: false experimental: false @@ -53,95 +50,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Remove a container - - This removes the container referenced under the link `/redis`. - - ```console - $ docker rm /redis - - /redis - ``` - - ### Remove a link specified with `--link` on the default bridge network (--link) {#link} - - This removes the underlying link between `/webapp` and the `/redis` - containers on the default bridge network, removing all network communication - between the two containers. This does not apply when `--link` is used with - user-specified networks. - - ```console - $ docker rm --link /webapp/redis - - /webapp/redis - ``` - - ### Force-remove a running container (--force) {#force} - - This command force-removes a running container. - - ```console - $ docker rm --force redis - - redis - ``` - - The main process inside the container referenced under the link `redis` will receive - `SIGKILL`, then the container will be removed. - - ### Remove all stopped containers - - Use the [`docker container prune`](container_prune.md) command to remove all - stopped containers, or refer to the [`docker system prune`](system_prune.md) - command to remove unused containers in addition to other Docker resources, such - as (unused) images and networks. - - Alternatively, you can use the `docker ps` with the `-q` / `--quiet` option to - generate a list of container IDs to remove, and use that list as argument for - the `docker rm` command. - - Combining commands can be more flexible, but is less portable as it depends - on features provided by the shell, and the exact syntax may differ depending on - what shell is used. To use this approach on Windows, consider using PowerShell - or Bash. 
- - The example below uses `docker ps -q` to print the IDs of all containers that - have exited (`--filter status=exited`), and removes those containers with - the `docker rm` command: - - ```console - $ docker rm $(docker ps --filter status=exited -q) - ``` - - Or, using the `xargs` Linux utility; - - ```console - $ docker ps --filter status=exited -q | xargs docker rm - ``` - - ### Remove a container and its volumes (-v, --volumes) {#volumes} - - ```console - $ docker rm --volumes redis - redis - ``` - - This command removes the container and any volumes associated with it. - Note that if a volume was specified with a name, it will not be removed. - - ### Remove a container and selectively remove volumes - - ```console - $ docker create -v awesome:/foo -v /bar --name hello redis - hello - - $ docker rm -v hello - ``` - - In this example, the volume for `/foo` remains intact, but the volume for - `/bar` is removed. The same behavior holds for volumes inherited with - `--volumes-from`. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_rmi.yaml b/data/engine-cli/docker_rmi.yaml index 504acd9f2..c72df16e0 100644 --- a/data/engine-cli/docker_rmi.yaml +++ b/data/engine-cli/docker_rmi.yaml @@ -1,15 +1,7 @@ command: docker rmi aliases: docker image rm, docker image remove, docker rmi short: Remove one or more images -long: |- - Removes (and un-tags) one or more images from the host node. If an image has - multiple tags, using this command with the tag as a parameter only removes the - tag. If the tag is the only one for the image, both the image and the tag are - removed. - - This does not remove images from a registry. You cannot remove an image of a - running container unless you use the `-f` option. To see all images on a host - use the [`docker image ls`](images.md) command. +long: Remove one or more images usage: docker rmi [OPTIONS] IMAGE [IMAGE...] pname: docker plink: docker.yaml @@ -46,82 +38,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - You can remove an image using its short or long ID, its tag, or its digest. If - an image has one or more tags referencing it, you must remove all of them before - the image is removed. Digest references are removed automatically when an image - is removed by tag. - - ```console - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - - $ docker rmi fd484f19954f - - Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force - 2013/12/11 05:47:16 Error: failed to remove one or more images - - $ docker rmi test1:latest - - Untagged: test1:latest - - $ docker rmi test2:latest - - Untagged: test2:latest - - - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - - $ docker rmi test:latest - - Untagged: test:latest - Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - ``` - - If you use the `-f` flag and specify the image's short or long ID, then this - command untags and removes all images that match the specified ID. 
- - ```console - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - - $ docker rmi -f fd484f19954f - - Untagged: test1:latest - Untagged: test:latest - Untagged: test2:latest - Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - ``` - - An image pulled by digest has no tag associated with it: - - ```console - $ docker images --digests - - REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE - localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB - ``` - - To remove an image using its digest: - - ```console - $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf - Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf - Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 - Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 - Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_run.yaml b/data/engine-cli/docker_run.yaml index 40d6b61f6..f1cd0cd50 100644 --- a/data/engine-cli/docker_run.yaml +++ b/data/engine-cli/docker_run.yaml @@ -1,11 +1,7 @@ command: docker run aliases: docker container run, docker run short: Create and run a new container from an image -long: |- - The `docker run` command runs a command in a new container, pulling the image if needed and starting the container. - - You can restart a stopped container with all its previous changes intact using `docker start`. - Use `docker ps -a` to view a list of all containers, including those that are stopped. +long: Create and run a new container from an image usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
pname: docker plink: docker.yaml @@ -13,7 +9,6 @@ options: - option: add-host value_type: list description: Add a custom host-to-IP mapping (host:ip) - details_url: '#add-host' deprecated: false hidden: false experimental: false @@ -36,7 +31,6 @@ options: shorthand: a value_type: list description: Attach to STDIN, STDOUT or STDERR - details_url: '#attach' deprecated: false hidden: false experimental: false @@ -109,7 +103,6 @@ options: - option: cidfile value_type: string description: Write the container ID to the file - details_url: '#cidfile' deprecated: false hidden: false experimental: false @@ -233,7 +226,6 @@ options: - option: detach-keys value_type: string description: Override the key sequence for detaching a container - details_url: '#detach-keys' deprecated: false hidden: false experimental: false @@ -243,7 +235,6 @@ options: - option: device value_type: list description: Add a host device to the container - details_url: '#device' deprecated: false hidden: false experimental: false @@ -253,7 +244,6 @@ options: - option: device-cgroup-rule value_type: list description: Add a rule to the cgroup allowed devices list - details_url: '#device-cgroup-rule' deprecated: false hidden: false experimental: false @@ -368,7 +358,6 @@ options: shorthand: e value_type: list description: Set environment variables - details_url: '#env' deprecated: false hidden: false experimental: false @@ -396,7 +385,6 @@ options: - option: gpus value_type: gpu-request description: GPU devices to add to the container ('all' to pass all GPUs) - details_url: '#gpus' deprecated: false hidden: false min_api_version: "1.40" @@ -442,6 +430,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: health-start-interval + value_type: duration + default_value: 0s + description: | + Time between running the check during the start period (ms|s|m|h) (default 0s) + deprecated: false + hidden: false + min_api_version: "1.44" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: health-start-period value_type: duration default_value: 0s @@ -559,7 +559,6 @@ options: - option: isolation value_type: string description: Container isolation technology - details_url: '#isolation' deprecated: false hidden: false experimental: false @@ -580,7 +579,6 @@ options: shorthand: l value_type: list description: Set meta data on a container - details_url: '#label' deprecated: false hidden: false experimental: false @@ -646,7 +644,6 @@ options: value_type: bytes default_value: "0" description: Memory limit - details_url: '#memory' deprecated: false hidden: false experimental: false @@ -687,7 +684,6 @@ options: - option: mount value_type: mount description: Attach a filesystem mount to the container - details_url: '#mount' deprecated: false hidden: false experimental: false @@ -697,7 +693,6 @@ options: - option: name value_type: string description: Assign a name to the container - details_url: '#name' deprecated: false hidden: false experimental: false @@ -725,7 +720,6 @@ options: - option: network value_type: network description: Connect a container to a network - details_url: '#network' deprecated: false hidden: false experimental: false @@ -804,7 +798,6 @@ options: value_type: bool default_value: "false" description: Give extended privileges to this container - details_url: '#privileged' deprecated: false hidden: false experimental: false @@ -815,7 +808,6 @@ options: shorthand: p value_type: list description: Publish a container's port(s) to the host - details_url: '#publish' 
deprecated: false hidden: false experimental: false @@ -837,7 +829,6 @@ options: value_type: string default_value: missing description: Pull image before running (`always`, `missing`, `never`) - details_url: '#pull' deprecated: false hidden: false experimental: false @@ -859,7 +850,6 @@ options: value_type: bool default_value: "false" description: Mount the container's root filesystem as read only - details_url: '#read-only' deprecated: false hidden: false experimental: false @@ -870,7 +860,6 @@ options: value_type: string default_value: "no" description: Restart policy to apply when a container exits - details_url: '#restart' deprecated: false hidden: false experimental: false @@ -899,7 +888,6 @@ options: - option: security-opt value_type: list description: Security Options - details_url: '#security-opt' deprecated: false hidden: false experimental: false @@ -929,7 +917,6 @@ options: - option: stop-signal value_type: string description: Signal to stop the container - details_url: '#stop-signal' deprecated: false hidden: false experimental: false @@ -940,7 +927,6 @@ options: value_type: int default_value: "0" description: Timeout (in seconds) to stop a container - details_url: '#stop-timeout' deprecated: false hidden: false min_api_version: "1.25" @@ -951,7 +937,6 @@ options: - option: storage-opt value_type: list description: Storage driver options for the container - details_url: '#storage-opt' deprecated: false hidden: false experimental: false @@ -962,7 +947,6 @@ options: value_type: map default_value: map[] description: Sysctl options - details_url: '#sysctl' deprecated: false hidden: false experimental: false @@ -972,7 +956,6 @@ options: - option: tmpfs value_type: list description: Mount a tmpfs directory - details_url: '#tmpfs' deprecated: false hidden: false experimental: false @@ -994,7 +977,6 @@ options: value_type: ulimit default_value: '[]' description: Ulimit options - details_url: '#ulimit' deprecated: false hidden: false experimental: false @@ -1033,7 +1015,6 @@ options: shorthand: v value_type: list description: Bind mount a volume - details_url: '#volume' deprecated: false hidden: false experimental: false @@ -1052,7 +1033,6 @@ options: - option: volumes-from value_type: list description: Mount volumes from the specified container(s) - details_url: '#volumes-from' deprecated: false hidden: false experimental: false @@ -1063,858 +1043,12 @@ options: shorthand: w value_type: string description: Working directory inside the container - details_url: '#workdir' deprecated: false hidden: false experimental: false experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Assign name and allocate pseudo-TTY (--name, -it) {#name} - - ```console - $ docker run --name test -it debian - - root@d6c0fe130dba:/# exit 13 - $ echo $? - 13 - $ docker ps -a | grep test - d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test - ``` - - This example runs a container named `test` using the `debian:latest` - image. The `-it` instructs Docker to allocate a pseudo-TTY connected to - the container's stdin; creating an interactive `bash` shell in the container. - The example quits the `bash` shell by entering - `exit 13`, passing the exit code on to the caller of - `docker run`, and recording it in the `test` container's metadata. - - ### Capture container ID (--cidfile) {#cidfile} - - ```console - $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" - ``` - - This creates a container and prints `test` to the console. 
The `cidfile` - flag makes Docker attempt to create a new file and write the container ID to it. - If the file exists already, Docker returns an error. Docker closes this - file when `docker run` exits. - - ### Full container capabilities (--privileged) {#privileged} - - ```console - $ docker run -t -i --rm ubuntu bash - root@bc338942ef20:/# mount -t tmpfs none /mnt - mount: permission denied - ``` - - This *doesn't* work, because by default, Docker drops most potentially dangerous kernel - capabilities, including `CAP_SYS_ADMIN ` (which is required to mount - filesystems). However, the `--privileged` flag allows it to run: - - ```console - $ docker run -t -i --privileged ubuntu bash - root@50e3f57e16e6:/# mount -t tmpfs none /mnt - root@50e3f57e16e6:/# df -h - Filesystem Size Used Avail Use% Mounted on - none 1.9G 0 1.9G 0% /mnt - ``` - - The `--privileged` flag gives *all* capabilities to the container, and it also - lifts all the limitations enforced by the `device` cgroup controller. In other - words, the container can then do almost everything that the host can do. This - flag exists to allow special use-cases, like running Docker within Docker. - - ### Set working directory (-w, --workdir) {#workdir} - - ```console - $ docker run -w /path/to/dir/ -i -t ubuntu pwd - ``` - - The `-w` option runs the command executed inside the directory specified, in this example, - `/path/to/dir/`. If the path does not exist, Docker creates it inside the container. - - ### Set storage driver options per container (--storage-opt) {#storage-opt} - - ```console - $ docker run -it --storage-opt size=120G fedora /bin/bash - ``` - - This (size) constraints the container filesystem size to 120G at creation time. - This option is only available for the `devicemapper`, `btrfs`, `overlay2`, - `windowsfilter` and `zfs` storage drivers. - - For the `overlay2` storage driver, the size option is only available if the - backing filesystem is `xfs` and mounted with the `pquota` mount option. - Under these conditions, you can pass any size less than the backing filesystem size. - - For the `windowsfilter`, `devicemapper`, `btrfs`, and `zfs` storage drivers, - you cannot pass a size less than the Default BaseFS Size. - - - ### Mount tmpfs (--tmpfs) {#tmpfs} - - ```console - $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image - ``` - - The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, - `noexec`, `nosuid`, `size=65536k` options. - - ### Mount volume (-v) {#volume} - - ```console - $ docker run -v $(pwd):$(pwd) -w $(pwd) -i -t ubuntu pwd - ``` - - The example above mounts the current directory into the container at the same path - using the `-v` flag, sets it as the working directory, and then runs the `pwd` command inside the container. - - As of Docker Engine version 23, you can use relative paths on the host. - - ```console - $ docker run -v ./content:/content -w /content -i -t ubuntu pwd - ``` - - The example above mounts the `content` directory in the current directory into the container at the - `/content` path using the `-v` flag, sets it as the working directory, and then - runs the `pwd` command inside the container. - - ```console - $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash - ``` - - When the host directory of a bind-mounted volume doesn't exist, Docker - automatically creates this directory on the host for you. In the - example above, Docker creates the `/doesnt/exist` - folder before starting your container. 
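-
- As a brief additional sketch, the `-v` flag also accepts a named volume as its source. The volume name `app-data` below is just an illustrative placeholder; Docker creates the volume if it doesn't exist yet:
-
- ```console
- $ docker run --rm -v app-data:/data busybox sh -c "echo hello > /data/greeting && cat /data/greeting"
- hello
- ```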
- - ### Mount volume read-only (--read-only) {#read-only} - - ```console - $ docker run --read-only -v /icanwrite busybox touch /icanwrite/here - ``` - - You can use volumes in combination with the `--read-only` flag to control where - a container writes files. The `--read-only` flag mounts the container's root - filesystem as read only prohibiting writes to locations other than the - specified volumes for the container. - - ```console - $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh - ``` - - By bind-mounting the Docker Unix socket and statically linked Docker - binary (refer to [get the Linux binary](/engine/install/binaries/#install-static-binaries)), - you give the container the full access to create and manipulate the host's - Docker daemon. - - On Windows, you must specify the paths using Windows-style path semantics. - - ```powershell - PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt - Contents of file - - PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt - Contents of file - ``` - - The following examples fails when using Windows-based containers, as the - destination of a volume or bind mount inside the container must be one of: - a non-existing or empty directory; or a drive other than `C:`. Further, the source - of a bind mount must be a local directory, not a file. - - ```powershell - net use z: \\remotemachine\share - docker run -v z:\foo:c:\dest ... - docker run -v \\uncpath\to\directory:c:\dest ... - docker run -v c:\foo\somefile.txt:c:\dest ... - docker run -v c:\foo:c: ... - docker run -v c:\foo:c:\existing-directory-with-contents ... - ``` - - For in-depth information about volumes, refer to [manage data in containers](/storage/volumes/) - - ### Add bind mounts or volumes using the --mount flag {#mount} - - The `--mount` flag allows you to mount volumes, host-directories, and `tmpfs` - mounts in a container. - - The `--mount` flag supports most options supported by the `-v` or the - `--volume` flag, but uses a different syntax. For in-depth information on the - `--mount` flag, and a comparison between `--volume` and `--mount`, refer to - [Bind mounts](/storage/bind-mounts/). - - Even though there is no plan to deprecate `--volume`, usage of `--mount` is recommended. - - Examples: - - ```console - $ docker run --read-only --mount type=volume,target=/icanwrite busybox touch /icanwrite/here - ``` - - ```console - $ docker run -t -i --mount type=bind,src=/data,dst=/data busybox sh - ``` - - ### Publish or expose port (-p, --expose) {#publish} - - ```console - $ docker run -p 127.0.0.1:80:8080/tcp ubuntu bash - ``` - - This binds port `8080` of the container to TCP port `80` on `127.0.0.1` of the host - machine. You can also specify `udp` and `sctp` ports. - The [Docker User Guide](/network/links/) - explains in detail how to use ports in Docker. - - Note that ports which are not bound to the host (i.e., `-p 80:80` instead of - `-p 127.0.0.1:80:80`) are externally accessible. This also applies if - you configured UFW to block this specific port, as Docker manages its - own iptables rules. [Read more](/network/iptables/) - - ```console - $ docker run --expose 80 ubuntu bash - ``` - - This exposes port `80` of the container without publishing the port to the host - system's interfaces. 
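-
- To check how a published port was mapped on the host, one option is the `docker port` command. This is a rough sketch; the container name `web` and the output shown are illustrative:
-
- ```console
- $ docker run -d -p 127.0.0.1:80:8080/tcp --name web busybox top
- $ docker port web
- 8080/tcp -> 127.0.0.1:80
- ```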
- - ### Set the pull policy (--pull) {#pull} - - Use the `--pull` flag to set the image pull policy when creating (and running) - the container. - - The `--pull` flag can take one of these values: - - | Value | Description | - |:--------------------|:--------------------------------------------------------------------------------------------------------------------| - | `missing` (default) | Pull the image if it was not found in the image cache, or use the cached image otherwise. | - | `never` | Do not pull the image, even if it's missing, and produce an error if the image does not exist in the image cache. | - | `always` | Always perform a pull before creating the container. | - - When creating (and running) a container from an image, the daemon checks if the - image exists in the local image cache. If the image is missing, an error is - returned to the CLI, allowing it to initiate a pull. - - The default (`missing`) is to only pull the image if it's not present in the - daemon's image cache. This default allows you to run images that only exist - locally (for example, images you built from a Dockerfile, but that have not - been pushed to a registry), and reduces networking. - - The `always` option always initiates a pull before creating the container. This - option makes sure the image is up-to-date, and prevents you from using outdated - images, but may not be suitable in situations where you want to test a locally - built image before pushing (as pulling the image overwrites the existing image - in the image cache). - - The `never` option disables (implicit) pulling images when creating containers, - and only uses images that are available in the image cache. If the specified - image is not found, an error is produced, and the container is not created. - This option is useful in situations where networking is not available, or to - prevent images from being pulled implicitly when creating containers. - - The following example shows `docker run` with the `--pull=never` option set, - which produces an error because the image is missing from the image cache: - - ```console - $ docker run --pull=never hello-world - docker: Error response from daemon: No such image: hello-world:latest. - ``` - - ### Set environment variables (-e, --env, --env-file) {#env} - - ```console - $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash - ``` - - Use the `-e`, `--env`, and `--env-file` flags to set simple (non-array) - environment variables in the container you're running, or overwrite variables - defined in the Dockerfile of the image you're running. - - You can define the variable and its value when running the container: - - ```console - $ docker run --env VAR1=value1 --env VAR2=value2 ubuntu env | grep VAR - VAR1=value1 - VAR2=value2 - ``` - - You can also use variables exported to your local environment: - - ```console - export VAR1=value1 - export VAR2=value2 - - $ docker run --env VAR1 --env VAR2 ubuntu env | grep VAR - VAR1=value1 - VAR2=value2 - ``` - - When running the command, the Docker CLI client checks the value the variable - has in your local environment and passes it to the container. - If no `=` is provided and that variable is not exported in your local - environment, the variable isn't set in the container. - - You can also load the environment variables from a file. This file should use - the syntax `<variable>=value` (which sets the variable to the given value) or - `<variable>` (which takes the value from the local environment), and `#` for comments.
- Additionally, it's important to note that lines beginning with `#` are treated as line comments - and are ignored, whereas a `#` appearing anywhere else in a line is treated as part of the variable value. - - ```console - $ cat env.list - # This is a comment - VAR1=value1 - VAR2=value2 - USER - - $ docker run --env-file env.list ubuntu env | grep -E 'VAR|USER' - VAR1=value1 - VAR2=value2 - USER=jonzeolla - ``` - - ### Set metadata on container (-l, --label, --label-file) {#label} - - A label is a `key=value` pair that applies metadata to a container. To label a container with two labels: - - ```console - $ docker run -l my-label --label com.example.foo=bar ubuntu bash - ``` - - The `my-label` key doesn't specify a value so the label defaults to an empty - string (`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). - - The `key=value` must be unique to avoid overwriting the label value. If you - specify labels with identical keys but different values, each subsequent value - overwrites the previous. Docker uses the last `key=value` you supply. - - Use the `--label-file` flag to load multiple labels from a file. Delimit each - label in the file with an EOL mark. The example below loads labels from a - labels file in the current directory: - - ```console - $ docker run --label-file ./labels ubuntu bash - ``` - - The label-file format is similar to the format for loading environment - variables. (Unlike environment variables, labels are not visible to processes - running inside a container.) The following example shows a label-file - format: - - ```console - com.example.label1="a label" - - # this is a comment - com.example.label2=another\ label - com.example.label3 - ``` - - You can load multiple label-files by supplying multiple `--label-file` flags. - - For additional information on working with labels, see [*Labels - custom - metadata in Docker*](/config/labels-custom-metadata/) in - the Docker User Guide. - - ### Connect a container to a network (--network) {#network} - - To start a container and connect it to a network, use the `--network` option. - - The following commands create a network named `my-net` and adds a `busybox` container - to the `my-net` network. - - ```console - $ docker network create my-net - $ docker run -itd --network=my-net busybox - ``` - - You can also choose the IP addresses for the container with `--ip` and `--ip6` - flags when you start the container on a user-defined network. To assign a - static IP to containers, you must specify subnet block for the network. - - ```console - $ docker network create --subnet 192.0.2.0/24 my-net - $ docker run -itd --network=my-net --ip=192.0.2.69 busybox - ``` - - If you want to add a running container to a network use the `docker network connect` subcommand. - - You can connect multiple containers to the same network. Once connected, the - containers can communicate using only another container's IP address - or name. For `overlay` networks or custom plugins that support multi-host - connectivity, containers connected to the same multi-host network but launched - from different Engines can also communicate in this way. - - > **Note** - > - > The default bridge network only allow containers to communicate with each other using - > internal IP addresses. User-created bridge networks provide DNS resolution between - > containers using container names. - - You can disconnect a container from a network using the `docker network - disconnect` command. 
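-
- For example, a rough sketch of disconnecting a container from the `my-net` network created above (`my-container` is a placeholder for the container's name or ID):
-
- ```console
- $ docker network disconnect my-net my-container
- ```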
- - For more information on connecting a container to a network when using the `run` command, see the ["*Docker network overview*"](/network/). - - ### Mount volumes from container (--volumes-from) {#volumes-from} - - ```console - $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd - ``` - - The `--volumes-from` flag mounts all the defined volumes from the referenced - containers. You can specify more than one container by repetitions of the `--volumes-from` - argument. The container ID may be optionally suffixed with `:ro` or `:rw` to - mount the volumes in read-only or read-write mode, respectively. By default, - Docker mounts the volumes in the same mode (read write or read only) as - the reference container. - - Labeling systems like SELinux require placing proper labels on volume - content mounted into a container. Without a label, the security system might - prevent the processes running inside the container from using the content. By - default, Docker does not change the labels set by the OS. - - To change the label in the container context, you can add either of two suffixes - `:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file - objects on the shared volumes. The `z` option tells Docker that two containers - share the volume content. As a result, Docker labels the content with a shared - content label. Shared volume labels allow all containers to read/write content. - The `Z` option tells Docker to label the content with a private unshared label. - Only the current container can use a private volume. - - ### Attach to STDIN/STDOUT/STDERR (-a, --attach) {#attach} - - The `--attach` (or `-a`) flag tells `docker run` to bind to the container's - `STDIN`, `STDOUT` or `STDERR`. This makes it possible to manipulate the output - and input as needed. - - ```console - $ echo "test" | docker run -i -a stdin ubuntu cat - - ``` - - This pipes data into a container and prints the container's ID by attaching - only to the container's `STDIN`. - - ```console - $ docker run -a stderr ubuntu echo test - ``` - - This isn't going to print anything to the console unless there's an error because output - is only attached to the `STDERR` of the container. The container's logs - still store what's written to `STDERR` and `STDOUT`. - - ```console - $ cat somefile | docker run -i -a stdin mybuilder dobuild - ``` - - This example shows a way of using `--attach` to pipe a file into a container. - The command prints the container's ID after the build completes and you can retrieve - the build logs using `docker logs`. This is - useful if you need to pipe a file or something else into a container and - retrieve the container's ID once the container has finished running. - - See also [the `docker cp` command](cp.md). - - ### Override the detach sequence (--detach-keys) {#detach-keys} - - Use the `--detach-keys` option to override the Docker key sequence for detach. - This is useful if the Docker default sequence conflicts with key sequence you - use for other applications. There are two ways to define your own detach key - sequence, as a per-container override or as a configuration property on your - entire configuration. - - To override the sequence for an individual container, use the - `--detach-keys=""` flag with the `docker attach` command. 
The format of - the `` is either a letter [a-Z], or the `ctrl-` combined with any of - the following: - - * `a-z` (a single lowercase alpha character ) - * `@` (at sign) - * `[` (left bracket) - * `\\` (two backward slashes) - * `_` (underscore) - * `^` (caret) - - These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key - sequences. To configure a different configuration default key sequence for all - containers, see [**Configuration file** section](cli.md#configuration-files). - - ### Add host device to container (--device) {#device} - - ```console - $ docker run -it --rm \ - --device=/dev/sdc:/dev/xvdc \ - --device=/dev/sdd \ - --device=/dev/zero:/dev/foobar \ - ubuntu ls -l /dev/{xvdc,sdd,foobar} - - brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc - brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd - crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/foobar - ``` - - It's often necessary to directly expose devices to a container. The `--device` - option enables that. For example, adding a specific block storage device or loop - device or audio device to an otherwise unprivileged container - (without the `--privileged` flag) and have the application directly access it. - - By default, the container is able to `read`, `write` and `mknod` these devices. - This can be overridden using a third `:rwm` set of options to each `--device` - flag. If the container is running in privileged mode, then Docker ignores the - specified permissions. - - ```console - $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc - - Command (m for help): q - $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc - You will not be able to write the partition table. - - Command (m for help): q - - $ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc - - Command (m for help): q - - $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc - fdisk: unable to open /dev/xvdc: Operation not permitted - ``` - - > **Note** - > - > The `--device` option cannot be safely used with ephemeral devices. You shouldn't - > add block devices that may be removed to untrusted containers with `--device`. - - For Windows, the format of the string passed to the `--device` option is in - the form of `--device=/`. Beginning with Windows Server 2019 - and Windows 10 October 2018 Update, Windows only supports an IdType of - `class` and the Id as a [device interface class - GUID](https://docs.microsoft.com/en-us/windows-hardware/drivers/install/overview-of-device-interface-classes). - Refer to the table defined in the [Windows container - docs](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/hardware-devices-in-containers) - for a list of container-supported device interface class GUIDs. - - If you specify this option for a process-isolated Windows container, Docker makes - _all_ devices that implement the requested device interface class GUID - available in the container. For example, the command below makes all COM - ports on the host visible in the container. - - ```powershell - PS C:\> docker run --device=class/86E0D1E0-8089-11D0-9CE4-08003E301F73 mcr.microsoft.com/windows/servercore:ltsc2019 - ``` - - > **Note** - > - > The `--device` option is only supported on process-isolated Windows containers, - > and produces an error if the container isolation is `hyperv`. 
- - ### Using dynamically created devices (--device-cgroup-rule) {#device-cgroup-rule} - - Docker assigns devices available to a container at creation time. The - assigned devices are added to the cgroup.allow file and - created into the container when it runs. This poses a problem when - you need to add a new device to running container. - - One solution is to add a more permissive rule to a container - allowing it access to a wider range of devices. For example, supposing - the container needs access to a character device with major `42` and - any number of minor numbers (added as new devices appear), add the - following rule: - - ```console - $ docker run -d --device-cgroup-rule='c 42:* rmw' --name my-container my-image - ``` - - Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 ` - the required device when it is added. - - > **Note**: You still need to explicitly add initially present devices to the - > `docker run` / `docker create` command. - - ### Access an NVIDIA GPU {#gpus} - - The `--gpus` flag allows you to access NVIDIA GPU resources. First you need to - install the [nvidia-container-runtime](https://nvidia.github.io/nvidia-container-runtime/). - - Read [Specify a container's resources](/config/containers/resource_constraints/) - for more information. - - To use `--gpus`, specify which GPUs (or all) to use. If you provide no value, Docker uses all - available GPUs. The example below exposes all available GPUs. - - ```console - $ docker run -it --rm --gpus all ubuntu nvidia-smi - ``` - - Use the `device` option to specify GPUs. The example below exposes a specific - GPU. - - ```console - $ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi - ``` - - The example below exposes the first and third GPUs. - - ```console - $ docker run -it --rm --gpus '"device=0,2"' nvidia-smi - ``` - - ### Restart policies (--restart) {#restart} - - Use the `--restart` flag to specify a container's *restart policy*. A restart - policy controls whether the Docker daemon restarts a container after exit. - Docker supports the following restart policies: - - | Policy | Result | - |:---------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| - | `no` | Do not automatically restart the container when it exits. This is the default. | - | `on-failure[:max-retries]` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. | - | `unless-stopped` | Restart the container unless it's explicitly stopped or Docker itself is stopped or restarted. | - | `always` | Always restart the container regardless of the exit status. When you specify always, the Docker daemon tries to restart the container indefinitely. The container always starts on daemon startup, regardless of the current state of the container. | - - ```console - $ docker run --restart=always redis - ``` - - This will run the `redis` container with a restart policy of **always** - so that if the container exits, Docker restarts it. - - You can find more detailed information on restart policies in the - [Restart Policies (--restart)](../run.md#restart-policies---restart) - section of the Docker run reference page. 
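As an additional sketch, the `on-failure` policy accepts a retry limit; the container name `flaky-app` and the `busybox false` command below are only illustrative:

```console
$ docker run -d --restart=on-failure:5 --name flaky-app busybox false

$ docker inspect --format '{{.RestartCount}}' flaky-app
```

Because `false` exits with a non-zero status, the daemon retries the container up to five times, and `RestartCount` reports how many restarts have happened so far.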
- - ### Add entries to container hosts file (--add-host) {#add-host} - - You can add other hosts into a container's `/etc/hosts` file by using one or - more `--add-host` flags. This example adds a static address for a host named - `docker`: - - ```console - $ docker run --add-host=docker:93.184.216.34 --rm -it alpine - - / # ping docker - PING docker (93.184.216.34): 56 data bytes - 64 bytes from 93.184.216.34: seq=0 ttl=37 time=93.052 ms - 64 bytes from 93.184.216.34: seq=1 ttl=37 time=92.467 ms - 64 bytes from 93.184.216.34: seq=2 ttl=37 time=92.252 ms - ^C - --- docker ping statistics --- - 4 packets transmitted, 4 packets received, 0% packet loss - round-trip min/avg/max = 92.209/92.495/93.052 ms - ``` - - The `--add-host` flag supports a special `host-gateway` value that resolves to - the internal IP address of the host. This is useful when you want containers to - connect to services running on the host machine. - - It's conventional to use `host.docker.internal` as the hostname referring to - `host-gateway`. Docker Desktop automatically resolves this hostname, see - [Explore networking features](/desktop/networking/#i-want-to-connect-from-a-container-to-a-service-on-the-host). - - The following example shows how the special `host-gateway` value works. The - example runs an HTTP server that serves a file from host to container over the - `host.docker.internal` hostname, which resolves to the host's internal IP. - - ```console - $ echo "hello from host!" > ./hello - $ python3 -m http.server 8000 - Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ... - $ docker run \ - --add-host host.docker.internal:host-gateway \ - curlimages/curl -s host.docker.internal:8000/hello - hello from host! - ``` - - ### Set ulimits in container (--ulimit) {#ulimit} - - Since setting `ulimit` settings in a container requires extra privileges not - available in the default container, you can set these using the `--ulimit` flag. - Specify `--ulimit` with a soft and hard limit in the format - `=[:]`. For example: - - ```console - $ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" - 1024 - ``` - - > **Note** - > - > If you don't provide a hard limit value, Docker uses the soft limit value - > for both values. If you don't provide any values, they are inherited from - > the default `ulimits` set on the daemon. - - > **Note** - > - > The `as` option is deprecated. - > In other words, the following script is not supported: - > - > ```console - > $ docker run -it --ulimit as=1024 fedora /bin/bash - > ``` - - Docker sends the values to the appropriate OS `syscall` and doesn't perform any byte conversion. - Take this into account when setting the values. - - #### For `nproc` usage - - Be careful setting `nproc` with the `ulimit` flag as Linux uses `nproc` to set the - maximum number of processes available to a user, not to a container. For example, start four - containers with `daemon` user: - - ```console - $ docker run -d -u daemon --ulimit nproc=3 busybox top - - $ docker run -d -u daemon --ulimit nproc=3 busybox top - - $ docker run -d -u daemon --ulimit nproc=3 busybox top - - $ docker run -d -u daemon --ulimit nproc=3 busybox top - ``` - - The 4th container fails and reports a "[8] System error: resource temporarily unavailable" error. - This fails because the caller set `nproc=3` resulting in the first three containers using up - the three processes quota set for the `daemon` user. 
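As a further sketch, assuming the `debian` image, the soft and hard limits can be set to different values and checked separately inside the container:

```console
$ docker run --rm --ulimit nofile=1024:2048 debian sh -c "ulimit -Sn; ulimit -Hn"
1024
2048
```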
- - ### Stop container with signal (--stop-signal) {#stop-signal} - - The `--stop-signal` flag sends the system call signal to the - container to exit. This signal can be a signal name in the format `SIG`, - for instance `SIGKILL`, or an unsigned number that matches a position in the - kernel's syscall table, for instance `9`. - - The default value is defined by [`STOPSIGNAL`](/engine/reference/builder/#stopsignal) - in the image, or `SIGTERM` if the image has no `STOPSIGNAL` defined. - - ### Optional security options (--security-opt) {#security-opt} - - On Windows, you can use this flag to specify the `credentialspec` option. - The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`. - - ### Stop container with timeout (--stop-timeout) {#stop-timeout} - - The `--stop-timeout` flag sets the number of seconds to wait for the container - to stop after sending the pre-defined (see `--stop-signal`) system call signal. - If the container does not exit after the timeout elapses, it's forcibly killed - with a `SIGKILL` signal. - - If you set `--stop-timeout` to `-1`, no timeout is applied, and the daemon - waits indefinitely for the container to exit. - - The Daemon determines the default, and is 10 seconds for Linux containers, - and 30 seconds for Windows containers. - - ### Specify isolation technology for container (--isolation) {#isolation} - - This option is useful in situations where you are running Docker containers on - Windows. The `--isolation=` option sets a container's isolation technology. - On Linux, the only supported is the `default` option which uses Linux namespaces. - These two commands are equivalent on Linux: - - ```console - $ docker run -d busybox top - $ docker run -d --isolation default busybox top - ``` - - On Windows, `--isolation` can take one of these values: - - | Value | Description | - |:----------|:-------------------------------------------------------------------------------------------| - | `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below). | - | `process` | Shared-kernel namespace isolation. | - | `hyperv` | Hyper-V hypervisor partition-based isolation. | - - The default isolation on Windows server operating systems is `process`, and `hyperv` - on Windows client operating systems, such as Windows 10. Process isolation has better - performance, but requires that the image and host use the same kernel version. - - On Windows server, assuming the default configuration, these commands are equivalent - and result in `process` isolation: - - ```powershell - PS C:\> docker run -d microsoft/nanoserver powershell echo process - PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process - PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process - ``` - - If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, or - are running against a Windows client-based daemon, these commands are equivalent and - result in `hyperv` isolation: - - ```powershell - PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv - PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv - PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv - ``` - - ### Specify hard limits on memory available to containers (-m, --memory) {#memory} - - These parameters always set an upper limit on the memory available to the container. 
Linux sets this - on the cgroup and applications in a container can query it at `/sys/fs/cgroup/memory/memory.limit_in_bytes`. - - On Windows, this affects containers differently depending on what type of isolation you use. - - - With `process` isolation, Windows reports the full memory of the host system, not the limit to applications running inside the container - - ```powershell - PS C:\> docker run -it -m 2GB --isolation=process microsoft/nanoserver powershell Get-ComputerInfo *memory* - - CsTotalPhysicalMemory : 17064509440 - CsPhyicallyInstalledMemory : 16777216 - OsTotalVisibleMemorySize : 16664560 - OsFreePhysicalMemory : 14646720 - OsTotalVirtualMemorySize : 19154928 - OsFreeVirtualMemory : 17197440 - OsInUseVirtualMemory : 1957488 - OsMaxProcessMemorySize : 137438953344 - ``` - - - With `hyperv` isolation, Windows creates a utility VM that is big enough to hold the memory limit, plus the minimal OS needed to host the container. That size is reported as "Total Physical Memory." - - ```powershell - PS C:\> docker run -it -m 2GB --isolation=hyperv microsoft/nanoserver powershell Get-ComputerInfo *memory* - - CsTotalPhysicalMemory : 2683355136 - CsPhyicallyInstalledMemory : - OsTotalVisibleMemorySize : 2620464 - OsFreePhysicalMemory : 2306552 - OsTotalVirtualMemorySize : 2620464 - OsFreeVirtualMemory : 2356692 - OsInUseVirtualMemory : 263772 - OsMaxProcessMemorySize : 137438953344 - ``` - - ### Configure namespaced kernel parameters (sysctls) at runtime (--sysctl) {#sysctl} - - The `--sysctl` sets namespaced kernel parameters (sysctls) in the - container. For example, to turn on IP forwarding in the containers - network namespace, run this command: - - ```console - $ docker run --sysctl net.ipv4.ip_forward=1 someimage - ``` - - > **Note** - > - > Not all sysctls are namespaced. Docker does not support changing sysctls - > inside of a container that also modify the host system. As the kernel - > evolves we expect to see more sysctls become namespaced. - - - #### Currently supported sysctls - - IPC Namespace: - - - `kernel.msgmax`, `kernel.msgmnb`, `kernel.msgmni`, `kernel.sem`, - `kernel.shmall`, `kernel.shmmax`, `kernel.shmmni`, `kernel.shm_rmid_forced`. - - Sysctls beginning with `fs.mqueue.*` - - If you use the `--ipc=host` option these sysctls are not allowed. - - Network Namespace: - - - Sysctls beginning with `net.*` - - If you use the `--network=host` option using these sysctls are not allowed. deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_save.yaml b/data/engine-cli/docker_save.yaml index 9de39c4a0..d4151ba41 100644 --- a/data/engine-cli/docker_save.yaml +++ b/data/engine-cli/docker_save.yaml @@ -1,10 +1,7 @@ command: docker save aliases: docker image save, docker save short: Save one or more images to a tar archive (streamed to STDOUT by default) -long: |- - Produces a tarred repository to the standard output stream. - Contains all parent layers, and all tags + versions, or specified `repo:tag`, for - each argument provided. +long: Save one or more images to a tar archive (streamed to STDOUT by default) usage: docker save [OPTIONS] IMAGE [IMAGE...] pname: docker plink: docker.yaml @@ -30,42 +27,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Create a backup that can then be used with `docker load`. 
- - ```console - $ docker save busybox > busybox.tar - - $ ls -sh busybox.tar - - 2.7M busybox.tar - - $ docker save --output busybox.tar busybox - - $ ls -sh busybox.tar - - 2.7M busybox.tar - - $ docker save -o fedora-all.tar fedora - - $ docker save -o fedora-latest.tar fedora:latest - ``` - - ### Save an image to a tar.gz file using gzip - - You can use gzip to save the image file and make the backup smaller. - - ```console - $ docker save myimage:latest | gzip > myimage_latest.tar.gz - ``` - - ### Cherry-pick particular tags - - You can even cherry-pick particular tags of an image repository. - - ```console - $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_search.yaml b/data/engine-cli/docker_search.yaml index e76914a67..abfce2784 100644 --- a/data/engine-cli/docker_search.yaml +++ b/data/engine-cli/docker_search.yaml @@ -67,31 +67,31 @@ examples: |- ```console $ docker search busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED + NAME DESCRIPTION STARS OFFICIAL busybox Busybox base image. 316 [OK] - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] - odise/busybox-python 2 [OK] - azukiapp/busybox This image is meant to be used as the base... 2 [OK] - ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] - shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 [OK] - odise/busybox-curl 1 [OK] - ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] - peelsky/zulu-openjdk-busybox 1 [OK] - skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] - elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK] - socketplane/busybox 1 [OK] - oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] - ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] - nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] - openshift/busybox-http-app 0 [OK] - jllopis/busybox 0 [OK] - swyckoff/busybox 0 [OK] - powellquiring/busybox 0 [OK] - williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] - simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] - fhisamoto/busybox-java Busybox java 0 [OK] - scottabernethy/busybox 0 [OK] + progrium/busybox 50 + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 + odise/busybox-python 2 + azukiapp/busybox This image is meant to be used as the base... 2 + ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 + shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 + odise/busybox-curl 1 + ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 + peelsky/zulu-openjdk-busybox 1 + skomma/busybox-data Docker image suitable for data volume cont... 1 + elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 + socketplane/busybox 1 + oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 + ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 + nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 + openshift/busybox-http-app 0 + jllopis/busybox 0 + swyckoff/busybox 0 + powellquiring/busybox 0 + williamyeh/busybox-sh Docker image for BusyBox's sh 0 + simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 
0 + fhisamoto/busybox-java Busybox java 0 + scottabernethy/busybox 0 marclop/busybox-solr ``` @@ -103,10 +103,10 @@ examples: |- ```console $ docker search --filter=stars=3 --no-trunc busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED + NAME DESCRIPTION STARS OFFICIAL busybox Busybox base image. 325 [OK] - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK] + progrium/busybox 50 + radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 ``` ### Limit search results (--limit) {#limit} @@ -117,12 +117,12 @@ examples: |- ### Filtering (--filter) {#filter} The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more - than one filter, then pass multiple flags (e.g. `--filter is-automated=true --filter stars=3`) + than one filter, then pass multiple flags (e.g. `--filter is-official=true --filter stars=3`). The currently supported filters are: - stars (int - number of stars the image has) - - is-automated (boolean - true or false) - is the image automated or not + - is-automated (boolean - true or false) - is the image automated or not (deprecated) - is-official (boolean - true or false) - is the image official or not #### stars @@ -133,23 +133,10 @@ examples: |- ```console $ docker search --filter stars=3 busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED + NAME DESCRIPTION STARS OFFICIAL busybox Busybox base image. 325 [OK] - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] - ``` - - #### is-automated - - This example displays images with a name containing 'busybox' - and are automated builds: - - ```console - $ docker search --filter is-automated=true busybox - - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + progrium/busybox 50 + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 ``` #### is-official @@ -160,7 +147,7 @@ examples: |- ```console $ docker search --filter is-official=true --filter stars=3 busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED + NAME DESCRIPTION STARS OFFICIAL busybox Busybox base image. 325 [OK] ``` @@ -171,13 +158,13 @@ examples: |- Valid placeholders for the Go template are: - | Placeholder | Description | - |----------------|-----------------------------------| - | `.Name` | Image Name | - | `.Description` | Image description | - | `.StarCount` | Number of stars for the image | - | `.IsOfficial` | "OK" if image is official | - | `.IsAutomated` | "OK" if image build was automated | + | Placeholder | Description | + |----------------|------------------------------------------------| + | `.Name` | Image Name | + | `.Description` | Image description | + | `.StarCount` | Number of stars for the image | + | `.IsOfficial` | "OK" if image is official | + | `.IsAutomated` | "OK" if image build was automated (deprecated) | When you use the `--format` option, the `search` command will output the data exactly as the template declares. 
If you use the @@ -204,15 +191,15 @@ examples: |- This example outputs a table format: ```console - $ docker search --format "table {{.Name}}\t{{.IsAutomated}}\t{{.IsOfficial}}" nginx - - NAME AUTOMATED OFFICIAL - nginx [OK] - jwilder/nginx-proxy [OK] - richarvey/nginx-php-fpm [OK] - jrcs/letsencrypt-nginx-proxy-companion [OK] - million12/nginx-php [OK] - webdevops/php-nginx [OK] + $ docker search --format "table {{.Name}}\t{{.IsOfficial}}" nginx + + NAME OFFICIAL + nginx [OK] + jwilder/nginx-proxy + richarvey/nginx-php-fpm + jrcs/letsencrypt-nginx-proxy-companion + million12/nginx-php + webdevops/php-nginx ``` deprecated: false experimental: false diff --git a/data/engine-cli/docker_secret_inspect.yaml b/data/engine-cli/docker_secret_inspect.yaml index ff5dca8f3..289009ff7 100644 --- a/data/engine-cli/docker_secret_inspect.yaml +++ b/data/engine-cli/docker_secret_inspect.yaml @@ -60,7 +60,7 @@ inherited_options: examples: |- ### Inspect a secret by name or ID - You can inspect a secret, either by its *name*, or *ID* + You can inspect a secret, either by its name or ID. For example, given the following secret: @@ -99,7 +99,7 @@ examples: |- ### Format the output (--format) {#format} - You can use the --format option to obtain specific information about a + You can use the `--format` option to obtain specific information about a secret. The following example command outputs the creation time of the secret. diff --git a/data/engine-cli/docker_secret_ls.yaml b/data/engine-cli/docker_secret_ls.yaml index 13b10e2bc..03efcd976 100644 --- a/data/engine-cli/docker_secret_ls.yaml +++ b/data/engine-cli/docker_secret_ls.yaml @@ -78,7 +78,7 @@ examples: |- ### Filtering (--filter) {#filter} The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more - than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`). The currently supported filters are: diff --git a/data/engine-cli/docker_secret_rm.yaml b/data/engine-cli/docker_secret_rm.yaml index 2dd46038c..2cbfe3c68 100644 --- a/data/engine-cli/docker_secret_rm.yaml +++ b/data/engine-cli/docker_secret_rm.yaml @@ -38,6 +38,7 @@ examples: |- > > Unlike `docker rm`, this command does not ask for confirmation before removing > a secret. + { .warning } deprecated: false min_api_version: "1.25" experimental: false diff --git a/data/engine-cli/docker_service_create.yaml b/data/engine-cli/docker_service_create.yaml index d2d923fca..504eb34e2 100644 --- a/data/engine-cli/docker_service_create.yaml +++ b/data/engine-cli/docker_service_create.yaml @@ -204,6 +204,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: health-start-interval + value_type: duration + description: Time between running the check during the start period (ms|s|m|h) + deprecated: false + hidden: false + min_api_version: "1.44" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: health-start-period value_type: duration description: | @@ -800,7 +810,7 @@ examples: |- This passes the login token from your local client to the swarm nodes where the service is deployed, using the encrypted WAL logs. With this information, the - nodes are able to log into the registry and pull the image. + nodes are able to log in to the registry and pull the image. 
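For example, a minimal sketch of passing registry credentials along with the service (the registry host and image name are placeholders):

```console
$ docker login registry.example.com

$ docker service create \
    --with-registry-auth \
    --name my-service \
    registry.example.com/acme/app:latest
```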
### Create a service with 5 replica tasks (--replicas) {#replicas} @@ -815,7 +825,7 @@ examples: |- The above command sets the *desired* number of tasks for the service. Even though the command returns immediately, actual scaling of the service may take - some time. The `REPLICAS` column shows both the *actual* and *desired* number + some time. The `REPLICAS` column shows both the actual and desired number of replica tasks for the service. In the following example the desired state is `5` replicas, but the current @@ -968,8 +978,8 @@ examples: |- Docker supports three different kinds of mounts, which allow containers to read from or write to files or directories, either on the host operating system, or - on memory filesystems. These types are _data volumes_ (often referred to simply - as volumes), _bind mounts_, _tmpfs_, and _named pipes_. + on memory filesystems. These types are data volumes (often referred to simply + as volumes), bind mounts, tmpfs, and named pipes. A **bind mount** makes a file or directory on the host available to the container it is mounted within. A bind mount may be either read-only or @@ -1062,7 +1072,7 @@ examples: |-

The Engine mounts binds and volumes read-write unless readonly option is given when mounting the bind or volume. Note that setting readonly for a - bind-mount does not make its submounts readonly on the current Linux implementation. See also bind-nonrecursive.

+ bind-mount may not make its submounts readonly depending on the kernel version. See also bind-recursive.

  • true or 1 or no value: Mounts the bind or volume read-only.
  • false or 0: Mounts the bind or volume read-write.
  • @@ -1071,7 +1081,7 @@ examples: |- - #### Options for Bind Mounts + #### Options for bind mounts The following options can only be used for bind mounts (`type=bind`): @@ -1100,17 +1110,40 @@ examples: |- - bind-nonrecursive + bind-recursive By default, submounts are recursively bind-mounted as well. However, this behavior can be confusing when a - bind mount is configured with readonly option, because submounts are not mounted as read-only. - Set bind-nonrecursive to disable recursive bind-mount.
    + bind mount is configured with readonly option, because submounts may not be mounted as read-only, + depending on the kernel version. + Set bind-recursive to control the behavior of the recursive bind-mount.
    +
    + A value is one of:
    +
    +
      +
    • enabled: Enables recursive bind-mount. + Read-only mounts are made recursively read-only if kernel is v5.12 or later. + Otherwise they are not made recursively read-only.
    • +
    • disabled: Disables recursive bind-mount.
    • +
    • writable: Enables recursive bind-mount. + Read-only mounts are not made recursively read-only.
    • +
    • readonly: Enables recursive bind-mount. + Read-only mounts are made recursively read-only if kernel is v5.12 or later. + Otherwise the Engine raises an error.
    • +
    + When the option is not specified, the default behavior corresponds to setting enabled. + + + + bind-nonrecursive + + bind-nonrecursive is deprecated since Docker Engine v25.0. + Use bind-recursive instead.

    A value is optional:

      -
    • true or 1: Disables recursive bind-mount.
    • -
    • false or 0: Default if you do not provide a value. Enables recursive bind-mount.
    • +
    • true or 1: Equivalent to bind-recursive=disabled.
    • +
    • false or 0: Equivalent to bind-recursive=enabled.
    diff --git a/data/engine-cli/docker_service_ls.yaml b/data/engine-cli/docker_service_ls.yaml index 5e1ffdf82..150bfe1ca 100644 --- a/data/engine-cli/docker_service_ls.yaml +++ b/data/engine-cli/docker_service_ls.yaml @@ -2,7 +2,7 @@ command: docker service ls aliases: docker service ls, docker service list short: List services long: |- - This command lists services are running in the swarm. + This command lists services that are running in the swarm. > **Note** > @@ -76,7 +76,7 @@ examples: |- hh08h9uu8uwr job replicated-job 1/1 (3/5 completed) nginx:latest ``` - The `REPLICAS` column shows both the *actual* and *desired* number of tasks for + The `REPLICAS` column shows both the actual and desired number of tasks for the service. If the service is in `replicated-job` or `global-job`, it will additionally show the completion status of the job as completed tasks over total tasks the job will execute. @@ -84,7 +84,7 @@ examples: |- ### Filtering (--filter) {#filter} The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more - than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`). The currently supported filters are: @@ -95,7 +95,9 @@ examples: |- #### id - The `id` filter matches all or part of a service's id. + The `id` filter matches all or the prefix of a service's ID. + + The following filter matches services with an ID starting with `0bcjw`: ```console $ docker service ls -f "id=0bcjw" @@ -143,9 +145,9 @@ examples: |- #### name - The `name` filter matches on all or part of a service's name. + The `name` filter matches on all or the prefix of a service's name. - The following filter matches services with a name containing `redis`. + The following filter matches services with a name starting with `redis`. ```console $ docker service ls --filter name=redis diff --git a/data/engine-cli/docker_service_ps.yaml b/data/engine-cli/docker_service_ps.yaml index 161e7e69a..5cff02d0b 100644 --- a/data/engine-cli/docker_service_ps.yaml +++ b/data/engine-cli/docker_service_ps.yaml @@ -97,7 +97,7 @@ examples: |- 8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds ``` - In addition to _running_ tasks, the output also shows the task history. For + In addition to running tasks, the output also shows the task history. For example, after updating the service to use the `redis:3.0.6` image, the output may look like this: @@ -121,10 +121,11 @@ examples: |- change the task history retention limit using the [`docker swarm update`](swarm_update.md) command. - When deploying a service, docker resolves the digest for the service's - image, and pins the service to that digest. The digest is not shown by - default, but is printed if `--no-trunc` is used. The `--no-trunc` option - also shows the non-truncated task ID, and error-messages, as can be seen below; + When deploying a service, docker resolves the digest for the service's image, + and pins the service to that digest. The digest is not shown by default, but is + printed if `--no-trunc` is used. 
The `--no-trunc` option also shows the + non-truncated task ID, and error messages, as can be seen in the following + example: ```console $ docker service ps --no-trunc redis diff --git a/data/engine-cli/docker_service_update.yaml b/data/engine-cli/docker_service_update.yaml index 629754138..c0d6b1b1e 100644 --- a/data/engine-cli/docker_service_update.yaml +++ b/data/engine-cli/docker_service_update.yaml @@ -305,6 +305,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: health-start-interval + value_type: duration + description: Time between running the check during the start period (ms|s|m|h) + deprecated: false + hidden: false + min_api_version: "1.44" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: health-start-period value_type: duration description: | @@ -1047,7 +1057,7 @@ examples: |- ``` - Roll back the `web` service... + The following example rolls back the `web` service: ```console $ docker service update --rollback web diff --git a/data/engine-cli/docker_stack_config.yaml b/data/engine-cli/docker_stack_config.yaml index fad8e0113..024f8040c 100644 --- a/data/engine-cli/docker_stack_config.yaml +++ b/data/engine-cli/docker_stack_config.yaml @@ -50,13 +50,13 @@ inherited_options: examples: |- The following command outputs the result of the merge and interpolation of two Compose files. - ```bash + ```console $ docker stack config --compose-file docker-compose.yml --compose-file docker-compose.prod.yml ``` The Compose file can also be provided as standard input with `--compose-file -`: - ```bash + ```console $ cat docker-compose.yml | docker stack config --compose-file - ``` @@ -67,7 +67,7 @@ examples: |- If you have a regex for a redirect route in an environment variable for your webserver you would use two `$` signs to prevent `stack deploy` from interpolating `${1}`. - ```bash + ```yaml service: webserver environment: REDIRECT_REGEX=http://host/redirect/$${1} @@ -78,7 +78,7 @@ examples: |- command it will be interpolated again and result in undefined behavior. That is why, when piping the output back to `stack deploy` one should always prefer the `--skip-interpolation` option. - ``` + ```console $ docker stack config --compose-file web.yml --compose-file web.prod.yml --skip-interpolation | docker stack deploy --compose-file - ``` deprecated: false diff --git a/data/engine-cli/docker_stack_deploy.yaml b/data/engine-cli/docker_stack_deploy.yaml index 03093288c..ff4a8dd89 100644 --- a/data/engine-cli/docker_stack_deploy.yaml +++ b/data/engine-cli/docker_stack_deploy.yaml @@ -83,7 +83,7 @@ inherited_options: examples: |- ### Compose file (--compose-file) {#compose-file} - The `deploy` command supports compose file version `3.0` and above. + The `deploy` command supports Compose file version `3.0` and above. ```console $ docker stack deploy --compose-file docker-compose.yml vossibility diff --git a/data/engine-cli/docker_stack_ps.yaml b/data/engine-cli/docker_stack_ps.yaml index f939f232b..f4e8c7e82 100644 --- a/data/engine-cli/docker_stack_ps.yaml +++ b/data/engine-cli/docker_stack_ps.yaml @@ -269,7 +269,7 @@ examples: |- ### Only display task IDs (-q, --quiet) {#quiet} The `-q ` or `--quiet` option only shows IDs of the tasks in the stack. - This example outputs all task IDs of the "voting" stack; + This example outputs all task IDs of the `voting` stack: ```console $ docker stack ps -q voting @@ -285,7 +285,7 @@ examples: |- This option can be used to perform batch operations. 
For example, you can use the task IDs as input for other commands, such as `docker inspect`. The - following example inspects all tasks of the "voting" stack; + following example inspects all tasks of the `voting` stack: ```console $ docker inspect $(docker stack ps -q voting) diff --git a/data/engine-cli/docker_start.yaml b/data/engine-cli/docker_start.yaml index 6c0af2e45..e89f3e666 100644 --- a/data/engine-cli/docker_start.yaml +++ b/data/engine-cli/docker_start.yaml @@ -68,10 +68,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ```console - $ docker start my_container - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_stats.yaml b/data/engine-cli/docker_stats.yaml index b66957224..09c5689a4 100644 --- a/data/engine-cli/docker_stats.yaml +++ b/data/engine-cli/docker_stats.yaml @@ -1,34 +1,7 @@ command: docker stats aliases: docker container stats, docker stats short: Display a live stream of container(s) resource usage statistics -long: |- - The `docker stats` command returns a live data stream for running containers. To - limit data to one or more specific containers, specify a list of container names - or ids separated by a space. You can specify a stopped container but stopped - containers do not return any data. - - If you need more detailed information about a container's resource usage, use - the `/containers/(id)/stats` API endpoint. - - > **Note** - > - > On Linux, the Docker CLI reports memory usage by subtracting cache usage from - > the total memory usage. The API does not perform such a calculation but rather - > provides the total memory usage and the amount from the cache so that clients - > can use the data as needed. The cache usage is defined as the value of - > `total_inactive_file` field in the `memory.stat` file on cgroup v1 hosts. - > - > On Docker 19.03 and older, the cache usage was defined as the value of `cache` - > field. On cgroup v2 hosts, the cache usage is defined as the value of - > `inactive_file` field. - - > **Note** - > - > The `PIDS` column contains the number of processes and kernel threads created - > by that container. Threads is the term used by Linux kernel. Other equivalent - > terms are "lightweight process" or "kernel task", etc. A large number in the - > `PIDS` column combined with a small number of processes (as reported by `ps` - > or `top`) may indicate that something in the container is creating many threads. +long: Display a live stream of container(s) resource usage statistics usage: docker stats [OPTIONS] [CONTAINER...] pname: docker plink: docker.yaml @@ -53,7 +26,6 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates - details_url: '#format' deprecated: false hidden: false experimental: false @@ -91,142 +63,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - Running `docker stats` on all running containers against a Linux daemon. 
- - ```console - $ docker stats - - CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS - b95a83497c91 awesome_brattain 0.28% 5.629MiB / 1.952GiB 0.28% 916B / 0B 147kB / 0B 9 - 67b2525d8ad1 foobar 0.00% 1.727MiB / 1.952GiB 0.09% 2.48kB / 0B 4.11MB / 0B 2 - e5c383697914 test-1951.1.kay7x1lh1twk9c0oig50sd5tr 0.00% 196KiB / 1.952GiB 0.01% 71.2kB / 0B 770kB / 0B 1 - 4bda148efbc0 random.1.vnc8on831idyr42slu578u3cr 0.00% 1.672MiB / 1.952GiB 0.08% 110kB / 0B 578kB / 0B 2 - ``` - - If you don't [specify a format string using `--format`](#format), the - following columns are shown. - - | Column name | Description | - |---------------------------|-----------------------------------------------------------------------------------------------| - | `CONTAINER ID` and `Name` | the ID and name of the container | - | `CPU %` and `MEM %` | the percentage of the host's CPU and memory the container is using | - | `MEM USAGE / LIMIT` | the total memory the container is using, and the total amount of memory it is allowed to use | - | `NET I/O` | The amount of data the container has received and sent over its network interface | - | `BLOCK I/O` | The amount of data the container has written to and read from block devices on the host | - | `PIDs` | the number of processes or threads the container has created | - - Running `docker stats` on multiple containers by name and id against a Linux daemon. - - ```console - $ docker stats awesome_brattain 67b2525d8ad1 - - CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS - b95a83497c91 awesome_brattain 0.28% 5.629MiB / 1.952GiB 0.28% 916B / 0B 147kB / 0B 9 - 67b2525d8ad1 foobar 0.00% 1.727MiB / 1.952GiB 0.09% 2.48kB / 0B 4.11MB / 0B 2 - ``` - - Running `docker stats` on container with name nginx and getting output in `json` format. - - ```console - $ docker stats nginx --no-stream --format "{{ json . }}" - {"BlockIO":"0B / 13.3kB","CPUPerc":"0.03%","Container":"nginx","ID":"ed37317fbf42","MemPerc":"0.24%","MemUsage":"2.352MiB / 982.5MiB","Name":"nginx","NetIO":"539kB / 606kB","PIDs":"2"} - ``` - - Running `docker stats` with customized format on all (Running and Stopped) containers. - - ```console - $ docker stats --all --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" fervent_panini 5acfcb1b4fd1 humble_visvesvaraya big_heisenberg - - CONTAINER CPU % MEM USAGE / LIMIT - fervent_panini 0.00% 56KiB / 15.57GiB - 5acfcb1b4fd1 0.07% 32.86MiB / 15.57GiB - humble_visvesvaraya 0.00% 0B / 0B - big_heisenberg 0.00% 0B / 0B - ``` - - `humble_visvesvaraya` and `big_heisenberg` are stopped containers in the above example. - - Running `docker stats` on all running containers against a Windows daemon. - - ```powershell - PS E:\> docker stats - CONTAINER ID CPU % PRIV WORKING SET NET I/O BLOCK I/O - 09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB - 9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB - 3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB - ``` - - Running `docker stats` on multiple containers by name and id against a Windows daemon. 
- - ```powershell - PS E:\> docker ps -a - CONTAINER ID NAME IMAGE COMMAND CREATED STATUS PORTS NAMES - 3f214c61ad1d awesome_brattain nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky - 9db7aa4d986d mad_wilson windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson - 09d3bb5b1604 fervent_panini windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley - - PS E:\> docker stats 3f214c61ad1d mad_wilson - CONTAINER ID NAME CPU % PRIV WORKING SET NET I/O BLOCK I/O - 3f214c61ad1d awesome_brattain 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB - 9db7aa4d986d mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB - ``` - - ### Format the output (--format) {#format} - - The formatting option (`--format`) pretty prints container output - using a Go template. - - Valid placeholders for the Go template are listed below: - - | Placeholder | Description | - |--------------|----------------------------------------------| - | `.Container` | Container name or ID (user input) | - | `.Name` | Container name | - | `.ID` | Container ID | - | `.CPUPerc` | CPU percentage | - | `.MemUsage` | Memory usage | - | `.NetIO` | Network IO | - | `.BlockIO` | Block IO | - | `.MemPerc` | Memory percentage (Not available on Windows) | - | `.PIDs` | Number of PIDs (Not available on Windows) | - - When using the `--format` option, the `stats` command either - outputs the data exactly as the template declares or, when using the - `table` directive, includes column headers as well. - - The following example uses a template without headers and outputs the - `Container` and `CPUPerc` entries separated by a colon (`:`) for all images: - - ```console - $ docker stats --format "{{.Container}}: {{.CPUPerc}}" - - 09d3bb5b1604: 6.61% - 9db7aa4d986d: 9.19% - 3f214c61ad1d: 0.00% - ``` - - To list all containers statistics with their name, CPU percentage and memory - usage in a table format you can use: - - ```console - $ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" - - CONTAINER CPU % PRIV WORKING SET - 1285939c1fd3 0.07% 796 KiB / 64 MiB - 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB - d1ea048f04e4 0.03% 4.583 MiB / 64 MiB - ``` - - The default format is as follows: - - On Linux: - - "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" - - On Windows: - - "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_stop.yaml b/data/engine-cli/docker_stop.yaml index cd02162a2..9511c87aa 100644 --- a/data/engine-cli/docker_stop.yaml +++ b/data/engine-cli/docker_stop.yaml @@ -1,11 +1,7 @@ command: docker stop aliases: docker container stop, docker stop short: Stop one or more running containers -long: |- - The main process inside the container will receive `SIGTERM`, and after a grace - period, `SIGKILL`. The first signal can be changed with the `STOPSIGNAL` - instruction in the container's Dockerfile, or the `--stop-signal` option to - `docker run`. +long: Stop one or more running containers usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
pname: docker plink: docker.yaml @@ -42,10 +38,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ```console - $ docker stop my_container - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_swarm_init.yaml b/data/engine-cli/docker_swarm_init.yaml index 7526fef26..7e895aa0c 100644 --- a/data/engine-cli/docker_swarm_init.yaml +++ b/data/engine-cli/docker_swarm_init.yaml @@ -1,7 +1,7 @@ command: docker swarm init short: Initialize a swarm long: |- - Initialize a swarm. The docker engine targeted by this command becomes a manager + Initialize a swarm. The Docker Engine targeted by this command becomes a manager in the newly created single-node swarm. usage: docker swarm init [OPTIONS] pname: docker swarm @@ -10,6 +10,7 @@ options: - option: advertise-addr value_type: string description: 'Advertised address (format: `[:port]`)' + details_url: '#advertise-addr' deprecated: false hidden: false experimental: false @@ -21,6 +22,7 @@ options: default_value: "false" description: | Enable manager autolocking (requiring an unlock key to start a stopped manager) + details_url: '#autolock' deprecated: false hidden: false experimental: false @@ -31,6 +33,7 @@ options: value_type: string default_value: active description: Availability of the node (`active`, `pause`, `drain`) + details_url: '#availability' deprecated: false hidden: false experimental: false @@ -51,6 +54,7 @@ options: value_type: string description: | Address or interface to use for data path traffic (format: ``) + details_url: '#data-path-addr' deprecated: false hidden: false min_api_version: "1.31" @@ -63,6 +67,7 @@ options: default_value: "0" description: | Port number to use for data path traffic (1024 - 49151). If no value is set or is set to 0, the default port (4789) is used. 
+ details_url: '#data-path-port' deprecated: false hidden: false min_api_version: "1.40" @@ -74,6 +79,7 @@ options: value_type: ipNetSlice default_value: '[]' description: default address pool in CIDR format + details_url: '#default-addr-pool' deprecated: false hidden: false min_api_version: "1.39" @@ -105,6 +111,7 @@ options: - option: external-ca value_type: external-ca description: Specifications of one or more certificate signing endpoints + details_url: '#external-ca' deprecated: false hidden: false experimental: false @@ -115,6 +122,7 @@ options: value_type: bool default_value: "false" description: Force create a new cluster from current state + details_url: '#force-new-cluster' deprecated: false hidden: false experimental: false @@ -125,6 +133,7 @@ options: value_type: node-addr default_value: 0.0.0.0:2377 description: 'Listen address (format: `[:port]`)' + details_url: '#listen-addr' deprecated: false hidden: false experimental: false @@ -135,6 +144,7 @@ options: value_type: uint64 default_value: "0" description: Number of additional Raft snapshots to retain + details_url: '#max-snapshots' deprecated: false hidden: false min_api_version: "1.25" @@ -146,6 +156,7 @@ options: value_type: uint64 default_value: "10000" description: Number of log entries between Raft snapshots + details_url: '#snapshot-interval' deprecated: false hidden: false min_api_version: "1.25" @@ -182,94 +193,91 @@ examples: |- To add a worker to this swarm, run the following command: - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ - 172.17.0.2:2377 + docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 172.17.0.2:2377 To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. ``` - `docker swarm init` generates two random tokens, a worker token and a manager token. When you join - a new node to the swarm, the node joins as a worker or manager node based upon the token you pass - to [swarm join](swarm_join.md). + The `docker swarm init` command generates two random tokens: a worker token and + a manager token. When you join a new node to the swarm, the node joins as a + worker or manager node based upon the token you pass to [swarm + join](swarm_join.md). After you create the swarm, you can display or rotate the token using [swarm join-token](swarm_join-token.md). - ### `--autolock` + ### Protect manager keys and data (--autolock) {#autolock} - This flag enables automatic locking of managers with an encryption key. The - private keys and data stored by all managers will be protected by the - encryption key printed in the output, and will not be accessible without it. - Thus, it is very important to store this key in order to activate a manager - after it restarts. The key can be passed to `docker swarm unlock` to reactivate - the manager. Autolock can be disabled by running - `docker swarm update --autolock=false`. After disabling it, the encryption key - is no longer required to start the manager, and it will start up on its own - without user intervention. + The `--autolock` flag enables automatic locking of managers with an encryption + key. The private keys and data stored by all managers are protected by the + encryption key printed in the output, and is inaccessible without it. Make sure + to store this key securely, in order to reactivate a manager after it restarts. 
+ Pass the key to the `docker swarm unlock` command to reactivate the manager. + You can disable autolock by running `docker swarm update --autolock=false`. + After disabling it, the encryption key is no longer required to start the + manager, and it will start up on its own without user intervention. - ### `--cert-expiry` + ### Configure node healthcheck frequency (--dispatcher-heartbeat) - This flag sets the validity period for node certificates. + The `--dispatcher-heartbeat` flag sets the frequency at which nodes are told to + report their health. - ### `--dispatcher-heartbeat` + ### Use an external certificate authority (--external-ca) {#external-ca} - This flag sets the frequency with which nodes are told to use as a - period to report their health. + This flag sets up the swarm to use an external CA to issue node certificates. + The value takes the form `protocol=X,url=Y`. The value for `protocol` specifies + what protocol should be used to send signing requests to the external CA. + Currently, the only supported value is `cfssl`. The URL specifies the endpoint + where signing requests should be submitted. - ### `--external-ca` + ### Force-restart node as a single-mode manager (--force-new-cluster) {#force-new-cluster} - This flag sets up the swarm to use an external CA to issue node certificates. The value takes - the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used - to send signing requests to the external CA. Currently, the only supported value is `cfssl`. - The URL specifies the endpoint where signing requests should be submitted. + This flag forces an existing node that was part of a quorum that was lost to + restart as a single-node Manager without losing its data. - ### `--force-new-cluster` + ### Specify interface for inbound control plane traffic (--listen-addr) {#listen-addr} - This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data. - - ### `--listen-addr` - - The node listens for inbound swarm manager traffic on this address. The default is to listen on - 0.0.0.0:2377. It is also possible to specify a network interface to listen on that interface's - address; for example `--listen-addr eth0:2377`. + The node listens for inbound swarm manager traffic on this address. The default + is to listen on `0.0.0.0:2377`. It is also possible to specify a network + interface to listen on that interface's address; for example `--listen-addr + eth0:2377`. Specifying a port is optional. If the value is a bare IP address or interface - name, the default port 2377 will be used. + name, the default port 2377 is used. - ### `--advertise-addr` + ### Specify interface for outbound control plane traffic (--advertise-addr) {#advertise-addr} - This flag specifies the address that will be advertised to other members of the - swarm for API access and overlay networking. If unspecified, Docker will check - if the system has a single IP address, and use that IP address with the - listening port (see `--listen-addr`). If the system has multiple IP addresses, - `--advertise-addr` must be specified so that the correct address is chosen for - inter-manager communication and overlay networking. + The `--advertise-addr` flag specifies the address that will be advertised to + other members of the swarm for API access and overlay networking. If + unspecified, Docker will check if the system has a single IP address, and use + that IP address with the listening port (see `--listen-addr`). 
If the system + has multiple IP addresses, `--advertise-addr` must be specified so that the + correct address is chosen for inter-manager communication and overlay + networking. - It is also possible to specify a network interface to advertise that interface's address; - for example `--advertise-addr eth0:2377`. + It is also possible to specify a network interface to advertise that + interface's address; for example `--advertise-addr eth0:2377`. Specifying a port is optional. If the value is a bare IP address or interface - name, the default port 2377 will be used. + name, the default port 2377 is used. + + ### Specify interface for data traffic (--data-path-addr) {#data-path-addr} - ### `--data-path-addr` + The `--data-path-addr` flag specifies the address that global scope network + drivers will publish towards other nodes in order to reach the containers + running on this node. Using this parameter you can separate the container's + data traffic from the management traffic of the cluster. - This flag specifies the address that global scope network drivers will publish towards - other nodes in order to reach the containers running on this node. - Using this parameter it is then possible to separate the container's data traffic from the - management traffic of the cluster. - If unspecified, Docker will use the same IP address or interface that is used for the - advertise address. + If unspecified, the IP address or interface of the advertise address is used. - ### `--data-path-port` + ### Configure port number for data traffic (--data-path-port) {#data-path-port} - This flag allows you to configure the UDP port number to use for data path - traffic. The provided port number must be within the 1024 - 49151 range. If - this flag is not set or is set to 0, the default port number 4789 is used. - The data path port can only be configured when initializing the swarm, and - applies to all nodes that join the swarm. - The following example initializes a new Swarm, and configures the data path - port to UDP port 7777; + The `--data-path-port` flag allows you to configure the UDP port number to use + for data path traffic. The provided port number must be within the 1024 - 49151 + range. If this flag isn't set, or if it's set to 0, the default port number + 4789 is used. The data path port can only be configured when initializing the + swarm, and applies to all nodes that join the swarm. The following example + initializes a new Swarm, and configures the data path port to UDP port 7777; ```console $ docker swarm init --data-path-port=7777 @@ -288,40 +296,43 @@ examples: |- <...> ``` - ### `--default-addr-pool` - This flag specifies default subnet pools for global scope networks. - Format example is `--default-addr-pool 30.30.0.0/16 --default-addr-pool 40.40.0.0/16` + ### Specify default subnet pools (--default-addr-pool) {#default-addr-pool} - ### `--default-addr-pool-mask-length` - This flag specifies default subnet pools mask length for default-addr-pool. - Format example is `--default-addr-pool-mask-length 24` + The `--default-addr-pool` flag specifies default subnet pools for global scope + networks. For example, to specify two address pools: - ### `--task-history-limit` + ```console + $ docker swarm init \ + --default-addr-pool 30.30.0.0/16 \ + --default-addr-pool 40.40.0.0/16 + ``` - This flag sets up task history retention limit. + Use the `--default-addr-pool-mask-length` flag to specify the default subnet + pools mask length for the subnet pools. 
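For example, a sketch that combines both flags so that overlay networks are carved out as `/24` subnets from the `30.30.0.0/16` pool:

```console
$ docker swarm init \
    --default-addr-pool 30.30.0.0/16 \
    --default-addr-pool-mask-length 24
```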
- ### `--max-snapshots` + ### Set limit for number of snapshots to keep (--max-snapshots) {#max-snapshots} This flag sets the number of old Raft snapshots to retain in addition to the current Raft snapshots. By default, no old snapshots are retained. This option may be used for debugging, or to store old snapshots of the swarm state for disaster recovery purposes. - ### `--snapshot-interval` + ### Configure Raft snapshot log interval (--snapshot-interval) {#snapshot-interval} - This flag specifies how many log entries to allow in between Raft snapshots. - Setting this to a higher number will trigger snapshots less frequently. - Snapshots compact the Raft log and allow for more efficient transfer of the - state to new managers. However, there is a performance cost to taking snapshots - frequently. + The `--snapshot-interval` flag specifies how many log entries to allow in + between Raft snapshots. Setting this to a high number will trigger snapshots + less frequently. Snapshots compact the Raft log and allow for more efficient + transfer of the state to new managers. However, there is a performance cost to + taking snapshots frequently. - ### `--availability` + ### Configure the availability of a manager (--availability) {#availability} - This flag specifies the availability of the node at the time the node joins a master. - Possible availability values are `active`, `pause`, or `drain`. + The `--availability` flag specifies the availability of the node at the time + the node joins a master. Possible availability values are `active`, `pause`, or + `drain`. - This flag is useful in certain situations. For example, a cluster may want to have - dedicated manager nodes that are not served as worker nodes. This could be achieved + This flag is useful in certain situations. For example, a cluster may want to + have dedicated manager nodes that don't serve as worker nodes. You can do this by passing `--availability=drain` to `docker swarm init`. deprecated: false min_api_version: "1.24" diff --git a/data/engine-cli/docker_system_df.yaml b/data/engine-cli/docker_system_df.yaml index bb3302c84..40e649f25 100644 --- a/data/engine-cli/docker_system_df.yaml +++ b/data/engine-cli/docker_system_df.yaml @@ -2,7 +2,7 @@ command: docker system df short: Show docker disk usage long: |- The `docker system df` command displays information regarding the - amount of disk space used by the docker daemon. + amount of disk space used by the Docker daemon. usage: docker system df [OPTIONS] pname: docker system plink: docker_system.yaml @@ -45,7 +45,7 @@ inherited_options: kubernetes: false swarm: false examples: |- - By default the command will just show a summary of the data used: + By default the command displays a summary of the data used: ```console $ docker system df @@ -56,7 +56,7 @@ examples: |- Local Volumes 2 1 36 B 0 B (0%) ``` - A more detailed view can be requested using the `-v, --verbose` flag: + Use the `-v, --verbose` flag to get more detailed information: ```console $ docker system df -v @@ -84,12 +84,12 @@ examples: |- ``` * `SHARED SIZE` is the amount of space that an image shares with another one (i.e. 
their common data) - * `UNIQUE SIZE` is the amount of space that is only used by a given image - * `SIZE` is the virtual size of the image, it is the sum of `SHARED SIZE` and `UNIQUE SIZE` + * `UNIQUE SIZE` is the amount of space that's only used by a given image + * `SIZE` is the virtual size of the image, it's the sum of `SHARED SIZE` and `UNIQUE SIZE` > **Note** > - > Network information is not shown because it does not consume disk space. + > Network information isn't shown, because it doesn't consume disk space. deprecated: false min_api_version: "1.25" experimental: false diff --git a/data/engine-cli/docker_system_events.yaml b/data/engine-cli/docker_system_events.yaml index aab5ab3e2..d6547b2d4 100644 --- a/data/engine-cli/docker_system_events.yaml +++ b/data/engine-cli/docker_system_events.yaml @@ -2,8 +2,13 @@ command: docker system events aliases: docker system events, docker events short: Get real time events from the server long: |- - Use `docker system events` to get real-time events from the server. These - events differ per Docker object type. + Use `docker events` to get real-time events from the server. These events differ + per Docker object type. Different event types have different scopes. Local + scoped events are only seen on the node they take place on, and Swarm scoped + events are seen on all managers. + + Only the last 1000 log events are returned. You can use filters to further limit + the number of events returned. ### Object types @@ -53,9 +58,9 @@ long: |- Docker plugins report the following events: - - `install` - `enable` - `disable` + - `install` - `remove` #### Volumes @@ -63,9 +68,9 @@ long: |- Docker volumes report the following events: - `create` + - `destroy` - `mount` - `unmount` - - `destroy` #### Networks @@ -73,8 +78,9 @@ long: |- - `create` - `connect` - - `disconnect` - `destroy` + - `disconnect` + - `remove` #### Daemons @@ -82,48 +88,97 @@ long: |- - `reload` + #### Services + + Docker services report the following events: + + - `create` + - `remove` + - `update` + + #### Nodes + + Docker nodes report the following events: + + - `create` + - `remove` + - `update` + + #### Secrets + + Docker secrets report the following events: + + - `create` + - `remove` + - `update` + + #### Configs + + Docker configs report the following events: + + - `create` + - `remove` + - `update` + ### Limiting, filtering, and formatting the output - #### Limit events by time + #### Limit events by time (--since, --until) {#since} The `--since` and `--until` parameters can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine’s time. If you do not provide the `--since` option, - the command returns only new and/or live events. Supported formats for date + the command returns only new and/or live events. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be used if you do not provide either a `Z` or a - `+-00:00` timezone offset at the end of the timestamp. When providing Unix + `+-00:00` timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. 
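As a quick sketch of the accepted formats (the values shown are only illustrative), a Go duration and a Unix timestamp with a nanoseconds fraction are both valid:

```console
$ docker events --since '1h30m' --until '10m'

$ docker events --since '1483283804.000000000'
```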
+ Only the last 1000 log events are returned. You can use filters to further limit + the number of events returned. + #### Filtering (--filter) {#filter} The filtering flag (`-f` or `--filter`) format is of "key=value". If you would like to use multiple filters, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - Using the same filter multiple times will be handled as a *OR*; for example - `--filter container=588a23dac085 --filter container=a8f7720b8c22` will display - events for container 588a23dac085 *OR* container a8f7720b8c22 + Using the same filter multiple times is interpreted as a logical `OR`; for example, + `--filter container=588a23dac085 --filter container=a8f7720b8c22` displays + events for container `588a23dac085` or container `a8f7720b8c22`. - Using multiple filters will be handled as a *AND*; for example - `--filter container=588a23dac085 --filter event=start` will display events for - container container 588a23dac085 *AND* the event type is *start* + Using multiple filters is interpreted as a logical `AND`; for example, + `--filter container=588a23dac085 --filter event=start` displays events for + container `588a23dac085` and where the event type is `start`. The currently supported filters are: - * container (`container=`) - * daemon (`daemon=`) - * event (`event=`) - * image (`image=`) - * label (`label=` or `label==`) - * network (`network=`) - * plugin (`plugin=`) - * type (`type=`) - * volume (`volume=`) + - config (`config=`) + - container (`container=`) + - daemon (`daemon=`) + - event (`event=`) + - image (`image=`) + - label (`label=` or `label==`) + - network (`network=`) + - node (`node=`) + - plugin (`plugin=`) + - scope (`scope=`) + - secret (`secret=`) + - service (`service=`) + - type (`type=`) + - volume (`volume=`) + + #### Format the output (--format) {#format} + + If you specify a format (`--format`), the given template is executed + instead of the default format. Go's [text/template](https://pkg.go.dev/text/template) + package describes all the details of the format. + + If a format is set to `{{json .}}`, events are streamed in the JSON Lines format. + For information about JSON Lines, see . usage: docker system events [OPTIONS] pname: docker system plink: docker_system.yaml @@ -145,7 +200,6 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates - details_url: '#format' deprecated: false hidden: false experimental: false @@ -189,7 +243,7 @@ examples: |- **Shell 1: Listening for events:** ```console - $ docker system events + $ docker events ``` **Shell 2: Start and Stop containers:** @@ -212,16 +266,15 @@ examples: |- 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) ``` - To exit the `docker system events` command, use `CTRL+C`. + To exit the `docker events` command, use `CTRL+C`. 
### Filter events by time You can filter the output by an absolute timestamp or relative time on the host - machine, using the following different time syntaxes: + machine, using the following different time formats: ```console - $ docker system events --since 1483283804 - + $ docker events --since 1483283804 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) @@ -231,8 +284,7 @@ examples: |- 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - $ docker system events --since '2017-01-05' - + $ docker events --since '2017-01-05' 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) @@ -242,8 +294,7 @@ examples: |- 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - $ docker system events --since '2013-09-03T15:49:29' - + $ docker events --since '2013-09-03T15:49:29' 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) @@ -253,8 +304,7 @@ examples: |- 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - $ docker system events --since '10m' - + $ docker events --since '10m' 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) @@ -263,6 +313,12 @@ examples: |- 2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) 2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) 2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + + $ docker events --since '2017-01-05T00:35:30' --until '2017-01-05T00:36:05' + 2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) + 2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) + 2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) + 2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) ``` ### Filter events by criteria @@ -271,12 +327,12 @@ examples: |- output. 
```console - $ docker system events --filter 'event=stop' + $ docker events --filter 'event=stop' 2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) 2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain) - $ docker system events --filter 'image=alpine' + $ docker events --filter 'image=alpine' 2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner) 2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) @@ -285,14 +341,14 @@ examples: |- 2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner) 2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner) - $ docker system events --filter 'container=test' + $ docker events --filter 'container=test' 2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) 2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) 2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) 2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - $ docker system events --filter 'container=test' --filter 'container=d9cdb1525ea8' + $ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8' 2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) 2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) @@ -300,55 +356,74 @@ examples: |- 2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test) 2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test) - $ docker system events --filter 'container=test' --filter 'event=stop' + $ docker events --filter 'container=test' --filter 'event=stop' 2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test) - $ docker system events --filter 'type=volume' + $ docker events --filter 'type=volume' 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate) 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local) 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - $ docker system events --filter 'type=network' + $ docker events --filter 'type=network' 2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge) 2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge) - $ docker system events --filter 'container=container_1' --filter 'container=container_2' + $ docker events --filter 'container=container_1' --filter 'container=container_2' - 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu:22.04 ) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu:22.04 ) + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu:22.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu:22.04) 
2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - $ docker system events --filter 'type=volume' + $ docker events --filter 'type=volume' 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - $ docker system events --filter 'type=network' + $ docker events --filter 'type=network' 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) - $ docker system events --filter 'type=plugin' + $ docker events --filter 'type=plugin' 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - ``` - ### Format the output (--format) {#format} + $ docker events -f type=service - If a format (`--format`) is specified, the given template will be executed - instead of the default format. Go's [text/template](https://pkg.go.dev/text/template) - package describes all the details of the format. 
+ 2017-07-12T06:34:07.999446625Z service create wj64st89fzgchxnhiqpn8p4oj (name=reverent_albattani) + 2017-07-12T06:34:21.405496207Z service remove wj64st89fzgchxnhiqpn8p4oj (name=reverent_albattani) + + $ docker events -f type=node + + 2017-07-12T06:21:51.951586759Z node update 3xyz5ttp1a253q74z1thwywk9 (name=ip-172-31-23-42, state.new=ready, state.old=unknown) + + $ docker events -f type=secret + + 2017-07-12T06:32:13.915704367Z secret create s8o6tmlnndrgzbmdilyy5ymju (name=new_secret) + 2017-07-12T06:32:37.052647783Z secret remove s8o6tmlnndrgzbmdilyy5ymju (name=new_secret) + + $ docker events -f type=config + 2017-07-12T06:44:13.349037127Z config create u96zlvzdfsyb9sg4mhyxfh3rl (name=abc) + 2017-07-12T06:44:36.327694184Z config remove u96zlvzdfsyb9sg4mhyxfh3rl (name=abc) + + $ docker events --filter 'scope=swarm' + + 2017-07-10T07:46:50.250024503Z service create m8qcxu8081woyof7w3jaax6gk (name=affectionate_wilson) + 2017-07-10T07:47:31.093797134Z secret create 6g5pufzsv438p9tbvl9j94od4 (name=new_secret) + ``` + + ### Format the output ```console - $ docker system events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + $ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 @@ -360,11 +435,11 @@ examples: |- #### Format as JSON - If a format is set to `{{json .}}`, the events are streamed as valid JSON - Lines. For information about JSON Lines, please refer to https://jsonlines.org/ . + To list events in JSON format, use the `json` directive, which is the same + `--format '{{ json . }}`. ```console - $ docker system events --format '{{json .}}' + $ docker events --format json {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. diff --git a/data/engine-cli/docker_system_info.yaml b/data/engine-cli/docker_system_info.yaml index ed451b27c..8a71ec72a 100644 --- a/data/engine-cli/docker_system_info.yaml +++ b/data/engine-cli/docker_system_info.yaml @@ -1,7 +1,24 @@ command: docker system info aliases: docker system info, docker info short: Display system-wide information -long: Display system-wide information +long: |- + This command displays system wide information regarding the Docker installation. + Information displayed includes the kernel version, number of containers and images. + The number of images shown is the number of unique images. The same image tagged + under different names is counted only once. + + If a format is specified, the given template will be executed instead of the + default format. Go's [text/template](https://pkg.go.dev/text/template) package + describes all the details of the format. + + Depending on the storage driver in use, additional information can be shown, such + as pool name, data file, metadata file, data space used, total data space, metadata + space used, and total metadata space. + + The data file is where the images are stored and the metadata file is where the + meta data regarding those images are stored. When run for the first time Docker + allocates a certain amount of data space and meta data space from the space + available on the volume where `/var/lib/docker` is mounted. 
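As a minimal sketch of the template support described above, you can extract individual values; the `.ServerVersion` and `.Driver` field names are assumed from the `docker info` template context, and the output values are illustrative:

```console
$ docker info --format '{{.ServerVersion}}'
25.0.0

$ docker info --format '{{.Driver}}'
overlay2
```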
usage: docker system info [OPTIONS] pname: docker system plink: docker_system.yaml @@ -14,6 +31,7 @@ options: 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + details_url: '#format' deprecated: false hidden: false experimental: false @@ -31,6 +49,138 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Show output + + The example below shows the output for a daemon running on Ubuntu Linux, + using the `overlay2` storage driver. As can be seen in the output, additional + information about the `overlay2` storage driver is shown: + + ```console + $ docker info + + Client: + Version: 25.0.0 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) + Version: v0.12.1 + Path: /usr/local/libexec/docker/cli-plugins/docker-buildx + compose: Docker Compose (Docker Inc.) + Version: v2.24.1 + Path: /usr/local/libexec/docker/cli-plugins/docker-compose + + Server: + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 25.0.0 + Storage Driver: overlayfs + driver-type: io.containerd.snapshotter.v1 + Logging Driver: json-file + Cgroup Driver: cgroupfs + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: runc io.containerd.runc.v2 + Default Runtime: runc + Init Binary: docker-init + containerd version: 71909c1814c544ac47ab91d2e8b84718e517bb99 + runc version: v1.1.11-0-g4bccb38 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.5.11-linuxkit + Operating System: Alpine Linux v3.19 + OSType: linux + Architecture: aarch64 + CPUs: 10 + Total Memory: 7.663GiB + Name: 4a7ed206a70d + ID: c20f7230-59a2-4824-a2f4-fda71c982ee6 + Docker Root Dir: /var/lib/docker + Debug Mode: false + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Live Restore Enabled: false + Product License: Community Engine + ``` + + ### Format the output (--format) {#format} + + You can also specify the output format: + + ```console + $ docker info --format '{{json .}}' + + {"ID":"4cee4408-10d2-4e17-891c-a41736ac4536","Containers":14, ...} + ``` + + ### Run `docker info` on Windows + + Here is a sample output for a daemon running on Windows Server: + + ```console + C:\> docker info + + Client: Docker Engine - Community + Version: 24.0.0 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) + Version: v0.10.4 + Path: C:\Program Files\Docker\cli-plugins\docker-buildx.exe + compose: Docker Compose (Docker Inc.) 
+ Version: v2.17.2 + Path: C:\Program Files\Docker\cli-plugins\docker-compose.exe + + Server: + Containers: 1 + Running: 0 + Paused: 0 + Stopped: 1 + Images: 17 + Server Version: 23.0.3 + Storage Driver: windowsfilter + Logging Driver: json-file + Plugins: + Volume: local + Network: ics internal l2bridge l2tunnel nat null overlay private transparent + Log: awslogs etwlogs fluentd gcplogs gelf json-file local splunk syslog + Swarm: inactive + Default Isolation: process + Kernel Version: 10.0 20348 (20348.1.amd64fre.fe_release.210507-1500) + Operating System: Microsoft Windows Server Version 21H2 (OS Build 20348.707) + OSType: windows + Architecture: x86_64 + CPUs: 8 + Total Memory: 3.999 GiB + Name: WIN-V0V70C0LU5P + ID: 2880d38d-464e-4d01-91bd-c76f33ba3981 + Docker Root Dir: C:\ProgramData\docker + Debug Mode: false + Experimental: true + Insecure Registries: + myregistry:5000 + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false + ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_system_prune.yaml b/data/engine-cli/docker_system_prune.yaml index 709bb97ff..f25bef859 100644 --- a/data/engine-cli/docker_system_prune.yaml +++ b/data/engine-cli/docker_system_prune.yaml @@ -1,7 +1,7 @@ command: docker system prune short: Remove unused data long: |- - Remove all unused containers, networks, images (both dangling and unreferenced), + Remove all unused containers, networks, images (both dangling and unused), and optionally, volumes. usage: docker system prune [OPTIONS] pname: docker system @@ -69,7 +69,7 @@ examples: |- - all stopped containers - all networks not used by at least one container - all dangling images - - all build cache + - unused build cache Are you sure you want to continue? [y/N] y Deleted Containers: @@ -88,7 +88,7 @@ examples: |- Total reclaimed space: 1.84kB ``` - By default, volumes are not removed to prevent important data from being + By default, volumes aren't removed to prevent important data from being deleted if there is currently no container using the volume. Use the `--volumes` flag when running the command to prune anonymous volumes as well: @@ -149,7 +149,7 @@ examples: |- formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the daemon will be used if you do not provide either a `Z` or a - `+-00:00` timezone offset at the end of the timestamp. When providing Unix + `+-00:00` timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a diff --git a/data/engine-cli/docker_tag.yaml b/data/engine-cli/docker_tag.yaml index dce541008..0c4d8a8b6 100644 --- a/data/engine-cli/docker_tag.yaml +++ b/data/engine-cli/docker_tag.yaml @@ -1,40 +1,7 @@ command: docker tag aliases: docker image tag, docker tag short: Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE -long: |- - A full image name has the following format and components: - - `[HOST[:PORT_NUMBER]/]PATH` - - - `HOST`: The optional registry hostname specifies where the image is located. - The hostname must comply with standard DNS rules, but may not contain - underscores. 
If the hostname is not specified, the command uses Docker's public - registry at `registry-1.docker.io` by default. Note that `docker.io` is the - canonical reference for Docker's public registry. - - `PORT_NUMBER`: If a hostname is present, it may optionally be followed by a - registry port number in the format `:8080`. - - `PATH`: The path consists of slash-separated components. Each - component may contain lowercase letters, digits and separators. A separator is - defined as a period, one or two underscores, or one or more hyphens. A component - may not start or end with a separator. While the - [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec) - supports more than two slash-separated components, most registries only support - two slash-separated components. For Docker's public registry, the path format is - as follows: - - `[NAMESPACE/]REPOSITORY`: The first, optional component is typically a - user's or an organization's namespace. The second, mandatory component is the - repository name. When the namespace is not present, Docker uses `library` - as the default namespace. - - After the image name, the optional `TAG` is a custom, human-readable manifest - identifier that is typically a specific version or variant of an image. The tag - must be valid ASCII and can contain lowercase and uppercase letters, digits, - underscores, periods, and hyphens. It cannot start with a period or hyphen and - must be no longer than 128 characters. If the tag is not specified, the command uses `latest` by default. - - You can group your images together using names and tags, and then - [push](/engine/reference/commandline/push) them to a - registry. +long: Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE usage: docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] pname: docker plink: docker.yaml @@ -49,44 +16,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ### Tag an image referenced by ID - - To tag a local image with ID "0e5574283393" as "fedora/httpd" with the tag - "version1.0": - - ```console - $ docker tag 0e5574283393 fedora/httpd:version1.0 - ``` - - ### Tag an image referenced by Name - - To tag a local image "httpd" as "fedora/httpd" with the tag "version1.0": - - ```console - $ docker tag httpd fedora/httpd:version1.0 - ``` - - Note that since the tag name is not specified, the alias is created for an - existing local version `httpd:latest`. - - ### Tag an image referenced by Name and Tag - - To tag a local image with the name "httpd" and the tag "test" as "fedora/httpd" - with the tag "version1.0.test": - - ```console - $ docker tag httpd:test fedora/httpd:version1.0.test - ``` - - ### Tag an image for a private registry - - To push an image to a private registry and not the public Docker registry you - must include the registry hostname and port (if needed). - - ```console - $ docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_trust_key_generate.yaml b/data/engine-cli/docker_trust_key_generate.yaml index 9cae107f7..f1bc1e414 100644 --- a/data/engine-cli/docker_trust_key_generate.yaml +++ b/data/engine-cli/docker_trust_key_generate.yaml @@ -2,7 +2,7 @@ command: docker trust key generate short: Generate and load a signing key-pair long: |- `docker trust key generate` generates a key-pair to be used with signing, - and loads the private key into the local docker trust keystore. 
+ and loads the private key into the local Docker trust keystore. usage: docker trust key generate NAME pname: docker trust key plink: docker_trust_key.yaml @@ -41,7 +41,7 @@ examples: |- alice.pub ``` - The private signing key is encrypted by the passphrase and loaded into the docker trust keystore. + The private signing key is encrypted by the passphrase and loaded into the Docker trust keystore. All passphrase requests to sign with the key will be referred to by the provided `NAME`. The public key component `alice.pub` will be available in the current working directory, and can diff --git a/data/engine-cli/docker_trust_key_load.yaml b/data/engine-cli/docker_trust_key_load.yaml index 84d97dc13..a7ff63912 100644 --- a/data/engine-cli/docker_trust_key_load.yaml +++ b/data/engine-cli/docker_trust_key_load.yaml @@ -1,7 +1,7 @@ command: docker trust key load short: Load a private key file for signing long: |- - `docker trust key load` adds private keys to the local docker trust keystore. + `docker trust key load` adds private keys to the local Docker trust keystore. To add a signer to a repository use `docker trust signer add`. usage: docker trust key load [OPTIONS] KEYFILE diff --git a/data/engine-cli/docker_trust_revoke.yaml b/data/engine-cli/docker_trust_revoke.yaml index 75aa41bd7..ae010f560 100644 --- a/data/engine-cli/docker_trust_revoke.yaml +++ b/data/engine-cli/docker_trust_revoke.yaml @@ -30,7 +30,7 @@ inherited_options: examples: |- ### Revoke signatures from a signed tag - Here's an example of a repo with two signed tags: + Here's an example of a repository with two signed tags: ```console diff --git a/data/engine-cli/docker_trust_sign.yaml b/data/engine-cli/docker_trust_sign.yaml index 797e35cec..957d9fe63 100644 --- a/data/engine-cli/docker_trust_sign.yaml +++ b/data/engine-cli/docker_trust_sign.yaml @@ -27,7 +27,7 @@ inherited_options: kubernetes: false swarm: false examples: |- - ### Sign a tag as a repo admin + ### Sign a tag as a repository admin Given an image: diff --git a/data/engine-cli/docker_trust_signer_add.yaml b/data/engine-cli/docker_trust_signer_add.yaml index a39fdde0a..0dd9e7322 100644 --- a/data/engine-cli/docker_trust_signer_add.yaml +++ b/data/engine-cli/docker_trust_signer_add.yaml @@ -26,7 +26,7 @@ inherited_options: kubernetes: false swarm: false examples: |- - ### Add a signer to a repo + ### Add a signer to a repository To add a new signer, `alice`, to this repository: diff --git a/data/engine-cli/docker_trust_signer_remove.yaml b/data/engine-cli/docker_trust_signer_remove.yaml index e9082d456..dad88734d 100644 --- a/data/engine-cli/docker_trust_signer_remove.yaml +++ b/data/engine-cli/docker_trust_signer_remove.yaml @@ -29,7 +29,7 @@ inherited_options: kubernetes: false swarm: false examples: |- - ### Remove a signer from a repo + ### Remove a signer from a repository To remove an existing signer, `alice`, from this repository: @@ -60,7 +60,7 @@ examples: |- Successfully removed alice from example/trust-demo ``` - `docker trust inspect --pretty` now does not list `alice` as a valid signer: + `docker trust inspect --pretty` now doesn't list `alice` as a valid signer: ```console $ docker trust inspect --pretty example/trust-demo @@ -78,7 +78,7 @@ examples: |- Root Key: 3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949 ``` - ### Remove a signer from multiple repos + ### Remove a signer from multiple repositories To remove an existing signer, `alice`, from multiple repositories: @@ -165,9 +165,8 @@ examples: |- Root Key: 
3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949 ``` - `docker trust signer remove` removes signers to repositories on a best effort - basis, so it will continue to remove the signer from subsequent repositories if - one attempt fails: + `docker trust signer remove` removes signers to repositories on a best effort basis. + It continues to remove the signer from subsequent repositories if one attempt fails: ```console $ docker trust signer remove alice example/unauthorized example/authorized diff --git a/data/engine-cli/docker_unpause.yaml b/data/engine-cli/docker_unpause.yaml index 97e157a89..cc6572607 100644 --- a/data/engine-cli/docker_unpause.yaml +++ b/data/engine-cli/docker_unpause.yaml @@ -1,13 +1,7 @@ command: docker unpause aliases: docker container unpause, docker unpause short: Unpause all processes within one or more containers -long: |- - The `docker unpause` command un-suspends all processes in the specified containers. - On Linux, it does this using the freezer cgroup. - - See the - [freezer cgroup documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) - for further details. +long: Unpause all processes within one or more containers usage: docker unpause CONTAINER [CONTAINER...] pname: docker plink: docker.yaml @@ -22,11 +16,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - ```console - $ docker unpause my_container - my_container - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_update.yaml b/data/engine-cli/docker_update.yaml index f05b3bbe8..a4b21f1f8 100644 --- a/data/engine-cli/docker_update.yaml +++ b/data/engine-cli/docker_update.yaml @@ -1,23 +1,7 @@ command: docker update aliases: docker container update, docker update short: Update configuration of one or more containers -long: |- - The `docker update` command dynamically updates container configuration. - You can use this command to prevent containers from consuming too many - resources from their Docker host. With a single command, you can place - limits on a single container or on many. To specify more than one container, - provide space-separated list of container names or IDs. - - With the exception of the `--kernel-memory` option, you can specify these - options on a running or a stopped container. On kernel version older than - 4.6, you can only update `--kernel-memory` on a stopped container or on - a running container with kernel memory initialized. - - > **Warning** - > - > The `docker update` and `docker container update` commands are not supported - > for Windows containers. - {: .warning } +long: Update configuration of one or more containers usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
pname: docker plink: docker.yaml @@ -80,7 +64,6 @@ options: value_type: int64 default_value: "0" description: CPU shares (relative weight) - details_url: '#cpu-shares' deprecated: false hidden: false experimental: false @@ -119,7 +102,6 @@ options: value_type: bytes default_value: "0" description: Kernel memory limit (deprecated) - details_url: '#kernel-memory' deprecated: true hidden: true experimental: false @@ -131,7 +113,6 @@ options: value_type: bytes default_value: "0" description: Memory limit - details_url: '#memory' deprecated: false hidden: false experimental: false @@ -172,7 +153,6 @@ options: - option: restart value_type: string description: Restart policy to apply when a container exits - details_url: '#restart' deprecated: false hidden: false experimental: false @@ -190,79 +170,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - The following sections illustrate ways to use this command. - - ### Update a container's cpu-shares (--cpu-shares) {#cpu-shares} - - To limit a container's cpu-shares to 512, first identify the container - name or ID. You can use `docker ps` to find these values. You can also - use the ID returned from the `docker run` command. Then, do the following: - - ```console - $ docker update --cpu-shares 512 abebf7571666 - ``` - - ### Update a container with cpu-shares and memory (-m, --memory) {#memory} - - To update multiple resource configurations for multiple containers: - - ```console - $ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse - ``` - - ### Update a container's kernel memory constraints (--kernel-memory) {#kernel-memory} - - You can update a container's kernel memory limit using the `--kernel-memory` - option. On kernel version older than 4.6, this option can be updated on a - running container only if the container was started with `--kernel-memory`. - If the container was started *without* `--kernel-memory` you need to stop - the container before updating kernel memory. - - > **Note** - > - > The `--kernel-memory` option has been deprecated since Docker 20.10. - - For example, if you started a container with this command: - - ```console - $ docker run -dit --name test --kernel-memory 50M ubuntu bash - ``` - - You can update kernel memory while the container is running: - - ```console - $ docker update --kernel-memory 80M test - ``` - - If you started a container *without* kernel memory initialized: - - ```console - $ docker run -dit --name test2 --memory 300M ubuntu bash - ``` - - Update kernel memory of running container `test2` will fail. You need to stop - the container before updating the `--kernel-memory` setting. The next time you - start it, the container uses the new value. - - Kernel version newer than (include) 4.6 does not have this limitation, you - can use `--kernel-memory` the same way as other options. - - ### Update a container's restart policy (--restart) {#restart} - - You can change a container's restart policy on a running container. The new - restart policy takes effect instantly after you run `docker update` on a - container. - - To update restart policy for one or more containers: - - ```console - $ docker update --restart=on-failure:3 abebf7571666 hopeful_morse - ``` - - Note that if the container is started with "--rm" flag, you cannot update the restart - policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the - container. 
deprecated: false experimental: false experimentalcli: false diff --git a/data/engine-cli/docker_version.yaml b/data/engine-cli/docker_version.yaml index 30b3b908c..02aa24a0a 100644 --- a/data/engine-cli/docker_version.yaml +++ b/data/engine-cli/docker_version.yaml @@ -12,9 +12,9 @@ long: |- ### Default output The default output renders all version information divided into two sections; - the "Client" section contains information about the Docker CLI and client - components, and the "Server" section contains information about the Docker - Engine and components used by the Engine, such as the "Containerd" and "Runc" + the `Client` section contains information about the Docker CLI and client + components, and the `Server` section contains information about the Docker + Engine and components used by the Docker Engine, such as the containerd and runc OCI Runtimes. The information shown may differ depending on how you installed Docker and @@ -57,10 +57,10 @@ long: |- Docker uses a client/server architecture, which allows you to use the Docker CLI on your local machine to control a Docker Engine running on a remote machine, - which can be (for example) a machine running in the Cloud or inside a Virtual Machine. + which can be (for example) a machine running in the cloud or inside a virtual machine. The following example switches the Docker CLI to use a [context](context.md) - named "remote-test-server", which runs an older version of the Docker Engine + named `remote-test-server`, which runs an older version of the Docker Engine on a Linux server: ```console @@ -104,7 +104,7 @@ long: |- and Docker Engine perform API version negotiation, and select the highest API version that is supported by both the Docker CLI and the Docker Engine. - For example, if the CLI is connecting with a Docker 19.03 engine, it downgrades + For example, if the CLI is connecting with Docker Engine version 19.03, it downgrades to API version 1.40 (refer to the [API version matrix](/engine/api/#api-version-matrix) to learn about the supported API versions for Docker Engine): diff --git a/data/engine-cli/docker_volume_create.yaml b/data/engine-cli/docker_volume_create.yaml index 0714736c5..12b31732a 100644 --- a/data/engine-cli/docker_volume_create.yaml +++ b/data/engine-cli/docker_volume_create.yaml @@ -178,23 +178,23 @@ examples: |- $ docker run -d -v hello:/world busybox ls /world ``` - The mount is created inside the container's `/world` directory. Docker does not + The mount is created inside the container's `/world` directory. Docker doesn't support relative paths for mount points inside the container. - Multiple containers can use the same volume in the same time period. This is - useful if two containers need access to shared data. For example, if one - container writes and the other reads the data. + Multiple containers can use the same volume. This is useful if two containers + need access to shared data. For example, if one container writes and the other + reads the data. - Volume names must be unique among drivers. This means you cannot use the same - volume name with two different drivers. If you attempt this `docker` returns an - error: + Volume names must be unique among drivers. This means you can't use the same + volume name with two different drivers. Attempting to create two volumes with + the same name results in an error: ```console A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. 
``` If you specify a volume name already in use on the current driver, Docker - assumes you want to re-use the existing volume and does not return an error. + assumes you want to re-use the existing volume and doesn't return an error. ### Driver-specific options (-o, --opt) {#opt} @@ -211,13 +211,12 @@ examples: |- These options are passed directly to the volume driver. Options for different volume drivers may do different things (or nothing at all). - The built-in `local` driver on Windows does not support any options. - - The built-in `local` driver on Linux accepts options similar to the linux - `mount` command. You can provide multiple options by passing the `--opt` flag - multiple times. Some `mount` options (such as the `o` option) can take a - comma-separated list of options. Complete list of available mount options can be - found [here](https://man7.org/linux/man-pages/man8/mount.8.html). + The built-in `local` driver accepts no options on Windows. On Linux and with + Docker Desktop, the `local` driver accepts options similar to the Linux `mount` + command. You can provide multiple options by passing the `--opt` flag multiple + times. Some `mount` options (such as the `o` option) can take a comma-separated + list of options. Complete list of available mount options can be found + [here](https://man7.org/linux/man-pages/man8/mount.8.html). For example, the following creates a `tmpfs` volume called `foo` with a size of 100 megabyte and `uid` of 1000. diff --git a/data/engine-cli/docker_volume_ls.yaml b/data/engine-cli/docker_volume_ls.yaml index 0763128e9..9d82c3eb8 100644 --- a/data/engine-cli/docker_volume_ls.yaml +++ b/data/engine-cli/docker_volume_ls.yaml @@ -96,7 +96,7 @@ examples: |- The currently supported filters are: - - dangling (boolean - true or false, 0 or 1) + - dangling (Boolean - true or false, 0 or 1) - driver (a volume driver's name) - label (`label=` or `label==`) - name (a volume's name) @@ -133,7 +133,7 @@ examples: |- The `label` filter matches volumes based on the presence of a `label` alone or a `label` and a value. - First, let's create some volumes to illustrate this; + First, create some volumes to illustrate this; ```console $ docker volume create the-doctor --label is-timelord=yes diff --git a/data/engine-cli/docker_volume_rm.yaml b/data/engine-cli/docker_volume_rm.yaml index 40292ac5c..245cd3b0a 100644 --- a/data/engine-cli/docker_volume_rm.yaml +++ b/data/engine-cli/docker_volume_rm.yaml @@ -2,7 +2,7 @@ command: docker volume rm aliases: docker volume rm, docker volume remove short: Remove one or more volumes long: | - Remove one or more volumes. You cannot remove a volume that is in use by a container. + Remove one or more volumes. You can't remove a volume that's in use by a container. usage: docker volume rm [OPTIONS] VOLUME [VOLUME...] pname: docker volume plink: docker_volume.yaml diff --git a/data/engine-cli/docker_wait.yaml b/data/engine-cli/docker_wait.yaml index ceb4e5f4f..b37d2319a 100644 --- a/data/engine-cli/docker_wait.yaml +++ b/data/engine-cli/docker_wait.yaml @@ -16,34 +16,6 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false -examples: |- - Start a container in the background. - - ```console - $ docker run -dit --name=my_container ubuntu bash - ``` - - Run `docker wait`, which should block until the container exits. - - ```console - $ docker wait my_container - ``` - - In another terminal, stop the first container. The `docker wait` command above - returns the exit code. 
- - ```console - $ docker stop my_container - ``` - - This is the same `docker wait` command from above, but it now exits, returning - `0`. - - ```console - $ docker wait my_container - - 0 - ``` deprecated: false experimental: false experimentalcli: false diff --git a/data/redirects.yml b/data/redirects.yml index d3c4aa812..6c850c7c5 100644 --- a/data/redirects.yml +++ b/data/redirects.yml @@ -85,6 +85,14 @@ - /go/access-tokens/ "/desktop/mac/apple-silicon/": - /go/apple-silicon/ +"/engine/api/#deprecated-api-versions": + - /engine/api/v1.23/ + - /engine/api/v1.22/ + - /engine/api/v1.21/ + - /engine/api/v1.20/ + - /engine/api/v1.19/ + - /engine/api/v1.18/ + "/engine/security/#docker-daemon-attack-surface": # Details about the "Docker Daemon attack surface". This redirect is currently # used in warnings printed by the Docker Engine, and in the installation script @@ -571,6 +579,8 @@ - /feedback/extension/ "https://docs.google.com/forms/u/1/d/e/1FAIpQLScgzKwqQyr7BmnphdrcGTO3wkye_McHJVsWzOIgEESfqmditQ/formResponse": - /feedback/runtime/ +"https://docs.google.com/forms/d/e/1FAIpQLSeqCJNB45MeKO_ofBO4ZvglZ0cevMMD_FYzncKoZNwqz_9bxg/viewform": + - /feedback/desktop-build/ # Docker Scout "/scout/": @@ -614,25 +624,21 @@ "/scout/integrations/source-code-management/github/": - "/go/scout-github/" -# Build links +# Build links (internal) +"/build/bake/reference/": + - /build/customize/bake/file-definition/ "/desktop/use-desktop/builds/": - /go/builds/ "/build/builders/": - /go/builders/ "/build/builders/#selected-builder": - /go/builders/selected/ -"https://www.docker.com/build-early-access-program/?utm_campaign=onboard-30-customer-zero&utm_medium=in-product-ad&utm_source=desktop_v4": - - /go/build-eap/ -"https://build.docker.com/": - - /go/build-ga/ "/build/building/multi-platform/": - /go/build-multi-platform/ "/build/cache/backends/": - /go/build-cache-backends/ "/build/exporters/": - /go/build-exporters/ -"/build/bake/reference/": - - /build/customize/bake/file-definition/ "/build/ci/github-actions/attestations/": - /go/build-attestations-gha/ "/build/attestations/": @@ -640,6 +646,14 @@ "/build/attestations/slsa-provenance/": - /go/provenance/ +# Build links (external) +"https://www.docker.com/build-early-access-program/?utm_campaign=onboard-30-customer-zero&utm_medium=in-product-ad&utm_source=desktop_v4": + - /go/build-eap/ +"https://build.docker.com/": + - /go/build-ga/ +"https://www.docker.com/products/build-cloud/": + - /go/docker-build-cloud/ + # CLI backlinks "/config/filter/": - /go/filter/ diff --git a/data/toc.yaml b/data/toc.yaml index 568570726..dfb72edac 100644 --- a/data/toc.yaml +++ b/data/toc.yaml @@ -152,6 +152,18 @@ Guides: path: /language/php/configure-ci-cd/ - title: "Test your deployment" path: /language/php/deploy/ + +- sectiontitle: Use-case guides + section: + - sectiontitle: Generative AI + section: + - path: /guides/use-case/genai-pdf-bot/ + title: Overview + - path: /guides/use-case/genai-pdf-bot/containerize/ + title: Containerize your app + - path: /guides/use-case/genai-pdf-bot/develop/ + title: Develop your app + - sectiontitle: Develop with Docker section: - path: /develop/ @@ -263,8 +275,6 @@ Reference: section: - path: /engine/reference/commandline/docker/ title: docker (base command) - - path: /engine/reference/commandline/attach/ - title: docker attach - path: /engine/reference/commandline/build/ title: docker build - sectiontitle: docker builder @@ -321,8 +331,6 @@ Reference: title: docker checkpoint ls - path: /engine/reference/commandline/checkpoint_rm/ 
title: docker checkpoint rm - - path: /engine/reference/commandline/commit/ - title: docker commit - sectiontitle: docker compose section: - path: /compose/reference/ @@ -483,20 +491,8 @@ Reference: title: docker context update - path: /engine/reference/commandline/context_use/ title: docker context use - - path: /engine/reference/commandline/cp/ - title: docker cp - - path: /engine/reference/commandline/create/ - title: docker create - - path: /engine/reference/commandline/diff/ - title: docker diff - - path: /engine/reference/commandline/events/ - title: docker events - path: /engine/reference/commandline/exec/ title: docker exec - - path: /engine/reference/commandline/export/ - title: docker export - - path: /engine/reference/commandline/history/ - title: docker history - sectiontitle: docker image section: - path: /engine/reference/commandline/image/ @@ -527,24 +523,16 @@ Reference: title: docker image tag - path: /engine/reference/commandline/images/ title: docker images - - path: /engine/reference/commandline/import/ - title: docker import - path: /engine/reference/commandline/info/ title: docker info - path: /engine/reference/commandline/init/ title: docker init (Beta) - path: /engine/reference/commandline/inspect/ title: docker inspect - - path: /engine/reference/commandline/kill/ - title: docker kill - - path: /engine/reference/commandline/load/ - title: docker load - path: /engine/reference/commandline/login/ title: docker login - path: /engine/reference/commandline/logout/ title: docker logout - - path: /engine/reference/commandline/logs/ - title: docker logs - sectiontitle: docker manifest section: - path: /engine/reference/commandline/manifest/ @@ -595,8 +583,6 @@ Reference: title: docker node rm - path: /engine/reference/commandline/node_update/ title: docker node update - - path: /engine/reference/commandline/pause/ - title: docker pause - sectiontitle: docker plugin section: - path: /engine/reference/commandline/plugin/ @@ -619,26 +605,14 @@ Reference: title: docker plugin set - path: /engine/reference/commandline/plugin_upgrade/ title: docker plugin upgrade - - path: /engine/reference/commandline/port/ - title: docker port - path: /engine/reference/commandline/ps/ title: docker ps - path: /engine/reference/commandline/pull/ title: docker pull - path: /engine/reference/commandline/push/ title: docker push - - path: /engine/reference/commandline/rename/ - title: docker rename - - path: /engine/reference/commandline/restart/ - title: docker restart - - path: /engine/reference/commandline/rm/ - title: docker rm - - path: /engine/reference/commandline/rmi/ - title: docker rmi - path: /engine/reference/commandline/run/ title: docker run - - path: /engine/reference/commandline/save/ - title: docker save - sectiontitle: docker scout section: - path: /engine/reference/commandline/scout/ @@ -741,12 +715,6 @@ Reference: title: docker stack rm - path: /engine/reference/commandline/stack_services/ title: docker stack services - - path: /engine/reference/commandline/start/ - title: docker start - - path: /engine/reference/commandline/stats/ - title: docker stats - - path: /engine/reference/commandline/stop/ - title: docker stop - sectiontitle: docker swarm section: - path: /engine/reference/commandline/swarm/ @@ -779,10 +747,6 @@ Reference: title: docker system info - path: /engine/reference/commandline/system_prune/ title: docker system prune - - path: /engine/reference/commandline/tag/ - title: docker tag - - path: /engine/reference/commandline/top/ - title: docker top - sectiontitle: 
docker trust section: - path: /engine/reference/commandline/trust/ @@ -805,10 +769,6 @@ Reference: title: docker trust signer add - path: /engine/reference/commandline/trust_signer_remove/ title: docker trust signer remove - - path: /engine/reference/commandline/unpause/ - title: docker unpause - - path: /engine/reference/commandline/update/ - title: docker update - path: /engine/reference/commandline/version/ title: docker version - sectiontitle: docker volume @@ -825,8 +785,6 @@ Reference: title: docker volume rm - path: /engine/reference/commandline/volume_update/ title: docker volume update - - path: /engine/reference/commandline/wait/ - title: docker wait - title: Dockerfile reference path: /engine/reference/builder/ - title: Daemon CLI (dockerd) @@ -885,18 +843,6 @@ Reference: title: v1.25 reference - path: /engine/api/v1.24/ title: v1.24 reference - - path: /engine/api/v1.23/ - title: v1.23 reference - - path: /engine/api/v1.22/ - title: v1.22 reference - - path: /engine/api/v1.21/ - title: v1.21 reference - - path: /engine/api/v1.20/ - title: v1.20 reference - - path: /engine/api/v1.19/ - title: v1.19 reference - - path: /engine/api/v1.18/ - title: v1.18 reference - sectiontitle: Docker Hub API section: - title: Docker Hub API @@ -1597,8 +1543,6 @@ Manuals: section: - path: /config/containers/logging/local/ title: Local file logging driver - - path: /config/containers/logging/logentries/ - title: Logentries logging driver - path: /config/containers/logging/json-file/ title: JSON File logging driver - path: /config/containers/logging/gelf/ @@ -1721,10 +1665,12 @@ Manuals: title: Deprecated features - sectiontitle: Release notes section: - - path: /engine/release-notes/24.0/ - title: Engine 24.0 + - path: /engine/release-notes/25.0/ + title: Engine 25.0 - sectiontitle: Previous versions section: + - path: /engine/release-notes/24.0/ + title: Engine 24.0 - path: /engine/release-notes/23.0/ title: Engine 23.0 - path: /engine/release-notes/20.10/ diff --git a/go.mod b/go.mod index a641876af..7ba1d5794 100644 --- a/go.mod +++ b/go.mod @@ -6,12 +6,9 @@ toolchain go1.21.1 require ( github.com/docker/buildx v0.12.1 // indirect - github.com/docker/cli v25.0.0-rc.3+incompatible // indirect - github.com/docker/compose/v2 v2.24.1 // indirect + github.com/docker/cli v25.0.1-0.20240119143135-01f933261885+incompatible // indirect + github.com/docker/compose/v2 v2.24.2-0.20240119115212-388169011f47 // indirect github.com/docker/scout-cli v1.3.0 // indirect github.com/moby/buildkit v0.13.0-beta1.0.20240116143623-28ce478b1fde // indirect - github.com/moby/moby v24.0.8-0.20240109122856-854ca341c0f6+incompatible // indirect + github.com/moby/moby v25.0.0+incompatible // indirect ) - -// buildkit depends on cli v25 beta1, pin to v24 -replace github.com/docker/cli => github.com/docker/cli v24.0.8-0.20240103162225-b0c5946ba5d8+incompatible diff --git a/go.sum b/go.sum index 94e6729c4..77519d2d5 100644 --- a/go.sum +++ b/go.sum @@ -74,6 +74,8 @@ github.com/docker/cli v24.0.8-0.20240103162225-b0c5946ba5d8+incompatible h1:hHBH github.com/docker/cli v24.0.8-0.20240103162225-b0c5946ba5d8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v25.0.0-beta.1+incompatible h1:bJzIgR4mKNpceAwwi19SqZK0AbztMc3nQTgnvxxyY/A= github.com/docker/cli v25.0.0-beta.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v25.0.1-0.20240119143135-01f933261885+incompatible h1:UkZcGfKrx1PUDTT/TEzeYpyeRvNVbNqsj01yasxHuvA= +github.com/docker/cli 
v25.0.1-0.20240119143135-01f933261885+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/compose-cli v1.0.35 h1:uZyEHLalfqBS2PiTpA1LAULyJmuQ+YtZg7nG4Xl3/Cc= github.com/docker/compose-cli v1.0.35/go.mod h1:mSXI4hFLpRU3EtI8NTo32bNwI0UXSr8jnq+/rYjGAUU= github.com/docker/compose/v2 v2.22.0 h1:3rRz4L7tPU75wRsV8JZh2/aTgerQvPa1cpzZN+tHqUY= @@ -88,6 +90,8 @@ github.com/docker/compose/v2 v2.24.0 h1:Gvmg3E5/Rqa4G340sYcUk/DIegT5Nod2ZV3MqR24 github.com/docker/compose/v2 v2.24.0/go.mod h1:sDypMTKq6Mrp0W5NZ6+uiqxR9zEukI1RVuFRqwBTljs= github.com/docker/compose/v2 v2.24.1 h1:Mk14AOkxetMKrWb1bOnx7bEfS+v/moaCZnU69QqUw6A= github.com/docker/compose/v2 v2.24.1/go.mod h1:rrqu0bPBN/HD2wRSNwVN+V9SDfhVQnKxF1DP9B9WOdI= +github.com/docker/compose/v2 v2.24.2-0.20240119115212-388169011f47 h1:4vvuN0itjUryASt/WgrrLt7Tat7L53Ovt6Y1tLmVZPQ= +github.com/docker/compose/v2 v2.24.2-0.20240119115212-388169011f47/go.mod h1:YMMi6kNJdi3gELhMyhdnZinMiZvSWoyAl6i7XoeqFDg= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= @@ -178,6 +182,8 @@ github.com/moby/moby v24.0.5+incompatible h1:uUbydai/Y9J7Ybt+lFI3zBdnsMYXnXE9vEc github.com/moby/moby v24.0.5+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/moby v24.0.8-0.20240109122856-854ca341c0f6+incompatible h1:bwE6hpc+Kq+UhTMUOdepQYXDBIqQENvj/LuuRJmTpAs= github.com/moby/moby v24.0.8-0.20240109122856-854ca341c0f6+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/moby v25.0.0+incompatible h1:KIFudkwXNK+kBrnCxWZNwhEf/jJzdjQAP7EF/awywMI= +github.com/moby/moby v25.0.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= diff --git a/hugo.yaml b/hugo.yaml index beeb51e89..7f438aceb 100644 --- a/hugo.yaml +++ b/hugo.yaml @@ -38,7 +38,7 @@ related: build: buildStats: - enabled: true + enable: true cachebusters: - source: assets/watching/hugo_stats\.json target: styles\.css @@ -91,8 +91,8 @@ params: repo: https://github.com/docker/docs docs_url: https://docs.docker.com - latest_engine_api_version: "1.43" - docker_ce_version: "24.0.7" + latest_engine_api_version: "1.44" + docker_ce_version: "25.0.0" compose_version: "v2.24.1" compose_file_v3: "3.8" compose_file_v2: "2.4" diff --git a/layouts/_default/cli.html b/layouts/_default/cli.html index ef27d29ad..04b8ddf5e 100644 --- a/layouts/_default/cli.html +++ b/layouts/_default/cli.html @@ -13,8 +13,16 @@ {{ .Scratch.Set "subheadings" slice }} {{ partial "breadcrumbs.html" . }}
-    {{ with .Title }}
-      {{ . }}
+    {{ if ne .LinkTitle .Title }}
+    {{/*
+      we use linkTitle for surfacing popular aliases like "docker run"
+      if linkTitle is set, use both alias and canonical cmd as title
+    */}}
+      {{ .LinkTitle }} ({{ .Title }})
+    {{ else }}
+      {{ .Title }}
     {{ end }}
     {{ $data.short | .RenderString (dict "display" "block") }}
     {{ if $data.deprecated }}
@@ -57,6 +65,17 @@
       {{ . }}
       {{ $.Scratch.Add "headings" $heading }}
       {{ highlight (strings.Replace . "\t" "") "console" }}
     {{ end }}
+    {{ with $data.aliases }}
+      {{ $heading := dict "level" 2 "text" "Aliases" }}
+      {{ partial "heading.html" $heading }}
+      {{ $aliases := strings.Split . ", " }}
+      The following commands are equivalent and redirect here:
+      {{ range $aliases }}
+        {{ . }}
+      {{ end }}
+    {{ end }}
     {{ with $data.long }}
       {{ $heading := dict "level" 2 "text" "Description" }}
       {{ partial "heading.html" $heading }}
diff --git a/layouts/partials/header.html b/layouts/partials/header.html
index 4397396cc..c330341b1 100644
--- a/layouts/partials/header.html
+++ b/layouts/partials/header.html
@@ -1,5 +1,5 @@
-
+
diff --git a/static/assets/images/docker-docs-logo.svg b/static/assets/images/docker-docs-logo.svg
deleted file mode 100644
index b8c04779d..000000000
--- a/static/assets/images/docker-docs-logo.svg
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-    docs
-    Created with Sketch.
-
-
-
-
-
-
-
-
-
\ No newline at end of file