From e00d8285c2e07fd90ba1a69843471fe1d971b78a Mon Sep 17 00:00:00 2001 From: Igor Laborie Date: Wed, 15 Nov 2023 07:58:55 +0100 Subject: [PATCH] feat: Initial contribution --- .github/workflows/ci.yml | 64 ++++ .gitignore | 3 + CHANGELOG.md | 15 + Cargo.toml | 8 + README.md | 0 cliff.toml | 83 +++++ deny.toml | 272 ++++++++++++++ justfile | 54 +++ rustainers/Cargo.toml | 59 +++ rustainers/LICENSE-APACHE.txt | 176 +++++++++ rustainers/LICENSE-MIT.txt | 19 + rustainers/README.md | 54 +++ rustainers/examples/common/mod.rs | 14 + rustainers/examples/custom_image.rs | 67 ++++ rustainers/examples/minio.rs | 63 ++++ rustainers/examples/postgres.rs | 53 +++ rustainers/examples/redis.rs | 46 +++ rustainers/src/cmd/error.rs | 83 +++++ rustainers/src/cmd/mod.rs | 172 +++++++++ rustainers/src/container/health.rs | 57 +++ rustainers/src/container/health_check.rs | 55 +++ rustainers/src/container/id.rs | 38 ++ rustainers/src/container/mod.rs | 96 +++++ rustainers/src/container/process.rs | 53 +++ rustainers/src/container/runnable.rs | 67 ++++ ...s__tests__should_serde_docker_process.snap | 33 ++ ...s__tests__should_serde_podman_process.snap | 39 ++ ...e__tests__should_serde_inspect_status.snap | 12 + ...s__should_serde_inspect_status_exited.snap | 12 + rustainers/src/container/state.rs | 97 +++++ rustainers/src/container/wait_condition.rs | 35 ++ rustainers/src/error.rs | 48 +++ rustainers/src/id.rs | 164 ++++++++ rustainers/src/image/error.rs | 8 + rustainers/src/image/id.rs | 38 ++ rustainers/src/image/mod.rs | 11 + rustainers/src/image/name.rs | 155 ++++++++ rustainers/src/image/reference.rs | 36 ++ rustainers/src/images/doc.md | 67 ++++ rustainers/src/images/minio.rs | 182 +++++++++ rustainers/src/images/mod.rs | 10 + rustainers/src/images/postgres.rs | 245 ++++++++++++ rustainers/src/images/redis.rs | 111 ++++++ rustainers/src/lib.rs | 31 ++ rustainers/src/port/error.rs | 12 + rustainers/src/port/exposed.rs | 161 ++++++++ rustainers/src/port/mod.rs | 70 ++++ 
rustainers/src/runner/docker.rs | 139 +++++++ rustainers/src/runner/error.rs | 110 ++++++ rustainers/src/runner/inner.rs | 351 ++++++++++++++++++ rustainers/src/runner/mod.rs | 220 +++++++++++ rustainers/src/runner/nerdctl.rs | 89 +++++ rustainers/src/runner/options.rs | 39 ++ rustainers/src/runner/podman.rs | 180 +++++++++ ...__runner__docker__tests__should_serde.snap | 21 ++ ...__docker__tests__should_serde_compose.snap | 13 + ..._runner__nerdctl__tests__should_serde.snap | 15 + ...__runner__podman__tests__should_serde.snap | 14 + ...__podman__tests__should_serde_compose.snap | 13 + rustainers/src/version.rs | 221 +++++++++++ .../tests/assets/docker-compose_ps.jsonl | 3 + .../tests/assets/docker-compose_version.json | 3 + rustainers/tests/assets/docker-ps.jsonl | 3 + rustainers/tests/assets/docker_version.json | 19 + .../tests/assets/inspect-state-exited.json | 13 + rustainers/tests/assets/inspect-state.json | 23 ++ .../tests/assets/nerdctl-compose_version.json | 3 + rustainers/tests/assets/nerdctl_version.json | 36 ++ .../tests/assets/podman-compose_version.txt | 4 + rustainers/tests/assets/podman_lookup.json | 214 +++++++++++ rustainers/tests/assets/podman_ps.json | 128 +++++++ rustainers/tests/assets/podman_version.json | 22 ++ rustainers/tests/common/mod.rs | 34 ++ rustainers/tests/custom.rs | 36 ++ rustainers/tests/images.rs | 101 +++++ 75 files changed, 5315 insertions(+) create mode 100644 .github/workflows/ci.yml create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 Cargo.toml create mode 100644 README.md create mode 100644 cliff.toml create mode 100644 deny.toml create mode 100644 justfile create mode 100644 rustainers/Cargo.toml create mode 100644 rustainers/LICENSE-APACHE.txt create mode 100644 rustainers/LICENSE-MIT.txt create mode 100644 rustainers/README.md create mode 100644 rustainers/examples/common/mod.rs create mode 100644 rustainers/examples/custom_image.rs create mode 100644 rustainers/examples/minio.rs create mode 
100644 rustainers/examples/postgres.rs create mode 100644 rustainers/examples/redis.rs create mode 100644 rustainers/src/cmd/error.rs create mode 100644 rustainers/src/cmd/mod.rs create mode 100644 rustainers/src/container/health.rs create mode 100644 rustainers/src/container/health_check.rs create mode 100644 rustainers/src/container/id.rs create mode 100644 rustainers/src/container/mod.rs create mode 100644 rustainers/src/container/process.rs create mode 100644 rustainers/src/container/runnable.rs create mode 100644 rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_docker_process.snap create mode 100644 rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_podman_process.snap create mode 100644 rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status.snap create mode 100644 rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status_exited.snap create mode 100644 rustainers/src/container/state.rs create mode 100644 rustainers/src/container/wait_condition.rs create mode 100644 rustainers/src/error.rs create mode 100644 rustainers/src/id.rs create mode 100644 rustainers/src/image/error.rs create mode 100644 rustainers/src/image/id.rs create mode 100644 rustainers/src/image/mod.rs create mode 100644 rustainers/src/image/name.rs create mode 100644 rustainers/src/image/reference.rs create mode 100644 rustainers/src/images/doc.md create mode 100644 rustainers/src/images/minio.rs create mode 100644 rustainers/src/images/mod.rs create mode 100644 rustainers/src/images/postgres.rs create mode 100644 rustainers/src/images/redis.rs create mode 100644 rustainers/src/lib.rs create mode 100644 rustainers/src/port/error.rs create mode 100644 rustainers/src/port/exposed.rs create mode 100644 rustainers/src/port/mod.rs create mode 100644 rustainers/src/runner/docker.rs create mode 100644 rustainers/src/runner/error.rs create mode 
100644 rustainers/src/runner/inner.rs create mode 100644 rustainers/src/runner/mod.rs create mode 100644 rustainers/src/runner/nerdctl.rs create mode 100644 rustainers/src/runner/options.rs create mode 100644 rustainers/src/runner/podman.rs create mode 100644 rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde.snap create mode 100644 rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde_compose.snap create mode 100644 rustainers/src/runner/snapshots/rustainers__runner__nerdctl__tests__should_serde.snap create mode 100644 rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde.snap create mode 100644 rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde_compose.snap create mode 100644 rustainers/src/version.rs create mode 100644 rustainers/tests/assets/docker-compose_ps.jsonl create mode 100644 rustainers/tests/assets/docker-compose_version.json create mode 100644 rustainers/tests/assets/docker-ps.jsonl create mode 100644 rustainers/tests/assets/docker_version.json create mode 100644 rustainers/tests/assets/inspect-state-exited.json create mode 100644 rustainers/tests/assets/inspect-state.json create mode 100644 rustainers/tests/assets/nerdctl-compose_version.json create mode 100644 rustainers/tests/assets/nerdctl_version.json create mode 100644 rustainers/tests/assets/podman-compose_version.txt create mode 100644 rustainers/tests/assets/podman_lookup.json create mode 100644 rustainers/tests/assets/podman_ps.json create mode 100644 rustainers/tests/assets/podman_version.json create mode 100644 rustainers/tests/common/mod.rs create mode 100644 rustainers/tests/custom.rs create mode 100644 rustainers/tests/images.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..a0f1505 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,64 @@ + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +name: 
Continuous integration + +env: + CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: 0 + MSRV: 1.67.0 + +jobs: + tests: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - rust: 1.67.0 + - rust: stable + - rust: beta + - rust: nightly + + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - name: Build + run: cargo build --verbose + - name: Documentation + run: cargo doc --verbose + - name: Tests + run: cargo test --verbose + + clippy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@beta + with: + components: clippy + - name: Lint + run: cargo clippy + + minimal-versions: + name: Check MSRV and minimal-versions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.MSRV }} + - uses: taiki-e/install-action@v2 + with: + tool: cargo-hack + - run: cargo +nightly hack generate-lockfile --remove-dev-deps -Z direct-minimal-versions + - name: Build + run: cargo build --verbose --all-features diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..22d9b57 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +target +Cargo.lock +.DS_Store diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..7598ff7 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,15 @@ +# Changelog + +All notable changes to this project will be documented in this file.
+ +## [unreleased] + +### Features + +- Initial contribution + +### Miscellaneous Tasks + +- Empty + + diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..2d5f234 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,8 @@ +[workspace] +members = ["rustainers"] +resolver = "2" + +[workspace.package] +version = "0.1.0-dev" +edition = "2021" +authors = ["igor.laborie@wefox.com"] diff --git a/README.md b/README.md new file mode 100644 index 0000000..e69de29 diff --git a/cliff.toml b/cliff.toml new file mode 100644 index 0000000..7beeaca --- /dev/null +++ b/cliff.toml @@ -0,0 +1,83 @@ +# git-cliff ~ default configuration file +# https://git-cliff.org/docs/configuration +# +# Lines starting with "#" are comments. +# Configuration options are organized into tables and keys. +# See documentation for more information on available options. + +[changelog] +# changelog header +header = """ +# Changelog\n +All notable changes to this project will be documented in this file.\n +""" +# template for the changelog body +# https://keats.github.io/tera/docs/#introduction +body = """ +{% if version %}\ + ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} +{% else %}\ + ## [unreleased] +{% endif %}\ +{% for group, commits in commits | group_by(attribute="group") %} + ### {{ group | upper_first }} + {% for commit in commits %} + - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }}\ + {% endfor %} +{% endfor %}\n +""" +# remove the leading and trailing whitespace from the template +trim = true +# changelog footer +footer = """ + +""" +# postprocessors +postprocessors = [ + # { pattern = '', replace = "https://github.com/orhun/git-cliff" }, # replace repository URL +] +[git] +# parse the commits based on https://www.conventionalcommits.org +conventional_commits = true +# filter out the commits that are not conventional +filter_unconventional = true +# process each line of a commit as an individual commit 
+split_commits = false +# regex for preprocessing the commit messages +commit_preprocessors = [ + # { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, # replace issue numbers +] +# regex for parsing and grouping commits +commit_parsers = [ + { message = "^feat", group = "Features" }, + { message = "^fix", group = "Bug Fixes" }, + { message = "^doc", group = "Documentation" }, + { message = "^perf", group = "Performance" }, + { message = "^refactor", group = "Refactor" }, + { message = "^style", group = "Styling" }, + { message = "^test", group = "Testing" }, + { message = "^chore\\(release\\): prepare for", skip = true }, + { message = "^chore\\(deps\\)", skip = true }, + { message = "^chore\\(pr\\)", skip = true }, + { message = "^chore\\(pull\\)", skip = true }, + { message = "^chore|ci", group = "Miscellaneous Tasks" }, + { body = ".*security", group = "Security" }, + { message = "^revert", group = "Revert" }, +] +# protect breaking changes from being skipped due to matching a skipping commit_parser +protect_breaking_commits = false +# filter out the commits that are not matched by commit parsers +filter_commits = false +# regex for matching git tags +tag_pattern = "v[0-9].*" + +# regex for skipping tags +skip_tags = "v0.1.0-beta.1" +# regex for ignoring tags +ignore_tags = "" +# sort the tags topologically +topo_order = false +# sort the commits inside sections by oldest/newest order +sort_commits = "oldest" +# limit the number of commits included in the changelog. 
+# limit_commits = 42 diff --git a/deny.toml b/deny.toml new file mode 100644 index 0000000..8b65cb0 --- /dev/null +++ b/deny.toml @@ -0,0 +1,272 @@ +# This template contains all of the possible sections and their default values + +# Note that all fields that take a lint level have these possible values: +# * deny - An error will be produced and the check will fail +# * warn - A warning will be produced, but the check will not fail +# * allow - No warning or error will be produced, though in some cases a note +# will be + +# The values provided in this template are the default values that will be used +# when any section or field is not specified in your own configuration + +# Root options + +# If 1 or more target triples (and optionally, target_features) are specified, +# only the specified targets will be checked when running `cargo deny check`. +# This means, if a particular package is only ever used as a target specific +# dependency, such as, for example, the `nix` crate only being used via the +# `target_family = "unix"` configuration, that only having windows targets in +# this list would mean the nix crate, as well as any of its exclusive +# dependencies not shared by any other crates, would be ignored, as the target +# list here is effectively saying which targets you are building for. +targets = [ + # The triple can be any string, but only the target triples built in to + # rustc (as of 1.40) can be checked against actual config expressions + #{ triple = "x86_64-unknown-linux-musl" }, + # You can also specify which target_features you promise are enabled for a + # particular target. target_features are currently not validated against + # the actual valid features supported by the target architecture. 
+ #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, +] +# When creating the dependency graph used as the source of truth when checks are +# executed, this field can be used to prune crates from the graph, removing them +# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate +# is pruned from the graph, all of its dependencies will also be pruned unless +# they are connected to another crate in the graph that hasn't been pruned, +# so it should be used with care. The identifiers are [Package ID Specifications] +# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html) +#exclude = [] +# If true, metadata will be collected with `--all-features`. Note that this can't +# be toggled off if true, if you want to conditionally enable `--all-features` it +# is recommended to pass `--all-features` on the cmd line instead +all-features = false +# If true, metadata will be collected with `--no-default-features`. The same +# caveat with `all-features` applies +no-default-features = false +# If set, these feature will be enabled when collecting metadata. If `--features` +# is specified on the cmd line they will take precedence over this option. +#features = [] +# When outputting inclusion graphs in diagnostics that include features, this +# option can be used to specify the depth at which feature edges will be added. +# This option is included since the graphs can be quite large and the addition +# of features from the crate(s) to all of the graph roots can be far too verbose. 
+# This option can be overridden via `--feature-depth` on the cmd line +feature-depth = 1 + +# This section is considered when running `cargo deny check advisories` +# More documentation for the advisories section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html +[advisories] +# The path where the advisory database is cloned/fetched into +db-path = "~/.cargo/advisory-db" +# The url(s) of the advisory databases to use +db-urls = ["https://github.com/rustsec/advisory-db"] +# The lint level for security vulnerabilities +vulnerability = "deny" +# The lint level for unmaintained crates +unmaintained = "warn" +# The lint level for crates that have been yanked from their source registry +yanked = "warn" +# The lint level for crates with security notices. Note that as of +# 2019-12-17 there are no security notice advisories in +# https://github.com/rustsec/advisory-db +notice = "warn" +# A list of advisory IDs to ignore. Note that ignored advisories will still +# output a note when they are encountered. +ignore = [ + #"RUSTSEC-0000-0000", +] +# Threshold for security vulnerabilities, any vulnerability with a CVSS score +# lower than the range specified will be ignored. Note that ignored advisories +# will still output a note when they are encountered. +# * None - CVSS Score 0.0 +# * Low - CVSS Score 0.1 - 3.9 +# * Medium - CVSS Score 4.0 - 6.9 +# * High - CVSS Score 7.0 - 8.9 +# * Critical - CVSS Score 9.0 - 10.0 +#severity-threshold = + +# If this is true, then cargo deny will use the git executable to fetch advisory database. +# If this is false, then it uses a built-in git library. +# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support. +# See Git Authentication for more information about setting up git authentication. 
+#git-fetch-with-cli = true + +# This section is considered when running `cargo deny check licenses` +# More documentation for the licenses section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html +[licenses] +# The lint level for crates which do not have a detectable license +unlicensed = "deny" +# List of explicitly allowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. +allow = [ + "MIT", + "Apache-2.0", + "Apache-2.0 WITH LLVM-exception", + "Unicode-DFS-2016", +] +# List of explicitly disallowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. +deny = [ + #"Nokia", +] +# Lint level for licenses considered copyleft +copyleft = "warn" +# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses +# * both - The license will be approved if it is both OSI-approved *AND* FSF +# * either - The license will be approved if it is either OSI-approved *OR* FSF +# * osi - The license will be approved if it is OSI approved +# * fsf - The license will be approved if it is FSF Free +# * osi-only - The license will be approved if it is OSI-approved *AND NOT* FSF +# * fsf-only - The license will be approved if it is FSF *AND NOT* OSI-approved +# * neither - This predicate is ignored and the default lint level is used +allow-osi-fsf-free = "neither" +# Lint level used when no other predicates are matched +# 1. License isn't in the allow or deny lists +# 2. License isn't copyleft +# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" +default = "deny" +# The confidence threshold for detecting a license from license text. +# The higher the value, the more closely the license text must be to the +# canonical license text of a valid SPDX license file. +# [possible values: any between 0.0 and 1.0]. 
+confidence-threshold = 0.8 +# Allow 1 or more licenses on a per-crate basis, so that particular licenses +# aren't accepted for every possible crate as with the normal allow list +exceptions = [ + # Each entry is the crate and version constraint, and its specific allow + # list + #{ allow = ["Zlib"], name = "adler32", version = "*" }, +] + +# Some crates don't have (easily) machine readable licensing information, +# adding a clarification entry for it allows you to manually specify the +# licensing information +#[[licenses.clarify]] +# The name of the crate the clarification applies to +#name = "ring" +# The optional version constraint for the crate +#version = "*" +# The SPDX expression for the license requirements of the crate +#expression = "MIT AND ISC AND OpenSSL" +# One or more files in the crate's source used as the "source of truth" for +# the license expression. If the contents match, the clarification will be used +# when running the license check, otherwise the clarification will be ignored +# and the crate will be checked normally, which may produce warnings or errors +# depending on the rest of your configuration +#license-files = [ +# Each entry is a crate relative path, and the (opaque) hash of its contents +#{ path = "LICENSE", hash = 0xbd0eed23 } +#] + +[licenses.private] +# If true, ignores workspace crates that aren't published, or are only +# published to private registries. +# To see how to mark a crate as unpublished (to the official registry), +# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field. +ignore = false +# One or more private registries that you might publish crates to, if a crate +# is only published to private registries, and ignore is true, the crate will +# not have its license(s) checked +registries = [ + #"https://sekretz.com/registry +] + +# This section is considered when running `cargo deny check bans`. 
+# More documentation about the 'bans' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html +[bans] +# Lint level for when multiple versions of the same crate are detected +multiple-versions = "warn" +# Lint level for when a crate version requirement is `*` +wildcards = "allow" +# The graph highlighting used when creating dotgraphs for crates +# with multiple versions +# * lowest-version - The path to the lowest versioned duplicate is highlighted +# * simplest-path - The path to the version with the fewest edges is highlighted +# * all - Both lowest-version and simplest-path are used +highlight = "all" +# The default lint level for `default` features for crates that are members of +# the workspace that is being checked. This can be overridden by allowing/denying +# `default` on a crate-by-crate basis if desired. +workspace-default-features = "allow" +# The default lint level for `default` features for external crates that are not +# members of the workspace. This can be overridden by allowing/denying `default` +# on a crate-by-crate basis if desired. +external-default-features = "allow" +# List of crates that are allowed. Use with care! +allow = [ + #{ name = "ansi_term", version = "=0.11.0" }, +] +# List of crates to deny +deny = [ + # Each entry the name of a crate and a version range. If version is + # not specified, all versions will be matched. + #{ name = "ansi_term", version = "=0.11.0" }, + # + # Wrapper crates can optionally be specified to allow the crate when it + # is a direct dependency of the otherwise banned crate + #{ name = "ansi_term", version = "=0.11.0", wrappers = [] }, +] + +# List of features to allow/deny +# Each entry the name of a crate and a version range. If version is +# not specified, all versions will be matched. 
+#[[bans.features]] +#name = "reqwest" +# Features to not allow +#deny = ["json"] +# Features to allow +#allow = [ +# "rustls", +# "__rustls", +# "__tls", +# "hyper-rustls", +# "rustls", +# "rustls-pemfile", +# "rustls-tls-webpki-roots", +# "tokio-rustls", +# "webpki-roots", +#] +# If true, the allowed features must exactly match the enabled feature set. If +# this is set there is no point setting `deny` +#exact = true + +# Certain crates/versions that will be skipped when doing duplicate detection. +skip = [ + #{ name = "ansi_term", version = "=0.11.0" }, +] +# Similarly to `skip` allows you to skip certain crates during duplicate +# detection. Unlike skip, it also includes the entire tree of transitive +# dependencies starting at the specified crate, up to a certain depth, which is +# by default infinite. +skip-tree = [ + #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, +] + +# This section is considered when running `cargo deny check sources`. +# More documentation about the 'sources' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html +[sources] +# Lint level for what to happen when a crate from a crate registry that is not +# in the allow list is encountered +unknown-registry = "warn" +# Lint level for what to happen when a crate from a git repository that is not +# in the allow list is encountered +unknown-git = "warn" +# List of URLs for allowed crate registries. Defaults to the crates.io index +# if not specified. If it is specified but empty, no registries are allowed. 
allow-registry = ["https://github.com/rust-lang/crates.io-index"] +# List of URLs for allowed Git repositories +allow-git = [] + +[sources.allow-org] +# 1 or more github.com organizations to allow git sources for +github = [] +# 1 or more gitlab.com organizations to allow git sources for +gitlab = [] +# 1 or more bitbucket.org organizations to allow git sources for +bitbucket = [] diff --git a/justfile b/justfile new file mode 100644 index 0000000..ce83402 --- /dev/null +++ b/justfile @@ -0,0 +1,54 @@ + +# List all just recipes +default: + @just --list --unsorted + +# Install requirements for recipes +requirement: + cargo install cargo-watch + cargo install cargo-nextest + cargo install cargo-llvm-cov + cargo install cargo-sort + cargo install cargo-deny + cargo install cargo-hack + +# Format the code and sort dependencies +format: + cargo fmt + cargo sort --workspace --grouped + +_check_format: + cargo fmt --all -- --check + cargo sort --workspace --grouped --check + +deny: + cargo deny check advisories + cargo deny check bans licenses sources + +# Lint the rust code +lint: + cargo clippy --workspace --all-features --all-targets -- --deny warnings --allow deprecated + +# Launch tests +test: + cargo nextest run + cargo test --doc + +# Test with features combination +test-with-features: + cargo hack check --each-feature --no-dev-deps + +# Check code coverage +coverage: + cargo llvm-cov --open + +# Check the code (formatting, lint, and tests) +check: && _check_format lint test + +# Run TDD mode +tdd: + cargo watch -c -s "just check" + +# Build documentation (rustdoc, book) +doc: + cargo doc --all-features --no-deps diff --git a/rustainers/Cargo.toml b/rustainers/Cargo.toml new file mode 100644 index 0000000..d7a31c8 --- /dev/null +++ b/rustainers/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "rustainers" + +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } + +license = "MIT OR Apache-2.0" + +categories = 
["development-tools::testing"] +keywords = ["testcontainers", "container", "docker", "podman"] +description = "A simple, opinionated way to run containers for tests." +readme = "README.md" +repository = "https://github.com/wefoxplatform/rustainers" + +rust-version = "1.67.0" # toml_datetime + +[features] +default = [] + +# Test features +ensure-docker = [] +ensure-podman = [] +ensure-nerdctl = [] + +very-long-test = [] + +[dependencies] +async-trait = "0.1.74" +hex = { version = "0.4.3", features = ["serde"] } +indexmap = "2.1.0" +serde = { version = "1", features = ["derive", "rc"] } +serde_json = "1" +strum = "0.25.0" +strum_macros = "0.25.0" +thiserror = "1.0" +tokio = { version = "1.34", features = ["time", "process", "fs"] } +tracing = "0.1.40" +typed-builder = "0.18.0" + +[dev-dependencies] +anyhow = "1.0" +assert2 = "0.3" +bytes = "1.5" +futures-util = "0.3.28" +insta = { version = "1.29", features = ["json"] } +object_store = { version = "0.8.0", features = ["aws"] } +rdkafka = { version = "0.36", features = [ + "libz-static", + "gssapi-vendored", + "cmake-build", + "ssl-vendored", + "tracing", +] } +redis = "0.23" +rstest = "0.18" +tokio = { version = "1.34", features = ["macros", "rt-multi-thread"] } +tokio-postgres = "0.7" +tracing-subscriber = { version = "0.3", features = ["fmt"] } diff --git a/rustainers/LICENSE-APACHE.txt b/rustainers/LICENSE-APACHE.txt new file mode 100644 index 0000000..d9a10c0 --- /dev/null +++ b/rustainers/LICENSE-APACHE.txt @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/rustainers/LICENSE-MIT.txt b/rustainers/LICENSE-MIT.txt new file mode 100644 index 0000000..9cf1062 --- /dev/null +++ b/rustainers/LICENSE-MIT.txt @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/rustainers/README.md b/rustainers/README.md new file mode 100644 index 0000000..9767b68 --- /dev/null +++ b/rustainers/README.md @@ -0,0 +1,54 @@ +# wai-testcontainers + +An simple, opinionated way to run containers for tests. + +This crate supports [docker], [podman], or [nerdctl]. + +You can also run [docker compose], [podman-compose] or [nerdctl compose]. + +For now, the implementation is based on the CLI command. We may add more runner based on Rust api later. + +The start command is `async`, and the crate is using [tokio]. + +## Run a simple container + +You need a [`crate::runner::Runner`] to launch an image. 
+You can use the [`crate::runner::Runner::auto`] function to detect an available runner, +or use the [`crate::runner::Runner::docker`], [`crate::runner::Runner::podman`], +[`crate::runner::Runner::nerdctl`] functions to choose a specific runner. + +Then you need to create a runnable image, see module [`crate::images`] to use an existing image, +or create your own image. + +And you just need to `start` your image. The running container can provide some methods +to help you to use this container, e.g. a connection URL to access your container. + +When the container is dropped, the container is stopped (unless you call detach on your container). + +```rust, no_run +use rustainers::runner::{Runner, RunOption}; +use rustainers::images::Postgres; + +# async fn pg() -> anyhow::Result<()> { +let runner = Runner::auto()?; +let image = Postgres::default().with_tag("15.2"); +let container = runner.start(image).await?; + +let url = container.url()?; +do_something_with_postgres(url).await?; +# Ok(()) +# } +# async fn do_something_with_postgres(url: String) -> anyhow::Result<()> { Ok(())} +``` + +## Create a custom image + +See [`crate::images`] module documentation + +[docker]: https://docs.docker.com/engine/reference/commandline/cli/ +[docker compose]: https://docs.docker.com/compose/reference/ +[podman]: https://docs.podman.io/en/latest/Commands.html +[podman-compose]: https://github.com/containers/podman-compose +[nerdctl]: https://github.com/containerd/nerdctl +[nerdctl compose]: https://github.com/containerd/nerdctl/blob/main/docs/compose.md +[tokio]: https://tokio.rs/ diff --git a/rustainers/examples/common/mod.rs b/rustainers/examples/common/mod.rs new file mode 100644 index 0000000..533bfdc --- /dev/null +++ b/rustainers/examples/common/mod.rs @@ -0,0 +1,14 @@ +use tracing::Level; +use tracing_subscriber::fmt::format::FmtSpan; +use tracing_subscriber::fmt::time; + +pub fn init_tracing(level: Level) { + tracing_subscriber::fmt() + .pretty() + .with_line_number(true) + 
.with_thread_names(true) + .with_span_events(FmtSpan::NONE) + .with_timer(time::uptime()) + .with_max_level(level) + .init(); +} diff --git a/rustainers/examples/custom_image.rs b/rustainers/examples/custom_image.rs new file mode 100644 index 0000000..2b154c7 --- /dev/null +++ b/rustainers/examples/custom_image.rs @@ -0,0 +1,67 @@ +use std::process::Command; + +use tracing::{info, Level}; + +use rustainers::runner::{RunOption, Runner}; +use rustainers::{ + ExposedPort, HealthCheck, ImageName, RunnableContainer, RunnableContainerBuilder, + ToRunnableContainer, +}; + +mod common; +pub use self::common::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + init_tracing(Level::INFO); + + let runner = Runner::auto()?; + let options = RunOption::builder() + .with_remove(true) + .with_name("plop-nginx") + .build(); + + let image = Nginx::default(); + let container = runner.start_with_options(image, options).await?; + info!("Now I can use {container}"); + + // Making a dummy HTTP request + let port = container.port.host_port()?; + let url = format!("http://localhost:{port}"); //DevSkim: ignore DS137138 + Command::new("curl").args(["-v", &url]).status()?; + + Ok(()) +} + +const NGINX_IMAGE: &ImageName = &ImageName::new("nginx"); + +const PORT: u16 = 80; + +#[derive(Debug, Clone)] +struct Nginx { + image: ImageName, + port: ExposedPort, +} + +impl Default for Nginx { + fn default() -> Self { + Self { + image: NGINX_IMAGE.clone(), + port: ExposedPort::new(PORT), + } + } +} + +impl ToRunnableContainer for Nginx { + fn to_runnable(&self, builder: RunnableContainerBuilder) -> RunnableContainer { + builder + .with_image(self.image.clone()) + .with_wait_strategy( + HealthCheck::builder() + .with_command("curl -sf http://localhost") //DevSkim: ignore DS137138 + .build(), + ) + .with_port_mappings([self.port]) + .build() + } +} diff --git a/rustainers/examples/minio.rs b/rustainers/examples/minio.rs new file mode 100644 index 0000000..ea5f73d --- /dev/null +++ 
b/rustainers/examples/minio.rs @@ -0,0 +1,63 @@ +use std::time::Duration; + +use bytes::Bytes; +use futures_util::StreamExt; +use object_store::aws::AmazonS3Builder; +use object_store::path::Path; +use object_store::ObjectStore; +use tracing::{info, Level}; + +use rustainers::images::Minio; +use rustainers::runner::{RunOption, Runner}; + +mod common; +pub use self::common::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + init_tracing(Level::INFO); + + let runner = Runner::auto()?; + let image = Minio::default(); + let options = RunOption::builder() + .with_remove(true) + .with_wait_interval(Duration::from_millis(300)) + .build(); + + let container = runner.start_with_options(image, options).await?; + info!("Now I can use {container}"); + + let bucket_name = "plop-bucket"; + container.create_s3_bucket(bucket_name).await?; + info!("Bucket {bucket_name} created"); + + do_something_in_minio(&container, bucket_name).await?; + + Ok(()) +} + +async fn do_something_in_minio(minio: &Minio, bucket_name: &str) -> anyhow::Result<()> { + let endpoint = minio.endpoint()?; + info!("Using MinIO at {endpoint}"); + let s3 = AmazonS3Builder::from_env() + .with_region(minio.region()) + .with_endpoint(endpoint) + .with_bucket_name(bucket_name) + .with_allow_http(true) + .with_access_key_id(minio.access_key_id()) + .with_secret_access_key(minio.secret_access_key()) + .build()?; + + // Store an object + s3.put(&Path::from("plop.txt"), Bytes::from_static(b"plop")) + .await?; + + // list objects + let mut stream = s3.list(None); + while let Some(r) = stream.next().await { + let obj = r?; + info!("🎉 file: {obj:?}"); + } + + Ok(()) +} diff --git a/rustainers/examples/postgres.rs b/rustainers/examples/postgres.rs new file mode 100644 index 0000000..c156d1a --- /dev/null +++ b/rustainers/examples/postgres.rs @@ -0,0 +1,53 @@ +use std::time::Duration; + +use tokio_postgres::NoTls; +use tracing::{info, Level}; + +use rustainers::images::Postgres; +use 
rustainers::runner::{RunOption, Runner}; + +mod common; +pub use self::common::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + init_tracing(Level::INFO); + + let runner = Runner::auto()?; + let image = Postgres::default().with_db("plop"); + let options = RunOption::builder() + .with_remove(true) + .with_wait_interval(Duration::from_millis(300)) + .build(); + + let container = runner.start_with_options(image, options).await?; + info!("Now I can use {container}"); + do_something_in_postgres(&container).await?; + + Ok(()) +} + +async fn do_something_in_postgres(pg: &Postgres) -> anyhow::Result<()> { + let config = pg.config()?; + + // Connect to the database. + let (client, connection) = tokio_postgres::connect(&config, NoTls).await?; + + // The connection object performs the actual communication with the database, + // so spawn it off to run on its own. + tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("connection error: {e}"); + } + }); + + // Now we can execute a simple statement that just returns its parameter. + let rows = client.query("SELECT $1::TEXT", &[&"hello world"]).await?; + + // And then check that we got back the same string we sent over. 
+ let value: &str = rows[0].get(0); + info!("🎉 Result: {value}"); + assert_eq!(value, "hello world"); + + Ok(()) +} diff --git a/rustainers/examples/redis.rs b/rustainers/examples/redis.rs new file mode 100644 index 0000000..ef78933 --- /dev/null +++ b/rustainers/examples/redis.rs @@ -0,0 +1,46 @@ +use std::time::Duration; + +use redis::{Client, Commands}; +use tracing::{info, Level}; + +use rustainers::images::Redis; +use rustainers::runner::{RunOption, Runner}; + +mod common; +pub use self::common::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + init_tracing(Level::INFO); + + let runner = Runner::auto()?; + let image = Redis::default(); + let options = RunOption::builder() + .with_remove(true) + .with_wait_interval(Duration::from_millis(96)) + .build(); + + let container = runner.start_with_options(image, options).await?; + info!("Now I can use {container}"); + + do_something_in_redis(&container).await?; + + Ok(()) +} + +async fn do_something_in_redis(redis: &Redis) -> anyhow::Result<()> { + let endpoint = redis.endpoint()?; + info!("Using Redis at {endpoint}"); + let client = Client::open(endpoint)?; + let mut con = client.get_connection()?; + let key = "plop"; + // throw away the result, just make sure it does not fail + con.set(key, "plop-123")?; + // read back the key and return it. Because the return value + // from the function is a result for integer this will automatically + // convert into one. 
+ let result = con.get::<_, String>(&key)?; + println!("Result: {result}"); + + Ok(()) +} diff --git a/rustainers/src/cmd/error.rs b/rustainers/src/cmd/error.rs new file mode 100644 index 0000000..1ab44ac --- /dev/null +++ b/rustainers/src/cmd/error.rs @@ -0,0 +1,83 @@ +use std::error::Error; +use std::fmt::{self, Display}; +use std::process::Output; + +#[derive(Debug)] +#[non_exhaustive] +pub enum CommandError { + /// Command run but fail + CommandFail { + /// The command + command: String, + /// The command output + output: Output, + }, + /// Command fail to run + CommandProcessError { + /// The command + command: String, + /// The source + source: std::io::Error, + }, + + /// I/O error + IoError { + /// The command + command: String, + /// The source + source: std::io::Error, + }, + + /// A serde error + SerdeError { + /// The command + command: String, + /// The output + output: Output, + /// The source + source: serde_json::Error, + }, +} + +impl Display for CommandError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::CommandFail { command, output } => { + writeln!(f, "Fail to execute command\n{command}")?; + let stdout = String::from_utf8_lossy(&output.stdout); + writeln!(f, "------ stdout ------\n{stdout}")?; + let stderr = String::from_utf8_lossy(&output.stderr); + write!(f, "------ stderr ------\n{stderr}") + } + Self::CommandProcessError { command, .. 
} => { + write!(f, "Fail to execute command\n{command}") + } + Self::IoError { command, source } => { + writeln!(f, "IO error: {source} during")?; + writeln!(f, "{command}") + } + Self::SerdeError { + command, + output, + source, + } => { + writeln!(f, "Serde error: {source} during")?; + writeln!(f, "{command}")?; + let stdout = String::from_utf8_lossy(&output.stdout); + writeln!(f, "------ stdout ------\n{stdout}")?; + let stderr = String::from_utf8_lossy(&output.stderr); + write!(f, "------ stderr ------\n{stderr}") + } + } + } +} + +impl Error for CommandError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + Self::CommandFail { .. } => None, + Self::CommandProcessError { source, .. } | Self::IoError { source, .. } => Some(source), + Self::SerdeError { source, .. } => Some(source), + } + } +} diff --git a/rustainers/src/cmd/mod.rs b/rustainers/src/cmd/mod.rs new file mode 100644 index 0000000..33c013c --- /dev/null +++ b/rustainers/src/cmd/mod.rs @@ -0,0 +1,172 @@ +use std::fmt::{self, Display}; +use std::process::{ExitStatus, Output}; + +use serde::de::DeserializeOwned; +use tracing::{debug, warn}; + +mod error; +pub use self::error::*; + +#[derive(Debug, Clone)] +pub(crate) struct Cmd<'a> { + command: &'a str, + args: Vec, + ignore_stderr: bool, +} + +impl<'a> Cmd<'a> { + pub(crate) fn new(command: &'a str) -> Self { + Self { + command, + args: vec![], + ignore_stderr: false, + } + } + + pub(crate) fn ignore_stderr(&mut self) { + self.ignore_stderr = true; + } + + pub(crate) fn push_arg(&mut self, arg: impl Into) { + self.args.push(arg.into()); + } + + pub(crate) fn push_args(&mut self, args: impl IntoIterator) + where + S: Into, + { + self.args.extend(args.into_iter().map(Into::into)); + } + + fn handle_output(&self, output: std::io::Result) -> Result { + let output = match output { + Ok(output) => output, + Err(source) => { + return Err(CommandError::CommandProcessError { + command: format!("{self}"), + source, + }) + } + }; + if 
!self.ignore_stderr && !output.stderr.is_empty() { + let err = String::from_utf8_lossy(&output.stderr); + let command = self.to_string(); + warn!(%command, "stderr\n{err}"); + } + + if output.status.success() { + Ok(output) + } else { + let command = self.to_string(); + Err(CommandError::CommandFail { command, output }) + } + } + + fn handle_json(&self, output: Output) -> Result + where + T: DeserializeOwned, + { + let result = + serde_json::from_slice(&output.stdout).map_err(|source| CommandError::SerdeError { + command: format!("{self}"), + output, + source, + })?; + Ok(result) + } + + fn handle_json_stream(&self, output: Output) -> Result, CommandError> + where + T: DeserializeOwned, + { + let stream = serde_json::Deserializer::from_slice(&output.stdout).into_iter::(); + stream + .collect::>() + .map_err(|source| CommandError::SerdeError { + command: format!("{self}"), + output, + source, + }) + } +} + +// Blocking API +impl<'a> Cmd<'a> { + fn output_blocking(&self) -> Result { + debug!("Running blocking command\n{self}"); + let mut c = std::process::Command::new(self.command); + let output = c.args(&self.args).output(); + self.handle_output(output) + } + + pub(super) fn result_blocking(self) -> Result { + let output = self.output_blocking()?; + let result = String::from_utf8_lossy(&output.stdout).to_string(); + Ok(result) + } + + pub(super) fn json_blocking(self) -> Result + where + T: DeserializeOwned, + { + let output = self.output_blocking()?; + self.handle_json(output) + } + + pub(super) fn status_blocking(self) -> Result { + let output = self.output_blocking()?; + Ok(output.status) + } +} + +// Async API +impl<'a> Cmd<'a> { + async fn output(&self) -> Result { + debug!("Running command\n{self}"); + let mut c = tokio::process::Command::new(self.command); + let output = c.args(&self.args).output().await; + self.handle_output(output) + } + + pub(super) async fn result(&self) -> Result { + let output = self.output().await?; + let result = 
String::from_utf8_lossy(&output.stdout).to_string(); + Ok(result) + } + + pub(super) async fn json(self) -> Result + where + T: DeserializeOwned, + { + let output = self.output().await?; + self.handle_json(output) + } + + pub(super) async fn json_stream(self) -> Result, CommandError> + where + T: DeserializeOwned, + { + let output = self.output().await?; + self.handle_json_stream(output) + } + + pub(super) async fn status(self) -> Result { + let output = self.output().await?; + Ok(output.status) + } +} + +impl<'a> Display for Cmd<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.command)?; + for arg in &self.args { + let arg = if arg.contains(' ') { + format!("\"{arg}\"") + } else { + arg.to_string() + }; + write!(f, " {arg}")?; + } + Ok(()) + } +} diff --git a/rustainers/src/container/health.rs b/rustainers/src/container/health.rs new file mode 100644 index 0000000..0eba350 --- /dev/null +++ b/rustainers/src/container/health.rs @@ -0,0 +1,57 @@ +use serde::{Deserialize, Serialize}; +use tracing::warn; + +/// The container health +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, strum_macros::Display)] +#[serde(rename_all = "lowercase")] +pub(crate) enum ContainerHealth { + /// Unknown + #[default] + Unknown, + + /// Starting (not yet healthy) + Starting, + + /// Healthy + Healthy, + + /// Fail to be healthy + Unhealthy, +} + +impl<'de> Deserialize<'de> for ContainerHealth { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let state = String::deserialize(deserializer)?; + let result = match state.to_ascii_lowercase().as_str() { + "starting" => Self::Starting, + "healthy" => Self::Healthy, + "unhealthy" => Self::Unhealthy, + "unknown" => Self::Unknown, + _ => { + warn!(?state, "Oops, found an unknown container health"); + Self::Unknown + } + }; + + Ok(result) + } +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + + use assert2::check; + + use 
super::ContainerHealth; + + #[test] + fn should_serde_container_health() { + let json = "\"healthy\"\n"; + let result = serde_json::from_str::(json).unwrap(); + check!(result == ContainerHealth::Healthy); + } +} diff --git a/rustainers/src/container/health_check.rs b/rustainers/src/container/health_check.rs new file mode 100644 index 0000000..d1d93b2 --- /dev/null +++ b/rustainers/src/container/health_check.rs @@ -0,0 +1,55 @@ +use std::time::Duration; + +use typed_builder::TypedBuilder; + +/// A custom health check +/// +/// # Example +/// +/// ```rust +/// # use rustainers::HealthCheck; +/// # use std::time::Duration; +/// let hc = HealthCheck::builder() +/// .with_command("redis-cli --raw incr ping") +/// .with_start_period(Duration::from_millis(96)) +/// .with_interval(Duration::from_millis(96)) +/// .build(); +/// ``` +/// +/// Note that the command is executed inside the container +// TODO maybe a macro rules can help to create the Heathcheck? +#[derive(Debug, Clone, PartialEq, Eq, TypedBuilder)] +#[builder(field_defaults(setter(prefix = "with_")))] +pub struct HealthCheck { + /// Command to run to check health + #[builder(setter(into))] + command: String, + + /// Time between running the check + #[builder(default = Duration::from_secs(1))] + interval: Duration, + + /// Consecutive failures needed to report unhealthy + #[builder(default = 10)] + retries: u32, + + /// Start period for the container to initialize before starting health-retries countdown + #[builder(default = Duration::from_secs(1))] + start_period: Duration, + + /// Maximum time to allow one check to run + #[builder(default = Duration::from_secs(30))] + timeout: Duration, +} + +impl HealthCheck { + pub(crate) fn to_vec(&self) -> Vec { + vec![ + format!("--health-cmd={}", self.command), + format!("--health-interval={}ms", self.interval.as_millis()), + format!("--health-retries={}", self.retries), + format!("--health-start-period={}ms", self.start_period.as_millis()), + 
format!("--health-timeout={}ms", self.timeout.as_millis()), + ] + } +} diff --git a/rustainers/src/container/id.rs b/rustainers/src/container/id.rs new file mode 100644 index 0000000..465622f --- /dev/null +++ b/rustainers/src/container/id.rs @@ -0,0 +1,38 @@ +use std::fmt::{Debug, Display}; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +use crate::{Id, IdError}; + +/// A container id +#[derive(Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +pub struct ContainerId(Id); + +impl From for String { + fn from(value: ContainerId) -> Self { + String::from(value.0) + } +} + +impl FromStr for ContainerId { + type Err = IdError; + + fn from_str(s: &str) -> Result { + s.parse::().map(Self) + } +} + +impl Debug for ContainerId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("ContainerId") + .field(&String::from(*self)) + .finish() + } +} + +impl Display for ContainerId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/rustainers/src/container/mod.rs b/rustainers/src/container/mod.rs new file mode 100644 index 0000000..f9ab77f --- /dev/null +++ b/rustainers/src/container/mod.rs @@ -0,0 +1,96 @@ +use std::fmt::{self, Debug, Display}; +use std::ops::Deref; + +use tracing::{error, info}; + +use crate::runner::Runner; +use crate::ImageReference; + +mod id; +pub use self::id::*; + +mod health_check; +pub use self::health_check::*; + +mod runnable; +pub use self::runnable::*; + +mod process; +pub(crate) use self::process::ContainerProcess; + +mod wait_condition; +pub use self::wait_condition::*; + +mod health; +pub(crate) use self::health::ContainerHealth; + +mod state; +pub use self::state::*; + +/// A running container +#[derive(Debug)] +pub struct Container +where + I: ToRunnableContainer, +{ + pub(crate) runner: Runner, + pub(crate) id: ContainerId, + pub(crate) image: I, + pub(crate) image_ref: ImageReference, + pub(crate) detached: bool, + // 
TODO maybe a lock? +} + +impl Container +where + I: ToRunnableContainer, +{ + /// The container id + pub fn id(&self) -> ContainerId { + self.id + } + + /// Detach the container + /// + /// A detached container won't be stopped during the drop. + pub fn detach(&mut self) { + self.detached = true; + } +} + +impl Deref for Container +where + I: ToRunnableContainer, +{ + type Target = I; + + fn deref(&self) -> &Self::Target { + &self.image + } +} + +impl Drop for Container +where + I: ToRunnableContainer, +{ + fn drop(&mut self) { + if self.detached { + info!("Detached container {self} is NOT stopped"); + return; + } + + info!("🚮 Stopping container"); + if let Err(e) = self.runner.stop(self) { + error!("Fail to stop the container {self} because {e}"); + } + } +} + +impl Display for Container +where + I: ToRunnableContainer, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} {}", self.image_ref, self.id) + } +} diff --git a/rustainers/src/container/process.rs b/rustainers/src/container/process.rs new file mode 100644 index 0000000..bdb9fec --- /dev/null +++ b/rustainers/src/container/process.rs @@ -0,0 +1,53 @@ +use serde::{Deserialize, Serialize}; + +use super::{ContainerId, ContainerStatus}; + +/// Container process +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ContainerProcess { + #[serde(alias = "ID")] + pub(crate) id: ContainerId, + pub(crate) names: Names, + pub(crate) state: ContainerStatus, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum Names { + Name(String), + List(Vec), +} + +impl Names { + pub fn contains(&self, name: &str) -> bool { + match self { + Self::Name(n) => n == name, + Self::List(ns) => ns.iter().any(|n| n == name), + } + } +} + +#[cfg(test)] +mod tests { + use assert2::let_assert; + + use super::*; + #[test] + fn should_serde_docker_process() { + let json_stream = include_str!("../../tests/assets/docker-ps.jsonl"); + let stream = 
+ serde_json::Deserializer::from_str(json_stream).into_iter::(); + let result = stream.collect::, _>>(); + let_assert!(Ok(data) = result); + insta::assert_debug_snapshot!(data); + } + + #[test] + fn should_serde_podman_process() { + let json = include_str!("../../tests/assets/podman_ps.json"); + let result = serde_json::from_str::>(json); + let_assert!(Ok(data) = result); + insta::assert_debug_snapshot!(data); + } +} diff --git a/rustainers/src/container/runnable.rs b/rustainers/src/container/runnable.rs new file mode 100644 index 0000000..170add2 --- /dev/null +++ b/rustainers/src/container/runnable.rs @@ -0,0 +1,67 @@ +use std::fmt::{self, Display}; + +use indexmap::IndexMap; +use typed_builder::TypedBuilder; + +use crate::{ExposedPort, ImageReference, WaitStrategy}; + +/// Contains configuration require to create and run a container +#[derive(Debug, Clone, TypedBuilder)] +#[builder(field_defaults(setter(prefix = "with_")))] +#[non_exhaustive] +pub struct RunnableContainer { + /// The container image + #[builder(setter(into))] + pub(crate) image: ImageReference, + + /// The container name + #[builder(default, setter(into))] + pub(crate) container_name: Option, + + /// The command + #[builder(default, setter(transform = |args: impl IntoIterator>| args.into_iter().map(Into::into).collect()))] + pub(crate) command: Vec, + + /// The environnement variables + #[builder(default, setter(transform = |args: impl IntoIterator, impl Into)>| args.into_iter().map(|(k,v)| (k.into(), v.into())).collect()))] + pub(crate) env: IndexMap, + + /// The wait strategy + #[builder(default, setter(into))] + pub(crate) wait_strategy: WaitStrategy, + + /// The ports mapping + #[builder(default, setter(transform = |args: impl IntoIterator| args.into_iter().collect()))] + pub(crate) port_mappings: Vec, + // TODO networks + // TODO volumes + // TODO entrypoint +} + +impl RunnableContainer { + /// Build the descriptor of an image (name + tag) + #[must_use] + pub fn descriptor(&self) -> String 
{ + match &self.image { + ImageReference::Id(id) => id.to_string(), + ImageReference::Name(name) => name.to_string(), + } + } +} + +impl Display for RunnableContainer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.descriptor()) + } +} + +/// Provide an [`RunnableContainer`] and configure ports +/// +/// A container image should implement this trait. +/// See [`crate::images`] for usage. +// TODO implement this trait for a single docker file with build +// TODO derive macro? +pub trait ToRunnableContainer { + /// Build the runnable container + fn to_runnable(&self, builder: RunnableContainerBuilder) -> RunnableContainer; +} diff --git a/rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_docker_process.snap b/rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_docker_process.snap new file mode 100644 index 0000000..bff7f7a --- /dev/null +++ b/rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_docker_process.snap @@ -0,0 +1,33 @@ +--- +source: rustainers/src/container/process.rs +expression: data +--- +[ + ContainerProcess { + id: ContainerId( + "efb522e2ce2be08d3ff96685c07dd6521c27ddb2aac2c33761f292a74dd872d0", + ), + names: Name( + "redpanda-console", + ), + state: Exited, + }, + ContainerProcess { + id: ContainerId( + "58e4e6921c09b9ad17e62ae6fa92455bbe69eca31782b7b848b3972b86af1eb7", + ), + names: Name( + "redpanda-0", + ), + state: Exited, + }, + ContainerProcess { + id: ContainerId( + "4133b65cffa94eb08caefba96e8182e119efccd9655d3230365bc73b0a6e109e", + ), + names: Name( + "trusting_antonelli", + ), + state: Exited, + }, +] diff --git a/rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_podman_process.snap b/rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_podman_process.snap new file mode 100644 index 0000000..5448418 --- /dev/null +++ 
b/rustainers/src/container/snapshots/rustainers__container__process__tests__should_serde_podman_process.snap @@ -0,0 +1,39 @@ +--- +source: rustainers/src/container/process.rs +expression: data +--- +[ + ContainerProcess { + id: ContainerId( + "e160e315373d38ae1b9560b709de656d94cddad2a774b295030cd016b75ddbd7", + ), + names: List( + [ + "optimistic_bardeen", + ], + ), + state: Exited, + }, + ContainerProcess { + id: ContainerId( + "da0b92141113225a661be45f691e27320077ac4fe749681ca2406846d705d346", + ), + names: List( + [ + "compassionate_kilby", + ], + ), + state: Exited, + }, + ContainerProcess { + id: ContainerId( + "409e83a1c1f9952e3ea51de48dd5829e711b645e240b2a843f77c0a27342398a", + ), + names: List( + [ + "nostalgic_blackburn", + ], + ), + state: Exited, + }, +] diff --git a/rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status.snap b/rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status.snap new file mode 100644 index 0000000..d41936c --- /dev/null +++ b/rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status.snap @@ -0,0 +1,12 @@ +--- +source: rustainers/src/container/state.rs +expression: result +--- +ContainerState { + status: Running, + health: ContainerFullStateHealth { + status: Starting, + failing_streak: 0, + log: None, + }, +} diff --git a/rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status_exited.snap b/rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status_exited.snap new file mode 100644 index 0000000..7a60a4e --- /dev/null +++ b/rustainers/src/container/snapshots/rustainers__container__state__tests__should_serde_inspect_status_exited.snap @@ -0,0 +1,12 @@ +--- +source: rustainers/src/container/state.rs +expression: result +--- +ContainerState { + status: Exited, + health: ContainerFullStateHealth { + status: Unknown, + 
failing_streak: 0, + log: None, + }, +} diff --git a/rustainers/src/container/state.rs b/rustainers/src/container/state.rs new file mode 100644 index 0000000..772dfae --- /dev/null +++ b/rustainers/src/container/state.rs @@ -0,0 +1,97 @@ +use std::fmt::Display; + +use serde::{Deserialize, Serialize}; + +use super::ContainerHealth; + +/// The container State +#[derive( + Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, strum_macros::Display, +)] +#[serde(rename_all = "lowercase")] +pub enum ContainerStatus { + /// Unknown + #[default] + Unknown, + + /// Created + Created, + + /// Running + Running, + + /// Restarting + Restarting, + + /// Stopped + Stopped, + + /// Exited + Exited, + + /// Paused + Paused, + + /// Dead + Dead, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct ContainerState { + #[serde(default)] + pub(crate) status: ContainerStatus, + #[serde(default)] + pub(crate) health: ContainerFullStateHealth, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct ContainerFullStateHealth { + pub(crate) status: ContainerHealth, + #[serde(default)] + failing_streak: usize, + #[serde(default)] + pub(crate) log: Option>, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct HealthCheckLog { + start: String, + end: String, + exit_code: i32, + output: String, +} + +impl Display for HealthCheckLog { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let HealthCheckLog { + start, + end, + exit_code, + output, + } = self; + writeln!(f, "{start} - {end}\n{output}\nExit code: {exit_code}") + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn should_serde_inspect_status() { + let json = include_str!("../../tests/assets/inspect-state.json"); + let result = 
serde_json::from_str::(json).unwrap(); + insta::assert_debug_snapshot!(result); + } + + #[test] + fn should_serde_inspect_status_exited() { + let json = include_str!("../../tests/assets/inspect-state-exited.json"); + let result = serde_json::from_str::(json).unwrap(); + insta::assert_debug_snapshot!(result); + } +} diff --git a/rustainers/src/container/wait_condition.rs b/rustainers/src/container/wait_condition.rs new file mode 100644 index 0000000..8da877b --- /dev/null +++ b/rustainers/src/container/wait_condition.rs @@ -0,0 +1,35 @@ +use crate::{ContainerStatus, HealthCheck}; + +/// Wait strategies +#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[non_exhaustive] +pub enum WaitStrategy { + /// With the image health check + #[default] + HealthCheck, + + /// With custom health check + CustomHealthCheck(HealthCheck), + + /// Wait for the container state + State(ContainerStatus), + // TODO Socket until available (from container) + // nc -z localhost 9092 || exit 1 + + // TODO readiness URL (from container) + // curl --fail http://localhost:8081/ || exit 1 + + // TODO StdLog, ErrLog until match a regex +} + +impl From for WaitStrategy { + fn from(value: HealthCheck) -> Self { + Self::CustomHealthCheck(value) + } +} + +impl From for WaitStrategy { + fn from(value: ContainerStatus) -> Self { + Self::State(value) + } +} diff --git a/rustainers/src/error.rs b/rustainers/src/error.rs new file mode 100644 index 0000000..0b7b252 --- /dev/null +++ b/rustainers/src/error.rs @@ -0,0 +1,48 @@ +use std::num::ParseIntError; + +/// Version parsing error +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum VersionError { + /// Empty string + #[error("Empty string")] + Empty, + + /// Require major & minor version + #[error("Require at least .")] + RequireMajorMinor, + + /// Invalid major version + #[error("Invalid major version because {0}")] + InvalidMajorVersion(ParseIntError), + + /// Invalid minor version + #[error("Invalid minor version because {0}")] + 
InvalidMinorVersion(ParseIntError), + + /// Invalid patch version + #[error("Invalid patch version because {0}")] + InvalidPatchVersion(ParseIntError), +} + +/// Id error +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum IdError { + /// Empty + #[error("Id is empty")] + Empty, + + /// Invalid id + #[error("Id '{value}' is invalid because {source}")] + InvalidId { + /// The invalid value + value: String, + /// The source + source: hex::FromHexError, + }, + + /// Too long + #[error("Id '{0}' is too long (maximum length is 64)")] + TooLong(String), +} diff --git a/rustainers/src/id.rs b/rustainers/src/id.rs new file mode 100644 index 0000000..8377373 --- /dev/null +++ b/rustainers/src/id.rs @@ -0,0 +1,164 @@ +use std::fmt::{Debug, Display}; +use std::str::FromStr; + +use hex::{decode, encode, FromHex}; + +use crate::IdError; + +/// An id for image or a container. +/// +/// The id is a sha252 represented as 32 bytes array. +/// Therefore this type is [`Copy`]. +/// +/// Note because some version of Docker CLI return truncated value, +/// we need to store the size of the id. +/// +/// Most usage of this type is done with the string represenation. +/// +/// Note that the [`Display`] view truncate the id, +/// to have the full [`String`] you need to use the [`Into`] or [`From`] implementation. 
+#[derive(Clone, Copy, PartialEq, Eq)] +pub struct Id([u8; 32], usize); + +impl From for String { + fn from(value: Id) -> Self { + let Id(data, size) = value; + encode(&data[..size]) + } +} + +impl FromStr for Id { + type Err = IdError; + + fn from_str(s: &str) -> Result { + if s.is_empty() { + return Err(IdError::Empty); + } + if s.len() > 64 { + return Err(IdError::TooLong(String::from(s))); + } + + if s.len() == 64 { + let data = <[u8; 32]>::from_hex(s).map_err(|source| IdError::InvalidId { + value: String::from(s), + source, + })?; + Ok(Self(data, 32)) + } else { + let mut data = [0; 32]; + let bytes = decode(s).map_err(|source| IdError::InvalidId { + value: String::from(s), + source, + })?; + let size = bytes.len(); + for (i, b) in bytes.iter().enumerate() { + data[i] = *b; + } + + Ok(Self(data, size)) + } + } +} + +impl Debug for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("Id").field(&String::from(*self)).finish() + } +} + +impl Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = String::from(*self); + let truncate = self.1.min(6) * 2; + write!(f, "{}", &s[..truncate]) + } +} + +mod image_id_serde { + use serde::de::Visitor; + use serde::{Deserialize, Serialize}; + + use super::Id; + + impl Serialize for Id { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let s = String::from(*self); + serializer.serialize_str(&s) + } + } + + struct IdVisitor; + + impl<'de> Visitor<'de> for IdVisitor { + type Value = Id; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "Expected an hex-encoded 32 bits (length 64)") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + v.parse().map_err(E::custom) + } + } + + impl<'de> Deserialize<'de> for Id { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + 
deserializer.deserialize_str(IdVisitor) + } + } +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + use assert2::{check, let_assert}; + use rstest::rstest; + + use super::*; + + #[rstest] + #[case( + "c94f6f8d4ef25b80584b9457ca24b964032681895b3a6fd7cd24fd40fad4895e", + "c94f6f8d4ef2" + )] + #[case( + "637ceb59b7a01df4466442fc5bb30bcf0ce3428289b00bbc02f62ddaa3e6bd8d", + "637ceb59b7a0" + )] + #[case("637ceb59b7a0", "637ceb59b7a0")] + #[case("637c", "637c")] + fn should_parse_id(#[case] s: &str, #[case] short: &str) { + let result = s.parse::(); + let_assert!(Ok(id) = result); + check!(id.to_string() == short); + } + + #[rstest] + #[case::normal("\"c94f6f8d4ef25b80584b9457ca24b964032681895b3a6fd7cd24fd40fad4895e\"")] + #[case::short("\"637ceb59b7a0\"")] + fn should_serde(#[case] s: &str) { + let result = serde_json::from_str::(s); + let_assert!(Ok(id) = result); + let result = serde_json::to_string(&id); + let_assert!(Ok(json) = result); + check!(json == s); + } + + #[rstest] + #[case::empty("")] + #[case::invalid("X94f6f8d4ef25b80584b9457ca24b964032681895b3a6fd7cd24fd40fad4895e")] + #[case::too_long("794f6f8d4ef25b80584b9457ca24b964032681895b3a6fd7cd24fd40fad4895e0000")] + fn should_not_parse_image_id(#[case] s: &str) { + let result = s.parse::(); + let_assert!(Err(_) = result); + } +} diff --git a/rustainers/src/image/error.rs b/rustainers/src/image/error.rs new file mode 100644 index 0000000..7d5da94 --- /dev/null +++ b/rustainers/src/image/error.rs @@ -0,0 +1,8 @@ +/// An image name errors +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum ImageNameError { + /// Empty name + #[error("Image name cannot be empty")] + EmptyName, +} diff --git a/rustainers/src/image/id.rs b/rustainers/src/image/id.rs new file mode 100644 index 0000000..37f886e --- /dev/null +++ b/rustainers/src/image/id.rs @@ -0,0 +1,38 @@ +use std::fmt::{Debug, Display}; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +use crate::{Id, IdError}; + 
+/// An image id +#[derive(Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +pub struct ImageId(Id); + +impl From for String { + fn from(value: ImageId) -> Self { + String::from(value.0) + } +} + +impl FromStr for ImageId { + type Err = IdError; + + fn from_str(s: &str) -> Result { + s.parse::().map(Self) + } +} + +impl Debug for ImageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("ImageId") + .field(&String::from(*self)) + .finish() + } +} + +impl Display for ImageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/rustainers/src/image/mod.rs b/rustainers/src/image/mod.rs new file mode 100644 index 0000000..a1ed3f5 --- /dev/null +++ b/rustainers/src/image/mod.rs @@ -0,0 +1,11 @@ +mod error; +pub use self::error::*; + +mod id; +pub use self::id::*; + +mod reference; +pub use self::reference::*; + +mod name; +pub use self::name::*; diff --git a/rustainers/src/image/name.rs b/rustainers/src/image/name.rs new file mode 100644 index 0000000..5e5e883 --- /dev/null +++ b/rustainers/src/image/name.rs @@ -0,0 +1,155 @@ +use std::borrow::Cow; +use std::fmt::{self, Display}; +use std::str::FromStr; + +use super::ImageNameError; + +/// An image name. +/// +/// It contains the name, and optionaly a tag or a digest. 
+/// +/// # Example +/// +/// Create an constant image +/// +/// ```rust +/// # use rustainers::ImageName; +/// const POSTGRES_IMAGE: &ImageName = &ImageName::new("postgres"); +///``` +/// +/// Parse an image name: +/// +/// ```rust +/// # use rustainers::ImageName; +/// let image = "minio/minio".parse::().unwrap(); +/// assert_eq!(image.to_string(), "minio/minio"); +/// +/// let image_with_tag = "postgres:15.2".parse::().unwrap(); +/// assert_eq!(image_with_tag.to_string(), "postgres:15.2"); +/// +/// let image_with_digest = "redis@sha256:1f9f545dd3d396ee72ca4588d31168341247e46b7e70fabca82f88a809d407a8".parse::().unwrap(); +/// assert_eq!(image_with_digest.to_string(), "redis@sha256:1f9f545dd3d396ee72ca4588d31168341247e46b7e70fabca82f88a809d407a8"); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ImageName { + /// The repository, + /// it can include the registry if necessary + pub(crate) repository: Cow<'static, str>, + + /// The image tag + pub(crate) tag: Option>, + + /// The digest, + /// it should include the `sha256:` prefix + pub(crate) digest: Option>, +} + +impl ImageName { + /// Create a new image name + #[must_use] + pub const fn new(repository: &'static str) -> Self { + Self { + repository: Cow::Borrowed(repository), + tag: None, + digest: None, + } + } + + /// Create a new image with a tag + #[must_use] + pub const fn new_with_tag(repository: &'static str, tag: &'static str) -> Self { + Self { + repository: Cow::Borrowed(repository), + tag: Some(Cow::Borrowed(tag)), + digest: None, + } + } + + /// Create a new image with a digest + #[must_use] + pub const fn new_with_digest(repository: &'static str, digest: &'static str) -> Self { + Self { + repository: Cow::Borrowed(repository), + tag: None, + digest: Some(Cow::Borrowed(digest)), + } + } + + /// Set the image tag + pub fn set_tag(&mut self, tag: impl Into) { + self.tag = Some(Cow::Owned(tag.into())); + } + + /// Set the image digest + pub fn set_digest(&mut self, digest: impl Into) { + 
self.digest = Some(Cow::Owned(digest.into())); + } +} + +impl Display for ImageName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.repository)?; + + if let Some(tag) = &self.tag { + write!(f, ":{tag}")?; + } + if let Some(digest) = &self.digest { + write!(f, "@{digest}")?; + } + + Ok(()) + } +} + +impl FromStr for ImageName { + type Err = ImageNameError; + + fn from_str(s: &str) -> Result { + if s.is_empty() { + return Err(ImageNameError::EmptyName); + } + + let result = if let Some((name, rest)) = s.split_once(':') { + if let Some((tag, digest)) = rest.split_once('@') { + Self { + repository: Cow::Owned(String::from(name)), + tag: Some(Cow::Owned(String::from(tag))), + digest: Some(Cow::Owned(String::from(digest))), + } + } else { + Self { + repository: Cow::Owned(String::from(name)), + tag: Some(Cow::Owned(String::from(rest))), + digest: None, + } + } + } else if let Some((name, digest)) = s.split_once('@') { + Self { + repository: Cow::Owned(String::from(name)), + tag: None, + digest: Some(Cow::Owned(String::from(digest))), + } + } else { + Self { + repository: Cow::Owned(String::from(s)), + tag: None, + digest: None, + } + }; + Ok(result) + } +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + use assert2::let_assert; + + use super::*; + + #[test] + fn should_not_parse_empty_name() { + let result = "".parse::(); + let_assert!(Err(ImageNameError::EmptyName) = result); + } +} diff --git a/rustainers/src/image/reference.rs b/rustainers/src/image/reference.rs new file mode 100644 index 0000000..f3e409d --- /dev/null +++ b/rustainers/src/image/reference.rs @@ -0,0 +1,36 @@ +use std::fmt::{Debug, Display}; + +use super::{ImageId, ImageName}; + +/// An image reference +/// +/// An image can be reference by a name, or by and id +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ImageReference { + /// An image id + Id(ImageId), + + /// An image name + Name(ImageName), +} + +impl From for ImageReference { + fn 
from(value: ImageId) -> Self { + Self::Id(value) + } +} + +impl From for ImageReference { + fn from(value: ImageName) -> Self { + Self::Name(value) + } +} + +impl Display for ImageReference { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Id(id) => write!(f, "{id}"), + Self::Name(name) => write!(f, "{name}"), + } + } +} diff --git a/rustainers/src/images/doc.md b/rustainers/src/images/doc.md new file mode 100644 index 0000000..a8e7a69 --- /dev/null +++ b/rustainers/src/images/doc.md @@ -0,0 +1,67 @@ +# Runnable images + +This module contains runnable images that can be started by a [`crate::runner::Runner`]. + +These images implements the [`crate::ToRunnableContainer`] trait. + +## Create a custom runnable image + +A runnable image should implement the [`crate::ToRunnableContainer`] trait. + +```rust, no_run +use std::fmt::Display; + +use rustainers::runner::{RunOption, Runner}; +use rustainers::{ + ExposedPort, HealthCheck, RunnableContainer, RunnableContainerBuilder, Port, ToRunnableContainer, + ImageName, +}; + +// Declare the image as a constant. +// You can provide a tag or a digest if you want. 
+const NGINX_IMAGE: &ImageName = &ImageName::new("nginx"); + +const PORT: u16 = 80; + +/// The NGinx image +#[derive(Debug, Clone)] +struct Nginx { + /// The image name + image: ImageName, + /// The exposed port + port: ExposedPort, +} + +// Provide an easy way to create the image instance +impl Default for Nginx { + fn default() -> Self { + Self { + image: NGINX_IMAGE.clone(), + port: ExposedPort::new(PORT), // the container port + } + } +} + +// You had to implement the `ToRunnableContainer` trait +impl ToRunnableContainer for Nginx { + + fn to_runnable(&self, builder: RunnableContainerBuilder) -> RunnableContainer { + builder + // provide the image + .with_image(self.image.clone()) + // strategy to check when container is ready + .with_wait_strategy( + // here a `curl` is enough + // Note that this command is executed in the container + // therefore you need to have the `curl` command available in the container + HealthCheck::builder() + .with_command("curl -sf http://localhost") //DevSkim: ignore DS137138 + .build(), + ) + // ports mapping + // bound a random port available port of the host to the container `80` port + .with_port_mappings([self.port]) + .build() + } +} +``` diff --git a/rustainers/src/images/minio.rs b/rustainers/src/images/minio.rs new file mode 100644 index 0000000..31330d7 --- /dev/null +++ b/rustainers/src/images/minio.rs @@ -0,0 +1,182 @@ +use std::time::Duration; + +use crate::runner::RunnerError; +use crate::{ + Container, ExposedPort, HealthCheck, ImageName, Port, PortError, RunnableContainer, + RunnableContainerBuilder, ToRunnableContainer, +}; + +const DATA: &str = "/data"; + +const MINIO_IMAGE: &ImageName = &ImageName::new("minio/minio"); + +const PORT: Port = Port(9000); + +const CONSOLE_PORT: Port = Port(9001); + +/// A `Minio` image +/// +/// # Example +/// +/// ```rust, no_run +/// # async fn run() -> anyhow::Result<()> { +/// use rustainers::images::Minio; +/// +/// let default_image = Minio::default(); +/// +/// let 
custom_image = Minio::default() +/// .with_tag("RELEASE.2023-10-25T06-33-25Z"); +/// +/// # let runner = rustainers::runner::Runner::auto()?; +/// // ... +/// let container = runner.start(default_image).await?; +/// let endpoint = container.endpoint()?; +/// // ... +/// # Ok(()) +/// # } +///``` +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Minio { + image: ImageName, + port: ExposedPort, + console_port: ExposedPort, +} + +impl Minio { + /// Set the image tag + #[must_use] + pub fn with_tag(self, tag: impl Into) -> Self { + let Self { + mut image, + port, + console_port, + } = self; + image.set_tag(tag); + Self { + image, + port, + console_port, + } + } + + /// Set the image digest + #[must_use] + pub fn with_digest(self, digest: impl Into) -> Self { + let Self { + mut image, + port, + console_port, + } = self; + image.set_digest(digest); + Self { + image, + port, + console_port, + } + } +} + +impl Minio { + /// The region + #[must_use] + pub fn region(&self) -> &str { + "us-east-1" + } + + /// The access key id + #[must_use] + pub fn access_key_id(&self) -> &str { + "minioadmin" + } + + /// The secret access key + #[must_use] + pub fn secret_access_key(&self) -> &str { + "minioadmin" + } + + /// Get endpoint URL + /// + /// # Errors + /// + /// Could fail if the port is not bind + pub fn endpoint(&self) -> Result { + let port = self.port.host_port()?; + let url = format!("http://localhost:{port}"); + + Ok(url) + } + + /// Get console endpoint URL + /// + /// # Errors + /// + /// Could fail if the console port is not bind + pub fn console_endpoint(&self) -> Result { + let port = self.console_port.host_port()?; + let url = format!("http://localhost:{port}"); + + Ok(url) + } +} + +impl Container { + /// Create a bucket + /// + /// # Errors + /// + /// Could fail if we cannot create the bucket + pub async fn create_s3_bucket(&self, name: &str) -> Result<(), RunnerError> { + let bucket = format!("{DATA}/{name}"); + self.runner.exec(self, ["mc", "mb", 
&bucket]).await?; + self.runner + .exec(self, ["mc", "anonymous", "set", "public", &bucket]) + .await?; + + Ok(()) + } +} + +impl Default for Minio { + fn default() -> Self { + Minio { + image: MINIO_IMAGE.clone(), + port: ExposedPort::new(PORT), + console_port: ExposedPort::new(CONSOLE_PORT), + } + } +} + +impl ToRunnableContainer for Minio { + fn to_runnable(&self, builder: RunnableContainerBuilder) -> RunnableContainer { + builder + .with_image(self.image.clone()) + .with_wait_strategy({ + HealthCheck::builder() + .with_command("mc ping --exit --json local".to_string()) + .with_interval(Duration::from_millis(250)) + .build() + }) + .with_command(["server", DATA]) + .with_port_mappings([self.port, self.console_port]) + .build() + } +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + use super::*; + use assert2::{check, let_assert}; + + #[test] + fn should_create_endpoint() { + let image = Minio { + port: ExposedPort::fixed(PORT, Port::new(9123)), + ..Default::default() + }; + let result = image.endpoint(); + let_assert!(Ok(endpoint) = result); + check!(endpoint == "http://localhost:9123"); + } +} diff --git a/rustainers/src/images/mod.rs b/rustainers/src/images/mod.rs new file mode 100644 index 0000000..aad7ca8 --- /dev/null +++ b/rustainers/src/images/mod.rs @@ -0,0 +1,10 @@ +#![doc = include_str!("./doc.md")] + +mod postgres; +pub use self::postgres::*; + +mod minio; +pub use self::minio::*; + +mod redis; +pub use self::redis::*; diff --git a/rustainers/src/images/postgres.rs b/rustainers/src/images/postgres.rs new file mode 100644 index 0000000..105d9f2 --- /dev/null +++ b/rustainers/src/images/postgres.rs @@ -0,0 +1,245 @@ +use std::time::Duration; + +use crate::{ + ExposedPort, HealthCheck, ImageName, Port, PortError, RunnableContainer, + RunnableContainerBuilder, ToRunnableContainer, +}; + +const POSTGRES_IMAGE: &ImageName = &ImageName::new("postgres"); + +const PORT: Port = Port(5432); + +/// The default postgres user +const 
POSTGRES_USER: &str = "postgres"; + +/// The default postgres password +const POSTGRES_PASSWORD: &str = "passwd"; + +/// The default postgres database +const POSTGRES_DATABASE: &str = POSTGRES_USER; + +/// A `PostgreSQL` image +/// +/// # Example +/// +/// ```rust, no_run +/// # async fn run() -> anyhow::Result<()> { +/// use rustainers::images::Postgres; +/// +/// let default_image = Postgres::default(); +/// +/// let custom_image = Postgres::default() +/// .with_tag("15.2") +/// .with_db("plop"); +/// +/// # let runner = rustainers::runner::Runner::auto()?; +/// // ... +/// let container = runner.start(default_image).await?; +/// let url = container.url()?; +/// // ... +/// # Ok(()) +/// # } +///``` +#[derive(Debug, Clone)] +pub struct Postgres { + image: ImageName, + user: String, + password: String, + db: String, + port: ExposedPort, +} + +impl Postgres { + /// Set the image tag + #[must_use] + pub fn with_tag(self, tag: impl Into) -> Self { + let Self { + mut image, + user, + password, + db, + port, + } = self; + image.set_tag(tag); + Self { + image, + user, + password, + db, + port, + } + } + + /// Set the image digest + #[must_use] + pub fn with_digest(self, digest: impl Into) -> Self { + let Self { + mut image, + user, + password, + db, + port, + } = self; + image.set_digest(digest); + Self { + image, + user, + password, + db, + port, + } + } + + /// Set the database user + #[must_use] + pub fn with_user(self, user: impl Into) -> Self { + let Self { + image, + password, + db, + port, + .. + } = self; + let user = user.into(); + Self { + image, + user, + password, + db, + port, + } + } + + /// Set the database password + #[must_use] + pub fn with_password(self, password: impl Into) -> Self { + let Self { + image, + user, + db, + port, + .. 
+ } = self; + let password = password.into(); + Self { + image, + user, + password, + db, + port, + } + } + + /// Set the database db + #[must_use] + pub fn with_db(self, db: impl Into) -> Self { + let Self { + image, + user, + password, + port, + .. + } = self; + let db = db.into(); + Self { + image, + user, + password, + db, + port, + } + } +} + +impl Postgres { + /// Get connection URL + /// + /// # Errors + /// + /// Could fail if the port is not bind + pub fn url(&self) -> Result { + let user = &self.user; + let password = &self.password; + let port = self.port.host_port()?; + let database = &self.db; + let url = format!("postgresql://{user}:{password}@localhost:{port}/{database}"); + Ok(url) + } + + /// Get connection string + /// + /// # Errors + /// + /// Could fail if the port is not bind + pub fn config(&self) -> Result { + let user = &self.user; + let password = &self.password; + let port = self.port.host_port()?; + let database = &self.db; + let config = + format!("host=localhost user={user} password={password} port={port} dbname={database}"); + Ok(config) + } +} + +impl Default for Postgres { + fn default() -> Self { + Self { + image: POSTGRES_IMAGE.clone(), + user: String::from(POSTGRES_USER), + password: String::from(POSTGRES_PASSWORD), + db: String::from(POSTGRES_DATABASE), + port: ExposedPort::new(PORT), + } + } +} + +impl ToRunnableContainer for Postgres { + fn to_runnable(&self, builder: RunnableContainerBuilder) -> RunnableContainer { + builder + .with_image(self.image.clone()) + .with_wait_strategy({ + let db = &self.db; + let user = &self.user; + HealthCheck::builder() + .with_command(format!("pg_isready --dbname={db} --username={user}")) + .with_interval(Duration::from_millis(250)) + .build() + }) + .with_env([ + ("POSTGRES_USER", &self.user), + ("POSTGRES_PASSWORD", &self.password), + ("POSTGRES_DB", &self.db), + ]) + .with_port_mappings([self.port]) + .build() + } +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + 
+    use assert2::check;
+
+    use super::*;
+
+    #[test]
+    fn should_build_config() {
+        let image = Postgres {
+            port: ExposedPort::fixed(PORT, Port::new(5432)),
+            ..Default::default()
+        };
+        let result = image.config().unwrap();
+        check!(result == "host=localhost user=postgres password=passwd port=5432 dbname=postgres");
+    }
+
+    #[test]
+    fn should_build_url() {
+        let image = Postgres {
+            port: ExposedPort::fixed(PORT, Port::new(5432)),
+            ..Default::default()
+        };
+        let result = image.url().unwrap();
+        check!(result == "postgresql://postgres:passwd@localhost:5432/postgres");
+    }
+}
diff --git a/rustainers/src/images/redis.rs b/rustainers/src/images/redis.rs
new file mode 100644
index 0000000..fd064a4
--- /dev/null
+++ b/rustainers/src/images/redis.rs
@@ -0,0 +1,112 @@
+use std::time::Duration;
+
+use crate::{
+    ExposedPort, HealthCheck, ImageName, Port, PortError, RunnableContainer,
+    RunnableContainerBuilder, ToRunnableContainer,
+};
+
+const REDIS_IMAGE: &ImageName = &ImageName::new("redis");
+
+const PORT: Port = Port(6379);
+
+/// A `Redis` image
+///
+/// # Example
+///
+/// ```rust, no_run
+/// # async fn run() -> anyhow::Result<()> {
+/// use rustainers::images::Redis;
+///
+/// let default_image = Redis::default();
+///
+/// let custom_image = Redis::default()
+///     .with_tag("7.2");
+///
+/// # let runner = rustainers::runner::Runner::auto()?;
+/// // ...
+/// let container = runner.start(default_image).await?;
+/// let endpoint = container.endpoint()?;
+/// // ...
+/// # Ok(())
+/// # }
+///```
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Redis {
+    image: ImageName,
+    port: ExposedPort,
+}
+
+impl Redis {
+    /// Set the image tag
+    #[must_use]
+    pub fn with_tag(self, tag: impl Into<String>) -> Self {
+        let Self { mut image, port } = self;
+        image.set_tag(tag);
+        Self { image, port }
+    }
+
+    /// Set the image digest
+    #[must_use]
+    pub fn with_digest(self, digest: impl Into<String>) -> Self {
+        let Self { mut image, port } = self;
+        image.set_digest(digest);
+        Self { image, port }
+    }
+}
+
+impl Redis {
+    /// Get endpoint URL
+    ///
+    /// # Errors
+    ///
+    /// Could fail if the port is not bind
+    pub fn endpoint(&self) -> Result<String, PortError> {
+        let port = self.port.host_port()?;
+        let url = format!("redis://localhost:{port}");
+
+        Ok(url)
+    }
+}
+
+impl Default for Redis {
+    fn default() -> Self {
+        Self {
+            image: REDIS_IMAGE.clone(),
+            port: ExposedPort::new(PORT),
+        }
+    }
+}
+
+impl ToRunnableContainer for Redis {
+    fn to_runnable(&self, builder: RunnableContainerBuilder) -> RunnableContainer {
+        builder
+            .with_image(self.image.clone())
+            .with_wait_strategy(
+                HealthCheck::builder()
+                    .with_command("redis-cli --raw incr ping")
+                    .with_start_period(Duration::from_millis(96))
+                    .with_interval(Duration::from_millis(96))
+                    .build(),
+            )
+            .with_port_mappings([self.port])
+            .build()
+    }
+}
+
+#[cfg(test)]
+#[allow(clippy::ignored_unit_patterns)]
+mod tests {
+    use super::*;
+    use assert2::{check, let_assert};
+
+    #[test]
+    fn should_create_endpoint() {
+        let image = Redis {
+            port: ExposedPort::fixed(PORT, Port::new(9123)),
+            ..Default::default()
+        };
+        let result = image.endpoint();
+        let_assert!(Ok(endpoint) = result);
+        check!(endpoint == "redis://localhost:9123");
+    }
+}
diff --git a/rustainers/src/lib.rs b/rustainers/src/lib.rs
new file mode 100644
index 0000000..d828dc6
--- /dev/null
+++ b/rustainers/src/lib.rs
@@ -0,0 +1,31 @@
+#![warn(missing_docs)]
+#![forbid(unsafe_code)]
+#![warn(clippy::perf)]
+#![warn(clippy::pedantic)]
+#![allow(clippy::module_name_repetitions)] +#![doc = include_str!("../README.md")] + +mod error; +pub use self::error::*; + +mod container; +pub use self::container::*; + +mod image; +pub use self::image::*; + +mod port; +pub use self::port::*; + +mod id; +pub(crate) use self::id::Id; + +pub(crate) mod cmd; + +pub(crate) mod version; + +/// Runners like docker, podman, ... +pub mod runner; + +/// Provided images like postgres, redis, ... +pub mod images; diff --git a/rustainers/src/port/error.rs b/rustainers/src/port/error.rs new file mode 100644 index 0000000..dba4ae2 --- /dev/null +++ b/rustainers/src/port/error.rs @@ -0,0 +1,12 @@ +/// Port error +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum PortError { + /// Invalid port mapping + #[error("Invalid port mapping, expect a `:`, got {0}")] + InvalidPortMapping(String), + + /// The port is not yet bind + #[error("Port not bind")] + PortNotBindYet, +} diff --git a/rustainers/src/port/exposed.rs b/rustainers/src/port/exposed.rs new file mode 100644 index 0000000..4ce4205 --- /dev/null +++ b/rustainers/src/port/exposed.rs @@ -0,0 +1,161 @@ +use std::fmt::{self, Display}; +use std::str::FromStr; + +use super::{Port, PortError}; + +/// Define an exposed port +/// +/// # Examples +/// +/// Create an exposed port targeting the container `80` port: +/// +/// ```rust +/// # use rustainers::ExposedPort; +/// let port_mapping = ExposedPort::new(80); +/// ``` +/// +/// Create the exposed host port `8080` targeting the container `80` port: +/// +/// ```rust +/// # use rustainers::ExposedPort; +/// let port_mapping = ExposedPort::fixed(80, 8080); +/// ``` + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ExposedPort { + pub(crate) container_port: Port, + pub(crate) host_port: Option, +} + +impl ExposedPort { + /// Create an exposed port + pub fn new(container_port: impl Into) -> Self { + Self { + container_port: container_port.into(), + host_port: None, + } + } + + /// Create 
an exposed port with a fixed host port + pub fn fixed(container_port: impl Into, host_port: impl Into) -> Self { + Self { + container_port: container_port.into(), + host_port: Some(host_port.into()), + } + } + + /// Get the bound port (host) + /// + /// # Errors + /// + /// Fail if the port is unbound + pub fn host_port(self) -> Result { + self.host_port.ok_or(PortError::PortNotBindYet) + } + + /// Get the container port + #[must_use] + pub fn container_port(self) -> Port { + self.container_port + } + + pub(crate) fn to_publish(self) -> String { + if let Some(host) = self.host_port { + format!("{host}:{}", self.container_port) + } else { + self.container_port.to_string() + } + } + + /// Bind the host port (if it's not already bound) + pub(crate) fn bind_port(&mut self, host_port: Port) { + if self.host_port.is_none() { + self.host_port = Some(host_port); + } + } +} + +impl FromStr for ExposedPort { + type Err = PortError; + + fn from_str(s: &str) -> Result { + let Some((host, container)) = s.split_once(':') else { + return Err(PortError::InvalidPortMapping(s.to_string())); + }; + let host_port = host + .parse() + .map_err(|_| PortError::InvalidPortMapping(s.to_string()))?; + let container_port = container + .parse() + .map_err(|_| PortError::InvalidPortMapping(s.to_string()))?; + + Ok(Self { + host_port: Some(host_port), + container_port, + }) + } +} + +impl Display for ExposedPort { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(port) = self.host_port { + write!(f, "{port} -> {}", self.container_port) + } else { + write!(f, "unbound ({})", self.container_port) + } + } +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + use assert2::{check, let_assert}; + + use super::*; + + #[test] + fn should_parse_exposed_port() { + let s = "1234:80"; + let result = s.parse::().unwrap(); + check!( + result + == ExposedPort { + host_port: Some(Port::new(1234)), + container_port: Port::new(80), + } + ); + } + + #[rstest::rstest] + 
#[case::empty("")] + #[case::only_one("1234")] + #[case::bad_separator("1234->80")] + #[case::empty_port("1234:")] + #[case::invalid_first_port("a:80")] + #[case::invalid_second_port("1234:a")] + fn should_not_parse_invalid_exposed_port(#[case] s: &str) { + let result = s.parse::(); + let_assert!(Err(PortError::InvalidPortMapping(s2)) = result); + check!(s == s2); + } + + #[test] + fn should_bind_port() { + const CONTAINER: u16 = 42; + let host = 1324; + let mut exposed_port = ExposedPort::new(CONTAINER); + + // should fail if no host + let result = exposed_port.host_port(); + let_assert!(Err(_) = result); + + // bind the good port + exposed_port.bind_port(Port(host)); + check!(exposed_port.host_port == Some(Port(host))); + + // should fail if no host + let result = exposed_port.host_port(); + let_assert!(Ok(Port(h)) = result); + check!(h == host); + } +} diff --git a/rustainers/src/port/mod.rs b/rustainers/src/port/mod.rs new file mode 100644 index 0000000..6118d97 --- /dev/null +++ b/rustainers/src/port/mod.rs @@ -0,0 +1,70 @@ +use std::fmt::Display; +use std::str::FromStr; + +mod error; +pub use self::error::*; + +mod exposed; +pub use self::exposed::*; + +/// A Port +/// +/// Example +/// +/// You can create a port from an `u16`: +/// +/// ```rust +/// # use rustainers::Port; +/// let port = Port::from(8080); +///``` +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Port(pub(super) u16); + +impl Port { + /// Create a port + #[must_use] + pub const fn new(port: u16) -> Self { + Self(port) + } +} + +impl From for Port { + fn from(value: u16) -> Self { + Self(value) + } +} + +impl From for u16 { + fn from(value: Port) -> Self { + value.0 + } +} + +impl From for String { + fn from(value: Port) -> Self { + value.0.to_string() + } +} + +impl Display for Port { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for Port { + type Err = PortError; + + fn from_str(s: &str) 
-> Result { + let p = s + .parse() + .map_err(|_| PortError::InvalidPortMapping(s.to_string()))?; + Ok(Self(p)) + } +} + +impl PartialEq for Port { + fn eq(&self, other: &u16) -> bool { + self.0 == *other + } +} diff --git a/rustainers/src/runner/docker.rs b/rustainers/src/runner/docker.rs new file mode 100644 index 0000000..d028b39 --- /dev/null +++ b/rustainers/src/runner/docker.rs @@ -0,0 +1,139 @@ +use std::fmt::Display; + +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use tracing::{debug, info}; + +use crate::cmd::Cmd; +use crate::runner::RunnerError; +use crate::version::Version; + +use super::InnerRunner; + +const MINIMAL_VERSION: Version = Version::new(1, 20); +const COMPOSE_MINIMAL_VERSION: Version = Version::new(2, 10); + +/// A Docker runner +/// +/// This runner use the docker CLI +/// +/// It requires docker client v1.20+ +/// +/// Docker compose should be v2.10+ +#[derive(Debug, Clone, Copy)] +pub struct Docker { + /// The docker client version + pub version: Version, + /// The docker compose client version + pub compose_version: Option, +} + +#[async_trait] +impl InnerRunner for Docker { + fn command(&self) -> Cmd<'static> { + Cmd::new("docker") + } +} + +impl Display for Docker { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Docker {}", self.version)?; + if let Some(compose_version) = self.compose_version { + write!(f, " - compose {compose_version}")?; + } + Ok(()) + } +} + +pub(super) fn create() -> Result { + // Check binary version + let mut cmd = Cmd::new("docker"); + cmd.push_args(["version", "--format", "json"]); + let Ok(Some(version)) = cmd.json_blocking::>() else { + return Err(RunnerError::CommandNotAvailable(String::from("docker"))); + }; + + // Check client version + let current = version.client.api_version; + debug!("Found docker version: {current}"); + if current < MINIMAL_VERSION { + return Err(RunnerError::UnsupportedVersion { + command: String::from("docker"), + current, + 
minimal: MINIMAL_VERSION, + }); + } + + let compose_version = compose_version(); + + Ok(Docker { + version: current, + compose_version, + }) +} + +fn compose_version() -> Option { + let mut cmd = Cmd::new("docker"); + cmd.push_args(["compose", "version", "--format", "json"]); + let Ok(Some(docker_compose_version)) = cmd.json_blocking::>() + else { + debug!("Fail to check docker compose version"); + return None; + }; + + // Check minimal version + let version = docker_compose_version.version; + debug!("Docker compose version: {version}"); + if version < COMPOSE_MINIMAL_VERSION { + info!( + "Docker compose version {version} is not supported, require to be >= {COMPOSE_MINIMAL_VERSION}" + ); + return None; + } + Some(version) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct DockerVersion { + client: DockerVersionItem, + server: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct DockerVersionItem { + api_version: Version, + version: Version, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct DockerComposeVersion { + version: Version, +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn should_serde() { + let json = include_str!("../../tests/assets/docker_version.json"); + let version = serde_json::from_str::(json).unwrap(); + insta::assert_debug_snapshot!(version); + } + #[test] + fn should_serde_compose() { + let json = include_str!("../../tests/assets/docker-compose_version.json"); + let version = serde_json::from_str::(json).unwrap(); + insta::assert_debug_snapshot!(version); + } + + #[cfg(feature = "ensure-docker")] + #[test] + fn should_works() { + _ = tracing_subscriber::fmt::try_init(); + assert2::let_assert!(Ok(_) = create()); + } +} diff --git a/rustainers/src/runner/error.rs b/rustainers/src/runner/error.rs new file mode 100644 index 0000000..5c844d2 --- /dev/null +++ b/rustainers/src/runner/error.rs @@ -0,0 +1,110 @@ +use 
crate::cmd::CommandError; +use crate::version::Version; +use crate::{ContainerId, IdError, Port, RunnableContainer}; + +use super::Runner; + +/// Runner errors +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum RunnerError { + /// Command not available + #[error("Command '{0}' not available")] + CommandNotAvailable(String), + + /// Unsupported version + #[error("{command} version {current} expected to be ≥ that {minimal}")] + UnsupportedVersion { + /// The command + command: String, + /// The current version + current: Version, + /// The minimal version + minimal: Version, + }, + + /// Unable to find an available runner + #[error("No runner available")] + NoRunnerAvailable, + + /// Fail to start a container + #[error( + "Fail to start container because {source}\nrunner: {runner}\ncontainer: {container:#?}" + )] + StartError { + /// The runner + runner: Runner, + /// The runnable container + container: Box, + /// The source error + source: Box, + }, + + /// Fail to exec a container + #[error("Fail to execute command in container {id} because {source}\nrunner: {runner}")] + ExecError { + /// The runner + runner: Runner, + /// The container id + id: ContainerId, + /// The source error + source: Box, + }, + + /// Fail to stop a container + #[error("Fail to stop container {id} because {source}\nrunner: {runner}")] + StopError { + /// The runner + runner: Runner, + /// The container id + id: ContainerId, + /// The source error + source: Box, + }, +} + +/// Errors that could happen during creation of a container +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum ContainerError { + /// Port not found + #[error("Cannot find host port for {id} and container port {container_port}")] + PortNotFound { + /// The container id + id: ContainerId, + /// The container port + container_port: Port, + }, + + /// Fail to start a container + #[error("Container '{0}' cannot be started")] + ContainerCannotBeStarted(ContainerId), + + /// Fail to resume a container 
+ #[error("Container '{0}' cannot be resumed (unpause)")] + ContainerCannotBeResumed(ContainerId), + + /// Invalid container state + #[error("Container {0} state {1:?} is unexpected")] + InvalidContainerState(ContainerId, String), + + /// The container is not healthy + #[error("Container {0} is unhealthy")] + UnhealthyContainer(ContainerId), + + /// Invalid container health + #[error("Container {0} does not have a health check")] + UnknownContainerHealth(ContainerId), + + /// Fail to remove a container + #[error("Container '{0}' cannot be removed")] + ContainerCannotBeRemoved(ContainerId), + + /// Fail to run error + #[error(transparent)] + CommandError(#[from] CommandError), + + /// Id error + #[error(transparent)] + IdError(#[from] IdError), +} diff --git a/rustainers/src/runner/inner.rs b/rustainers/src/runner/inner.rs new file mode 100644 index 0000000..591ce1b --- /dev/null +++ b/rustainers/src/runner/inner.rs @@ -0,0 +1,351 @@ +use std::fmt::{Debug, Display}; +use std::net::SocketAddr; +use std::time::Duration; + +use async_trait::async_trait; +use serde::de::DeserializeOwned; +use tracing::{info, trace, warn}; + +use crate::cmd::Cmd; +use crate::{ + ContainerHealth, ContainerId, ContainerProcess, ContainerState, ContainerStatus, Port, + RunnableContainer, WaitStrategy, +}; + +use super::{ContainerError, RunOption}; + +#[async_trait] +pub(crate) trait InnerRunner: Display + Debug + Send + Sync { + fn command(&self) -> Cmd<'static>; + + #[tracing::instrument(level = "debug", skip(self), fields(runner = %self))] + async fn ps(&self, name: &str) -> Result, ContainerError> { + let mut cmd = self.command(); + cmd.push_args([ + "ps", + "--all", + "--no-trunc", + "--filter", + &format!("name={name}"), + "--format={{json .}}", + ]); + + let containers = cmd.json_stream::().await?; + let result = containers.into_iter().find(|it| it.names.contains(name)); + Ok(result) + } + + #[tracing::instrument(level = "debug", skip(self, image), fields(runner = %self, image = 
%image))] + async fn create_and_start( + &self, + image: &RunnableContainer, + remove: bool, + name: Option<&str>, + ) -> Result { + let mut cmd = self.command(); + cmd.push_args(["run", "--detach"]); + let descriptor = image.descriptor(); + + // --rm + if remove { + cmd.push_arg("--rm"); + } + + // --name + if let Some(name) = name { + cmd.push_args(["--name", name]); + } + + // --env + for (key, value) in &image.env { + cmd.push_args([String::from("--env"), format!("{key}={value}")]); + } + + // --publish + for &port_mapping in &image.port_mappings { + let publish = port_mapping.to_publish(); + cmd.push_args(["--publish", &publish]); + } + + // health check args + if let WaitStrategy::CustomHealthCheck(hc) = &image.wait_strategy { + cmd.push_args(hc.to_vec()); + } + + // descriptor (name:tag or other alternatives) + cmd.push_arg(descriptor); + + // command + cmd.push_args(&image.command); + + // Run + info!(%image, "🚀 Launching container"); + let stdout = cmd.result().await?; + let id = stdout.trim().parse::()?; + + Ok(id) + } + + #[tracing::instrument(level = "debug", skip(self, id), fields(runner = %self, id = %id))] + async fn inspect(&self, id: ContainerId, json_path: &str) -> Result + where + R: DeserializeOwned + Default + Debug, + { + let mut cmd = self.command(); + cmd.push_args(["inspect", &format!("--format={{{{json {json_path}}}}}")]); + cmd.push_arg(id); + let result = cmd.json::().await?; + Ok(result) + } + + #[tracing::instrument(level = "debug", skip(self, id), fields(runner = %self, id = %id))] + async fn port(&self, id: ContainerId, container_port: Port) -> Result { + let mut cmd = self.command(); + cmd.push_arg("port"); + cmd.push_arg(id); + cmd.push_arg(container_port); + let output = cmd.result().await?; + parse_port(&output).ok_or_else(|| { + warn!( %id, %container_port, "Bound port not found\n{cmd}\noutput: '{output}'"); + ContainerError::PortNotFound { id, container_port } + }) + } + + #[tracing::instrument(level = "debug", skip(self, id), 
fields(runner = %self, id = %id))] + async fn start(&self, id: ContainerId) -> Result<(), ContainerError> { + let mut cmd = self.command(); + cmd.push_arg("start"); + cmd.push_arg(id); + let status = cmd.status().await?; + if status.success() { + info!(%id, "▶️ Container started"); + Ok(()) + } else { + warn!(%id, ?status, "⚠️ Fail to start container"); + Err(ContainerError::ContainerCannotBeStarted(id)) + } + } + + #[tracing::instrument(level = "debug", skip(self, id), fields(runner = %self, id = %id))] + async fn unpause(&self, id: ContainerId) -> Result<(), ContainerError> { + let mut cmd = self.command(); + cmd.push_arg("unpause"); + cmd.push_arg(id); + let status = cmd.status().await?; + if status.success() { + info!(%id, "⏯ Container resumed"); + Ok(()) + } else { + warn!(%id, ?status, "⚠️ Fail to resume container"); + Err(ContainerError::ContainerCannotBeResumed(id)) + } + } + + async fn full_status(&self, id: ContainerId) -> Result { + self.inspect(id, ".State").await + } + + #[tracing::instrument(level = "debug", skip(self, id), fields(runner = %self, id = %id))] + async fn wait_ready( + &self, + id: ContainerId, + wait_condition: &WaitStrategy, + interval: Duration, + ) -> Result<(), ContainerError> { + loop { + match wait_condition { + WaitStrategy::HealthCheck | WaitStrategy::CustomHealthCheck(_) => { + if self.check_healthy(id).await? { + info!(%id, "💚 healthy"); + break; + } + } + WaitStrategy::State(state) => { + if self.check_for_state(id, *state).await? 
{ + info!(%id, "💚 state {state} reached"); + break; + } + } + } + + tokio::time::sleep(interval).await; + } + + Ok(()) + } + + async fn check_healthy(&self, id: ContainerId) -> Result { + let state = self.full_status(id).await?; + if !matches!( + state.status, + ContainerStatus::Restarting | ContainerStatus::Running + ) { + warn!(%id, ?state, "✋ The container not seems to run"); + let state = format!("{:?}", state.status); + return Err(ContainerError::InvalidContainerState(id, state)); + } + match state.health.status { + ContainerHealth::Healthy => Ok(true), + ContainerHealth::Unhealthy => { + info!(%id, "🚨 unhealthy"); + Err(ContainerError::UnhealthyContainer(id)) + } + ContainerHealth::Starting => { + // TODO use another way to display logs (like tokio channel) + if let Some(last_log) = state.health.log.unwrap_or_default().last() { + trace!(%id, "Last health check log\n\t{}", last_log); + } + Ok(false) + } + ContainerHealth::Unknown => { + warn!(%id, ?state, "🩺 The container does not have health check"); + Err(ContainerError::UnknownContainerHealth(id)) + } + } + } + + async fn check_for_state( + &self, + id: ContainerId, + state: ContainerStatus, + ) -> Result { + let status = self.full_status(id).await?; + Ok(status.status == state) + } + + #[tracing::instrument(level = "debug", skip(self, id), fields(runner = %self, id = %id))] + async fn rm(&self, id: ContainerId) -> Result<(), ContainerError> { + let mut cmd = self.command(); + cmd.push_arg("rm"); + cmd.push_arg(id); + let status = cmd.status().await?; + + if status.success() { + info!(%id, "🧹 Container removed"); + Ok(()) + } else { + warn!(%id, ?status, "⚠️ Fail to remove container"); + Err(ContainerError::ContainerCannotBeRemoved(id)) + } + } + + #[tracing::instrument(skip(self, image), fields(runner = %self, image = %image))] + async fn start_container( + &self, + image: &mut RunnableContainer, + options: RunOption, + ) -> Result { + let RunOption { + wait_interval, + remove, + name, + } = options; + + 
// Container name + let name = name.as_deref(); + let container_name = image.container_name.as_deref().or(name); + let container = if let Some(name) = container_name { + self.ps(name).await?.map(|it| (it.state, it.id)) + } else { + None + }; + + let id = match container { + // Nothing to do for the container + Some((ContainerStatus::Restarting | ContainerStatus::Running, id)) => id, + // Need to unpause the container + Some((ContainerStatus::Paused, id)) => { + self.unpause(id).await?; + id + } + // Need to start the container + Some(( + ContainerStatus::Created | ContainerStatus::Exited | ContainerStatus::Stopped, + id, + )) => { + self.start(id).await?; + id + } + // Need cleanup before restarting the container + Some((ContainerStatus::Dead, id)) => { + self.rm(id).await?; + self.create_and_start(image, remove, container_name).await? + } + // Need to create and start the container + Some((ContainerStatus::Unknown, _)) | None => { + self.create_and_start(image, remove, container_name).await? 
+ } + }; + + // Wait + self.wait_ready(id, &image.wait_strategy, wait_interval) + .await?; + + // Port Mapping + for port_mapping in &mut image.port_mappings { + if port_mapping.host_port.is_none() { + let host_port = self.port(id, port_mapping.container_port).await?; + port_mapping.bind_port(host_port); + } + } + + Ok(id) + } + + #[tracing::instrument(skip(self, id), fields(runner = %self, id = %id))] + async fn exec(&self, id: ContainerId, exec_command: Vec) -> Result<(), ContainerError> { + let mut cmd = self.command(); + cmd.push_arg("exec"); + cmd.push_arg(id); + cmd.push_args(exec_command); + + let stdout = cmd.result().await?; + info!(%id, "🐚 Executed\n{stdout}",); + + Ok(()) + } + + #[tracing::instrument(skip(self, id), fields(runner = %self, id = %id))] + fn stop(&self, id: ContainerId) -> Result<(), ContainerError> { + let mut cmd = self.command(); + cmd.push_arg("stop"); + cmd.push_arg(id); + let status = cmd.status_blocking()?; + if status.success() { + info!(%id, "🛑 Container stopped"); + } else { + warn!(%id, ?status, "⚠️ Fail to stop container"); + } + Ok(()) + } +} + +fn parse_port(s: &str) -> Option { + s.lines() + .filter_map(|it| it.parse::().ok()) + .map(|it| Port(it.port())) + .next() +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + use assert2::{check, let_assert}; + use rstest::rstest; + + use super::*; + + #[rstest] + #[case("0.0.0.0:32780", 32780)] + #[case( + "0.0.0.0:32780 +[::]:32780 +", + 32780 + )] + fn should_parse_port(#[case] s: &str, #[case] expected: u16) { + let result = parse_port(s); + let_assert!(Some(port) = result); + check!(port == expected); + } +} diff --git a/rustainers/src/runner/mod.rs b/rustainers/src/runner/mod.rs new file mode 100644 index 0000000..1594ad6 --- /dev/null +++ b/rustainers/src/runner/mod.rs @@ -0,0 +1,220 @@ +use std::fmt::{self, Debug, Display}; + +use tracing::info; + +use crate::{Container, RunnableContainer, ToRunnableContainer}; + +mod docker; +pub use 
self::docker::Docker; + +mod nerdctl; +pub use self::nerdctl::Nerdctl; + +mod podman; +pub use self::podman::Podman; + +mod error; +pub use self::error::*; + +mod inner; +pub(crate) use self::inner::*; + +mod options; +pub use self::options::*; + +/// The test containers runner +/// +/// Use the [`Runner::auto`], [`Runner::docker`], [`Runner::podman`], [`Runner::nerdctl`] functions +/// to create your runner +// Note: we do not derive Copy to avoid a future breaking-change if add another implementation +#[derive(Debug, Clone)] +#[non_exhaustive] +pub enum Runner { + /// Docker + Docker(Docker), + + /// Podman + Podman(Podman), + + /// Nerdctl + Nerdctl(Nerdctl), +} + +impl Display for Runner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Docker(runner) => write!(f, "{runner}"), + Self::Podman(runner) => write!(f, "{runner}"), + Self::Nerdctl(runner) => write!(f, "{runner}"), + } + } +} + +impl Runner { + /// Find an available runner + /// + /// # Errors + /// + /// Fail if no runner is available + /// + pub fn auto() -> Result { + // Check Docker + if let Ok(runner) = Self::docker() { + info!("🐳 Using docker"); + return Ok(runner); + } + + // Check Podman + if let Ok(runner) = Self::podman() { + info!("Using podman"); + return Ok(runner); + } + + // Check nerdctl + if let Ok(runner) = Self::nerdctl() { + info!("Using nerdctl"); + return Ok(runner); + } + + // Fallback + Err(RunnerError::NoRunnerAvailable) + } + + /// Create a docker runner + /// + /// # Errors + /// + /// Fail if the docker command is not found + /// Fail if the docker command version is unsupported + pub fn docker() -> Result { + let runner = docker::create()?; + Ok(Self::Docker(runner)) + } + + /// Create a podman runner + /// + /// # Errors + /// + /// Fail if the podman command is not found + /// Fail if the podman command version is unsupported + pub fn podman() -> Result { + let runner = podman::create()?; + Ok(Self::Podman(runner)) + } + + /// Create a 
nerdctl runner + /// + /// # Errors + /// + /// Fail if the nerdctl command is not found + /// Fail if the nerdctl command version is unsupported + pub fn nerdctl() -> Result { + let runner = nerdctl::create()?; + Ok(Self::Nerdctl(runner)) + } +} + +impl Runner { + /// Start a runnable container + /// + /// The default [`RunOption`] is used + /// + /// # Errors + /// + /// Fail if we cannot launch the container + pub async fn start(&self, image: I) -> Result, RunnerError> + where + I: ToRunnableContainer, + { + let options = RunOption::default(); + self.start_with_options(image, options).await + } + + /// Start a runnable container with option + /// + /// # Errors + /// + /// Fail if we cannot launch the container + pub async fn start_with_options( + &self, + image: I, + options: RunOption, + ) -> Result, RunnerError> + where + I: ToRunnableContainer, + { + let mut container = image.to_runnable(RunnableContainer::builder()); + let image_ref = container.image.clone(); + + let id = match self { + Self::Docker(runner) => runner.start_container(&mut container, options).await, + Self::Podman(runner) => runner.start_container(&mut container, options).await, + Self::Nerdctl(runner) => runner.start_container(&mut container, options).await, + } + .map_err(|source| RunnerError::StartError { + runner: self.clone(), + container: Box::new(container), + source: Box::new(source), + })?; + + Ok(Container { + runner: self.clone(), + image, + image_ref, + id, + detached: false, + }) + } + + /// Execute a command into the container + /// + /// # Errors + /// + /// Could fail if we cannot execute the command + pub async fn exec( + &self, + container: &Container, + exec_command: impl IntoIterator + Debug, + ) -> Result<(), RunnerError> + where + S: Into, + I: ToRunnableContainer, + { + let id = container.id; + let exec_command = exec_command.into_iter().map(Into::into).collect(); + match self { + Self::Docker(runner) => runner.exec(id, exec_command).await, + Self::Podman(runner) => 
runner.exec(id, exec_command).await, + Self::Nerdctl(runner) => runner.exec(id, exec_command).await, + } + .map_err(|source| RunnerError::ExecError { + runner: self.clone(), + id, + source: Box::new(source), + }) + } + + /// Stop the container + /// + /// This method is call during the [`crate::Container`] drop if it's not detached + /// + /// # Errors + /// + /// Fail if we cannot launch the container + pub fn stop(&self, container: &Container) -> Result<(), RunnerError> + where + I: ToRunnableContainer, + { + let id = container.id; + match self { + Self::Docker(runner) => runner.stop(id), + Self::Podman(runner) => runner.stop(id), + Self::Nerdctl(runner) => runner.stop(id), + } + .map_err(|source| RunnerError::StopError { + runner: self.clone(), + id, + source: Box::new(source), + }) + } +} diff --git a/rustainers/src/runner/nerdctl.rs b/rustainers/src/runner/nerdctl.rs new file mode 100644 index 0000000..f3684c3 --- /dev/null +++ b/rustainers/src/runner/nerdctl.rs @@ -0,0 +1,89 @@ +use std::fmt::Display; + +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use tracing::debug; + +use super::{InnerRunner, RunnerError}; +use crate::cmd::Cmd; +use crate::version::Version; + +const MINIMAL_VERSION: Version = Version::new(1, 5); + +/// A Nerdctl runner +/// +/// This runner use the nerdctl CLI +/// +/// It requires nerdctl client v1.5+ +#[derive(Debug, Clone, Copy)] +pub struct Nerdctl { + /// The nerdctl version + pub version: Version, +} + +#[async_trait] +impl InnerRunner for Nerdctl { + fn command(&self) -> Cmd<'static> { + Cmd::new("nerdctl") + } +} + +impl Display for Nerdctl { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Nerdctl {}", self.version) + } +} + +pub(super) fn create() -> Result { + // Check binary version + let mut cmd = Cmd::new("nerdctl"); + cmd.push_args(["version", "--format", "json"]); + let Ok(Some(version)) = cmd.json_blocking::>() else { + return 
Err(RunnerError::CommandNotAvailable(String::from("nerdctl")));
    };
    // BUG FIX: this log previously said "docker" (copy-paste from docker.rs);
    // this is the nerdctl runner.
    debug!("Found nerdctl version: {version:#?}");

    // Check client version
    let current = version.client.version;
    if current < MINIMAL_VERSION {
        return Err(RunnerError::UnsupportedVersion {
            command: String::from("nerdctl"),
            current,
            minimal: MINIMAL_VERSION,
        });
    }

    Ok(Nerdctl { version: current })
}

/// Output of `nerdctl version --format json` (only the fields we need).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct NerdctlVersion {
    client: NerdctlClientVersion,
}

/// The `Client` section of the nerdctl version output.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct NerdctlClientVersion {
    version: Version,
}

#[cfg(test)]
mod tests {

    use super::*;

    #[test]
    fn should_serde() {
        let json = include_str!("../../tests/assets/nerdctl_version.json");
        let version = serde_json::from_str::<NerdctlVersion>(json).unwrap();
        insta::assert_debug_snapshot!(version);
    }

    #[cfg(feature = "ensure-nerdctl")]
    #[test]
    fn should_works() {
        _ = tracing_subscriber::fmt::try_init();
        assert2::let_assert!(Ok(_) = create());
    }
}
diff --git a/rustainers/src/runner/options.rs b/rustainers/src/runner/options.rs
new file mode 100644
index 0000000..5c7b1a8
--- /dev/null
+++ b/rustainers/src/runner/options.rs
@@ -0,0 +1,39 @@
use std::time::Duration;

use typed_builder::TypedBuilder;

/// Run options
#[derive(Debug, Clone, TypedBuilder)]
#[builder(field_defaults(default, setter(prefix = "with_")))]
pub struct RunOption {
    /// Wait interval for container health check
    #[builder(default = Duration::from_millis(500))]
    pub(super) wait_interval: Duration,

    /// Automatically remove the container when it exits
    pub(super) remove: bool,

    /// Assign a name to the container
    #[builder(setter(into, strip_option))]
    pub(super) name: Option<String>,
}

impl RunOption {
    /// If we need to remove the container when it's stopped
    #[must_use]
    pub fn remove(&self) -> bool {
        self.remove
    }

    /// The container name
    #[must_use]
    pub fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }
}

impl Default for RunOption {
    fn default() -> Self {
        RunOption::builder().build()
    }
}
diff --git a/rustainers/src/runner/podman.rs b/rustainers/src/runner/podman.rs
new file mode 100644
index 0000000..d500517
--- /dev/null
+++ b/rustainers/src/runner/podman.rs
@@ -0,0 +1,180 @@
use std::fmt::Display;

use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use tracing::{debug, info};

use crate::cmd::Cmd;
use crate::version::Version;
use crate::ContainerProcess;

use super::{ContainerError, InnerRunner, RunnerError};

const MINIMAL_VERSION: Version = Version::new(4, 0);
const COMPOSE_MINIMAL_VERSION: Version = Version::new(1, 0);

/// A Podman runner
///
/// This runner use the podman CLI
///
/// It requires podman client v4.0+
///
/// podman-compose is supported if v1.0+
#[derive(Debug, Clone, Copy)]
pub struct Podman {
    /// The client version
    pub version: Version,

    /// The podman-compose version
    pub compose_version: Option<Version>,
}

#[async_trait]
impl InnerRunner for Podman {
    fn command(&self) -> Cmd<'static> {
        Cmd::new("podman")
    }

    #[tracing::instrument(level = "debug", skip(self), fields(runner = %self))]
    async fn ps(&self, name: &str) -> Result<Option<ContainerProcess>, ContainerError> {
        let mut cmd = self.command();
        cmd.push_args([
            "ps",
            "--all",
            "--no-trunc",
            "--filter",
            &format!("name={name}"),
            "--format=json",
        ]);

        let containers = cmd.json::<Vec<ContainerProcess>>().await?;
        let result = containers.into_iter().find(|it| it.names.contains(name));
        Ok(result)
    }
}

impl Display for Podman {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Podman {}", self.version)?;
        if let Some(compose_version) = self.compose_version {
            write!(f, " - podman-compose {compose_version}")?;
        }
        Ok(())
    }
}

/// Build the Podman runner, checking the client and podman-compose versions.
pub(super) fn create() -> Result<Podman, RunnerError> {
    // Check binary version
    let mut cmd = Cmd::new("podman");
    cmd.push_args(["version", "--format", "json"]);
    let Ok(Some(version)) = cmd.json_blocking::<Option<PodmanVersion>>() else {
        // BUG FIX: the error previously reported "docker" instead of "podman"
        return Err(RunnerError::CommandNotAvailable(String::from("podman")));
    };

    // Check client version
    let current = version.client.api_version;
    // BUG FIX: the log previously said "docker" instead of "podman"
    debug!("Found podman version: {current}");
    if current < MINIMAL_VERSION {
        return Err(RunnerError::UnsupportedVersion {
            // BUG FIX: the command previously reported "docker" instead of "podman"
            command: String::from("podman"),
            current,
            minimal: MINIMAL_VERSION,
        });
    }

    let compose_version = compose_version();

    Ok(Podman {
        version: current,
        compose_version,
    })
}

/// Detect the podman-compose version, returning `None` when it is absent or unsupported.
fn compose_version() -> Option<Version> {
    // Check the help command not fail
    let mut cmd = Cmd::new("podman-compose");
    cmd.ignore_stderr();
    cmd.push_args(["version", "--format", "json"]);
    let Ok(result) = cmd.result_blocking() else {
        debug!("Fail to check podman-compose version");
        return None;
    };

    let Ok(Some(compose_version)) = extract_podman_compose_version(&result) else {
        debug!("Invalid podman-compose version, {result}");
        return None;
    };

    // Check minimal version
    let version = compose_version.version;
    debug!("Podman compose version: {version}");
    if version < COMPOSE_MINIMAL_VERSION {
        info!(
            "Podman compose version {version} is not supported, require to be >= {COMPOSE_MINIMAL_VERSION}"
        );
        return None;
    }

    Some(version)
}

/// podman-compose prints human text before a final JSON line; parse only the last line.
fn extract_podman_compose_version(
    output: &str,
) -> Result<Option<PodmanComposeVersion>, serde_json::Error> {
    let Some(last_line) = output.trim().lines().last() else {
        debug!("Fail to retrieve podman-compose version");
        return Ok(None);
    };

    let result = serde_json::from_str::<PodmanComposeVersion>(last_line)?;
    Ok(Some(result))
}

/// Output of `podman version --format json` (only the fields we need).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct PodmanVersion {
    client: PodmanVersionItem,
    server: Option<PodmanVersionItem>,
}

/// Last JSON line of the `podman-compose version` output.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PodmanComposeVersion {
    version: Version,
}

/// A `Client`/`Server` section of the podman version output.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct PodmanVersionItem {
    #[serde(rename = "APIVersion")]
    api_version: Version,
    version: Version,
}

#[cfg(test)]
mod tests {

    use assert2::let_assert;

    use super::*;

    #[test]
    fn should_serde() {
        let json = include_str!("../../tests/assets/podman_version.json");
        let version = serde_json::from_str::<PodmanVersion>(json).unwrap();
        let result = serde_json::to_string_pretty(&version).unwrap();
        insta::assert_display_snapshot!(result);
    }

    #[test]
    fn should_serde_compose() {
        let output = include_str!("../../tests/assets/podman-compose_version.txt");
        let result = extract_podman_compose_version(output);
        let_assert!(Ok(Some(version)) = result);
        insta::assert_debug_snapshot!(version);
    }

    #[cfg(feature = "ensure-podman")]
    #[test]
    fn should_works() {
        _ = tracing_subscriber::fmt::try_init();
        assert2::let_assert!(Ok(_) = create());
    }
}
diff --git a/rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde.snap b/rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde.snap
new file mode 100644
index 0000000..c6f3f16
--- /dev/null
+++ b/rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde.snap
@@ -0,0 +1,21 @@
---
source: rustainers/src/runner/docker.rs
expression: version
---
DockerVersion {
    client: DockerVersionItem {
        api_version: Version {
            major: 1,
            minor: 41,
            patch: None,
        },
        version: Version {
            major: 20,
            minor: 10,
            patch: Some(
                14,
            ),
        },
    },
    server: None,
}
diff --git a/rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde_compose.snap b/rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde_compose.snap
new file mode 100644
index 0000000..f6a6c0f
--- /dev/null
+++ b/rustainers/src/runner/snapshots/rustainers__runner__docker__tests__should_serde_compose.snap
@@ -0,0 +1,13 @@
---
source: rustainers/src/runner/docker.rs
expression: version
---
+DockerComposeVersion { + version: Version { + major: 2, + minor: 20, + patch: Some( + 2, + ), + }, +} diff --git a/rustainers/src/runner/snapshots/rustainers__runner__nerdctl__tests__should_serde.snap b/rustainers/src/runner/snapshots/rustainers__runner__nerdctl__tests__should_serde.snap new file mode 100644 index 0000000..475930c --- /dev/null +++ b/rustainers/src/runner/snapshots/rustainers__runner__nerdctl__tests__should_serde.snap @@ -0,0 +1,15 @@ +--- +source: rustainers/src/runner/nerdctl.rs +expression: version +--- +NerdctlVersion { + client: NerdctlClientVersion { + version: Version { + major: 1, + minor: 5, + patch: Some( + 0, + ), + }, + }, +} diff --git a/rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde.snap b/rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde.snap new file mode 100644 index 0000000..3cbbf75 --- /dev/null +++ b/rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde.snap @@ -0,0 +1,14 @@ +--- +source: rustainers/src/runner/podman.rs +expression: result +--- +{ + "Client": { + "APIVersion": "4.4.4", + "Version": "4.4.4" + }, + "Server": { + "APIVersion": "4.4.2", + "Version": "4.4.2" + } +} diff --git a/rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde_compose.snap b/rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde_compose.snap new file mode 100644 index 0000000..10abb07 --- /dev/null +++ b/rustainers/src/runner/snapshots/rustainers__runner__podman__tests__should_serde_compose.snap @@ -0,0 +1,13 @@ +--- +source: rustainers/src/runner/podman.rs +expression: version +--- +PodmanComposeVersion { + version: Version { + major: 1, + minor: 0, + patch: Some( + 6, + ), + }, +} diff --git a/rustainers/src/version.rs b/rustainers/src/version.rs new file mode 100644 index 0000000..830120e --- /dev/null +++ b/rustainers/src/version.rs @@ -0,0 +1,221 @@ +use std::fmt::{self, Display}; +use 
std::str::FromStr; + +use crate::VersionError; + +/// A version with the `..-` form +/// +/// This is a relaxed [semver version](https://semver.org/) because +/// +/// * the patch could be omit +/// * there are no constraints on the rest part, we remove it +/// * allow the 'v' prefix +/// +/// As we do not need additional semver version (build, pre-release), +/// we could implement [`Copy`] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct Version { + major: u64, + minor: u64, + patch: Option, +} + +impl Version { + pub const fn new(major: u64, minor: u64) -> Self { + Self { + major, + minor, + patch: None, + } + } +} + +fn extract_simple_version(s: &str) -> Result { + let Some((major, rest)) = s.split_once('.') else { + return Err(VersionError::RequireMajorMinor); + }; + let major = major.parse().map_err(VersionError::InvalidMajorVersion)?; + + let (minor, patch) = if let Some((minor, patch)) = rest.split_once('.') { + let minor = minor.parse().map_err(VersionError::InvalidMinorVersion)?; + let patch = patch.parse().map_err(VersionError::InvalidPatchVersion)?; + (minor, Some(patch)) + } else { + let minor = rest.parse().map_err(VersionError::InvalidMinorVersion)?; + (minor, None) + }; + + let result = Version { + major, + minor, + patch, + }; + Ok(result) +} + +impl FromStr for Version { + type Err = VersionError; + + fn from_str(s: &str) -> Result { + let s = s.trim_start_matches('v'); + if s.is_empty() { + return Err(VersionError::Empty); + } + + if let Some(idx) = s.find(['-', '+']) { + let (version, _) = s.split_at(idx); + extract_simple_version(version) + } else { + extract_simple_version(s) + } + } +} + +impl Display for Version { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let Self { + major, + minor, + patch, + } = self; + write!(f, "{major}.{minor}")?; + if let Some(patch) = patch { + write!(f, ".{patch}")?; + } + Ok(()) + } +} + +mod serde_version { + use std::fmt; + + use serde::de::Visitor; + use 
serde::{Deserialize, Deserializer, Serialize, Serializer}; + + use super::Version; + + impl Serialize for Version { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let version = self.to_string(); + serializer.serialize_str(&version) + } + } + + struct VersionVisitor; + + impl<'de> Visitor<'de> for VersionVisitor { + type Value = Version; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an version with the '..+' pattern") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + v.parse().map_err(E::custom) + } + } + + impl<'de> Deserialize<'de> for Version { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_str(VersionVisitor) + } + } +} + +#[cfg(test)] +#[allow(clippy::ignored_unit_patterns)] +mod tests { + use assert2::{check, let_assert}; + use rstest::rstest; + + use super::*; + + fn v(major: u64, minor: u64, patch: Option) -> Version { + Version { + major, + minor, + patch, + } + } + + #[rstest] + #[case("v1.2.3+plop", v(1, 2, Some(3)))] + #[case("1.2.3+plop", v(1, 2, Some(3)))] + #[case("1.2.3-plop", v(1, 2, Some(3)))] + #[case("1.2.3", v(1, 2, Some(3)))] + #[case("1.2+plop", v(1, 2, None))] + #[case("1.2", v(1, 2, None))] + #[case( + "11011.246546.465465-asd+asdasd~asdasd", + v(11_011, 246_546, Some(465_465)) + )] + fn should_parse(#[case] input: &str, #[case] expected: Version) { + // Check parsing + let result = input.parse::(); + let_assert!(Ok(version) = result); + + // Check expected + check!(version == expected); + } + + #[rstest] + #[case("")] + #[case("w1.2.3")] + #[case("1.2.3.4")] + #[case("1.x")] + #[case("1.")] + #[case("1.a")] + #[case("1.1.x")] + #[case("1.1.0 alpha")] + #[case("1.-1.0")] + fn should_not_parse(#[case] input: &str) { + // Check parsing + let result = input.parse::(); + let_assert!(Err(_) = result); + } + + #[rstest] + #[case(v(1, 2, Some(3)))] + #[case(v(1, 2, Some(3)))] + 
#[case(v(1, 2, Some(3)))] + #[case(v(1, 2, None))] + #[case(v(1, 2, None))] + #[case(v(1_1011, 246_546, Some(465_465)))] + fn should_serde(#[case] value: Version) { + let result = serde_json::to_string(&value); + let_assert!(Ok(json) = result); + + let result = serde_json::from_str::(&json); + let_assert!(Ok(version) = result); + check!(version == value); + } + + #[rstest] + #[case::major("10.2.1", "1.2.2")] + #[case::minor("1.20.1", "1.2.2")] + #[case::patch("1.2.4", "1.2.3")] + #[case::with_patch("1.2.0", "1.2")] + fn should_compare(#[case] a: &str, #[case] b: &str) { + let a = a.parse::().unwrap(); + let b = b.parse::().unwrap(); + + // equals + check!(a == a); + check!(b == b); + + // greater + check!(a > b); + + // lower + check!(b < a); + } +} diff --git a/rustainers/tests/assets/docker-compose_ps.jsonl b/rustainers/tests/assets/docker-compose_ps.jsonl new file mode 100644 index 0000000..14ab272 --- /dev/null +++ b/rustainers/tests/assets/docker-compose_ps.jsonl @@ -0,0 +1,3 @@ +{"Command":"\"/kafdrop.sh\"","CreatedAt":"2023-10-26 13:08:16 +0200 
CEST","ExitCode":0,"Health":"","ID":"4a22558ba951230557309aeae020574382632b6147bb2c58105ce70f16d948fa","Image":"obsidiandynamics/kafdrop","Labels":"com.docker.compose.project.working_dir=/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft,com.docker.compose.version=2.22.0,org.opencontainers.image.ref.name=ubuntu,org.opencontainers.image.version=22.04,com.docker.compose.project=kafka-kraft,com.docker.compose.container-number=1,com.docker.compose.depends_on=my-kafka:service_started:false,my-schema-registry:service_started:false,com.docker.compose.image=sha256:d982cd1654ae97bb9dd84841c50eb5b5fcde528d741a3db5b1bdd5157e3858d8,com.docker.compose.oneoff=False,com.docker.compose.project.config_files=/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft/docker-compose.yaml,com.docker.compose.service=my-kafdrop,com.docker.compose.config-hash=82592e23bb3af198af5230cd6df401dd17c80a60295bb47b645be02e6364d287","LocalVolumes":"0","Mounts":"","Name":"kafka-kraft-my-kafdrop-1","Names":"kafka-kraft-my-kafdrop-1","Networks":"kafka-kraft_default","Ports":"0.0.0.0:9000-\u003e9000/tcp","Publishers":[{"URL":"0.0.0.0","TargetPort":9000,"PublishedPort":9000,"Protocol":"tcp"}],"RunningFor":"39 seconds ago","Service":"my-kafdrop","Size":"0B","State":"running","Status":"Up 39 seconds"} +{"Command":"\"bash -c 'if [ ! 
-f /tmp/update_run.sh ]; then echo \\\"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\\\" \u0026\u0026 exit 1 ; else /tmp/update_run.sh \u0026\u0026 /etc/confluent/docker/run ; fi'\"","CreatedAt":"2023-10-26 13:08:16 +0200 CEST","ExitCode":0,"Health":"","ID":"36eb91e2ba2f36303c95d5716d0726d349cbb554624724caf43a30349592a1d4","Image":"confluentinc/cp-kafka:7.2.4","Labels":"name=cp-kafka,url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8-minimal/images/8.7-1049.1675784874,io.buildah.version=1.27.3,io.confluent.docker=true,summary=Confluent platform Kafka.,vcs-type=git,com.docker.compose.project.config_files=/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft/docker-compose.yaml,com.docker.compose.project.working_dir=/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft,io.confluent.docker.git.id=e429d7d0,com.docker.compose.config-hash=e87b688815ebe029570ff01b0a2590d4ba056ebfc3f1e69993d57b35d1d5c8ab,description=Common base image for Confluent's Docker images.,io.openshift.expose-services=,io.openshift.tags=minimal 
rhel8,vendor=Confluent,com.docker.compose.version=2.22.0,maintainer=partner-support@confluent.io,release=7.2.4,com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI,com.docker.compose.service=my-kafka,com.redhat.component=ubi8-minimal-container,desktop.docker.io/binds/0/SourceKind=hostFile,vcs-ref=146fdafc2595e26f5f9c1b9a2b3f36bbca8237e4,com.docker.compose.project=kafka-kraft,build-date=2023-02-07T16:17:52,com.docker.compose.image=sha256:4c2157694fcfe64a69033d68c9e68b4581f3eda10774c1dcffa72e6655455527,desktop.docker.io/binds/0/Source=/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft/kafka_update_run.sh,desktop.docker.io/binds/0/Target=/tmp/update_run.sh,distribution-scope=public,architecture=aarch64,com.docker.compose.depends_on=,com.docker.compose.oneoff=False,io.confluent.docker.build.number=2,io.confluent.docker.git.repo=confluentinc/kafka-images,io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly.,io.k8s.display-name=Red Hat Universal Base Image 8 Minimal,version=e429d7d0,com.docker.compose.container-number=1","LocalVolumes":"2","Mounts":"218832bb6986fc24342beefd04a97ed784f505c623f138c859c19751062ba810,/host_mnt/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft/kafka_update_run.sh,dd1f32e7469c4e680933f7ec671b7639173ed5e658ffc1ec58c6da8413b7964c","Name":"kafka-kraft-my-kafka-1","Names":"kafka-kraft-my-kafka-1","Networks":"kafka-kraft_default","Ports":"0.0.0.0:9092-\u003e9092/tcp","Publishers":[{"URL":"0.0.0.0","TargetPort":9092,"PublishedPort":9092,"Protocol":"tcp"}],"RunningFor":"39 seconds ago","Service":"my-kafka","Size":"0B","State":"running","Status":"Up 39 seconds"} +{"Command":"\"/etc/confluent/docker/run\"","CreatedAt":"2023-10-26 13:08:16 +0200 CEST","ExitCode":0,"Health":"","ID":"faa56796d83b772524305dbfb42358d9089fa0352fcbba60ac13ac73b3916ffe","Image":"confluentinc/cp-schema-registry:7.2.4","Labels":"io.confluent.docker=true,io.confluent.docker.git.id=c77431e0,io.k8s.display-name=Red Hat Universal Base Image 8 Minimal,summary=Confluent Schema Registry provides a RESTful interface for developers to define standard schemas for their events, share them across the organization and safely evolve them in a way that is backward compatible and future proof.,vcs-ref=146fdafc2595e26f5f9c1b9a2b3f36bbca8237e4,version=c77431e0,com.docker.compose.depends_on=my-kafka:service_started:false,com.docker.compose.service=my-schema-registry,io.confluent.docker.build.number=2,url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8-minimal/images/8.7-1049.1675784874,com.docker.compose.container-number=1,io.buildah.version=1.27.3,vcs-type=git,com.docker.compose.config-hash=4493555e618ad296e5118562acacc41878c25bc53d1f3e8099861136def5b0b5,com.docker.compose.oneoff=False,description=Confluent Schema Registry provides a RESTful interface for developers to define standard schemas 
for their events, share them across the organization and safely evolve them in a way that is backward compatible and future proof.,io.openshift.tags=minimal rhel8,com.docker.compose.project=kafka-kraft,distribution-scope=public,io.confluent.docker.git.repo=confluentinc/schema-registry-images,maintainer=partner-support@confluent.io,release=7.2.4,com.docker.compose.project.config_files=/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft/docker-compose.yaml,com.docker.compose.version=2.22.0,io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.,io.openshift.expose-services=,vendor=Confluent,build-date=2023-02-07T16:17:52,com.docker.compose.image=sha256:38d8762f72a8dc2092ae9c023cfa640e27a845e5e1bcb308d73b23e5321c0413,com.docker.compose.project.working_dir=/Users/igor/Documents/Workspaces/github/arcane-spire/docker/kafka-kraft,com.redhat.component=ubi8-minimal-container,com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI,name=cp-schema-registry,architecture=aarch64","LocalVolumes":"1","Mounts":"de7a4b9bdb913934d255c11c584fc1f530e1c465e9c008215543cea5c36f1c23","Name":"my-schema-registry","Names":"my-schema-registry","Networks":"kafka-kraft_default","Ports":"0.0.0.0:8081-\u003e8081/tcp","Publishers":[{"URL":"0.0.0.0","TargetPort":8081,"PublishedPort":8081,"Protocol":"tcp"}],"RunningFor":"39 seconds ago","Service":"my-schema-registry","Size":"0B","State":"running","Status":"Up 39 seconds"} diff --git a/rustainers/tests/assets/docker-compose_version.json b/rustainers/tests/assets/docker-compose_version.json new file mode 100644 index 0000000..7d2167c --- /dev/null +++ b/rustainers/tests/assets/docker-compose_version.json @@ -0,0 +1,3 @@ +{ + "version": 
"2.20.2+ds1-0ubuntu1~22.04.1" +} \ No newline at end of file diff --git a/rustainers/tests/assets/docker-ps.jsonl b/rustainers/tests/assets/docker-ps.jsonl new file mode 100644 index 0000000..6d0286c --- /dev/null +++ b/rustainers/tests/assets/docker-ps.jsonl @@ -0,0 +1,3 @@ +{"Command":"\"/bin/sh -c 'echo \\\"$CONSOLE_CONFIG_FILE\\\" \u003e /tmp/config.yml; /app/console'\"","CreatedAt":"2023-10-31 08:09:58 +0100 CET","ID":"efb522e2ce2be08d3ff96685c07dd6521c27ddb2aac2c33761f292a74dd872d0","Image":"docker.redpanda.com/redpandadata/console:v2.3.1","Labels":"com.docker.compose.config-hash=3296331ac9fc28f502ae8d441a01bdda377ddf205f52ffd127ae380015c64238,com.docker.compose.container-number=1,com.docker.compose.depends_on=redpanda-0:service_started:false,com.docker.compose.image=sha256:f800e7e38f136f21ea355d182bc410ce41887bc2198c73b8305f7bd693db45f9,com.docker.compose.project.config_files=/private/var/folders/2x/tdn33dyd1tz5nwv_g_sht294bq9pmc/T/tc_redpanda-single_01HE292MFVNRVNXGTS4ZD3J6H9/docker-compose.yaml,com.docker.compose.service=console,com.docker.compose.version=2.23.0,com.docker.compose.oneoff=False,com.docker.compose.project=tc_redpanda-single_01he292mfvnrvnxgts4zd3j6h9,com.docker.compose.project.working_dir=/private/var/folders/2x/tdn33dyd1tz5nwv_g_sht294bq9pmc/T/tc_redpanda-single_01HE292MFVNRVNXGTS4ZD3J6H9","LocalVolumes":"0","Mounts":"","Names":"redpanda-console","Networks":"tc_redpanda-single_01he292mfvnrvnxgts4zd3j6h9_redpanda_network","Ports":"","RunningFor":"3 hours ago","Size":"0B","State":"exited","Status":"Exited (0) 54 minutes ago"} +{"Command":"\"/entrypoint.sh redpanda start '--kafka-addr internal://0.0.0.0:9092,external://0.0.0.0:19092' '--advertise-kafka-addr internal://redpanda-0:9092,external://localhost:19092' '--pandaproxy-addr internal://0.0.0.0:8082,external://0.0.0.0:18082' '--advertise-pandaproxy-addr internal://redpanda-0:8082,external://localhost:18082' '--schema-registry-addr internal://0.0.0.0:8081,external://0.0.0.0:18081' 
'--rpc-addr redpanda-0:33145' '--advertise-rpc-addr redpanda-0:33145' '--smp 1' '--memory 1G' '--mode dev-container' --default-log-level=debug\"","CreatedAt":"2023-10-31 08:09:58 +0100 CET","ID":"58e4e6921c09b9ad17e62ae6fa92455bbe69eca31782b7b848b3972b86af1eb7","Image":"docker.redpanda.com/redpandadata/redpanda:v23.2.14","Labels":"com.docker.compose.version=2.23.0,com.docker.compose.config-hash=031c12f72e210164448c0953355f027f56940e1b5abed6007bf3a76fe2df1a25,com.docker.compose.container-number=1,com.docker.compose.depends_on=,com.docker.compose.oneoff=False,com.docker.compose.project=tc_redpanda-single_01he292mfvnrvnxgts4zd3j6h9,com.docker.compose.project.working_dir=/private/var/folders/2x/tdn33dyd1tz5nwv_g_sht294bq9pmc/T/tc_redpanda-single_01HE292MFVNRVNXGTS4ZD3J6H9,com.docker.compose.service=redpanda-0,org.opencontainers.image.authors=Redpanda Data \u003chi@redpanda.com\u003e,com.docker.compose.image=sha256:88b9c19ba78ef1322be5e86be8d3bb1b7231b62d1e0cb3c5e02b60046d16bdd6,com.docker.compose.project.config_files=/private/var/folders/2x/tdn33dyd1tz5nwv_g_sht294bq9pmc/T/tc_redpanda-single_01HE292MFVNRVNXGTS4ZD3J6H9/docker-compose.yaml","LocalVolumes":"1","Mounts":"tc_redpanda-single_01he292mfvnrvnxgts4zd3j6h9_redpanda-0","Names":"redpanda-0","Networks":"tc_redpanda-single_01he292mfvnrvnxgts4zd3j6h9_redpanda_network","Ports":"","RunningFor":"3 hours ago","Size":"0B","State":"exited","Status":"Exited (0) 54 minutes ago"} +{"Command":"\"/hello\"","CreatedAt":"2023-10-31 07:46:29 +0100 CET","ID":"4133b65cffa94eb08caefba96e8182e119efccd9655d3230365bc73b0a6e109e","Image":"hello-world","Labels":"","LocalVolumes":"0","Mounts":"","Names":"trusting_antonelli","Networks":"bridge","Ports":"","RunningFor":"3 hours ago","Size":"0B","State":"exited","Status":"Exited (0) 2 hours ago"} diff --git a/rustainers/tests/assets/docker_version.json b/rustainers/tests/assets/docker_version.json new file mode 100644 index 0000000..554682c --- /dev/null +++ 
b/rustainers/tests/assets/docker_version.json @@ -0,0 +1,19 @@ +{ + "Client": { + "Platform": { + "Name": "" + }, + "CloudIntegration": "v1.0.23", + "Version": "20.10.14", + "ApiVersion": "1.41", + "DefaultAPIVersion": "1.41", + "GitCommit": "a224086", + "GoVersion": "go1.16.15", + "Os": "darwin", + "Arch": "arm64", + "BuildTime": "Thu Mar 24 01:49:20 2022", + "Context": "default", + "Experimental": true + }, + "Server": null +} diff --git a/rustainers/tests/assets/inspect-state-exited.json b/rustainers/tests/assets/inspect-state-exited.json new file mode 100644 index 0000000..63e23d9 --- /dev/null +++ b/rustainers/tests/assets/inspect-state-exited.json @@ -0,0 +1,13 @@ +{ + "Status": "exited", + "Running": false, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 0, + "ExitCode": 0, + "Error": "", + "StartedAt": "2023-10-31T08:32:14.261437625Z", + "FinishedAt": "2023-10-31T08:32:14.26109975Z" +} \ No newline at end of file diff --git a/rustainers/tests/assets/inspect-state.json b/rustainers/tests/assets/inspect-state.json new file mode 100644 index 0000000..8a276f9 --- /dev/null +++ b/rustainers/tests/assets/inspect-state.json @@ -0,0 +1,23 @@ +{ + "OciVersion": "1.1.0-rc.3", + "Status": "running", + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 395624, + "ConmonPid": 395622, + "ExitCode": 0, + "Error": "", + "StartedAt": "2023-08-25T12:47:17.861262357+02:00", + "FinishedAt": "0001-01-01T00:00:00Z", + "Health": { + "Status": "starting", + "FailingStreak": 0, + "Log": null + }, + "CgroupPath": "/user.slice/user-393534092.slice/user@393534092.service/user.slice/libpod-fcded727eee4d15ecccfcecdb656b131a75f2f2c37cb06a7b71a3b193ebb3aad.scope", + "CheckpointedAt": "0001-01-01T00:00:00Z", + "RestoredAt": "0001-01-01T00:00:00Z" +} \ No newline at end of file diff --git a/rustainers/tests/assets/nerdctl-compose_version.json 
b/rustainers/tests/assets/nerdctl-compose_version.json new file mode 100644 index 0000000..e6ec687 --- /dev/null +++ b/rustainers/tests/assets/nerdctl-compose_version.json @@ -0,0 +1,3 @@ +{ + "version": "v1.5.0" +} \ No newline at end of file diff --git a/rustainers/tests/assets/nerdctl_version.json b/rustainers/tests/assets/nerdctl_version.json new file mode 100644 index 0000000..095f474 --- /dev/null +++ b/rustainers/tests/assets/nerdctl_version.json @@ -0,0 +1,36 @@ +{ + "Client": { + "Version": "v1.5.0", + "GitCommit": "b33a58f288bc42351404a016e694190b897cd252", + "GoVersion": "go1.20.6", + "Os": "linux", + "Arch": "amd64", + "Components": [ + { + "Name": "buildctl", + "Version": "v0.11.6", + "Details": { + "GitCommit": "2951a28cd7085eb18979b1f710678623d94ed578" + } + } + ] + }, + "Server": { + "Components": [ + { + "Name": "containerd", + "Version": "v1.7.2", + "Details": { + "GitCommit": "0cae528dd6cb557f7201036e9f43420650207b58" + } + }, + { + "Name": "runc", + "Version": "1.1.7", + "Details": { + "GitCommit": "860f061b76bb4fc671f0f9e900f7d80ff93d4eb7" + } + } + ] + } +} \ No newline at end of file diff --git a/rustainers/tests/assets/podman-compose_version.txt b/rustainers/tests/assets/podman-compose_version.txt new file mode 100644 index 0000000..0b2df06 --- /dev/null +++ b/rustainers/tests/assets/podman-compose_version.txt @@ -0,0 +1,4 @@ +podman-compose version: 1.0.6 +['podman', '--version', ''] +using podman version: 4.7.1 +{"version": "1.0.6"} diff --git a/rustainers/tests/assets/podman_lookup.json b/rustainers/tests/assets/podman_lookup.json new file mode 100644 index 0000000..e3bad8f --- /dev/null +++ b/rustainers/tests/assets/podman_lookup.json @@ -0,0 +1,214 @@ +[ + { + "AutoRemove": false, + "Command": [ + "bash", + "-c", + "if [ ! 
-f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" \u0026\u0026 exit 1 ; else /tmp/update_run.sh \u0026\u0026 /etc/confluent/docker/run ; fi" + ], + "CreatedAt": "20 minutes ago", + "CIDFile": "", + "Exited": false, + "ExitedAt": -62135596800, + "ExitCode": 0, + "Id": "2307493ebc7c2cf221e339ecc23c0d7c53065f0e01bc374cf1af13876062aee3", + "Image": "docker.io/confluentinc/cp-kafka:7.2.4", + "ImageID": "4c2157694fcfe64a69033d68c9e68b4581f3eda10774c1dcffa72e6655455527", + "IsInfra": false, + "Labels": { + "PODMAN_SYSTEMD_UNIT": "podman-compose@kafka_schema_registry.service", + "architecture": "aarch64", + "build-date": "2023-02-07T16:17:52", + "com.docker.compose.container-number": "1", + "com.docker.compose.project": "kafka_schema_registry", + "com.docker.compose.project.config_files": "docker-compose.yaml", + "com.docker.compose.project.working_dir": "/Users/igor.laborie/Documents/Workspaces/Wefox/shared/wefox-ai-wai-rust/wai-testcontainers/src/compose/kafka_schema_registry", + "com.docker.compose.service": "kafka", + "com.redhat.component": "ubi8-minimal-container", + "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI", + "description": "Common base image for Confluent's Docker images.", + "distribution-scope": "public", + "io.buildah.version": "1.27.3", + "io.confluent.docker": "true", + "io.confluent.docker.build.number": "2", + "io.confluent.docker.git.id": "e429d7d0", + "io.confluent.docker.git.repo": "confluentinc/kafka-images", + "io.k8s.description": "The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly.", + "io.k8s.display-name": "Red Hat Universal Base Image 8 Minimal", + "io.openshift.expose-services": "", + "io.openshift.tags": "minimal rhel8", + "io.podman.compose.config-hash": "0d314a7d44ef729913c68b99f213bc0e13bc08543b01438c061bae1f1c1b0fe0", + "io.podman.compose.project": "kafka_schema_registry", + "io.podman.compose.version": "1.0.6", + "maintainer": "partner-support@confluent.io", + "name": "cp-kafka", + "release": "7.2.4", + "summary": "Confluent platform Kafka.", + "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8-minimal/images/8.7-1049.1675784874", + "vcs-ref": "146fdafc2595e26f5f9c1b9a2b3f36bbca8237e4", + "vcs-type": "git", + "vendor": "Confluent", + "version": "e429d7d0" + }, + "Mounts": [ + "/tmp/update_run.sh", + "/var/lib/kafka/data", + "/etc/kafka/secrets" + ], + "Names": [ + "tc-kafka" + ], + "Namespaces": {}, + "Networks": [ + "kafka_schema_registry_default" + ], + "Pid": 2664, + "Pod": "", + "PodName": "", + "Ports": [ + { + "host_ip": "", + "container_port": 9092, + "host_port": 9092, + "range": 1, + "protocol": "tcp" + } + ], + "Restarts": 0, + "Size": null, + "StartedAt": 1698253886, + "State": "running", + "Status": "Up 19 minutes", + "Created": 1698253843 + }, + { + "AutoRemove": false, + "Command": [ + "/etc/confluent/docker/run" + ], + "CreatedAt": "19 minutes ago", + "CIDFile": "", + "Exited": false, + "ExitedAt": -62135596800, + "ExitCode": 0, + "Id": "e383b5e5d80e2f7ab640940e14221a66d07914cad2d1189076efc43ea03609b7", + "Image": "docker.io/confluentinc/cp-schema-registry:7.2.4", + "ImageID": "38d8762f72a8dc2092ae9c023cfa640e27a845e5e1bcb308d73b23e5321c0413", + "IsInfra": false, + "Labels": { + "PODMAN_SYSTEMD_UNIT": "podman-compose@kafka_schema_registry.service", + "architecture": "aarch64", + "build-date": "2023-02-07T16:17:52", + "com.docker.compose.container-number": "1", + "com.docker.compose.project": "kafka_schema_registry", + 
"com.docker.compose.project.config_files": "docker-compose.yaml", + "com.docker.compose.project.working_dir": "/Users/igor.laborie/Documents/Workspaces/Wefox/shared/wefox-ai-wai-rust/wai-testcontainers/src/compose/kafka_schema_registry", + "com.docker.compose.service": "schema-registry", + "com.redhat.component": "ubi8-minimal-container", + "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI", + "description": "Confluent Schema Registry provides a RESTful interface for developers to define standard schemas for their events, share them across the organization and safely evolve them in a way that is backward compatible and future proof.", + "distribution-scope": "public", + "io.buildah.version": "1.27.3", + "io.confluent.docker": "true", + "io.confluent.docker.build.number": "2", + "io.confluent.docker.git.id": "c77431e0", + "io.confluent.docker.git.repo": "confluentinc/schema-registry-images", + "io.k8s.description": "The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly.", + "io.k8s.display-name": "Red Hat Universal Base Image 8 Minimal", + "io.openshift.expose-services": "", + "io.openshift.tags": "minimal rhel8", + "io.podman.compose.config-hash": "0d314a7d44ef729913c68b99f213bc0e13bc08543b01438c061bae1f1c1b0fe0", + "io.podman.compose.project": "kafka_schema_registry", + "io.podman.compose.version": "1.0.6", + "maintainer": "partner-support@confluent.io", + "name": "cp-schema-registry", + "release": "7.2.4", + "summary": "Confluent Schema Registry provides a RESTful interface for developers to define standard schemas for their events, share them across the organization and safely evolve them in a way that is backward compatible and future proof.", + "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8-minimal/images/8.7-1049.1675784874", + "vcs-ref": "146fdafc2595e26f5f9c1b9a2b3f36bbca8237e4", + "vcs-type": "git", + "vendor": "Confluent", + "version": "c77431e0" + }, + "Mounts": [ + "/etc/schema-registry/secrets" + ], + "Names": [ + "tc-schema-registry" + ], + "Namespaces": {}, + "Networks": [ + "kafka_schema_registry_default" + ], + "Pid": 2759, + "Pod": "", + "PodName": "", + "Ports": [ + { + "host_ip": "", + "container_port": 8081, + "host_port": 8081, + "range": 1, + "protocol": "tcp" + } + ], + "Restarts": 0, + "Size": null, + "StartedAt": 1698253887, + "State": "running", + "Status": "Up 19 minutes", + "Created": 1698253872 + }, + { + "AutoRemove": false, + "Command": null, + "CreatedAt": "19 minutes ago", + "CIDFile": "", + "Exited": false, + "ExitedAt": -62135596800, + "ExitCode": 0, + "Id": "93b68542fcc252e3734c4d28a028c41240915b062b4ba1fc804e960486540c79", + "Image": "docker.io/obsidiandynamics/kafdrop:latest", + "ImageID": "d982cd1654ae97bb9dd84841c50eb5b5fcde528d741a3db5b1bdd5157e3858d8", + "IsInfra": false, + "Labels": { + "PODMAN_SYSTEMD_UNIT": "podman-compose@kafka_schema_registry.service", + "com.docker.compose.container-number": 
"1", + "com.docker.compose.project": "kafka_schema_registry", + "com.docker.compose.project.config_files": "docker-compose.yaml", + "com.docker.compose.project.working_dir": "/Users/igor.laborie/Documents/Workspaces/Wefox/shared/wefox-ai-wai-rust/wai-testcontainers/src/compose/kafka_schema_registry", + "com.docker.compose.service": "kafdrop", + "io.podman.compose.config-hash": "0d314a7d44ef729913c68b99f213bc0e13bc08543b01438c061bae1f1c1b0fe0", + "io.podman.compose.project": "kafka_schema_registry", + "io.podman.compose.version": "1.0.6", + "org.opencontainers.image.ref.name": "ubuntu", + "org.opencontainers.image.version": "22.04" + }, + "Mounts": [], + "Names": [ + "tc-kafdrop" + ], + "Namespaces": {}, + "Networks": [ + "kafka_schema_registry_default" + ], + "Pid": 2835, + "Pod": "", + "PodName": "", + "Ports": [ + { + "host_ip": "", + "container_port": 9000, + "host_port": 9000, + "range": 1, + "protocol": "tcp" + } + ], + "Restarts": 0, + "Size": null, + "StartedAt": 1698253888, + "State": "running", + "Status": "Up 19 minutes", + "Created": 1698253885 + } +] \ No newline at end of file diff --git a/rustainers/tests/assets/podman_ps.json b/rustainers/tests/assets/podman_ps.json new file mode 100644 index 0000000..433af80 --- /dev/null +++ b/rustainers/tests/assets/podman_ps.json @@ -0,0 +1,128 @@ +[ + { + "AutoRemove": false, + "Command": [ + "/usr/local/bin/podman_hello_world" + ], + "CreatedAt": "16 hours ago", + "CIDFile": "", + "Exited": true, + "ExitedAt": 1698687451, + "ExitCode": 0, + "Id": "e160e315373d38ae1b9560b709de656d94cddad2a774b295030cd016b75ddbd7", + "Image": "quay.io/podman/hello:latest", + "ImageID": "1b10fa0fd8d184d9de22a553688af8f9f8adbabb11f5dfc15f1a0fdd21873db2", + "IsInfra": false, + "Labels": { + "artist": "Máirín Ní Ḋuḃṫaiġ, Twitter:@mairin", + "io.buildah.version": "1.31.2", + "maintainer": "Podman Maintainers" + }, + "Mounts": [], + "Names": [ + "optimistic_bardeen" + ], + "Namespaces": {}, + "Networks": [ + "podman" + ], + "Pid": 0, + 
"Pod": "", + "PodName": "", + "Ports": null, + "Restarts": 0, + "Size": null, + "StartedAt": 1698687451, + "State": "exited", + "Status": "Exited (0) 16 hours ago", + "Created": 1698687451 + }, + { + "AutoRemove": false, + "Command": [ + "redis-server" + ], + "CreatedAt": "16 hours ago", + "CIDFile": "", + "Exited": true, + "ExitedAt": 1698687458, + "ExitCode": 0, + "Id": "da0b92141113225a661be45f691e27320077ac4fe749681ca2406846d705d346", + "Image": "docker.io/library/redis:latest", + "ImageID": "637ceb59b7a01df4466442fc5bb30bcf0ce3428289b00bbc02f62ddaa3e6bd8d", + "IsInfra": false, + "Labels": null, + "Mounts": [ + "/data" + ], + "Names": [ + "compassionate_kilby" + ], + "Namespaces": {}, + "Networks": [ + "podman" + ], + "Pid": 0, + "Pod": "", + "PodName": "", + "Ports": [ + { + "host_ip": "", + "container_port": 6379, + "host_port": 34165, + "range": 1, + "protocol": "tcp" + } + ], + "Restarts": 0, + "Size": null, + "StartedAt": 1698687453, + "State": "exited", + "Status": "Exited (0) 16 hours ago (healthy)", + "Created": 1698687452 + }, + { + "AutoRemove": false, + "Command": [ + "redis-server" + ], + "CreatedAt": "16 hours ago", + "CIDFile": "", + "Exited": true, + "ExitedAt": 1698687458, + "ExitCode": 0, + "Id": "409e83a1c1f9952e3ea51de48dd5829e711b645e240b2a843f77c0a27342398a", + "Image": "docker.io/library/redis:latest", + "ImageID": "637ceb59b7a01df4466442fc5bb30bcf0ce3428289b00bbc02f62ddaa3e6bd8d", + "IsInfra": false, + "Labels": null, + "Mounts": [ + "/data" + ], + "Names": [ + "nostalgic_blackburn" + ], + "Namespaces": {}, + "Networks": [ + "podman" + ], + "Pid": 0, + "Pod": "", + "PodName": "", + "Ports": [ + { + "host_ip": "", + "container_port": 6379, + "host_port": 44931, + "range": 1, + "protocol": "tcp" + } + ], + "Restarts": 0, + "Size": null, + "StartedAt": 1698687456, + "State": "exited", + "Status": "Exited (0) 16 hours ago (healthy)", + "Created": 1698687455 + } +] \ No newline at end of file diff --git 
a/rustainers/tests/assets/podman_version.json b/rustainers/tests/assets/podman_version.json new file mode 100644 index 0000000..441beed --- /dev/null +++ b/rustainers/tests/assets/podman_version.json @@ -0,0 +1,22 @@ +{ + "Client": { + "APIVersion": "4.4.4", + "Version": "4.4.4", + "GoVersion": "go1.20.2", + "GitCommit": "c8223435f49a860c8ac4281a85db39232745a7cb", + "BuiltTime": "Mon Mar 27 19:36:54 2023", + "Built": 1679938614, + "OsArch": "darwin/arm64", + "Os": "darwin" + }, + "Server": { + "APIVersion": "4.4.2", + "Version": "4.4.2", + "GoVersion": "go1.19.6", + "GitCommit": "", + "BuiltTime": "Wed Mar 1 12:22:39 2023", + "Built": 1677669759, + "OsArch": "linux/arm64", + "Os": "linux" + } +} diff --git a/rustainers/tests/common/mod.rs b/rustainers/tests/common/mod.rs new file mode 100644 index 0000000..5d4bc35 --- /dev/null +++ b/rustainers/tests/common/mod.rs @@ -0,0 +1,34 @@ +use rstest::fixture; +use tracing::{debug, Level}; +use tracing_subscriber::fmt::format::FmtSpan; +use tracing_subscriber::fmt::time; + +use rustainers::images::Redis; +use rustainers::runner::Runner; + +pub fn init_tracing(level: Level) { + tracing_subscriber::fmt() + .pretty() + .with_line_number(true) + .with_thread_names(true) + .with_span_events(FmtSpan::NONE) + .with_timer(time::uptime()) + .with_max_level(level) + .init(); +} + +#[fixture] +#[once] +pub fn runner() -> Runner { + init_tracing(Level::INFO); + + let runner = Runner::auto().expect("Should find a valid runner"); + debug!("Using runner {runner:?}"); + runner +} + +#[fixture] +#[once] +pub fn redis() -> Redis { + Redis::default() +} diff --git a/rustainers/tests/custom.rs b/rustainers/tests/custom.rs new file mode 100644 index 0000000..3fc7cfb --- /dev/null +++ b/rustainers/tests/custom.rs @@ -0,0 +1,36 @@ +use assert2::let_assert; +use rstest::rstest; + +use rustainers::runner::{RunOption, Runner}; +use rustainers::{ + ContainerStatus, ImageName, RunnableContainer, RunnableContainerBuilder, ToRunnableContainer, +}; + 
+mod common; +pub use self::common::*; + +#[derive(Debug, Clone, Copy)] +struct HelloWorld; + +impl ToRunnableContainer for HelloWorld { + fn to_runnable(&self, builder: RunnableContainerBuilder) -> RunnableContainer { + builder + .with_image(ImageName::new("hello-world")) + .with_wait_strategy(ContainerStatus::Exited) + .build() + } +} + +#[rstest] +#[tokio::test] +async fn should_run_hello_world(runner: &Runner) { + _ = tracing_subscriber::fmt::try_init(); + + let result = runner + .start_with_options(HelloWorld, RunOption::default()) + .await; + if let Err(e) = &result { + eprintln!("{e}"); + } + let_assert!(Ok(_) = result); +} diff --git a/rustainers/tests/images.rs b/rustainers/tests/images.rs new file mode 100644 index 0000000..62759c6 --- /dev/null +++ b/rustainers/tests/images.rs @@ -0,0 +1,101 @@ +use std::fmt::Debug; +use std::time::SystemTime; + +use assert2::check; +use rstest::rstest; +use tokio::task::JoinSet; +use tracing::debug; + +use rustainers::images::{Minio, Postgres, Redis}; +use rustainers::runner::{RunOption, Runner}; +use rustainers::ToRunnableContainer; + +mod common; +pub use self::common::*; + +#[rstest] +#[case::pg(Postgres::default())] +#[case::minio(Minio::default())] +#[case::redis(Redis::default())] +#[tokio::test] +async fn test_image( + runner: &Runner, + #[case] image: impl ToRunnableContainer + Debug, +) -> anyhow::Result<()> { + let options = RunOption::builder().with_remove(true).build(); + debug!("Image {image:?}"); + + let container = runner.start_with_options(image, options).await?; + debug!("Started {container}"); + + Ok(()) +} + +#[rstest] +#[tokio::test] +async fn test_double_run(runner: &Runner, redis: &Redis) -> anyhow::Result<()> { + let container = runner.start(redis.clone()).await?; + println!("Container {container}"); + + let container2 = runner.start(redis.clone()).await?; + println!("Container2 {container2}"); + + check!( + container.id() != container2.id(), + "Should create two containers" + ); + Ok(()) +} + 
+#[rstest] +#[tokio::test] +async fn test_reuse(runner: &Runner, redis: &Redis) -> anyhow::Result<()> { + let option = RunOption::builder().with_name("my-redis").build(); + + let container = runner + .start_with_options(redis.clone(), option.clone()) + .await?; + println!("Container {container}"); + + let container2 = runner.start_with_options(redis.clone(), option).await?; + println!("Container2 {container2}"); + + check!( + container.id() == container2.id(), + "Should reuse the same container" + ); + + Ok(()) +} + +#[rstest] +#[tokio::test] +#[ignore = "work with docker, but fail with podman"] // FIXME find a solution +async fn test_run_in_multiple_tasks(runner: &Runner, redis: &Redis) -> anyhow::Result<()> { + let start = SystemTime::now(); + let mut set = JoinSet::new(); + let size = 20; + + for id in 0..size { + let img = redis.clone(); + let r = runner.clone(); + set.spawn(async move { + let container = r.start(img).await.unwrap(); + (id, container) + }); + } + + // wait all + let mut finished = vec![false; size]; + while let Some(Ok((id, container))) = set.join_next().await { + println!("Container #{id} {container:#?}"); + finished[id] = true; + } + let duration = start.elapsed()?; + for (id, v) in finished.iter().enumerate() { + check!(*v == true, "Task #{id} not finished"); + } + println!("Took {}s", duration.as_secs_f32()); + + Ok(()) +}