From dfcea4a12bb3cabc25f09fa3e5c5ec883c0e2d32 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Wed, 19 Jun 2024 08:59:10 -0400
Subject: [PATCH 1/2] chunking: Add some doc comments

Just a drive-by.
---
 lib/src/chunking.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 69593e5e..eaa22ff8 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -30,7 +30,10 @@ pub(crate) const MAX_CHUNKS: u32 = 64;
 /// we will just drop down to one.
 const MIN_CHUNKED_LAYERS: u32 = 4;
 
+/// A convenient alias for a reference-counted, immutable string.
 type RcStr = Rc<str>;
+/// Maps from a checksum to its size and file names (multiple in the case of
+/// hard links).
 pub(crate) type ChunkMapping = BTreeMap<RcStr, (u64, Vec<RcStr>)>;
 
 // TODO type PackageSet = HashSet<RcStr>;
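
As a concrete illustration of what the new `ChunkMapping` doc comment
describes (a sketch; the checksum and file names are made up):

```rust
use std::collections::BTreeMap;
use std::rc::Rc;

type RcStr = Rc<str>;

fn main() {
    // One entry per content object: checksum -> (size, names).
    // Hard links mean a single object can be reachable under several names.
    let mut mapping: BTreeMap<RcStr, (u64, Vec<RcStr>)> = BTreeMap::new();
    mapping.insert(
        Rc::from("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"),
        (4096, vec![Rc::from("usr/bin/foo"), Rc::from("usr/bin/foo-compat")]),
    );
    assert_eq!(mapping.values().next().unwrap().1.len(), 2);
}
```
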
From 4374c05b25f2b16a228bc120a97c452c52f1a090 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Thu, 18 Jan 2024 14:18:41 -0500
Subject: [PATCH 2/2] container: Add support for re-exporting a fetched container

The status quo today is basically that with a "pure ostree" container
image, one can pull it, and one *can* re-export it with
`ostree container encapsulate`...but doing so loses *all chunking*,
i.e. you end up with a single giant layer again.

Further, we don't support exporting derived images at all.

Fix both of these with a new CLI and API, for example:

```
$ ostree container image reexport --repo=/path/to/repo \
    registry:quay.io/exampleos/someos:latest \
    containers-storage:localhost/exampleos
```

Now...before one gets too excited, this is still suboptimal in a bunch
of ways:

- Copying to `containers-storage` is super inefficient: we indirect
  through a local `oci` directory because of the lack of "push" support
  in containers-image-proxy, *and* we end up with a full physical copy
  of the files even when we *could* reflink; cc
  https://github.com/containers/storage/issues/1849
- Because we don't currently save tar-split data, the use case of
  pushing to a registry is virtually guaranteed to produce changed
  diffids, and we'll hence end up duplicating layers on the registry

Now, what is more interesting is that this code is going to help us a
bit for the use case of "recommitting" a derived container image.

Signed-off-by: Colin Walters <walters@verbum.org>
---
 lib/Cargo.toml                   |   3 +-
 lib/src/chunking.rs              |   4 +-
 lib/src/cli.rs                   |  59 +++++++++-
 lib/src/container/encapsulate.rs |   2 +-
 lib/src/container/store.rs       | 185 +++++++++++++++++++++++++++++++
 lib/src/fixture.rs               | 130 +++++++++++++++++++++-
 lib/tests/it/main.rs             |  95 +++++++++++++++-
 7 files changed, 470 insertions(+), 8 deletions(-)
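
For reference, the API equivalent of the CLI example above looks
roughly like the following sketch, driving the new `store::export`
entrypoint added in this patch. The `reexport_example` wrapper and the
image references are illustrative, and `Transport::ContainerStorage`
is assumed to map to the `containers-storage:` transport:

```rust
use ostree_ext::container::store::{self, ExportToOCIOpts};
use ostree_ext::container::{ImageReference, Transport};
use ostree_ext::ostree;

async fn reexport_example(repo: &ostree::Repo) -> anyhow::Result<()> {
    let src = ImageReference {
        transport: Transport::Registry,
        name: "quay.io/exampleos/someos:latest".to_string(),
    };
    let dest = ImageReference {
        transport: Transport::ContainerStorage,
        name: "localhost/exampleos".to_string(),
    };
    // Defaults: gzip-compress layers, no authfile.
    let digest = store::export(repo, &src, &dest, Some(ExportToOCIOpts::default())).await?;
    println!("Exported: {digest}");
    Ok(())
}
```
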
diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 17105542..10dcd5a8 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -53,6 +53,7 @@ zstd = { version = "0.13.1", features = ["pkg-config"] }
 
 indoc = { version = "2", optional = true }
 xshell = { version = "0.2", optional = true }
+similar-asserts = { version = "1.5.0", optional = true }
 
 [dev-dependencies]
 quickcheck = "1"
@@ -66,4 +67,4 @@ features = ["dox"]
 [features]
 docgen = ["clap_mangen"]
 dox = ["ostree/dox"]
-internal-testing-api = ["xshell", "indoc"]
+internal-testing-api = ["xshell", "indoc", "similar-asserts"]
diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index eaa22ff8..4b047e74 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -31,7 +31,7 @@ pub(crate) const MAX_CHUNKS: u32 = 64;
 const MIN_CHUNKED_LAYERS: u32 = 4;
 
 /// A convenient alias for a reference-counted, immutable string.
-type RcStr = Rc<str>;
+pub(crate) type RcStr = Rc<str>;
 /// Maps from a checksum to its size and file names (multiple in the case of
 /// hard links).
 pub(crate) type ChunkMapping = BTreeMap<RcStr, (u64, Vec<RcStr>)>;
@@ -215,7 +215,7 @@ impl Chunk {
         }
     }
 
-    fn move_obj(&mut self, dest: &mut Self, checksum: &str) -> bool {
+    pub(crate) fn move_obj(&mut self, dest: &mut Self, checksum: &str) -> bool {
         // In most cases, we expect the object to exist in the source. However, it's
         // convenient here to simply ignore objects which were already moved into
         // a chunk.
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 99d223c8..c9f91cb4 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -24,7 +24,7 @@ use std::process::Command;
 use tokio::sync::mpsc::Receiver;
 
 use crate::commit::container_commit;
-use crate::container::store::{ImportProgress, LayerProgress, PreparedImport};
+use crate::container::store::{ExportToOCIOpts, ImportProgress, LayerProgress, PreparedImport};
 use crate::container::{self as ostree_container, ManifestDiff};
 use crate::container::{Config, ImageReference, OstreeImageReference};
 use crate::sysroot::SysrootLock;
@@ -117,7 +117,13 @@ pub(crate) enum ContainerOpts {
         imgref: OstreeImageReference,
     },
 
-    /// Wrap an ostree commit into a container
+    /// Wrap an ostree commit into a container image.
+    ///
+    /// The resulting container image will have a single layer, which is
+    /// very often not what's desired. To handle things more intelligently,
+    /// you will need to use (or create) a higher-level tool that splits
+    /// content into distinct "chunks"; functionality for this is
+    /// exposed by the API but not the CLI currently.
    #[clap(alias = "export")]
    Encapsulate {
        /// Path to the repository
@@ -277,6 +283,32 @@ pub(crate) enum ContainerImageOpts {
        imgref: OstreeImageReference,
    },

+    /// Re-export a fetched image.
+    ///
+    /// Unlike `encapsulate`, this verb handles layered images, and will
+    /// also automatically preserve chunked structure from the fetched image.
+    Reexport {
+        /// Path to the repository
+        #[clap(long, value_parser)]
+        repo: Utf8PathBuf,
+
+        /// Source image reference, e.g. registry:quay.io/exampleos/exampleos:latest
+        #[clap(value_parser = parse_base_imgref)]
+        src_imgref: ImageReference,
+
+        /// Destination image reference, e.g. registry:quay.io/exampleos/exampleos:latest
+        #[clap(value_parser = parse_base_imgref)]
+        dest_imgref: ImageReference,
+
+        #[clap(long)]
+        /// Path to Docker-formatted authentication file.
+        authfile: Option<PathBuf>,
+
+        /// Compress at the fastest level (e.g. gzip level 1)
+        #[clap(long)]
+        compression_fast: bool,
+    },
+
    /// Replace the detached metadata (e.g. to add a signature)
    ReplaceDetachedMetadata {
        /// Path to the source repository
@@ -969,6 +1001,29 @@ async fn run_from_opt(opt: Opt) -> Result<()> {
                let repo = parse_repo(&repo)?;
                container_store(&repo, &imgref, proxyopts, quiet, check).await
            }
+            ContainerImageOpts::Reexport {
+                repo,
+                src_imgref,
+                dest_imgref,
+                authfile,
+                compression_fast,
+            } => {
+                let repo = &parse_repo(&repo)?;
+                let opts = ExportToOCIOpts {
+                    authfile,
+                    skip_compression: compression_fast,
+                    ..Default::default()
+                };
+                let digest = ostree_container::store::export(
+                    repo,
+                    &src_imgref,
+                    &dest_imgref,
+                    Some(opts),
+                )
+                .await?;
+                println!("Exported: {digest}");
+                Ok(())
+            }
            ContainerImageOpts::History { repo, imgref } => {
                let repo = parse_repo(&repo)?;
                container_history(&repo, &imgref).await
diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index 5b7479a0..d61355b3 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -102,7 +102,7 @@ fn export_chunks(
 /// Write an ostree commit to an OCI blob
 #[context("Writing ostree root to blob")]
 #[allow(clippy::too_many_arguments)]
-fn export_chunked(
+pub(crate) fn export_chunked(
     repo: &ostree::Repo,
     commit: &str,
     ociw: &mut OciDir,
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 9ca92dbf..084ab9a4 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -6,14 +6,18 @@
 //! base. See [`encapsulate`][`super::encapsulate()`] for more information on encapsulation of images.
 
 use super::*;
+use crate::chunking::{self, Chunk};
 use crate::logging::system_repo_journal_print;
 use crate::refescape;
 use crate::sysroot::SysrootLock;
 use crate::utils::ResultExt;
 use anyhow::{anyhow, Context};
 use camino::{Utf8Path, Utf8PathBuf};
+use cap_std_ext::cap_std;
 use cap_std_ext::cap_std::fs::{Dir, MetadataExt};
+use cap_std_ext::cmdext::CapStdExtCommandExt;
 use containers_image_proxy::{ImageProxy, OpenedImage};
+use flate2::Compression;
 use fn_error_context::context;
 use futures_util::TryFutureExt;
 use oci_spec::image::{self as oci_image, Descriptor, History, ImageConfiguration, ImageManifest};
@@ -1209,6 +1213,187 @@ pub async fn copy(
     Ok(())
 }
 
+/// Options controlling commit export into OCI
+#[derive(Clone, Debug, Default)]
+#[non_exhaustive]
+pub struct ExportToOCIOpts {
+    /// If true, do not perform gzip compression of the tar layers.
+    pub skip_compression: bool,
+    /// Path to Docker-formatted authentication file.
+    pub authfile: Option<PathBuf>,
+}
+
+/// The way we store "chunk" layers in ostree is by writing a commit
+/// whose filenames are their own object identifier. This function parses
+/// what is written by the `ImporterMode::ObjectSet` logic, turning
+/// it back into a "chunked" structure that is used by the export code.
+fn chunking_from_layer_committed(
+    repo: &ostree::Repo,
+    l: &Descriptor,
+    chunking: &mut chunking::Chunking,
+) -> Result<()> {
+    let mut chunk = Chunk::default();
+    let layer_ref = &ref_for_layer(l)?;
+    let root = repo.read_commit(&layer_ref, gio::Cancellable::NONE)?.0;
+    let e = root.enumerate_children(
+        "standard::name,standard::size",
+        gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS,
+        gio::Cancellable::NONE,
+    )?;
+    for child in e.clone() {
+        let child = &child?;
+        // The name here should be a valid checksum
+        let name = child.name();
+        // SAFETY: ostree doesn't give us non-UTF8 filenames
+        let name = Utf8Path::from_path(&name).unwrap();
+        ostree::validate_checksum_string(name.as_str())?;
+        chunking.remainder.move_obj(&mut chunk, name.as_str());
+    }
+    chunking.chunks.push(chunk);
+    Ok(())
+}
+
+/// Export an imported container image to a target OCI directory.
+#[context("Copying image")]
+pub(crate) fn export_to_oci(
+    repo: &ostree::Repo,
+    imgref: &ImageReference,
+    dest_oci: &Dir,
+    tag: Option<&str>,
+    opts: ExportToOCIOpts,
+) -> Result<Descriptor> {
+    let srcinfo = query_image(repo, imgref)?.ok_or_else(|| anyhow!("No such image"))?;
+    let (commit_layer, component_layers, remaining_layers) =
+        parse_manifest_layout(&srcinfo.manifest, &srcinfo.configuration)?;
+    let commit_chunk_ref = ref_for_layer(commit_layer)?;
+    let commit_chunk_rev = repo.require_rev(&commit_chunk_ref)?;
+    let mut chunking = chunking::Chunking::new(repo, &commit_chunk_rev)?;
+    for layer in component_layers {
+        chunking_from_layer_committed(repo, layer, &mut chunking)?;
+    }
+    // Unfortunately today we can't guarantee we reserialize the same tar stream
+    // or compression, so we'll need to generate a new copy of the manifest and config
+    // with the layers reset.
+    let mut new_manifest = srcinfo.manifest.clone();
+    new_manifest.layers_mut().clear();
+    let mut new_config = srcinfo.configuration.clone();
+    new_config.history_mut().clear();
+
+    let mut dest_oci = ocidir::OciDir::ensure(&dest_oci)?;
+
+    let opts = ExportOpts {
+        skip_compression: opts.skip_compression,
+        authfile: opts.authfile,
+        ..Default::default()
+    };
+
+    let mut labels = HashMap::new();
+
+    // Given the object chunking information we recomputed from what
+    // we found on disk, re-serialize to layers (tarballs).
+    export_chunked(
+        repo,
+        &srcinfo.base_commit,
+        &mut dest_oci,
+        &mut new_manifest,
+        &mut new_config,
+        &mut labels,
+        chunking,
+        &opts,
+        "",
+    )?;
+
+    // Now, handle the non-ostree layers; this is a simple conversion of
+    // the stored layer content back into tarball blobs.
+    let compression = opts.skip_compression.then_some(Compression::none());
+    for (i, layer) in remaining_layers.iter().enumerate() {
+        let layer_ref = &ref_for_layer(layer)?;
+        let mut target_blob = dest_oci.create_raw_layer(compression)?;
+        // Sadly the libarchive stuff isn't exposed via Rust due to type unsafety,
+        // so we'll just fork off the CLI.
+        let repo_dfd = repo.dfd_borrow();
+        let repo_dir = cap_std_ext::cap_std::fs::Dir::reopen_dir(&repo_dfd)?;
+        let mut subproc = std::process::Command::new("ostree")
+            .args(["--repo=.", "export", layer_ref.as_str()])
+            .stdout(std::process::Stdio::piped())
+            .cwd_dir(repo_dir)
+            .spawn()?;
+        // SAFETY: we piped just above
+        let mut stdout = subproc.stdout.take().unwrap();
+        std::io::copy(&mut stdout, &mut target_blob).context("Creating blob")?;
+        let layer = target_blob.complete()?;
+        let previous_annotations = srcinfo
+            .manifest
+            .layers()
+            .get(i)
+            .and_then(|l| l.annotations().as_ref())
+            .cloned();
+        let previous_description = srcinfo
+            .configuration
+            .history()
+            .get(i)
+            .and_then(|h| h.comment().as_deref())
+            .unwrap_or_default();
+        dest_oci.push_layer(
+            &mut new_manifest,
+            &mut new_config,
+            layer,
+            previous_description,
+            previous_annotations,
+        )
+    }
+
+    let new_config = dest_oci.write_config(new_config)?;
+    new_manifest.set_config(new_config);
+
+    dest_oci.insert_manifest(new_manifest, tag, oci_image::Platform::default())
+}
+
+/// Given a container image reference which is stored in `repo`, export it to the
+/// target image location.
+#[context("Export")]
+pub async fn export(
+    repo: &ostree::Repo,
+    src_imgref: &ImageReference,
+    dest_imgref: &ImageReference,
+    opts: Option<ExportToOCIOpts>,
+) -> Result<String> {
+    let target_oci = dest_imgref.transport == Transport::OciDir;
+    let tempdir = if !target_oci {
+        let vartmp = cap_std::fs::Dir::open_ambient_dir("/var/tmp", cap_std::ambient_authority())?;
+        let td = cap_std_ext::cap_tempfile::TempDir::new_in(&vartmp)?;
+        // Always skip compression when making a temporary copy
+        let opts = ExportToOCIOpts {
+            skip_compression: true,
+            ..Default::default()
+        };
+        export_to_oci(repo, src_imgref, &td, None, opts)?;
+        td
+    } else {
+        let opts = opts.unwrap_or_default();
+        let (path, tag) = parse_oci_path_and_tag(dest_imgref.name.as_str());
+        tracing::debug!("using OCI path={path} tag={tag:?}");
+        let path = Dir::open_ambient_dir(path, cap_std::ambient_authority())
+            .with_context(|| format!("Opening {path}"))?;
+        let descriptor = export_to_oci(repo, src_imgref, &path, tag, opts)?;
+        return Ok(descriptor.digest().clone());
+    };
+    // Pass the temporary oci directory as the current working directory for the skopeo process
+    let target_fd = 3i32;
+    let tempoci = ImageReference {
+        transport: Transport::OciDir,
+        name: format!("/proc/self/fd/{target_fd}"),
+    };
+    let authfile = opts.as_ref().and_then(|o| o.authfile.as_deref());
+    skopeo::copy(
+        &tempoci,
+        dest_imgref,
+        authfile,
+        Some((std::sync::Arc::new(tempdir.try_clone()?.into()), target_fd)),
+    )
+    .await
+}
+
 /// Iterate over deployment commits, returning the manifests from
 /// commits which point to a container image.
 #[context("Listing deployment manifests")]
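
A note on the `/proc/self/fd/{target_fd}` dance in `export` above: the
temporary OCI directory is handed to the spawned skopeo process as an
inherited file descriptor rather than by path. A minimal standalone
sketch of the same technique (assuming cap-std-ext's `take_fd_n`
extension; the destination reference is illustrative):

```rust
use cap_std_ext::cap_std::fs::Dir;
use cap_std_ext::cmdext::CapStdExtCommandExt;
use std::process::Command;
use std::sync::Arc;

// Sketch: pass an already-opened OCI directory to skopeo as fd 3, so the
// child can address it as oci:/proc/self/fd/3 without a stable path.
fn skopeo_copy_via_fd(oci_dir: Dir) -> std::io::Result<std::process::ExitStatus> {
    let mut cmd = Command::new("skopeo");
    cmd.args([
        "copy",
        "oci:/proc/self/fd/3",
        "containers-storage:localhost/exampleos", // illustrative destination
    ]);
    // Fd 3 is the first slot after stdin/stdout/stderr; take_fd_n arranges
    // for the descriptor to be inherited at exactly that number.
    cmd.take_fd_n(Arc::new(oci_dir.into()), 3);
    cmd.status()
}
```
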
#[context("Listing deployment manifests")] diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 35f406d8..b21bec1e 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -3,8 +3,10 @@ #![allow(missing_docs)] use crate::chunking::ObjectMetaSized; +use crate::container::store::{self, LayeredImageState}; use crate::container::{Config, ExportOpts, ImageReference, Transport}; use crate::objectsource::{ObjectMeta, ObjectSourceMeta}; +use crate::objgv::gv_dirtree; use crate::prelude::*; use crate::{gio, glib}; use anyhow::{anyhow, Context, Result}; @@ -14,13 +16,16 @@ use cap_std_ext::cap_std; use cap_std_ext::prelude::CapStdExtCommandExt; use chrono::TimeZone; use fn_error_context::context; +use gvariant::aligned_bytes::TryAsAligned; +use gvariant::{Marker, Structure}; use io_lifetimes::AsFd; use once_cell::sync::Lazy; use regex::Regex; use std::borrow::Cow; +use std::fmt::Write as _; use std::io::Write; use std::ops::Add; -use std::process::Stdio; +use std::process::{Command, Stdio}; use std::rc::Rc; use std::sync::Arc; @@ -360,6 +365,108 @@ fn build_mapping_recurse( Ok(()) } +/// Thin wrapper for `ostree ls -RXC` to show the full file contents +pub fn recursive_ostree_ls_text(repo: &ostree::Repo, refspec: &str) -> Result { + let o = Command::new("ostree") + .cwd_dir(Dir::reopen_dir(&repo.dfd_borrow())?) + .args(["--repo=.", "ls", "-RXC", refspec]) + .output()?; + let st = o.status; + if !st.success() { + anyhow::bail!("ostree ls failed: {st:?}"); + } + let r = String::from_utf8(o.stdout)?; + Ok(r) +} + +pub fn assert_commits_content_equal( + a_repo: &ostree::Repo, + a: &str, + b_repo: &ostree::Repo, + b: &str, +) { + let a = a_repo.require_rev(a).unwrap(); + let b = a_repo.require_rev(b).unwrap(); + let a_commit = a_repo.load_commit(&a).unwrap().0; + let b_commit = b_repo.load_commit(&b).unwrap().0; + let a_contentid = ostree::commit_get_content_checksum(&a_commit).unwrap(); + let b_contentid = ostree::commit_get_content_checksum(&b_commit).unwrap(); + if a_contentid == b_contentid { + return; + } + let a_contents = recursive_ostree_ls_text(a_repo, &a).unwrap(); + let b_contents = recursive_ostree_ls_text(b_repo, &b).unwrap(); + similar_asserts::assert_eq!(a_contents, b_contents); + panic!("Should not be reached; had different content hashes but same recursive ls") +} + +fn ls_recurse( + repo: &ostree::Repo, + path: &mut Utf8PathBuf, + buf: &mut String, + dt: &glib::Variant, +) -> Result<()> { + let dt = dt.data_as_bytes(); + let dt = dt.try_as_aligned()?; + let dt = gv_dirtree!().cast(dt); + let (files, dirs) = dt.to_tuple(); + // A reusable buffer to avoid heap allocating these + let mut hexbuf = [0u8; 64]; + for file in files { + let (name, csum) = file.to_tuple(); + path.push(name.to_str()); + hex::encode_to_slice(csum, &mut hexbuf)?; + let checksum = std::str::from_utf8(&hexbuf)?; + let meta = repo.query_file(checksum, gio::Cancellable::NONE)?.0; + let size = meta.size() as u64; + writeln!(buf, "r {path} {size}").unwrap(); + assert!(path.pop()); + } + for item in dirs { + let (name, contents_csum, _) = item.to_tuple(); + let name = name.to_str(); + // Extend our current path + path.push(name); + hex::encode_to_slice(contents_csum, &mut hexbuf)?; + let checksum_s = std::str::from_utf8(&hexbuf)?; + let child_v = repo.load_variant(ostree::ObjectType::DirTree, checksum_s)?; + ls_recurse(repo, path, buf, &child_v)?; + // We did a push above, so pop must succeed. 
+        assert!(path.pop());
+    }
+    Ok(())
+}
+
+pub fn ostree_ls(repo: &ostree::Repo, r: &str) -> Result<String> {
+    let root = repo.read_commit(r, gio::Cancellable::NONE).unwrap().0;
+    // SAFETY: Must be a repofile
+    let root = root.downcast_ref::<ostree::RepoFile>().unwrap();
+    // SAFETY: must be a tree root
+    let root_contents = root.tree_get_contents_checksum().unwrap();
+    let root_contents = repo
+        .load_variant(ostree::ObjectType::DirTree, &root_contents)
+        .unwrap();
+
+    let mut contents_buf = String::new();
+    let mut pathbuf = Utf8PathBuf::from("/");
+    ls_recurse(repo, &mut pathbuf, &mut contents_buf, &root_contents)?;
+    Ok(contents_buf)
+}
+
+/// Verify the filenames (but not metadata) are the same between two commits.
+/// We unfortunately need to do this because the current commit merge path
+/// sets ownership of directories to the current user, which breaks in unit tests.
+pub fn assert_commits_filenames_equal(
+    a_repo: &ostree::Repo,
+    a: &str,
+    b_repo: &ostree::Repo,
+    b: &str,
+) {
+    let a_contents_buf = ostree_ls(a_repo, a).unwrap();
+    let b_contents_buf = ostree_ls(b_repo, b).unwrap();
+    similar_asserts::assert_eq!(a_contents_buf, b_contents_buf);
+}
+
 #[derive(Debug)]
 pub struct Fixture {
     // Just holds a reference
@@ -443,6 +550,27 @@ impl Fixture {
         Ok(sh)
     }
 
+    /// Given the input image reference, import it into destrepo using the default
+    /// import config. The image must not exist already in the store.
+    pub async fn must_import(&self, imgref: &ImageReference) -> Result<Box<LayeredImageState>> {
+        let ostree_imgref = crate::container::OstreeImageReference {
+            sigverify: crate::container::SignatureSource::ContainerPolicyAllowInsecure,
+            imgref: imgref.clone(),
+        };
+        let mut imp =
+            store::ImageImporter::new(self.destrepo(), &ostree_imgref, Default::default())
+                .await
+                .unwrap();
+        assert!(store::query_image(self.destrepo(), &imgref)
+            .unwrap()
+            .is_none());
+        let prep = match imp.prepare().await.context("Init prep derived")? {
+            store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"),
+            store::PrepareResult::Ready(r) => r,
+        };
+        imp.import(prep).await
+    }
+
     // Delete all objects in the destrepo
     pub fn clear_destrepo(&self) -> Result<()> {
         self.destrepo()
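
For orientation, the new comparison helpers (gated behind the
`internal-testing-api` feature) are intended to compose roughly like
this sketch; the `compare_commits` wrapper and the two refs are
hypothetical:

```rust
use ostree_ext::fixture::{self, Fixture};

// Sketch: comparing two commits in the destination repo of a test fixture.
// "ref-a" and "ref-b" are hypothetical refs.
fn compare_commits(fixture: &Fixture) {
    let repo = fixture.destrepo();
    // Strict: compares content checksums, and on mismatch falls back to a
    // recursive `ostree ls -RXC` diff for a readable failure message.
    fixture::assert_commits_content_equal(repo, "ref-a", repo, "ref-b");
    // Looser: compares only file names and sizes, for paths where the merge
    // logic rewrites directory ownership under unit tests.
    fixture::assert_commits_filenames_equal(repo, "ref-a", repo, "ref-b");
}
```
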
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index c4836d6d..70efc1f6 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -10,9 +10,9 @@ use ostree_ext::container::{store, ManifestDiff};
 use ostree_ext::container::{
     Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport,
 };
-use ostree_ext::ostree_manual;
 use ostree_ext::prelude::{Cast, FileExt};
 use ostree_ext::tar::TarImportOptions;
+use ostree_ext::{fixture, ostree_manual};
 use ostree_ext::{gio, glib};
 use std::borrow::Cow;
 use std::collections::{HashMap, HashSet};
@@ -601,6 +601,99 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
     Ok(())
 }
 
+#[tokio::test]
+async fn test_export_as_container_nonderived() -> Result<()> {
+    let fixture = Fixture::new_v1()?;
+    // Export into an OCI directory
+    let src_imgref = fixture.export_container().await.unwrap().0;
+
+    let initimport = fixture.must_import(&src_imgref).await?;
+    let initimport_ls = fixture::ostree_ls(fixture.destrepo(), &initimport.merge_commit).unwrap();
+
+    let exported_ocidir_name = "exported.ocidir";
+    let dest = ImageReference {
+        transport: Transport::OciDir,
+        name: format!("{}:exported-test", fixture.path.join(exported_ocidir_name)),
+    };
+    fixture.dir.create_dir(exported_ocidir_name)?;
+    let ocidir = ocidir::OciDir::ensure(&fixture.dir.open_dir(exported_ocidir_name)?)?;
+    let exported = store::export(fixture.destrepo(), &src_imgref, &dest, None)
+        .await
+        .unwrap();
+    let (new_manifest, desc) = ocidir.read_manifest_and_descriptor()?;
+    assert_eq!(desc.digest(), exported.as_str());
+    assert_eq!(new_manifest.layers().len(), fixture::LAYERS_V0_LEN);
+
+    // Reset the destrepo
+    fixture.clear_destrepo()?;
+    // Clear out the original source
+    std::fs::remove_dir_all(src_imgref.name.as_str())?;
+
+    let reimported = fixture.must_import(&dest).await?;
+    let reimport_ls = fixture::ostree_ls(fixture.destrepo(), &reimported.merge_commit).unwrap();
+    similar_asserts::assert_eq!(initimport_ls, reimport_ls);
+    Ok(())
+}
+
+#[tokio::test]
+async fn test_export_as_container_derived() -> Result<()> {
+    let fixture = Fixture::new_v1()?;
+    // Export into an OCI directory
+    let src_imgref = fixture.export_container().await.unwrap().0;
+    // Add a derived layer
+    let derived_tag = "derived";
+    // Build a derived image
+    let srcpath = src_imgref.name.as_str();
+    let temproot = &fixture.path.join("temproot");
+    || -> Result<_> {
+        std::fs::create_dir(temproot)?;
+        let temprootd = Dir::open_ambient_dir(temproot, cap_std::ambient_authority())?;
+        let mut db = DirBuilder::new();
+        db.mode(0o755);
+        db.recursive(true);
+        temprootd.create_dir_with("usr/bin", &db)?;
+        temprootd.write("usr/bin/newderivedfile", "newderivedfile v0")?;
+        temprootd.write("usr/bin/newderivedfile3", "newderivedfile3 v0")?;
+        Ok(())
+    }()
+    .context("generating temp content")?;
+    ostree_ext::integrationtest::generate_derived_oci(srcpath, temproot, Some(derived_tag))?;
+    let derived_imgref = ImageReference {
+        transport: src_imgref.transport.clone(),
+        name: format!("{}:{derived_tag}", src_imgref.name.as_str()),
+    };
+
+    // The first import into destrepo of the derived OCI
+    let initimport = fixture.must_import(&derived_imgref).await?;
+    let initimport_ls =
+        fixture::ostree_ls(fixture.destrepo(), &initimport.merge_commit).unwrap();
+    // Export it
+    let exported_ocidir_name = "exported.ocidir";
+    let dest = ImageReference {
+        transport: Transport::OciDir,
+        name: format!("{}:exported-test", fixture.path.join(exported_ocidir_name)),
+    };
+    fixture.dir.create_dir(exported_ocidir_name)?;
+    let ocidir = ocidir::OciDir::ensure(&fixture.dir.open_dir(exported_ocidir_name)?)?;
+    let exported = store::export(fixture.destrepo(), &derived_imgref, &dest, None)
+        .await
+        .unwrap();
+
+    let (new_manifest, desc) = ocidir.read_manifest_and_descriptor()?;
+    assert_eq!(desc.digest(), exported.as_str());
+    assert_eq!(new_manifest.layers().len(), fixture::LAYERS_V0_LEN + 1);
+
+    // Reset the destrepo
+    fixture.clear_destrepo()?;
+    // Clear out the original source
+    std::fs::remove_dir_all(srcpath)?;
+
+    let reimported = fixture.must_import(&dest).await?;
+    let reimport_ls = fixture::ostree_ls(fixture.destrepo(), &reimported.merge_commit).unwrap();
+    similar_asserts::assert_eq!(initimport_ls, reimport_ls);
+
+    Ok(())
+}
+
 #[tokio::test]
 async fn test_unencapsulate_unbootable() -> Result<()> {
     let fixture = {