diff --git a/src/bin/nydus-image/deduplicate.rs b/src/bin/nydus-image/deduplicate.rs
new file mode 100644
index 00000000000..66db26e69c4
--- /dev/null
+++ b/src/bin/nydus-image/deduplicate.rs
@@ -0,0 +1,661 @@
+// Copyright (C) 2023 Nydus Developers. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//! Deduplication support for chunks.
+use anyhow::{Context, Result};
+use nydus_api::ConfigV2;
+use nydus_builder::Tree;
+use nydus_rafs::metadata::RafsSuper;
+use nydus_storage::device::BlobInfo;
+use rusqlite::{params, Connection};
+use std::fs;
+use std::path::Path;
+use std::sync::{Arc, Mutex};
+
+#[derive(Debug)]
+pub enum DatabaseError {
+    SqliteError(rusqlite::Error),
+    PoisonError(String),
+    // Add other database error variants here as needed, e.g.:
+    // MysqlError(mysql::Error),
+}
+
+impl std::fmt::Display for DatabaseError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match *self {
+            DatabaseError::SqliteError(ref err) => err.fmt(f),
+            DatabaseError::PoisonError(ref err) => write!(f, "PoisonError: {}", err),
+            // Add other error type formatting here
+        }
+    }
+}
+
+impl std::error::Error for DatabaseError {}
+
+impl From<rusqlite::Error> for DatabaseError {
+    fn from(error: rusqlite::Error) -> Self {
+        DatabaseError::SqliteError(error)
+    }
+}
+
+pub trait Database {
+    /// Creates the chunk table in the database.
+    fn create_chunk_table(&self) -> Result<()>;
+
+    /// Creates the blob table in the database.
+    fn create_blob_table(&self) -> Result<()>;
+
+    /// Inserts chunk information into the database.
+    fn insert_chunk(&self, chunk_info: &Chunk) -> Result<()>;
+
+    /// Inserts blob information into the database.
+    fn insert_blob(&self, blob_info: &Blob) -> Result<()>;
+
+    /// Retrieves all chunk information from the database.
+    fn get_chunks(&self) -> Result<Vec<Chunk>>;
+
+    /// Retrieves all blob information from the database.
+    fn get_blobs(&self) -> Result<Vec<Blob>>;
+}
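The `Database` trait keeps the deduplicator agnostic of the concrete store, so a second backend only has to implement these six methods. As a minimal illustration (not part of this patch; it assumes `Chunk` and `Blob` were given `Clone` derives, which the patch does not add), an in-memory test double could look like:

```rust
use std::sync::Mutex;

use anyhow::Result;

// Hypothetical Vec-backed store for unit tests; `Database`, `Chunk` and
// `Blob` are the items defined in deduplicate.rs.
#[derive(Default)]
struct MemoryDatabase {
    chunks: Mutex<Vec<Chunk>>,
    blobs: Mutex<Vec<Blob>>,
}

impl Database for MemoryDatabase {
    // An in-memory store has no schema to set up.
    fn create_chunk_table(&self) -> Result<()> {
        Ok(())
    }

    fn create_blob_table(&self) -> Result<()> {
        Ok(())
    }

    fn insert_chunk(&self, chunk_info: &Chunk) -> Result<()> {
        self.chunks.lock().unwrap().push(chunk_info.clone());
        Ok(())
    }

    fn insert_blob(&self, blob_info: &Blob) -> Result<()> {
        self.blobs.lock().unwrap().push(blob_info.clone());
        Ok(())
    }

    fn get_chunks(&self) -> Result<Vec<Chunk>> {
        Ok(self.chunks.lock().unwrap().clone())
    }

    fn get_blobs(&self) -> Result<Vec<Blob>> {
        Ok(self.blobs.lock().unwrap().clone())
    }
}
```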
+
+pub struct SqliteDatabase {
+    chunk_table: Option<ChunkTable>,
+    blob_table: Option<BlobTable>,
+}
+
+impl SqliteDatabase {
+    pub fn new(database_url: &str) -> Result<Self, rusqlite::Error> {
+        // Delete the database file if it exists.
+        if let Ok(metadata) = fs::metadata(database_url) {
+            if metadata.is_file() {
+                if let Err(err) = fs::remove_file(database_url) {
+                    warn!(
+                        "Unable to delete existing database file: {:?}.",
+                        err
+                    );
+                }
+            }
+        }
+
+        let chunk_table_result = ChunkTable::new(database_url);
+        let chunk_table = match chunk_table_result {
+            Ok(table) => table,
+            Err(e) => {
+                println!("An error occurred when creating the ChunkTable: {}", e);
+                return Err(e);
+            }
+        };
+
+        let blob_table_result = BlobTable::new(database_url);
+        let blob_table = match blob_table_result {
+            Ok(table) => table,
+            Err(e) => {
+                println!("An error occurred when creating the BlobTable: {}", e);
+                return Err(e);
+            }
+        };
+
+        Ok(Self {
+            chunk_table: Some(chunk_table),
+            blob_table: Some(blob_table),
+        })
+    }
+
+    pub fn new_in_memory() -> Result<Self, rusqlite::Error> {
+        let chunk_table_result = ChunkTable::new_in_memory();
+        let chunk_table = match chunk_table_result {
+            Ok(table) => table,
+            Err(e) => {
+                println!("An error occurred when creating the ChunkTable: {}", e);
+                return Err(e);
+            }
+        };
+        let blob_table_result = BlobTable::new_in_memory();
+        let blob_table = match blob_table_result {
+            Ok(table) => table,
+            Err(e) => {
+                println!("An error occurred when creating the BlobTable: {}", e);
+                return Err(e);
+            }
+        };
+        Ok(Self {
+            chunk_table: Some(chunk_table),
+            blob_table: Some(blob_table),
+        })
+    }
+}
+
+impl Database for SqliteDatabase {
+    fn create_chunk_table(&self) -> Result<()> {
+        ChunkTable::create(self.chunk_table.as_ref().unwrap())
+            .context("Failed to create chunk table")
+    }
+
+    fn create_blob_table(&self) -> Result<()> {
+        BlobTable::create(self.blob_table.as_ref().unwrap()).context("Failed to create blob table")
+    }
+
+    fn insert_chunk(&self, chunk: &Chunk) -> Result<()> {
+        self.chunk_table
+            .as_ref()
+            .unwrap()
+            .insert(chunk)
+            .context("Failed to insert chunk")
+    }
+
+    fn insert_blob(&self, blob: &Blob) -> Result<()> {
+        self.blob_table
+            .as_ref()
+            .unwrap()
+            .insert(blob)
+            .context("Failed to insert blob")
+    }
+
+    fn get_chunks(&self) -> Result<Vec<Chunk>> {
+        ChunkTable::list_all(self.chunk_table.as_ref().unwrap()).context("Failed to get chunks")
+    }
+
+    fn get_blobs(&self) -> Result<Vec<Blob>> {
+        BlobTable::list_all(self.blob_table.as_ref().unwrap()).context("Failed to get blobs")
+    }
+}
+
+pub struct Deduplicate<D: Database + Send + Sync> {
+    sb: RafsSuper,
+    db: D,
+}
+
+impl Deduplicate<SqliteDatabase> {
+    pub fn new(bootstrap_path: &Path, config: Arc<ConfigV2>, db_url: &str) -> anyhow::Result<Self> {
+        let (sb, _) = RafsSuper::load_from_file(bootstrap_path, config, false)?;
+
+        // Create a new SQLite database.
+        let db = if db_url == "/sqlite_memory_model" {
+            SqliteDatabase::new_in_memory()?
+        } else {
+            SqliteDatabase::new(db_url)?
+        };
+
+        Ok(Self { sb, db })
+    }
+
+    /// Save metadata to the database: chunk and blob info.
+    pub fn save_metadata(&mut self) -> anyhow::Result<Vec<Arc<BlobInfo>>> {
+        let tree = Tree::from_bootstrap(&self.sb, &mut ())
+            .context("Failed to load bootstrap for deduplication.")?;
+
+        // Create the blob table and chunk table.
+        self.db
+            .create_chunk_table()
+            .context("Failed to create chunk table.")?;
+        self.db
+            .create_blob_table()
+            .context("Failed to create blob table.")?;
+
+        // Save blob info to the blob table.
+        let blob_infos = self.sb.superblock.get_blob_infos();
+        for blob in &blob_infos {
+            self.db
+                .insert_blob(&Blob {
+                    blob_id: blob.blob_id().to_string(),
+                    blob_compressed_size: blob.compressed_size(),
+                    blob_uncompressed_size: blob.uncompressed_size(),
+                })
+                .context("Failed to insert blob")?;
+        }
+
+        // Save chunk info to the chunk table, walking the tree in pre-order.
+        let pre = &mut |t: &Tree| -> Result<()> {
+            let node = t.lock_node();
+            for chunk in &node.chunks {
+                let index = chunk.inner.blob_index();
+                // Get the blob ID.
+                let chunk_blob_id = blob_infos[index as usize].blob_id();
+                // Insert the chunk into the chunk table.
+                self.db
+                    .insert_chunk(&Chunk {
+                        chunk_blob_id,
+                        chunk_digest: chunk.inner.id().to_string(),
+                        chunk_compressed_size: chunk.inner.compressed_size(),
+                        chunk_uncompressed_size: chunk.inner.uncompressed_size(),
+                        chunk_compressed_offset: chunk.inner.compressed_offset(),
+                        chunk_uncompressed_offset: chunk.inner.uncompressed_offset(),
+                    })
+                    .context("Failed to insert chunk")?;
+            }
+            Ok(())
+        };
+        tree.walk_dfs_pre(pre)?;
+
+        Ok(self.sb.superblock.get_blob_infos())
+    }
+}
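Taken together, a caller loads a RAFS bootstrap, points at a database location, and persists everything in one `save_metadata` call. A minimal usage sketch (the bootstrap path is a placeholder, and it assumes a default `ConfigV2` is adequate for local metadata access):

```rust
use std::path::Path;
use std::sync::Arc;

use nydus_api::ConfigV2;

fn save_example() -> anyhow::Result<()> {
    // Placeholder inputs: any valid RAFS bootstrap works here.
    let bootstrap = Path::new("/tmp/nydus-bootstrap");
    let config = Arc::new(ConfigV2::default());

    // Passing "/sqlite_memory_model" would select the in-memory backend;
    // any other path creates (or replaces) an SQLite file at that location.
    let mut dedup =
        Deduplicate::<SqliteDatabase>::new(bootstrap, config, "/tmp/chunkdict.db")?;
    let blob_infos = dedup.save_metadata()?;
    println!("saved metadata for {} blobs", blob_infos.len());
    Ok(())
}
```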
+
+pub trait Table<T, Err>: Sync + Send + Sized + 'static
+where
+    Err: std::error::Error + 'static,
+{
+    /// Clear the table.
+    fn clear(&self) -> Result<(), Err>;
+
+    /// Create the table.
+    fn create(&self) -> Result<(), Err>;
+
+    /// Insert data.
+    fn insert(&self, table: &T) -> Result<(), Err>;
+
+    /// Select all data.
+    fn list_all(&self) -> Result<Vec<T>, Err>;
+
+    /// Select data with offset and limit.
+    fn list_paged(&self, offset: i64, limit: i64) -> Result<Vec<T>, Err>;
+}
+
+#[derive(Debug)]
+pub struct ChunkTable {
+    conn: Arc<Mutex<Connection>>,
+}
+
+impl ChunkTable {
+    pub fn new(database_url: &str) -> Result<ChunkTable, rusqlite::Error> {
+        let conn = Connection::open(database_url)?;
+        Ok(ChunkTable {
+            conn: Arc::new(Mutex::new(conn)),
+        })
+    }
+
+    pub fn new_in_memory() -> Result<ChunkTable, rusqlite::Error> {
+        let conn = Connection::open_in_memory()?;
+        Ok(ChunkTable {
+            conn: Arc::new(Mutex::new(conn)),
+        })
+    }
+}
+
+#[derive(Debug)]
+pub struct Chunk {
+    chunk_blob_id: String,
+    chunk_digest: String,
+    chunk_compressed_size: u32,
+    chunk_uncompressed_size: u32,
+    chunk_compressed_offset: u64,
+    chunk_uncompressed_offset: u64,
+}
+
+impl Table<Chunk, DatabaseError> for ChunkTable {
+    fn clear(&self) -> Result<(), DatabaseError> {
+        self.conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?
+            .execute("DROP TABLE chunk", [])
+            .map_err(DatabaseError::SqliteError)?;
+        Ok(())
+    }
+
+    fn create(&self) -> Result<(), DatabaseError> {
+        self.conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?
+            .execute(
+                "CREATE TABLE IF NOT EXISTS chunk (
+                    id INTEGER PRIMARY KEY,
+                    chunk_blob_id TEXT NOT NULL,
+                    chunk_digest TEXT,
+                    chunk_compressed_size INT,
+                    chunk_uncompressed_size INT,
+                    chunk_compressed_offset INT,
+                    chunk_uncompressed_offset INT
+                )",
+                [],
+            )
+            .map_err(DatabaseError::SqliteError)?;
+        Ok(())
+    }
+
+    fn insert(&self, chunk: &Chunk) -> Result<(), DatabaseError> {
+        self.conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?
+            .execute(
+                "INSERT INTO chunk(
+                    chunk_blob_id,
+                    chunk_digest,
+                    chunk_compressed_size,
+                    chunk_uncompressed_size,
+                    chunk_compressed_offset,
+                    chunk_uncompressed_offset
+                )
+                VALUES (?1, ?2, ?3, ?4, ?5, ?6);
+                ",
+                rusqlite::params![
+                    chunk.chunk_blob_id,
+                    chunk.chunk_digest,
+                    chunk.chunk_compressed_size,
+                    chunk.chunk_uncompressed_size,
+                    chunk.chunk_compressed_offset,
+                    chunk.chunk_uncompressed_offset,
+                ],
+            )
+            .map_err(DatabaseError::SqliteError)?;
+        Ok(())
+    }
+
+    fn list_all(&self) -> Result<Vec<Chunk>, DatabaseError> {
+        let mut offset = 0;
+        let limit: i64 = 100;
+        let mut all_chunks = Vec::new();
+
+        loop {
+            let chunks = self.list_paged(offset, limit)?;
+            if chunks.is_empty() {
+                break;
+            }
+
+            all_chunks.extend(chunks);
+            offset += limit;
+        }
+
+        Ok(all_chunks)
+    }
+
+    fn list_paged(&self, offset: i64, limit: i64) -> Result<Vec<Chunk>, DatabaseError> {
+        let conn_guard = self
+            .conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?;
+        let mut stmt: rusqlite::Statement<'_> = conn_guard
+            .prepare(
+                "SELECT id, chunk_blob_id, chunk_digest, chunk_compressed_size,
+                chunk_uncompressed_size, chunk_compressed_offset, chunk_uncompressed_offset FROM chunk
+                ORDER BY id LIMIT ?1 OFFSET ?2",
+            )?;
+        let chunk_iterator = stmt.query_map(params![limit, offset], |row| {
+            Ok(Chunk {
+                chunk_blob_id: row.get(1)?,
+                chunk_digest: row.get(2)?,
+                chunk_compressed_size: row.get(3)?,
+                chunk_uncompressed_size: row.get(4)?,
+                chunk_compressed_offset: row.get(5)?,
+                chunk_uncompressed_offset: row.get(6)?,
+            })
+        })?;
+        let mut chunks = Vec::new();
+        for chunk in chunk_iterator {
+            chunks.push(chunk.map_err(DatabaseError::SqliteError)?);
+        }
+        Ok(chunks)
+    }
+}
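One design note on paging: `list_all` drains the table in fixed-size pages through `LIMIT ?1 OFFSET ?2`, which is simple but makes SQLite re-scan all skipped rows for each page. Because `id` is a monotonic `INTEGER PRIMARY KEY`, keyset pagination would avoid the re-scan; a hedged sketch of that alternative (the helper name is ours, and this is not what the patch implements):

```rust
// Hypothetical keyset variant: resume from the last id seen rather than
// skipping `offset` rows. Passing last_id = 0 returns the first page.
fn list_chunks_after(
    conn: &rusqlite::Connection,
    last_id: i64,
    limit: i64,
) -> rusqlite::Result<Vec<(i64, String)>> {
    let mut stmt = conn.prepare(
        "SELECT id, chunk_digest FROM chunk WHERE id > ?1 ORDER BY id LIMIT ?2",
    )?;
    let rows = stmt.query_map(rusqlite::params![last_id, limit], |row| {
        Ok((row.get(0)?, row.get(1)?))
    })?;
    // Collect, stopping at the first row error.
    rows.collect()
}
```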
+
+#[derive(Debug)]
+pub struct BlobTable {
+    conn: Arc<Mutex<Connection>>,
+}
+
+impl BlobTable {
+    pub fn new(database_url: &str) -> Result<BlobTable, rusqlite::Error> {
+        let conn = Connection::open(database_url)?;
+        Ok(BlobTable {
+            conn: Arc::new(Mutex::new(conn)),
+        })
+    }
+
+    pub fn new_in_memory() -> Result<BlobTable, rusqlite::Error> {
+        let conn = Connection::open_in_memory()?;
+        Ok(BlobTable {
+            conn: Arc::new(Mutex::new(conn)),
+        })
+    }
+}
+
+pub struct Blob {
+    blob_id: String,
+    blob_compressed_size: u64,
+    blob_uncompressed_size: u64,
+}
+
+impl Table<Blob, DatabaseError> for BlobTable {
+    fn clear(&self) -> Result<(), DatabaseError> {
+        self.conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?
+            .execute("DROP TABLE blob", [])
+            .map_err(DatabaseError::SqliteError)?;
+        Ok(())
+    }
+
+    fn create(&self) -> Result<(), DatabaseError> {
+        self.conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?
+            .execute(
+                "CREATE TABLE IF NOT EXISTS blob (
+                    id INTEGER PRIMARY KEY,
+                    blob_id TEXT NOT NULL,
+                    blob_compressed_size INT,
+                    blob_uncompressed_size INT
+                )",
+                [],
+            )
+            .map_err(DatabaseError::SqliteError)?;
+        Ok(())
+    }
+
+    fn insert(&self, blob: &Blob) -> Result<(), DatabaseError> {
+        self.conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?
+            .execute(
+                "INSERT INTO blob (
+                    blob_id,
+                    blob_compressed_size,
+                    blob_uncompressed_size
+                )
+                VALUES (?1, ?2, ?3);
+                ",
+                rusqlite::params![
+                    blob.blob_id,
+                    blob.blob_compressed_size,
+                    blob.blob_uncompressed_size
+                ],
+            )
+            .map_err(DatabaseError::SqliteError)?;
+        Ok(())
+    }
+
+    fn list_all(&self) -> Result<Vec<Blob>, DatabaseError> {
+        let mut offset = 0;
+        let limit: i64 = 100;
+        let mut all_blobs = Vec::new();
+
+        loop {
+            let blobs = self.list_paged(offset, limit)?;
+            if blobs.is_empty() {
+                break;
+            }
+
+            all_blobs.extend(blobs);
+            offset += limit;
+        }
+
+        Ok(all_blobs)
+    }
+
+    fn list_paged(&self, offset: i64, limit: i64) -> Result<Vec<Blob>, DatabaseError> {
+        let conn_guard = self
+            .conn
+            .lock()
+            .map_err(|e| DatabaseError::PoisonError(e.to_string()))?;
+        let mut stmt: rusqlite::Statement<'_> = conn_guard.prepare(
+            "SELECT blob_id, blob_compressed_size, blob_uncompressed_size FROM blob
+            ORDER BY id LIMIT ?1 OFFSET ?2",
+        )?;
+        let blob_iterator = stmt.query_map(params![limit, offset], |row| {
+            Ok(Blob {
+                blob_id: row.get(0)?,
+                blob_compressed_size: row.get(1)?,
+                blob_uncompressed_size: row.get(2)?,
+            })
+        })?;
+        let mut blobs = Vec::new();
+        for blob in blob_iterator {
+            blobs.push(blob.map_err(DatabaseError::SqliteError)?);
+        }
+        Ok(blobs)
+    }
+}
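With both tables in place, spotting duplicates is a plain SQL aggregation over `chunk_digest`. As an illustrative sketch of how a downstream step might pick chunk-dictionary candidates from the saved metadata (this query is ours, not part of the patch):

```rust
// Hypothetical consumer: digests that appear in more than one chunk
// record are candidates for a shared chunk dictionary.
fn duplicate_digests(conn: &rusqlite::Connection) -> rusqlite::Result<Vec<(String, i64)>> {
    let mut stmt = conn.prepare(
        "SELECT chunk_digest, COUNT(*) FROM chunk
         GROUP BY chunk_digest HAVING COUNT(*) > 1 ORDER BY COUNT(*) DESC",
    )?;
    let rows = stmt.query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?;
    rows.collect()
}
```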
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use rusqlite::Result;
+
+    #[test]
+    fn test_blob_table() -> Result<(), Box<dyn std::error::Error>> {
+        let blob_table = BlobTable::new_in_memory()?;
+        {
+            let conn = blob_table.conn.lock().map_err(|_| "Mutex poisoned")?;
+            conn.execute(
+                "CREATE TABLE IF NOT EXISTS blob (
+                    id INTEGER PRIMARY KEY,
+                    blob_id TEXT NOT NULL,
+                    blob_compressed_size INT,
+                    blob_uncompressed_size INT
+                )",
+                [],
+            )?;
+        }
+        let blob = Blob {
+            blob_id: "BLOB123".to_string(),
+            blob_compressed_size: 1024,
+            blob_uncompressed_size: 2048,
+        };
+        blob_table.insert(&blob)?;
+        let blobs = blob_table.list_all()?;
+        assert_eq!(blobs.len(), 1);
+        assert_eq!(blobs[0].blob_id, blob.blob_id);
+        assert_eq!(blobs[0].blob_compressed_size, blob.blob_compressed_size);
+        assert_eq!(blobs[0].blob_uncompressed_size, blob.blob_uncompressed_size);
+        Ok(())
+    }
+
+    #[test]
+    fn test_chunk_table() -> Result<(), Box<dyn std::error::Error>> {
+        let chunk_table = ChunkTable::new_in_memory()?;
+        {
+            let conn = chunk_table.conn.lock().map_err(|_| "Mutex poisoned")?;
+            conn.execute(
+                "CREATE TABLE IF NOT EXISTS chunk (
+                    id INTEGER PRIMARY KEY,
+                    chunk_blob_id TEXT NOT NULL,
+                    chunk_digest TEXT NOT NULL,
+                    chunk_compressed_size INT,
+                    chunk_uncompressed_size INT,
+                    chunk_compressed_offset INT,
+                    chunk_uncompressed_offset INT
+                )",
+                [],
+            )?;
+        }
+        let chunk = Chunk {
+            chunk_blob_id: "BLOB123".to_string(),
+            chunk_digest: "DIGEST123".to_string(),
+            chunk_compressed_size: 512,
+            chunk_uncompressed_size: 1024,
+            chunk_compressed_offset: 0,
+            chunk_uncompressed_offset: 0,
+        };
+        chunk_table.insert(&chunk)?;
+        let chunks = chunk_table.list_all()?;
+        assert_eq!(chunks.len(), 1);
+        assert_eq!(chunks[0].chunk_blob_id, chunk.chunk_blob_id);
+        assert_eq!(chunks[0].chunk_digest, chunk.chunk_digest);
+        assert_eq!(chunks[0].chunk_compressed_size, chunk.chunk_compressed_size);
+        assert_eq!(
+            chunks[0].chunk_uncompressed_size,
+            chunk.chunk_uncompressed_size
+        );
+        assert_eq!(
+            chunks[0].chunk_compressed_offset,
+            chunk.chunk_compressed_offset
+        );
+        assert_eq!(
+            chunks[0].chunk_uncompressed_offset,
+            chunk.chunk_uncompressed_offset
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_blob_table_paged() -> Result<(), Box<dyn std::error::Error>> {
+        let blob_table = BlobTable::new_in_memory()?;
+        {
+            let conn = blob_table.conn.lock().map_err(|_| "Mutex poisoned")?;
+            conn.execute(
+                "CREATE TABLE IF NOT EXISTS blob (
+                    id INTEGER PRIMARY KEY,
+                    blob_id TEXT NOT NULL,
+                    blob_compressed_size INT,
+                    blob_uncompressed_size INT
+                )",
+                [],
+            )?;
+        }
+        for i in 0..200 {
+            let blob = Blob {
+                blob_id: format!("BLOB{}", i),
+                blob_compressed_size: i,
+                blob_uncompressed_size: i * 2,
+            };
+            blob_table.insert(&blob)?;
+        }
+        let blobs = blob_table.list_paged(100, 100)?;
+        assert_eq!(blobs.len(), 100);
+        assert_eq!(blobs[0].blob_id, "BLOB100");
+        assert_eq!(blobs[0].blob_compressed_size, 100);
+        assert_eq!(blobs[0].blob_uncompressed_size, 200);
+        Ok(())
+    }
+
+    #[test]
+    fn test_chunk_table_paged() -> Result<(), Box<dyn std::error::Error>> {
+        let chunk_table = ChunkTable::new_in_memory()?;
+        {
+            let conn = chunk_table.conn.lock().map_err(|_| "Mutex poisoned")?;
+            conn.execute(
+                "CREATE TABLE IF NOT EXISTS chunk (
+                    id INTEGER PRIMARY KEY,
+                    chunk_blob_id TEXT NOT NULL,
+                    chunk_digest TEXT NOT NULL,
+                    chunk_compressed_size INT,
+                    chunk_uncompressed_size INT,
+                    chunk_compressed_offset INT,
+                    chunk_uncompressed_offset INT
+                )",
+                [],
+            )?;
+        }
+        for i in 0..200 {
+            let i_u64 = i as u64;
+            let chunk = Chunk {
+                chunk_blob_id: format!("BLOB{}", i),
+                chunk_digest: format!("DIGEST{}", i),
+                chunk_compressed_size: i,
+                chunk_uncompressed_size: i * 2,
+                chunk_compressed_offset: i_u64 * 3,
+                chunk_uncompressed_offset: i_u64 * 4,
+            };
+            chunk_table.insert(&chunk)?;
+        }
+        let chunks = chunk_table.list_paged(100, 100)?;
+        assert_eq!(chunks.len(), 100);
+        assert_eq!(chunks[0].chunk_blob_id, "BLOB100");
+        assert_eq!(chunks[0].chunk_digest, "DIGEST100");
+        assert_eq!(chunks[0].chunk_compressed_size, 100);
+        assert_eq!(chunks[0].chunk_uncompressed_size, 200);
+        assert_eq!(chunks[0].chunk_compressed_offset, 300);
+        assert_eq!(chunks[0].chunk_uncompressed_offset, 400);
+        Ok(())
+    }
+}
diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs
index 1eed52a2a9f..18efc036455 100644
--- a/src/bin/nydus-image/main.rs
+++ b/src/bin/nydus-image/main.rs
@@ -13,7 +13,7 @@ extern crate log;
 extern crate serde_json;
 #[macro_use]
 extern crate lazy_static;
-
+use crate::deduplicate::SqliteDatabase;
 use std::convert::TryFrom;
 use std::fs::{self, metadata, DirEntry, File, OpenOptions};
 use std::os::unix::fs::FileTypeExt;
@@ -34,7 +34,7 @@ use nydus_builder::{
 use nydus_rafs::metadata::{RafsSuper, RafsSuperConfig, RafsVersion};
 use nydus_storage::backend::localfs::LocalFs;
 use nydus_storage::backend::BlobBackend;
-use nydus_storage::device::BlobFeatures;
+use nydus_storage::device::{BlobFeatures, BlobInfo};
 use nydus_storage::factory::BlobFactory;
 use nydus_storage::meta::{format_blob_features, BatchContextGenerator};
 use nydus_storage::{RAFS_DEFAULT_CHUNK_SIZE, RAFS_MAX_CHUNK_SIZE};
@@ -44,6 +44,7 @@ use nydus_utils::{
 };
 use serde::{Deserialize, Serialize};
 
+use crate::deduplicate::Deduplicate;
 use crate::unpack::{OCIUnpacker, Unpacker};
 use crate::validator::Validator;
 
@@ -52,6 +53,7 @@ use nydus_service::ServiceArgs;
 #[cfg(target_os = "linux")]
 use std::str::FromStr;
 
+mod deduplicate;
 mod inspect;
 mod stat;
 mod unpack;
@@ -356,6 +358,49 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
             )
     );
 
+    let app = app.subcommand(
+        App::new("chunkdict")
+            .about("Deduplicate RAFS filesystem metadata")
+            .subcommand(
+                App::new("save")
+                    .about("Save chunk info to a database")
+                    .arg(
+                        Arg::new("bootstrap")
+                            .short('B')
+                            .long("bootstrap")
+                            .help("File path of RAFS meta blob/bootstrap")
+                            .conflicts_with("BOOTSTRAP")
+                            .required(false),
+                    )
+                    .arg(
+                        Arg::new("database")
+                            .long("database")
+                            .help("Database connection URI for assisting chunk dict generation, e.g. sqlite:///path/to/database.db")
+                            .default_value("sqlite:///sqlite_memory_model")
+                            .required(false),
+                    )
+                    .arg(
+                        Arg::new("blob-dir")
+                            .long("blob-dir")
+                            .short('D')
+                            .conflicts_with("config")
+                            .help(
+                                "Directory for localfs storage backend, hosting data blobs and cache files",
+                            ),
+                    )
+                    .arg(arg_config.clone())
+                    .arg(
+                        Arg::new("verbose")
+                            .long("verbose")
+                            .short('v')
+                            .help("Output message in verbose mode")
+                            .action(ArgAction::SetTrue)
+                            .required(false),
+                    )
+                    .arg(arg_output_json.clone())
+            )
+    );
+
     let app = app.subcommand(
         App::new("merge")
             .about("Merge multiple bootstraps into a overlaid bootstrap")
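The `--database` value is parsed by `chunkdict_save` below: the scheme before `://` selects the backend and the remainder must be an absolute path. A small sketch of the accepted shapes (the helper is hypothetical; the split-and-check mirrors the handler that follows):

```rust
// Hypothetical helper mirroring the validation in `chunkdict_save`.
fn parse_db_url(db_url: &str) -> Option<(&str, &str)> {
    let parts: Vec<&str> = db_url.split("://").collect();
    // Exactly one "://" separator, and the path must be absolute.
    if parts.len() != 2 || !parts[1].starts_with('/') {
        return None;
    }
    Some((parts[0], parts[1]))
}

fn demo() {
    assert_eq!(
        parse_db_url("sqlite:///path/to/database.db"),
        Some(("sqlite", "/path/to/database.db"))
    );
    // Default value: the path "/sqlite_memory_model" selects in-memory SQLite.
    assert_eq!(
        parse_db_url("sqlite:///sqlite_memory_model"),
        Some(("sqlite", "/sqlite_memory_model"))
    );
    // Relative paths and missing schemes are rejected.
    assert_eq!(parse_db_url("sqlite://relative.db"), None);
    assert_eq!(parse_db_url("/path/only.db"), None);
}
```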
@@ -699,6 +744,14 @@ fn main() -> Result<()> {
 
     if let Some(matches) = cmd.subcommand_matches("create") {
         Command::create(matches, &build_info)
+    } else if let Some(matches) = cmd.subcommand_matches("chunkdict") {
+        match matches.subcommand_name() {
+            Some("save") => Command::chunkdict_save(matches.subcommand_matches("save").unwrap()),
+            _ => {
+                println!("{}", usage);
+                Ok(())
+            }
+        }
     } else if let Some(matches) = cmd.subcommand_matches("merge") {
         Command::merge(matches, &build_info)
     } else if let Some(matches) = cmd.subcommand_matches("check") {
@@ -1077,6 +1130,50 @@ impl Command {
         OutputSerializer::dump(matches, build_output, build_info)
     }
 
+    fn chunkdict_save(matches: &ArgMatches) -> Result<()> {
+        let bootstrap_path = Self::get_bootstrap(matches)?;
+        let config = Self::get_configuration(matches)?;
+        let db_url: &String = matches.get_one::<String>("database").unwrap();
+        debug!("db_url: {}", db_url);
+        // For backward compatibility with v2.1.
+        config
+            .internal
+            .set_blob_accessible(matches.get_one::<String>("bootstrap").is_none());
+
+        let db_strs: Vec<&str> = db_url.split("://").collect();
+        if db_strs.len() != 2 || !db_strs[1].starts_with('/') {
+            bail!("Invalid database URL")
+        }
+
+        let blobs: Vec<Arc<BlobInfo>> = match db_strs[0] {
+            "sqlite" => {
+                let mut deduplicate: Deduplicate<SqliteDatabase> =
+                    Deduplicate::<SqliteDatabase>::new(bootstrap_path, config, db_strs[1])?;
+                deduplicate.save_metadata()?
+            }
+            _ => {
+                bail!("Unsupported database type: {}, please use a valid database URI, such as 'sqlite:///path/to/database.db'.", db_strs[0])
+            }
+        };
+        info!("RAFS filesystem metadata is saved:");
+
+        let mut blob_ids = Vec::new();
+        for (idx, blob) in blobs.iter().enumerate() {
+            info!(
+                "\t {}: {}, compressed data size 0x{:x}, compressed file size 0x{:x}, uncompressed file size 0x{:x}, chunks: 0x{:x}, features: {}.",
+                idx,
+                blob.blob_id(),
+                blob.compressed_data_size(),
+                blob.compressed_size(),
+                blob.uncompressed_size(),
+                blob.chunk_count(),
+                format_blob_features(blob.features()),
+            );
+            blob_ids.push(blob.blob_id().to_string());
+        }
+        Ok(())
+    }
+
     fn merge(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> {
         let source_bootstrap_paths: Vec<PathBuf> = matches
             .get_many::<String>("SOURCE")