add smoke test for local-cas
Signed-off-by: xwb1136021767 <[email protected]>
xwb1136021767 committed Nov 16, 2023
1 parent d86f1bf commit 33b2206
Showing 11 changed files with 400 additions and 57 deletions.
35 changes: 33 additions & 2 deletions api/src/config.rs
@@ -1059,7 +1059,10 @@ pub struct DeduplicationConfigV2 {
impl DeduplicationConfigV2 {
/// Validate image deduplication configuration.
pub fn validate(&self) -> bool {
- true
+ match self.get_enable() {
+     true => !self.work_dir.is_empty(),
+     false => true,
+ }
}

pub fn get_enable(&self) -> bool {
@@ -2673,7 +2676,7 @@ mod tests {
}

#[test]
- fn test_bckend_config_try_from() {
+ fn test_backend_config_try_from() {
let config = BackendConfig {
backend_type: "localdisk".to_string(),
backend_config: serde_json::to_value(LocalDiskConfig::default()).unwrap(),
@@ -2713,6 +2716,34 @@

#[test]
fn test_dedup_config() {
let dedup_config = DeduplicationConfig {
enable: true,
work_dir: "/tmp/nydus-cas".to_string(),
};

let str_val = serde_json::to_string(&dedup_config).unwrap();
let dedup_config2 = serde_json::from_str(&str_val).unwrap();
assert_eq!(dedup_config, dedup_config2);
}

#[test]
fn test_dedup_config_valid() {
let mut dedup_config = DeduplicationConfigV2 {
enable: true,
work_dir: "".to_string(),
};
assert!(!dedup_config.validate());

dedup_config.enable = false;
assert!(dedup_config.validate());

dedup_config.enable = true;
dedup_config.work_dir = "/tmp/nydus-cas".to_string();
assert!(dedup_config.validate());
}

#[test]
fn test_dedup_config_try_from() {
let content = r#"{
"enable": true,
"work_dir": "/tmp/nydus-cas"
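The validation change at the top of this file is easy to misread in the flattened hunk: a DeduplicationConfigV2 now passes validation only when deduplication is disabled, or when it is enabled with a non-empty work_dir. A minimal sketch of the rule, not part of the commit, using only the two fields and two methods visible in these hunks:

// Illustration only. The match added to validate() is equivalent to:
//     !self.get_enable() || !self.work_dir.is_empty()
fn dedup_config_examples() {
    let enabled_with_dir = DeduplicationConfigV2 {
        enable: true,
        work_dir: "/tmp/nydus-cas".to_string(), // path reused from the new tests
    };
    assert!(enabled_with_dir.validate());

    let enabled_without_dir = DeduplicationConfigV2 {
        enable: true,
        work_dir: String::new(),
    };
    assert!(!enabled_without_dir.validate()); // enabled but no work_dir => invalid
}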
30 changes: 30 additions & 0 deletions builder/src/core/bootstrap_dedup.rs
@@ -38,6 +38,31 @@ pub struct BootstrapDedup {
writer: Box<dyn RafsIoWrite>,
backend: BackendConfigV2,
encrypt: bool,
metrics: DeduplicationMetrics,
}

#[derive(Debug)]
pub struct DeduplicationMetrics {
pub chunk_cnt: u32,
pub chunk_size_cnt: u32,
pub new_chunk_cnt: u32,
pub new_chunk_size_cnt: u32,
}

impl DeduplicationMetrics {
pub fn new(
chunk_cnt: u32,
chunk_size_cnt: u32,
new_chunk_cnt: u32,
new_chunk_size_cnt: u32,
) -> Self {
DeduplicationMetrics {
chunk_cnt,
chunk_size_cnt,
new_chunk_cnt,
new_chunk_size_cnt,
}
}
}

impl BootstrapDedup {
@@ -64,6 +89,8 @@ impl BootstrapDedup {
ArtifactStorage::SingleFile(PathBuf::from(&output_path)),
)?)) as Box<dyn RafsIoWrite>;

let metrics = DeduplicationMetrics::new(0, 0, 0, 0);

fs::copy(&bootstrap_path, &output_path)?;

Ok(BootstrapDedup {
@@ -76,6 +103,7 @@ impl BootstrapDedup {
writer,
backend,
encrypt,
metrics,
})
}

@@ -139,6 +167,7 @@ impl BootstrapDedup {
&mut self.insert_chunks,
&self.cas_mgr,
&mut chunk_cache,
&mut self.metrics,
)
})
},
@@ -167,6 +196,7 @@ impl BootstrapDedup {
self.cas_mgr.add_blobs(&self.insert_blobs, is_v6)?;
self.cas_mgr.add_chunks(&self.insert_chunks, is_v6)?;

info!("{:?}", self.metrics);
Ok(())
}
}
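The DeduplicationMetrics struct introduced above is filled in from builder/src/core/node.rs (next file) and logged once at the end of the dedup run via info!("{:?}", self.metrics). A hedged sketch of the accounting; the record_chunk helper below is hypothetical and only illustrates how the four counters relate:

// Illustration only; assumes the DeduplicationMetrics type added above
// (public u32 counters plus DeduplicationMetrics::new).
fn record_chunk(metrics: &mut DeduplicationMetrics, uncompressed_size: u32, is_new: bool) {
    // Every chunk visited during deduplication is counted ...
    metrics.chunk_cnt += 1;
    metrics.chunk_size_cnt += uncompressed_size;
    // ... but only chunks that hit neither the layer cache nor the local CAS
    // count as new chunks.
    if is_new {
        metrics.new_chunk_cnt += 1;
        metrics.new_chunk_size_cnt += uncompressed_size;
    }
}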
24 changes: 21 additions & 3 deletions builder/src/core/node.rs
@@ -39,6 +39,7 @@ use sha2::digest::Digest;

use crate::{BlobContext, BlobManager, BuildContext, ChunkDict, ConversionType, Overlay};

use super::bootstrap_dedup::DeduplicationMetrics;
use super::context::Artifact;

use super::chunk_dict::DigestWithBlobIndex;
@@ -781,6 +782,7 @@ impl Node {
Ok((chunk_ofs, unit))
}

// Perform deduplication for the node to achieve chunk reuse.
#[allow(clippy::too_many_arguments)]
pub fn dedup_chunk_for_node(
&mut self,
@@ -792,6 +794,7 @@
insert_chunks: &mut Vec<(String, String, String)>,
cas_mgr: &CasMgr,
chunk_cache: &mut BTreeMap<DigestWithBlobIndex, Arc<ChunkWrapper>>,
metrics: &mut DeduplicationMetrics,
) -> Result<()> {
let (mut chunk_ofs, chunk_size) = self.get_chunk_ofs(meta)?;

@@ -802,17 +805,23 @@
.get_blob_id_by_idx(chunk.inner.blob_index() as usize)
.unwrap();

// Record metrics.
metrics.chunk_cnt += 1;
metrics.chunk_size_cnt += chunk.inner.uncompressed_size();

writer
.seek(SeekFrom::Start(chunk_ofs))
.context("failed seek for chunk_ofs")
.unwrap();

match cache_chunks.get(chunk_id) {
- // dedup chunk between layers
+ // Dedup chunk between layers
Some(new_chunk) => {
- // if the chunk is belong to other image's blob
+ // In this case, the chunk comes from the native image or another image.
let mut new_chunk = new_chunk.deref().clone();
let blob_index = new_chunk.blob_index() as usize;

// If this chunk is from another blob, mark it as deduped.
if origin_blob_index != blob_index {
new_chunk.set_deduped(true);
}
@@ -821,10 +830,13 @@
DigestWithBlobIndex(*new_chunk.id(), new_chunk.blob_index() + 1),
Arc::new(new_chunk.clone()),
);

// Update chunk info in bootstrap.
self.dedup_bootstrap(build_ctx, &new_chunk, writer)?
}
None => match cas_mgr.get_chunk(chunk_id, &blob_id, true)? {
Some((new_blob_id, chunk_info)) => {
// In this case, the chunk is found in the local CAS.
let blob_idx = match blob_mgr.get_blob_idx_by_id(&new_blob_id) {
Some(blob_idx) => blob_idx,
None => {
@@ -848,7 +860,7 @@
RafsVersion::V6 => ChunkWrapper::V6(new_chunk),
};

- // if this chunk is from other blob, mark it as dedup
+ // If this chunk is from another blob, mark it as deduped.
if origin_blob_index != blob_idx as usize {
new_chunk.set_deduped(true);
}
@@ -862,6 +874,12 @@
cache_chunks.insert(*chunk_id, new_chunk);
}
None => {
// In this case, the chunk is new and cannot be deduplicated.

// Record metrics.
metrics.new_chunk_cnt += 1;
metrics.new_chunk_size_cnt += chunk.inner.uncompressed_size();

let new_chunk = chunk.inner.as_ref().clone();
cache_chunks.insert(*chunk_id, new_chunk.clone());
chunk_cache.insert(
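The hunks above thread the metrics through dedup_chunk_for_node, whose per-chunk decision has three outcomes: reuse a chunk already seen in an earlier layer, reuse a chunk found in the local CAS, or keep it as a genuinely new chunk. A self-contained sketch of that decision with simplified stand-in types (the real code operates on ChunkWrapper, CasMgr and the blob manager):

use std::collections::HashMap;

// Simplified stand-in for the real chunk structures.
#[derive(Clone, Debug)]
struct Chunk {
    id: String,
    blob_index: usize,
    deduped: bool,
}

// Mirrors the three branches of dedup_chunk_for_node().
fn dedup_decision(
    chunk: &Chunk,
    cache_chunks: &HashMap<String, Chunk>,      // chunks seen in earlier layers
    cas_lookup: impl Fn(&str) -> Option<Chunk>, // stand-in for a local CAS query
) -> Chunk {
    if let Some(cached) = cache_chunks.get(&chunk.id) {
        // Case 1: dedup between layers of the same image.
        let mut reused = cached.clone();
        if reused.blob_index != chunk.blob_index {
            reused.deduped = true; // the chunk lives in another blob
        }
        reused
    } else if let Some(mut from_cas) = cas_lookup(&chunk.id) {
        // Case 2: dedup against the local CAS (another image's blob).
        if from_cas.blob_index != chunk.blob_index {
            from_cas.deduped = true;
        }
        from_cas
    } else {
        // Case 3: new chunk; this is what new_chunk_cnt / new_chunk_size_cnt count.
        chunk.clone()
    }
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(
        "abc".to_string(),
        Chunk { id: "abc".to_string(), blob_index: 0, deduped: false },
    );
    let incoming = Chunk { id: "abc".to_string(), blob_index: 2, deduped: false };
    let reused = dedup_decision(&incoming, &cache, |_id: &str| None);
    assert!(reused.deduped); // reused from a different blob, so marked as deduped
}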
File renamed without changes.
1 change: 1 addition & 0 deletions rafs/src/fs.rs
@@ -331,6 +331,7 @@ impl Rafs {
entry
}

// Generate the dedup bitmap from chunk info in the local CAS for dynamic deduplication.
fn generate_dedup_bitmap_by_chunk_info(
cfg: &Arc<ConfigV2>,
blob_infos: &Vec<Arc<BlobInfo>>,
72 changes: 39 additions & 33 deletions smoke/tests/api_test.go
@@ -36,19 +36,21 @@ func (a *APIV1TestSuite) TestDaemonStatus(t *testing.T) {
rafs := a.rootFsToRafs(t, ctx, rootFs)

nydusd, err := tool.NewNydusd(tool.NydusdConfig{
- NydusdPath: ctx.Binary.Nydusd,
- BootstrapPath: rafs,
- ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
- MountPath: ctx.Env.MountDir,
- APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
- BackendType: "localfs",
- BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir),
- EnablePrefetch: ctx.Runtime.EnablePrefetch,
- BlobCacheDir: ctx.Env.CacheDir,
- CacheType: ctx.Runtime.CacheType,
- CacheCompressed: ctx.Runtime.CacheCompressed,
- RafsMode: ctx.Runtime.RafsMode,
- DigestValidate: false,
+ NydusdPath: ctx.Binary.Nydusd,
+ BootstrapPath: rafs,
+ ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
+ MountPath: ctx.Env.MountDir,
+ APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
+ BackendType: "localfs",
+ BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir),
+ EnablePrefetch: ctx.Runtime.EnablePrefetch,
+ BlobCacheDir: ctx.Env.CacheDir,
+ CacheType: ctx.Runtime.CacheType,
+ CacheCompressed: ctx.Runtime.CacheCompressed,
+ RafsMode: ctx.Runtime.RafsMode,
+ DigestValidate: false,
+ EnableDeduplication: false,
+ DeduplicationDir: "",
})
require.NoError(t, err)

@@ -85,22 +87,24 @@ func (a *APIV1TestSuite) TestMetrics(t *testing.T) {
rafs := a.rootFsToRafs(t, ctx, rootFs)

nydusd, err := tool.NewNydusd(tool.NydusdConfig{
- NydusdPath: ctx.Binary.Nydusd,
- BootstrapPath: rafs,
- ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
- MountPath: ctx.Env.MountDir,
- APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
- BackendType: "localfs",
- BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir),
- EnablePrefetch: ctx.Runtime.EnablePrefetch,
- BlobCacheDir: ctx.Env.CacheDir,
- CacheType: ctx.Runtime.CacheType,
- CacheCompressed: ctx.Runtime.CacheCompressed,
- RafsMode: ctx.Runtime.RafsMode,
- DigestValidate: false,
- IOStatsFiles: true,
- LatestReadFiles: true,
- AccessPattern: true,
+ NydusdPath: ctx.Binary.Nydusd,
+ BootstrapPath: rafs,
+ ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
+ MountPath: ctx.Env.MountDir,
+ APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
+ BackendType: "localfs",
+ BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir),
+ EnablePrefetch: ctx.Runtime.EnablePrefetch,
+ BlobCacheDir: ctx.Env.CacheDir,
+ CacheType: ctx.Runtime.CacheType,
+ CacheCompressed: ctx.Runtime.CacheCompressed,
+ RafsMode: ctx.Runtime.RafsMode,
+ DigestValidate: false,
+ IOStatsFiles: true,
+ LatestReadFiles: true,
+ AccessPattern: true,
+ EnableDeduplication: false,
+ DeduplicationDir: "",
})
require.NoError(t, err)

Expand Down Expand Up @@ -167,10 +171,12 @@ func (a *APIV1TestSuite) TestPrefetch(t *testing.T) {
rafs := a.rootFsToRafs(t, ctx, rootFs)

config := tool.NydusdConfig{
- NydusdPath: ctx.Binary.Nydusd,
- MountPath: ctx.Env.MountDir,
- APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
- ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
+ NydusdPath: ctx.Binary.Nydusd,
+ MountPath: ctx.Env.MountDir,
+ APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
+ ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
+ EnableDeduplication: false,
+ DeduplicationDir: "",
}
nydusd, err := tool.NewNydusd(config)
require.NoError(t, err)