Ctu from ckmgr #1429

Closed · wants to merge 1 commit
sn_cli/src/subcommands/acc_packet.rs (17 changes: 11 additions & 6 deletions)
@@ -661,14 +661,19 @@ impl AccountPacket {
         options: FilesUploadOptions,
     ) -> Result<Folders> {
         let files_api = FilesApi::build(self.client.clone(), self.wallet_dir.clone())?;
-        let chunk_manager = ChunkManager::new(&self.tracking_info_dir.clone());
+        let mut chunk_manager = ChunkManager::new(&self.tracking_info_dir.clone());
 
+        let total_files = chunk_manager.chunk_with_iter(
+            self.iter_only_files(),
+            true,
+            options.make_data_public,
+        )?;
+        if total_files == 0 {
+            Ok(0)
+        }
+
         IterativeUploader::new(chunk_manager, files_api)
-            .iterate_upload(
-                self.iter_only_files(),
-                self.files_dir.clone(),
-                options.clone(),
-            )
+            .iterate_upload(total_files, self.files_dir.clone(), options.clone())
             .await?;
 
         // Let's make the storage payment for Folders
sn_cli/src/subcommands/files.rs (13 changes: 11 additions & 2 deletions)
@@ -101,7 +101,7 @@ pub(crate) async fn files_cmds(
     verify_store: bool,
 ) -> Result<()> {
     let files_api = FilesApi::build(client.clone(), root_dir.to_path_buf())?;
-    let chunk_manager = ChunkManager::new(root_dir);
+    let mut chunk_manager = ChunkManager::new(root_dir);
 
     match cmds {
         FilesCmds::Estimate {
@@ -118,9 +118,18 @@
             retry_strategy,
             make_data_public,
         } => {
+            let total_files = chunk_manager.chunk_with_iter(
+                WalkDir::new(&file_path).into_iter().flatten(),
+                true,
+                make_data_public,
+            )?;
+            if total_files == 0 {
+                Ok(0)
+            }
+
             let total_files = IterativeUploader::new(chunk_manager, files_api)
                 .iterate_upload(
-                    WalkDir::new(&file_path).into_iter().flatten(),
+                    total_files,
                     file_path.clone(),
                     FilesUploadOptions {
                         make_data_public,
sn_cli/src/subcommands/files/iterative_uploader.rs (17 changes: 2 additions & 15 deletions)
@@ -33,7 +33,7 @@ impl IterativeUploader {
     /// Given an iterator over files, upload them. Optionally verify if the data was stored successfully.
     pub(crate) async fn iterate_upload(
         mut self,
-        entries_iter: impl Iterator<Item = DirEntry>,
+        total_files: usize,
         files_path: PathBuf,
         options: FilesUploadOptions,
     ) -> Result<usize> {
@@ -44,18 +44,8 @@
             retry_strategy,
         } = options;
 
-        let mut rng = thread_rng();
-
         msg_init(&files_path, &batch_size, &verify_store, make_data_public);
 
-        let total_files =
-            self.chunk_manager
-                .chunk_with_iter(entries_iter, true, make_data_public)?;
-        if total_files == 0 {
-            return Ok(0);
-        }
-
-        // Return early if we already uploaded them
         let mut chunks_to_upload = if self.chunk_manager.is_chunks_empty() {
             // make sure we don't have any failed chunks in those
 
@@ -73,15 +63,13 @@
                 .verify_uploaded_chunks(&chunks, batch_size)
                 .await?;
 
-            // mark the non-failed ones as completed
            self.chunk_manager.mark_completed(
                 chunks
                     .into_iter()
                     .filter(|c| !failed_chunks.contains(c))
                     .map(|(xor, _)| xor),
             )?;
 
-            // if none are failed, we can return early
             if failed_chunks.is_empty() {
                 msg_files_already_uploaded_verified();
                 if !make_data_public {
@@ -100,8 +88,7 @@
             self.chunk_manager.get_chunks()
         };
 
-        // Random shuffle the chunks_to_upload, so that uploading of a large file can be speed up by
-        // having multiple client instances uploading the same target.
+        let mut rng = thread_rng();
         chunks_to_upload.shuffle(&mut rng);
 
         let chunk_amount_to_upload = chunks_to_upload.len();
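Taken together, the three diffs move chunking out of IterativeUploader::iterate_upload and into its call sites: each caller now builds a mutable ChunkManager, runs chunk_with_iter itself, bails out when nothing was chunked, and passes only the resulting file count to iterate_upload. Below is a minimal sketch of that caller-side pattern, using the types the diff already references (ChunkManager, FilesApi, IterativeUploader, FilesUploadOptions, walkdir's WalkDir); the wrapper function, its parameters, and the explicit early return are illustrative assumptions, not code from this PR.

// Caller-side sketch of the pattern introduced by this PR. Import paths are
// omitted because the diff does not show them; ChunkManager, FilesApi,
// IterativeUploader, FilesUploadOptions and Client are the sn_cli/sn_client
// types used in the diff, WalkDir comes from the walkdir crate, and this
// wrapper function plus its `client`/`root_dir` parameters are assumptions.
async fn upload_path(
    client: Client,
    root_dir: PathBuf,
    file_path: PathBuf,
    options: FilesUploadOptions,
) -> Result<usize> {
    let files_api = FilesApi::build(client.clone(), root_dir.clone())?;

    // The caller now owns chunking, so the manager must be mutable.
    let mut chunk_manager = ChunkManager::new(&root_dir);

    // Chunk up front to learn how many files are involved.
    let total_files = chunk_manager.chunk_with_iter(
        WalkDir::new(&file_path).into_iter().flatten(),
        true,
        options.make_data_public,
    )?;
    if total_files == 0 {
        // Nothing to upload: exit early instead of spinning up the uploader.
        return Ok(0);
    }

    // iterate_upload now takes the pre-computed count rather than the entries iterator.
    IterativeUploader::new(chunk_manager, files_api)
        .iterate_upload(total_files, file_path, options)
        .await
}

Note that the diff itself writes Ok(0) without return inside callers whose functions return Result<Folders> and Result<()>, so those early-exit branches would still need adjusting before they compile; the sketch uses an explicit return for clarity.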