refactor(cli): allow passing a files iterator to the chunk-mgr and files-upload tools

Also allow using different paths for the local wallet dir and the chunk-mgr artifacts/cache.
bochaco authored and joshuef committed Feb 19, 2024
1 parent dfd489a commit 5dd7bb3
Showing 3 changed files with 99 additions and 42 deletions.
61 changes: 48 additions & 13 deletions sn_cli/src/subcommands/files.rs
@@ -35,7 +35,7 @@ use std::{
},
time::{Duration, Instant},
};
use walkdir::WalkDir;
use walkdir::{DirEntry, WalkDir};
use xor_name::XorName;

/// The default folder to download files to.
@@ -44,6 +44,15 @@ const DOWNLOAD_FOLDER: &str = "safe_files";
/// Subdir for storing uploaded file into
pub(crate) const UPLOADED_FILES: &str = "uploaded_files";

/// Options to configure different aspects of the logic to upload files
#[derive(Clone)]
pub struct FilesUploadOptions {
pub make_data_public: bool,
pub verify_store: bool,
pub batch_size: usize,
pub retry_strategy: RetryStrategy,
}

#[derive(Parser, Debug)]
pub enum FilesCmds {
Upload {
@@ -58,7 +67,7 @@ pub enum FilesCmds {
batch_size: usize,
/// Should the file be made accessible to all. (This is irreversible)
#[clap(long, name = "make_public", default_value = "false", short = 'p')]
make_public: bool,
make_data_public: bool,
/// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet.
///
/// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced',
@@ -163,16 +172,18 @@ pub(crate) async fn files_cmds(
path,
batch_size,
retry_strategy,
make_public,
make_data_public,
} => {
upload_files(
path,
make_public,
client,
root_dir.to_path_buf(),
verify_store,
batch_size,
retry_strategy,
FilesUploadOptions {
make_data_public,
verify_store,
batch_size,
retry_strategy,
},
)
.await?
}
@@ -255,27 +266,51 @@ pub(crate) async fn files_cmds(
/// verify if the data was stored successfully.
pub(crate) async fn upload_files(
files_path: PathBuf,
make_data_public: bool,
client: &Client,
root_dir: PathBuf,
verify_store: bool,
batch_size: usize,
retry_strategy: RetryStrategy,
options: FilesUploadOptions,
) -> Result<()> {
upload_files_with_iter(
WalkDir::new(&files_path).into_iter().flatten(),
files_path,
client,
root_dir.clone(),
root_dir,
options,
)
.await
}

/// Given an iterator over files, upload them. Optionally
/// verify if the data was stored successfully.
pub(crate) async fn upload_files_with_iter(
entries_iter: impl Iterator<Item = DirEntry>,
files_path: PathBuf,
client: &Client,
wallet_dir: PathBuf,
root_dir: PathBuf,
options: FilesUploadOptions,
) -> Result<()> {
let FilesUploadOptions {
make_data_public,
verify_store,
batch_size,
retry_strategy,
} = options;
debug!("Uploading file(s) from {files_path:?}, batch size {batch_size:?} will verify?: {verify_store}");
if make_data_public {
info!("{files_path:?} will be made public and linkable");
println!("{files_path:?} will be made public and linkable");
}

let files_api: FilesApi = FilesApi::new(client.clone(), root_dir.to_path_buf());
let files_api: FilesApi = FilesApi::new(client.clone(), wallet_dir);
if files_api.wallet()?.balance().is_zero() {
bail!("The wallet is empty. Cannot upload any files! Please transfer some funds into the wallet");
}

let mut chunk_manager = ChunkManager::new(&root_dir);
println!("Starting to chunk {files_path:?} now.");
chunk_manager.chunk_path(&files_path, true, make_data_public)?;
chunk_manager.chunk_with_iter(entries_iter, true, make_data_public)?;

// Return early if we already uploaded them
let mut chunks_to_upload = if chunk_manager.is_chunks_empty() {
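
With the signatures above, a caller can pre-filter the walked entries and keep the local wallet directory separate from the chunk-manager artifacts/cache directory. A minimal, hypothetical caller sketch based only on this diff (`Result` is the crate's eyre alias; the paths, the client handle, the retry_strategy value and the hidden-file filter are illustrative assumptions, not code from this commit):

use std::path::PathBuf;
use walkdir::{DirEntry, WalkDir};

// Hypothetical helper inside sn_cli; assumes a Client and a RetryStrategy
// are already available from the surrounding CLI code.
async fn upload_visible_files(client: &Client, retry_strategy: RetryStrategy) -> Result<()> {
    let files_path = PathBuf::from("./my-data"); // assumed input directory
    let wallet_dir = PathBuf::from("./client/wallet"); // local wallet dir
    let artifacts_dir = PathBuf::from("./client/uploads"); // chunk-mgr artifacts/cache

    // Skip hidden entries; everything else is handed to the uploader unchanged.
    let entries = WalkDir::new(&files_path)
        .into_iter()
        .flatten()
        .filter(|entry: &DirEntry| !entry.file_name().to_string_lossy().starts_with('.'));

    upload_files_with_iter(
        entries,
        files_path,
        client,
        wallet_dir,
        artifacts_dir,
        FilesUploadOptions {
            make_data_public: false,
            verify_store: true,
            batch_size: BATCH_SIZE,
            retry_strategy,
        },
    )
    .await
}

Inside upload_files_with_iter the wallet is opened from wallet_dir while the ChunkManager writes its artifacts under root_dir, which is what lets the two locations differ.
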
57 changes: 38 additions & 19 deletions sn_cli/src/subcommands/files/chunk_manager.rs
@@ -23,7 +23,7 @@ use std::{
path::{Path, PathBuf},
time::Instant,
};
use walkdir::WalkDir;
use walkdir::{DirEntry, WalkDir};
use xor_name::XorName;

const CHUNK_ARTIFACTS_DIR: &str = "chunk_artifacts";
@@ -93,6 +93,22 @@ impl ChunkManager {
files_path: &Path,
read_cache: bool,
include_data_maps: bool,
) -> Result<()> {
self.chunk_with_iter(
WalkDir::new(files_path).into_iter().flatten(),
read_cache,
include_data_maps,
)
}

/// Chunk all the files in the provided iterator
/// These are stored to the CHUNK_ARTIFACTS_DIR
/// if read_cache is true, will take cache from previous runs into account
pub(crate) fn chunk_with_iter(
&mut self,
entries_iter: impl Iterator<Item = DirEntry>,
read_cache: bool,
include_data_maps: bool,
) -> Result<()> {
let now = Instant::now();
// clean up
@@ -103,26 +119,29 @@ impl ChunkManager {
self.resumed_files_count = 0;

// collect the files to chunk
WalkDir::new(files_path)
.into_iter()
.flatten()
.for_each(|entry| {
if entry.file_type().is_file() {
let path_xor = PathXorName::new(entry.path());
info!(
"Added file {:?} with path_xor: {path_xor:?} to be chunked/resumed",
entry.path()
);
self.files_to_chunk.push((
entry.file_name().to_owned(),
path_xor,
entry.into_path(),
));
}
});
let mut files_path_is_dir = false;
entries_iter.for_each(|entry| {
let is_file = entry.file_type().is_file();
if entry.depth() == 0 {
files_path_is_dir = !is_file;
}

if is_file {
let path_xor = PathXorName::new(entry.path());
info!(
"Added file {:?} with path_xor: {path_xor:?} to be chunked/resumed",
entry.path()
);
self.files_to_chunk.push((
entry.file_name().to_owned(),
path_xor,
entry.into_path(),
));
}
});
let total_files = self.files_to_chunk.len();
if total_files == 0 {
if files_path.is_dir() {
if files_path_is_dir {
bail!(
"The directory specified for upload is empty. Please verify the provided path."
);
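
The same pattern is available one level down: chunk_path is now a thin wrapper that feeds a WalkDir iterator into chunk_with_iter, so a caller can supply its own filtered iterator. A small hypothetical sketch (the paths and the 1 KiB size filter are assumptions for illustration, not part of this commit):

use std::path::Path;
use walkdir::WalkDir;

// Hypothetical: chunk only files of at least 1 KiB. Directory entries are kept
// in the iterator so the depth-0 entry still tells chunk_with_iter whether the
// input path is a directory.
fn chunk_large_files(artifacts_dir: &Path, files_path: &Path) -> Result<()> {
    let mut chunk_manager = ChunkManager::new(artifacts_dir);
    let entries = WalkDir::new(files_path)
        .into_iter()
        .flatten()
        .filter(|entry| {
            entry
                .metadata()
                .map(|meta| meta.is_dir() || meta.len() >= 1024)
                .unwrap_or(false)
        });
    chunk_manager.chunk_with_iter(entries, true, false)?;
    Ok(())
}
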
23 changes: 13 additions & 10 deletions sn_cli/src/subcommands/folders.rs
@@ -6,7 +6,9 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.

use super::files::{download_file, upload_files, ChunkManager, UploadedFile, UPLOADED_FILES};
use super::files::{
download_file, upload_files, ChunkManager, FilesUploadOptions, UploadedFile, UPLOADED_FILES,
};

use sn_client::{Client, FilesApi, FolderEntry, FoldersApi, WalletClient, BATCH_SIZE};

@@ -41,7 +43,7 @@ pub enum FoldersCmds {
batch_size: usize,
/// Should the file be made accessible to all. (This is irreversible)
#[clap(long, name = "make_public", default_value = "false", short = 'p')]
make_public: bool,
make_data_public: bool,
/// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet.
///
/// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced',
@@ -78,22 +80,24 @@ pub(crate) async fn folders_cmds(
FoldersCmds::Upload {
path,
batch_size,
make_public,
make_data_public,
retry_strategy,
} => {
upload_files(
path.clone(),
make_public,
client,
root_dir.to_path_buf(),
verify_store,
batch_size,
retry_strategy,
FilesUploadOptions {
make_data_public,
verify_store,
batch_size,
retry_strategy,
},
)
.await?;

let mut chunk_manager = ChunkManager::new(root_dir);
chunk_manager.chunk_path(&path, true, make_public)?;
chunk_manager.chunk_path(&path, true, make_data_public)?;

let mut folders = build_folders_hierarchy(&path, client, root_dir)?;

Expand All @@ -116,7 +120,6 @@ pub(crate) async fn folders_cmds(
.ok_or(eyre!("Failed to obtain main Folder network address"))?;

pay_and_upload_folders(folders, verify_store, client, root_dir).await?;

println!(
"\nFolder hierarchy from {path:?} uploaded successfully at {}",
root_dir_address.to_hex()
@@ -190,7 +193,7 @@ pub(crate) async fn folders_cmds(
.await;
}
}
};
}
Ok(())
}

