Commit

Address PR feedback.
teo-tsirpanis committed Sep 6, 2024
1 parent 66389e2 commit 80b0af2
Showing 3 changed files with 28 additions and 29 deletions.
2 changes: 1 addition & 1 deletion tiledb/common/log_duration_instrument.h
@@ -27,7 +27,7 @@
*
* @section DESCRIPTION
*
- * This file defines class `LogDuration`.
+ * This file defines class `LogDurationInstrument`.
*/

#ifndef TILEDB_LOG_DURATION_INSTRUMENT_H
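For readers without the header open: `LogDurationInstrument` is a small RAII-style helper that logs how long a scope took. The sketch below only illustrates the idea; the `Logger` type, message wording, and constructor signature are assumptions for this sketch, not the actual TileDB implementation.

// Minimal sketch of an RAII duration-logging helper, assuming a simple
// Logger with an info() method; the real LogDurationInstrument differs in detail.
#include <chrono>
#include <cstdio>
#include <string>
#include <utility>

// Stand-in for TileDB's logger (assumption for this sketch).
struct Logger {
  void info(const std::string& msg) {
    std::printf("%s\n", msg.c_str());
  }
};

class DurationLoggerSketch {
 public:
  DurationLoggerSketch(Logger* logger, std::string description)
      : logger_(logger)
      , description_(std::move(description))
      , start_(std::chrono::steady_clock::now()) {
    logger_->info(description_ + " started");
  }

  ~DurationLoggerSketch() {
    auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - start_);
    logger_->info(
        description_ + " finished in " + std::to_string(elapsed.count()) +
        " ms");
  }

 private:
  Logger* logger_;
  std::string description_;
  std::chrono::steady_clock::time_point start_;
};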
45 changes: 23 additions & 22 deletions tiledb/sm/filesystem/vfs.cc
@@ -238,7 +238,7 @@ Status VFS::dir_size(const URI& dir_name, uint64_t* dir_size) const {
}

Status VFS::touch(const URI& uri) const {
- auto op = start_operation(uri, "touch");
+ auto instrument = make_log_duration_instrument(uri, "touch");
if (uri.is_file()) {
#ifdef _WIN32
return win_.touch(uri.to_path());
@@ -288,7 +288,7 @@ Status VFS::cancel_all_tasks() {
}

Status VFS::create_bucket(const URI& uri) const {
- auto op = start_operation(uri, "create_bucket");
+ auto instrument = make_log_duration_instrument(uri, "create_bucket");
if (uri.is_s3()) {
#ifdef HAVE_S3
s3().create_bucket(uri);
@@ -341,7 +341,7 @@ Status VFS::remove_bucket(const URI& uri) const {
}

Status VFS::empty_bucket(const URI& uri) const {
- auto op = start_operation(uri, "empty_bucket");
+ auto instrument = make_log_duration_instrument(uri, "empty_bucket");
if (uri.is_s3()) {
#ifdef HAVE_S3
s3().empty_bucket(uri);
@@ -369,7 +369,7 @@ Status VFS::empty_bucket(const URI& uri) const {

Status VFS::is_empty_bucket(
const URI& uri, [[maybe_unused]] bool* is_empty) const {
- auto op = start_operation(uri, "is_empty_bucket");
+ auto instrument = make_log_duration_instrument(uri, "is_empty_bucket");
if (uri.is_s3()) {
#ifdef HAVE_S3
*is_empty = s3().is_empty_bucket(uri);
@@ -396,7 +396,7 @@ Status VFS::is_empty_bucket(
}

Status VFS::remove_dir(const URI& uri) const {
- auto op = start_operation(uri, "remove_dir");
+ auto instrument = make_log_duration_instrument(uri, "remove_dir");
if (uri.is_file()) {
#ifdef _WIN32
return win_.remove_dir(uri.to_path());
@@ -459,7 +459,7 @@ void VFS::remove_dirs(
}

Status VFS::remove_file(const URI& uri) const {
- auto op = start_operation(uri, "remove_file");
+ auto instrument = make_log_duration_instrument(uri, "remove_file");
if (uri.is_file()) {
#ifdef _WIN32
return win_.remove_file(uri.to_path());
@@ -545,7 +545,7 @@ Status VFS::max_parallel_ops(const URI& uri, uint64_t* ops) const {
}

Status VFS::file_size(const URI& uri, uint64_t* size) const {
- auto op = start_operation(uri, "file_size");
+ auto instrument = make_log_duration_instrument(uri, "file_size");
stats_->add_counter("file_size_num", 1);
if (uri.is_file()) {
#ifdef _WIN32
@@ -590,7 +590,7 @@ Status VFS::file_size(const URI& uri, uint64_t* size) const {
}

Status VFS::is_dir(const URI& uri, bool* is_dir) const {
- auto op = start_operation(uri, "is_dir");
+ auto instrument = make_log_duration_instrument(uri, "is_dir");
if (uri.is_file()) {
#ifdef _WIN32
*is_dir = win_.is_dir(uri.to_path());
@@ -639,7 +639,7 @@ Status VFS::is_dir(const URI& uri, bool* is_dir) const {
}

Status VFS::is_file(const URI& uri, bool* is_file) const {
- auto op = start_operation(uri, "is_file");
+ auto instrument = make_log_duration_instrument(uri, "is_file");
stats_->add_counter("is_object_num", 1);
if (uri.is_file()) {
#ifdef _WIN32
@@ -690,7 +690,7 @@ Status VFS::is_file(const URI& uri, bool* is_file) const {
}

Status VFS::is_bucket(const URI& uri, bool* is_bucket) const {
- auto op = start_operation(uri, "is_bucket");
+ auto instrument = make_log_duration_instrument(uri, "is_bucket");
if (uri.is_s3()) {
#ifdef HAVE_S3
*is_bucket = s3().is_bucket(uri);
@@ -733,7 +733,7 @@ Status VFS::ls(const URI& parent, std::vector<URI>* uris) const {
}

std::vector<directory_entry> VFS::ls_with_sizes(const URI& parent) const {
- auto op = start_operation(parent, "ls");
+ auto instrument = make_log_duration_instrument(parent, "ls");
// Noop if `parent` is not a directory, do not error out.
// For S3, GCS and Azure, `ls` on a non-directory will just
// return an empty `uris` vector.
@@ -796,7 +796,7 @@ std::vector<directory_entry> VFS::ls_with_sizes(const URI& parent) const {
}

Status VFS::move_file(const URI& old_uri, const URI& new_uri) {
- auto op = start_operation(old_uri, new_uri);
+ auto instrument = make_log_duration_instrument(old_uri, new_uri);
// If new_uri exists, delete it or raise an error based on `force`
bool is_file;
RETURN_NOT_OK(this->is_file(new_uri, &is_file));
@@ -873,7 +873,7 @@ Status VFS::move_file(const URI& old_uri, const URI& new_uri) {
}

Status VFS::move_dir(const URI& old_uri, const URI& new_uri) {
- auto op = start_operation(old_uri, new_uri);
+ auto instrument = make_log_duration_instrument(old_uri, new_uri);
// File
if (old_uri.is_file()) {
if (new_uri.is_file()) {
@@ -946,7 +946,7 @@ Status VFS::move_dir(const URI& old_uri, const URI& new_uri) {
}

Status VFS::copy_file(const URI& old_uri, const URI& new_uri) {
- auto op = start_operation(old_uri, new_uri);
+ auto instrument = make_log_duration_instrument(old_uri, new_uri);
// If new_uri exists, delete it or raise an error based on `force`
bool is_file;
RETURN_NOT_OK(this->is_file(new_uri, &is_file));
@@ -1020,7 +1020,7 @@ Status VFS::copy_file(const URI& old_uri, const URI& new_uri) {
}

Status VFS::copy_dir(const URI& old_uri, const URI& new_uri) {
- auto op = start_operation(old_uri, new_uri);
+ auto instrument = make_log_duration_instrument(old_uri, new_uri);
// File
if (old_uri.is_file()) {
if (new_uri.is_file()) {
@@ -1156,7 +1156,7 @@ Status VFS::read_impl(
void* const buffer,
const uint64_t nbytes,
[[maybe_unused]] const bool use_read_ahead) {
- auto op = start_operation(uri, "read");
+ auto instrument = make_log_duration_instrument(uri, "read");
stats_->add_counter("read_ops_num", 1);
log_read(uri, offset, nbytes);

@@ -1317,7 +1317,7 @@ bool VFS::supports_uri_scheme(const URI& uri) const {
}

Status VFS::sync(const URI& uri) {
- auto op = start_operation(uri, "sync");
+ auto instrument = make_log_duration_instrument(uri, "sync");
if (uri.is_file()) {
#ifdef _WIN32
return win_.sync(uri.to_path());
@@ -1409,7 +1409,7 @@ Status VFS::open_file(const URI& uri, VFSMode mode) {
}

Status VFS::close_file(const URI& uri) {
- auto op = start_operation(uri, "close_file");
+ auto instrument = make_log_duration_instrument(uri, "close_file");
if (uri.is_file()) {
#ifdef _WIN32
return win_.sync(uri.to_path());
@@ -1469,7 +1469,7 @@ Status VFS::write(
const void* buffer,
uint64_t buffer_size,
[[maybe_unused]] bool remote_global_order_write) {
- auto op = start_operation(uri, "write");
+ auto instrument = make_log_duration_instrument(uri, "write");
stats_->add_counter("write_byte_num", buffer_size);
stats_->add_counter("write_ops_num", 1);

@@ -1615,7 +1615,8 @@ Status VFS::set_multipart_upload_state(
}

Status VFS::flush_multipart_file_buffer(const URI& uri) {
- auto op = start_operation(uri, "flush_multipart_file_buffer");
+ auto instrument =
+     make_log_duration_instrument(uri, "flush_multipart_file_buffer");
if (uri.is_s3()) {
#ifdef HAVE_S3
Buffer* buff = nullptr;
@@ -1677,7 +1678,7 @@ void VFS::log_read(const URI& uri, uint64_t offset, uint64_t nbytes) {
LOG_INFO("VFS Read: " + read_to_log);
}

- optional<LogDurationInstrument> VFS::start_operation(
+ optional<LogDurationInstrument> VFS::make_log_duration_instrument(
const URI& uri, const std::string_view operation_name) const {
if (!vfs_params_.log_operations_ || logger_ == nullptr) {
return nullopt;
@@ -1687,7 +1688,7 @@ optional<LogDurationInstrument> VFS::start_operation(
logger_, "{} on {}", operation_name, uri.to_string());
}

- optional<LogDurationInstrument> VFS::start_operation(
+ optional<LogDurationInstrument> VFS::make_log_duration_instrument(
const URI& source,
const URI& destination,
const std::string_view operation_name) const {
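The pattern repeated throughout the vfs.cc changes above: each operation creates a named optional instrument at its top, so the duration is logged (when logging is enabled) as the instrument goes out of scope at the end of the operation. A simplified, self-contained illustration follows; the URI, Logger-free LogDurationInstrument, and VFSSketch types are stand-ins for this sketch, not TileDB's real declarations.

#include <optional>
#include <string>
#include <string_view>

// Stand-ins for the real TileDB types (assumptions for this sketch).
struct URI {
  std::string value;
  std::string to_string() const { return value; }
};

struct LogDurationInstrument {
  std::string description;  // the real class also measures and logs timing
};

class VFSSketch {
 public:
  explicit VFSSketch(bool log_operations) : log_operations_(log_operations) {}

  void touch(const URI& uri) const {
    // Named local: destroyed at the end of the operation, which is when the
    // (engaged) instrument would log the elapsed time.
    auto instrument = make_log_duration_instrument(uri, "touch");
    // ... perform the actual filesystem work ...
  }

 private:
  std::optional<LogDurationInstrument> make_log_duration_instrument(
      const URI& uri, std::string_view operation_name) const {
    if (!log_operations_)
      return std::nullopt;  // logging disabled: no instrument, nothing logged
    return LogDurationInstrument{
        std::string(operation_name) + " on " + uri.to_string()};
  }

  bool log_operations_;
};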
10 changes: 4 additions & 6 deletions tiledb/sm/filesystem/vfs.h
@@ -1051,25 +1051,23 @@ class VFS : private VFSBase, protected S3_within_VFS {
void log_read(const URI& uri, uint64_t offset, uint64_t nbytes);

/**
- * Returns an object whose scope defines the start and end of a VFS operation,
- * if enabled in config.
+ * Creates a LogDurationInstrument, if enabled in config.
*
* @param uri The URI of the object being operated on.
* @param operation_name The name of the operation. Defaults to the name of
* the calling function.
*/
- optional<LogDurationInstrument> start_operation(
+ optional<LogDurationInstrument> make_log_duration_instrument(
const URI& uri, const std::string_view operation_name) const;

/**
- * Returns an object whose scope defines the start and end of a VFS operation,
- * if enabled in config.
+ * Creates a LogDurationInstrument, if enabled in config.
*
* @param uri The URI of the object being operated on.
* @param operation_name The name of the operation. Defaults to the name of
* the calling function.
*/
- optional<LogDurationInstrument> start_operation(
+ optional<LogDurationInstrument> make_log_duration_instrument(
const URI& source,
const URI& destination,
const std::string_view operation_name) const;
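A hedged illustration of how the two overloads declared above could be called. The stand-in types and the use of std::source_location to make the operation name default to the calling function's name are assumptions for this sketch, not the real TileDB declarations.

#include <iostream>
#include <optional>
#include <source_location>
#include <string>
#include <string_view>

struct URI {
  std::string value;  // stand-in for the real tiledb::sm::URI
};

struct LogDurationInstrument {};  // stand-in; see the sketch further above

// Overload for operations on a single URI.
std::optional<LogDurationInstrument> make_log_duration_instrument(
    const URI& uri,
    std::string_view operation_name =
        std::source_location::current().function_name()) {
  std::cout << operation_name << " on " << uri.value << '\n';
  return LogDurationInstrument{};
}

// Overload for operations with a source and a destination (move/copy).
std::optional<LogDurationInstrument> make_log_duration_instrument(
    const URI& source,
    const URI& destination,
    std::string_view operation_name =
        std::source_location::current().function_name()) {
  std::cout << operation_name << ": " << source.value << " -> "
            << destination.value << '\n';
  return LogDurationInstrument{};
}

void touch(const URI& uri) {
  auto instrument = make_log_duration_instrument(uri, "touch");
}

void move_file(const URI& old_uri, const URI& new_uri) {
  // Operation name defaults to the calling function's name.
  auto instrument = make_log_duration_instrument(old_uri, new_uri);
}

int main() {
  touch(URI{"file:///tmp/a"});
  move_file(URI{"file:///tmp/a"}, URI{"file:///tmp/b"});
}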
