From f685497d563186c4f71ef0e5c11a0552392d6b96 Mon Sep 17 00:00:00 2001 From: Dalton Bohning Date: Mon, 9 Oct 2023 15:41:47 -0700 Subject: [PATCH 1/8] DAOS-14472 cq: fix codespell errors (#13126) Fix new codespell errors that seem to have appeared after an update. Signed-off-by: Dalton Bohning --- .github/workflows/ci2.yml | 2 +- SConstruct | 4 ++-- docs/admin/troubleshooting.md | 2 +- site_scons/prereq_tools/base.py | 2 +- src/bio/bio_context.c | 4 ++-- src/cart/crt_hg_proc.c | 2 +- src/cart/crt_register.c | 2 +- src/cart/crt_self_test.h | 2 +- src/cart/crt_self_test_client.c | 4 ++-- src/client/api/README.md | 2 +- src/client/dfuse/dfuse.h | 2 +- src/client/dfuse/dfuse_core.c | 2 +- src/client/dfuse/il/int_posix.c | 4 ++-- src/client/dfuse/pil4dfs/int_dfs.c | 4 ++-- .../main/native/io_daos_dfs_DaosFsClient.c | 8 +++---- src/common/ad_mem.h | 16 +++++++------- src/common/dav/util.h | 2 +- src/control/cmd/daos/README.md | 4 ++-- src/engine/sched.c | 2 +- src/gurt/slab.c | 2 +- src/gurt/telemetry.c | 4 ++-- src/include/cart/iv.h | 6 ++--- src/include/daos/checksum.h | 8 +++---- src/include/daos/stack_mmap.h | 2 +- src/include/daos/tests_lib.h | 2 +- src/include/daos_array.h | 2 +- src/include/daos_srv/ad_mem.h | 2 +- src/include/daos_srv/bio.h | 2 +- src/object/obj_tx.c | 2 +- src/placement/ring_map.c | 2 +- src/rebuild/srv.c | 2 +- src/rsvc/srv.c | 2 +- src/tests/ftest/cart/util/cart_logparse.py | 2 +- src/tests/ftest/dfuse/daos_build.yaml | 2 +- src/tests/ftest/harness/skip_list.py | 22 +++++++++---------- src/tests/ftest/util/apricot/apricot/test.py | 12 +++++----- src/tests/suite/daos_obj.c | 8 +++---- src/utils/ctl/cart_ctl.c | 2 +- src/vos/vos_aggregate.c | 2 +- src/vos/vos_csum_recalc.c | 2 +- utils/cq/words.dict | 1 - utils/scripts/install-leap15.sh | 2 +- 42 files changed, 81 insertions(+), 82 deletions(-) diff --git a/.github/workflows/ci2.yml b/.github/workflows/ci2.yml index 87162f1da55..acce0423bd4 100644 --- a/.github/workflows/ci2.yml +++ b/.github/workflows/ci2.yml @@ -9,7 +9,7 @@ concurrency: jobs: - # Re-use the cache from the landing-builds workflow if available, if not then build the images + # reuse the cache from the landing-builds workflow if available, if not then build the images # from scratch, but do not save them. Build-and-test: name: Run DAOS/NLT tests diff --git a/SConstruct b/SConstruct index 8e59cc56c4f..c018e4b223a 100644 --- a/SConstruct +++ b/SConstruct @@ -317,8 +317,8 @@ def check_for_release_target(): # pylint: disable=too-many-locals try: remote.push(['refs/heads/{}'.format(branch)], callbacks=MyCallbacks()) - except pygit2.GitError as excpt: - print("Error pushing branch: {}".format(excpt)) + except pygit2.GitError as err: + print("Error pushing branch: {}".format(err)) Exit(1) print("Creating the PR...") diff --git a/docs/admin/troubleshooting.md b/docs/admin/troubleshooting.md index f7e0414be4e..df5532baa14 100644 --- a/docs/admin/troubleshooting.md +++ b/docs/admin/troubleshooting.md @@ -316,7 +316,7 @@ sudo ipcrm -M 0x10242049 1. Verify that the `access_points` host is accessible and the port is not used. 1. Check the `provider` entry. See the "Network Scan and Configuration" section of the admin guide for determining the right provider to use. 1. Check `fabric_iface` in `engines`. They should be available and enabled. -1. Check that `socket_dir` is writeable by the daos_server. +1. Check that `socket_dir` is writable by the daos_server. ### Errors creating a Pool 1. 
Check which engine rank you want to create a pool in with `dmg system query --verbose` and verify their State is Joined. diff --git a/site_scons/prereq_tools/base.py b/site_scons/prereq_tools/base.py index f016e4295b0..8f7081d4b64 100644 --- a/site_scons/prereq_tools/base.py +++ b/site_scons/prereq_tools/base.py @@ -679,7 +679,7 @@ def save_build_info(self): self.__build_info.save('.build_vars.json') def __parse_build_deps(self): - """Parse the build dependances command line flag""" + """Parse the build dependencies command line flag""" build_deps = GetOption('build_deps') if build_deps in ('yes', 'only'): self.download_deps = True diff --git a/src/bio/bio_context.c b/src/bio/bio_context.c index 5953b9d6c2e..332ad5c16e3 100644 --- a/src/bio/bio_context.c +++ b/src/bio/bio_context.c @@ -408,7 +408,7 @@ int bio_mc_destroy(struct bio_xs_context *xs_ctxt, uuid_t pool_id, enum bio_mc_f if (rc == -DER_NONEXIST) { return 0; } else if (rc) { - D_ERROR("Qeury data blob for pool "DF_UUID" tgt:%u failed. "DF_RC"\n", + D_ERROR("Query data blob for pool " DF_UUID " tgt:%u failed. " DF_RC "\n", DP_UUID(pool_id), xs_ctxt->bxc_tgt_id, DP_RC(rc)); return rc; } @@ -906,7 +906,7 @@ int bio_mc_open(struct bio_xs_context *xs_ctxt, uuid_t pool_id, D_ASSERT(data_blobid == SPDK_BLOBID_INVALID); return 0; } else if (rc) { - D_ERROR("Qeury data blob for pool "DF_UUID" tgt:%u failed. "DF_RC"\n", + D_ERROR("Query data blob for pool " DF_UUID " tgt:%u failed. " DF_RC "\n", DP_UUID(pool_id), xs_ctxt->bxc_tgt_id, DP_RC(rc)); return rc; } diff --git a/src/cart/crt_hg_proc.c b/src/cart/crt_hg_proc.c index e317f1ecbfb..382cc4b1507 100644 --- a/src/cart/crt_hg_proc.c +++ b/src/cart/crt_hg_proc.c @@ -56,7 +56,7 @@ crt_proc_get_op(crt_proc_t proc, crt_proc_op_t *proc_op) int rc = 0; if (unlikely(proc == NULL)) { - D_ERROR("Proc is not initilalized.\n"); + D_ERROR("Proc is not initialized.\n"); D_GOTO(out, rc = -DER_INVAL); } diff --git a/src/cart/crt_register.c b/src/cart/crt_register.c index 21dc3a722ce..a18350986a1 100644 --- a/src/cart/crt_register.c +++ b/src/cart/crt_register.c @@ -515,7 +515,7 @@ crt_proto_register_internal(struct crt_proto_format *cpf) /* validate base_opc is in range */ /* TODO: This doesn't make any sense, a XOR used as a truth value is - * just checking equlity so only one mask would be allowed here + * just checking equality so only one mask would be allowed here */ if (cpf->cpf_base ^ CRT_PROTO_BASEOPC_MASK) { D_ERROR("Invalid base_opc: %#x.\n", cpf->cpf_base); diff --git a/src/cart/crt_self_test.h b/src/cart/crt_self_test.h index 6ce2e8ef830..4dbab5b86d5 100644 --- a/src/cart/crt_self_test.h +++ b/src/cart/crt_self_test.h @@ -88,7 +88,7 @@ * - In the future, the amount of information passed to self-test can grow * without changing the size of the test RPCs (which instead only require a * session id to convey all that same information) - * - Provide long-lived bulk handles to re-use across multiple test messages, + * - Provide long-lived bulk handles to reuse across multiple test messages, * reducing their overhead * * Opening a session before starting a test is required for all messages except diff --git a/src/cart/crt_self_test_client.c b/src/cart/crt_self_test_client.c index 522684d4aad..f69308407de 100644 --- a/src/cart/crt_self_test_client.c +++ b/src/cart/crt_self_test_client.c @@ -360,7 +360,7 @@ static void send_next_rpc(struct st_cb_args *cb_args, int skip_inc_complete) local_endpt.ep_rank = endpt_ptr->rank; local_endpt.ep_tag = endpt_ptr->tag; - /* Re-use payload data memory, set 
arguments */ + /* reuse payload data memory, set arguments */ cb_args->rep_idx = local_rep; /* @@ -519,7 +519,7 @@ static void send_next_rpc(struct st_cb_args *cb_args, int skip_inc_complete) * max_inflight_rpcs, passing into the callback data pointer for each one its * own private pointer to the slot it can use in the arguments array. Each * time the callback is called (and needs to generate another RPC), it can - * re-use the previous slot allocated to it as callback data for the RPC it is + * reuse the previous slot allocated to it as callback data for the RPC it is * just now creating. */ static void diff --git a/src/client/api/README.md b/src/client/api/README.md index ca65d81a400..336ced9a7fb 100644 --- a/src/client/api/README.md +++ b/src/client/api/README.md @@ -47,7 +47,7 @@ limit the number of event queue being created in their applications or IO middle top of DAOS. Alternatively, an event can be created without an event queue, and be tracked individually. In this case, and for blocking operations, an internal global task scheduler and network context is used instead for the independent ones that would be created for an event -queue. Once an event is completed, it can re-used for another DAOS API call to minimize the need for +queue. Once an event is completed, it can be reused for another DAOS API call to minimize the need for event creation and allocations inside the DAOS library. ## Task Engine Integration diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index 369ab21fa09..c7083fc581b 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -172,7 +172,7 @@ struct dfuse_obj_hdl { /** True if caching is enabled for this file. */ bool doh_caching; - /* True if the file handle is writeable - used for cache invalidation */ + /* True if the file handle is writable - used for cache invalidation */ bool doh_writeable; /* Track possible kernel cache of readdir on this directory */ diff --git a/src/client/dfuse/dfuse_core.c b/src/client/dfuse/dfuse_core.c index cc346df619d..c27a05794a5 100644 --- a/src/client/dfuse/dfuse_core.c +++ b/src/client/dfuse/dfuse_core.c @@ -755,7 +755,7 @@ dfuse_cont_open_by_label(struct dfuse_info *dfuse_info, struct dfuse_pool *dfp, /* * Return a container connection by uuid. * - * Re-use an existing connection if possible, otherwise open new connection + * Reuse an existing connection if possible, otherwise open new connection * and setup dfs. * * In the case of a container which has been created by mkdir _dfs will be a diff --git a/src/client/dfuse/il/int_posix.c b/src/client/dfuse/il/int_posix.c index 93a91cd6215..2afe06ee47a 100644 --- a/src/client/dfuse/il/int_posix.c +++ b/src/client/dfuse/il/int_posix.c @@ -163,9 +163,9 @@ entry_array_close(void *arg) { entry->fd_cont->ioc_open_count -= 1; /* Do not close container/pool handles at this point - * to allow for re-use. + * to allow for reuse. * ioil_shrink_cont(entry->fd_cont, true, true); - */ + */ } static int diff --git a/src/client/dfuse/pil4dfs/int_dfs.c b/src/client/dfuse/pil4dfs/int_dfs.c index 4f853e409af..9093c5e5c52 100644 --- a/src/client/dfuse/pil4dfs/int_dfs.c +++ b/src/client/dfuse/pil4dfs/int_dfs.c @@ -338,7 +338,7 @@ lookup_insert_dir(struct dfs_mt *mt, const char *name, size_t len, dfs_obj_t **o } /* Allocate struct and string in a single buffer. This includes a extra byte so name will - * be \0 terminiated however that is not required. + * be \0 terminated however that is not required. 
*/ D_ALLOC(hdl, sizeof(*hdl) + len + 1); if (hdl == NULL) @@ -587,7 +587,7 @@ discover_daos_mount_with_env(void) D_GOTO(out, rc = 0); if (num_dfs >= MAX_DAOS_MT) { - D_FATAL("dfs_list[] is full already. Need to incease MAX_DAOS_MT.\n"); + D_FATAL("dfs_list[] is full already. Need to increase MAX_DAOS_MT.\n"); D_GOTO(out, rc = EBUSY); } diff --git a/src/client/java/daos-java/src/main/native/io_daos_dfs_DaosFsClient.c b/src/client/java/daos-java/src/main/native/io_daos_dfs_DaosFsClient.c index 806a30068d4..68fae682140 100644 --- a/src/client/java/daos-java/src/main/native/io_daos_dfs_DaosFsClient.c +++ b/src/client/java/daos-java/src/main/native/io_daos_dfs_DaosFsClient.c @@ -1005,7 +1005,7 @@ Java_io_daos_dfs_DaosFsClient_dfsWriteAsync(JNIEnv *env, jobject client, * * \param[in] env JNI environment * \param[in] client DaosFsClient object - * \param[in] dfsPtr ointer to dfs object + * \param[in] dfsPtr pointer to dfs object * \param[in] objId pointer to opened fs object * \param[in] maxEntries maximum entries to be read. not implemented yet * @@ -1142,7 +1142,7 @@ set_user_group_name(JNIEnv *env, char *buffer, struct stat *stat) * * \param[in] env JNI environment * \param[in] client DaosFsClient object - * \param[in] dfsPtr ointer to dfs object + * \param[in] dfsPtr pointer to dfs object * \param[in] objId pointer to opened fs object * \param[in] bufferAddress pointer to opened fs object */ @@ -1196,7 +1196,7 @@ Java_io_daos_dfs_DaosFsClient_dfsOpenedObjStat(JNIEnv *env, * * \param[in] env JNI environment * \param[in] client DaosFsClient object - * \param[in] dfsPtr ointer to dfs object + * \param[in] dfsPtr pointer to dfs object * \param[in] objId pointer to opened fs object * \param[in] name attribute name * \param[in] value attribute value @@ -1238,7 +1238,7 @@ Java_io_daos_dfs_DaosFsClient_dfsSetExtAttr(JNIEnv *env, * * \param[in] env JNI environment * \param[in] client DaosFsClient object - * \param[in] dfsPtr ointer to dfs object + * \param[in] dfsPtr pointer to dfs object * \param[in] objId pointer to opened fs object * \param[in] name attribute name * \param[in] expectedValenLen expected value length diff --git a/src/common/ad_mem.h b/src/common/ad_mem.h index 89f42ecc128..fe81a9fd33d 100644 --- a/src/common/ad_mem.h +++ b/src/common/ad_mem.h @@ -304,14 +304,14 @@ struct ad_maxheap_node { /** unusable padding bytes in groups */ int mh_frag_size; uint32_t mh_arena_id; - unsigned int mh_in_tree:1, - /** - * Arena is inactive, it's set to true when any type of group failed to allocate memory - * and create more groups. - * XXX: this is not enough, we should save failed allocatoin counter in matrics and - * set arenea as full only if it encounters multiple failures. - */ - mh_inactive:1; + unsigned int mh_in_tree : 1, + /** + * Arena is inactive, it's set to true when any type of group failed to allocate memory + * and create more groups. + * XXX: this is not enough, we should save failed allocatoin counter in metrics and + * set arenea as full only if it encounters multiple failures. 
+ */ + mh_inactive : 1; }; /** DRAM blob open handle */ diff --git a/src/common/dav/util.h b/src/common/dav/util.h index 1263fa34faa..f1e12321918 100644 --- a/src/common/dav/util.h +++ b/src/common/dav/util.h @@ -169,7 +169,7 @@ util_div_ceil(unsigned a, unsigned b) #define GLUE(A, B) GLUE_I(A, B) #define GLUE_I(A, B) A##B -/* macro for suppresing errors from unused variables (zero to 9) */ +/* macro for suppressing errors from unused variables (zero to 9) */ #define SUPPRESS_UNUSED(...)\ GLUE(SUPPRESS_ARG_, COUNT(__VA_ARGS__))(__VA_ARGS__) #define SUPPRESS_ARG_0(X) diff --git a/src/control/cmd/daos/README.md b/src/control/cmd/daos/README.md index a5bc2dec477..a29d6e25ac7 100644 --- a/src/control/cmd/daos/README.md +++ b/src/control/cmd/daos/README.md @@ -31,7 +31,7 @@ Adding new features should hopefully be fairly straightforward. The new frontend As an example, we can look at adding a new container subcommand such that running `daos cont scrub` will invoke a libdaos API for scrubbing a container, whatever that means. -The first question to answer is whether or not we need a C-based handler to wrap the API call, or if we can call the API directly from Go. This is somewhat of a philosophical question and it really depends on how comfortable the implementor is in working with Go. For the purposes of this example, let's assume that the implementor is going to add a new handler named `cont_scrub_hdlr()` to daos_hdlr.c: +The first question to answer is whether or not we need a C-based handler to wrap the API call, or if we can call the API directly from Go. This is somewhat of a philosophical question and it really depends on how comfortable the implementer is in working with Go. For the purposes of this example, let's assume that the implementer is going to add a new handler named `cont_scrub_hdlr()` to daos_hdlr.c: In src/utils/daos_hdlr.h: ```C @@ -126,7 +126,7 @@ func (cmd *containerScrubCmd) Execute(_ []string) error { // cmd_args_s. ap.scrub_level = C.CString(cmd.Level) // The freeArgs() closure only frees fields used with all - // handlers. It's up to the handler implementor to free + // handlers. It's up to the handler implementer to free // C memory allocated in the handler. defer freeString(ap.scrub_level) diff --git a/src/engine/sched.c b/src/engine/sched.c index 01a6bd71e7e..d62fffdcfea 100644 --- a/src/engine/sched.c +++ b/src/engine/sched.c @@ -724,7 +724,7 @@ check_space_pressure(struct dss_xstream *dx, struct sched_pool_info *spi) if (info->si_stop) goto out; - /* Use cached space presure info */ + /* Use cached space pressure info */ if ((spi->spi_space_ts + SCHED_SPACE_AGE_MAX) > info->si_cur_ts) goto out; diff --git a/src/gurt/slab.c b/src/gurt/slab.c index 15077354459..337daa31eb7 100644 --- a/src/gurt/slab.c +++ b/src/gurt/slab.c @@ -367,7 +367,7 @@ d_slab_release(struct d_slab_type *type, void *ptr) /* Re-stock an object type. * * This is a function called off the critical path to pre-alloc and recycle - * objects to be ready for re-use. In an ideal world this function does + * objects to be ready for reuse. In an ideal world this function does * all the heavy lifting and acquire/release are very cheap. 
* * Ideally this function should be called once for every acquire(), after the diff --git a/src/gurt/telemetry.c b/src/gurt/telemetry.c index a5e34fab2d4..3294c066226 100644 --- a/src/gurt/telemetry.c +++ b/src/gurt/telemetry.c @@ -621,7 +621,7 @@ add_child(struct d_tm_node_t **newnode, struct d_tm_node_t *parent, /* * Search for either: - * 1) a previously-cleared link node that can be re-used, or + * 1) a previously-cleared link node that can be reused, or * 2) the right place to attach a newly allocated node. */ child = parent->dtn_child; @@ -631,7 +631,7 @@ add_child(struct d_tm_node_t **newnode, struct d_tm_node_t *parent, } if (is_cleared_link(tm_shmem.ctx, child)) { - /* we can re-use this node instead of allocating a new one */ + /* we can reuse this node instead of allocating a new one */ rc = init_node(shmem, child, name); if (rc != 0) { D_ERROR("failed to reinit cleared link node, " DF_RC diff --git a/src/include/cart/iv.h b/src/include/cart/iv.h index d410ba554d3..815382919bc 100644 --- a/src/include/cart/iv.h +++ b/src/include/cart/iv.h @@ -142,7 +142,7 @@ typedef int (*crt_iv_on_update_cb_t)(crt_iv_namespace_t ivns, * \param[in] ivns the local handle of the IV namespace * \param[in] iv_key key of the IV * \param[in] cb a callback which must be called or scheduled by - * the user in order to compelete the handling of + * the user in order to complete the handling of * the crt_iv_fetch() request. * \param[in] cb_arg arguments for \a cb. * @@ -165,7 +165,7 @@ typedef void (*crt_iv_pre_fetch_cb_t)(crt_iv_namespace_t ivns, * \param[in] ivns the local handle of the IV namespace * \param[in] iv_key key of the IV * \param[in] cb a callback which must be called or scheduled by - * the user in order to compelete the handling of + * the user in order to complete the handling of * the crt_iv_update() request. * \param[in] cb_arg arguments for \a cb. */ @@ -184,7 +184,7 @@ typedef void (*crt_iv_pre_update_cb_t)(crt_iv_namespace_t ivns, * \param[in] ivns the local handle of the IV namespace * \param[in] iv_key key of the IV * \param[in] cb a callback which must be called or scheduled by - * the user in order to compelete the handling of + * the user in order to complete the handling of * the crt_iv_sync() request. * \param[in] cb_arg arguments for \a cb. */ diff --git a/src/include/daos/checksum.h b/src/include/daos/checksum.h index 1c9ebf1b7ff..fe4771f9fbf 100644 --- a/src/include/daos/checksum.h +++ b/src/include/daos/checksum.h @@ -1,5 +1,5 @@ /** - * (C) Copyright 2019-2022 Intel Corporation. + * (C) Copyright 2019-2023 Intel Corporation. * * SPDX-License-Identifier: BSD-2-Clause-Patent */ @@ -510,9 +510,9 @@ ci2csum(struct dcs_csum_info ci); */ #define ci_csums_len(obj) ((obj).cs_nr * (obj).cs_len) -/** Serialze a \dcs_csum_info structure to an I/O vector. First the structure -* fields are added to the memory buf, then the actual csum. -*/ +/** Serialize a \dcs_csum_info structure to an I/O vector. First the structure + * fields are added to the memory buf, then the actual csum. + */ int ci_serialize(struct dcs_csum_info *obj, d_iov_t *iov); void diff --git a/src/include/daos/stack_mmap.h b/src/include/daos/stack_mmap.h index 6909251f424..96964e50915 100644 --- a/src/include/daos/stack_mmap.h +++ b/src/include/daos/stack_mmap.h @@ -19,7 +19,7 @@ * that will be reserved for no other mapping and prevented to be accessed. 
* The stacks are managed as a pool, using the mmap_stack_desc_t struct * being located at the bottom (upper addresses) of each stack and being - * linked as a list upon ULT exit for future re-use by a new ULT, based on + * linked as a list upon ULT exit for future reuse by a new ULT, based on * the requested stack size. * The free stacks list is drained upon a certain number of free stacks or * upon a certain percentage of free stacks. diff --git a/src/include/daos/tests_lib.h b/src/include/daos/tests_lib.h index da707ac3e52..49ecf221e7b 100644 --- a/src/include/daos/tests_lib.h +++ b/src/include/daos/tests_lib.h @@ -118,7 +118,7 @@ dts_sgl_init_with_strings(d_sg_list_t *sgl, uint32_t count, char *d, ...); * * @param sgl Scatter gather list to initialize * @param count Number of IO Vectors that will be created in the SGL - * @param repeat Number of tiems to repeat the string + * @param repeat Number of times to repeat the string * @param d First string that will be used * @param ... Rest of strings, up to count */ diff --git a/src/include/daos_array.h b/src/include/daos_array.h index 1d8c622a7d9..ecd491e8c05 100644 --- a/src/include/daos_array.h +++ b/src/include/daos_array.h @@ -341,7 +341,7 @@ daos_array_stat(daos_handle_t oh, daos_handle_t th, daos_array_stbuf_t *stbuf, d /** * Set the array size (truncate) in records. If array is shrinking, we punch - * dkeys/records above the required size. If the array is epxanding, we insert 1 + * dkeys/records above the required size. If the array is expanding, we insert 1 * record at the corresponding size. This is NOT equivalent to an allocate. * * diff --git a/src/include/daos_srv/ad_mem.h b/src/include/daos_srv/ad_mem.h index e191c094f8c..114e0716c95 100644 --- a/src/include/daos_srv/ad_mem.h +++ b/src/include/daos_srv/ad_mem.h @@ -5,7 +5,7 @@ #define AD_ARENA_DEFAULT 0 -/* the memory region (blob) manged by ad-hoc allocator */ +/* the memory region (blob) managed by ad-hoc allocator */ struct ad_blob; struct ad_arena; struct ad_group; diff --git a/src/include/daos_srv/bio.h b/src/include/daos_srv/bio.h index 83f91f30d50..37973f1fa7b 100644 --- a/src/include/daos_srv/bio.h +++ b/src/include/daos_srv/bio.h @@ -1064,7 +1064,7 @@ struct bio_wal_info { }; /* - * Qeury WAL total blocks & used blocks. + * Query WAL total blocks & used blocks. */ void bio_wal_query(struct bio_meta_context *mc, struct bio_wal_info *info); diff --git a/src/object/obj_tx.c b/src/object/obj_tx.c index 340cecf22e4..54e7ff68b73 100644 --- a/src/object/obj_tx.c +++ b/src/object/obj_tx.c @@ -1656,7 +1656,7 @@ dc_tx_cpd_adjust_size(size_t size) return size * 11 / 10; } -/* The calculted CPD RPC sub-requests size may be some larger than the real case, no matter. */ +/* The calculated CPD RPC sub-requests size may be some larger than the real case, no matter. */ static size_t dc_tx_cpd_sub_reqs_size(struct daos_cpd_sub_req *dcsr, int count) { diff --git a/src/placement/ring_map.c b/src/placement/ring_map.c index 0425cb4a127..48b1247b357 100644 --- a/src/placement/ring_map.c +++ b/src/placement/ring_map.c @@ -186,7 +186,7 @@ static daos_sort_ops_t ring_target_shuff_sops = { .so_swap = ring_target_swap, }; -/** compare versoins of two domains */ +/** compare versions of two domains */ static int ring_domain_ver_cmp(void *array, int a, int b) { diff --git a/src/rebuild/srv.c b/src/rebuild/srv.c index 5b1c2089266..b0cf408e37e 100644 --- a/src/rebuild/srv.c +++ b/src/rebuild/srv.c @@ -2057,7 +2057,7 @@ ds_rebuild_regenerate_task(struct ds_pool *pool, daos_prop_t *prop) * 1. 
extend job needs to add new targets to the pool map. * 2. reintegrate job needs to discard the existing objects/records on the * reintegrating targets. - * But since the pool map already includs these extending targets, and also + * But since the pool map already includes these extending targets, and also * discarding on an empty targets is harmless. So it is ok to use REINT to * do EXTEND here. */ diff --git a/src/rsvc/srv.c b/src/rsvc/srv.c index 0182f37fc4c..b1f66917da7 100644 --- a/src/rsvc/srv.c +++ b/src/rsvc/srv.c @@ -1311,7 +1311,7 @@ ds_rsvc_start_aggregator(crt_rpc_t *source, crt_rpc_t *result, void *priv) * * XXX excluded and ranks are a bit duplicate here, since this function only * suppose to send RPC to @ranks list, but cart does not have such interface - * for collective RPC, so we have to use both ranks and exclued for the moment, + * for collective RPC, so we have to use both ranks and excluded for the moment, * and it should be simplified once cart can provide rank list collective RPC. * * \param[in] class replicated service class diff --git a/src/tests/ftest/cart/util/cart_logparse.py b/src/tests/ftest/cart/util/cart_logparse.py index 50da3aef8ee..a64ff6a74e4 100644 --- a/src/tests/ftest/cart/util/cart_logparse.py +++ b/src/tests/ftest/cart/util/cart_logparse.py @@ -386,7 +386,7 @@ def __init__(self, log_iter): def __iter__(self): - # Dict, indexed by pointer, containing re-use index for that pointer. + # Dict, indexed by pointer, containing reuse index for that pointer. self.reuse_table = {} # Conversion from active pointer to line where it was created. self.active_desc = {} diff --git a/src/tests/ftest/dfuse/daos_build.yaml b/src/tests/ftest/dfuse/daos_build.yaml index acc3220daf8..07582af954e 100644 --- a/src/tests/ftest/dfuse/daos_build.yaml +++ b/src/tests/ftest/dfuse/daos_build.yaml @@ -2,7 +2,7 @@ hosts: test_servers: 1 test_clients: 1 # Hard limit the test at ten hours, however individual tests have lower timeouts, if the timeout -# in the test is hit then the test itself will perform some diagnostics so it's prefereable +# in the test is hit then the test itself will perform some diagnostics so it's preferable # to fail there rather than here. 
timeout: 36000 server_config: diff --git a/src/tests/ftest/harness/skip_list.py b/src/tests/ftest/harness/skip_list.py index 6a9e7792b1a..46c997efcd8 100644 --- a/src/tests/ftest/harness/skip_list.py +++ b/src/tests/ftest/harness/skip_list.py @@ -32,19 +32,19 @@ def setUp(self): # create a temporary commit_fixes file try: os.rename(self.commit_fixes_file, self.commit_fixes_file + '.orig') - except OSError as excpt: - if excpt.errno == errno.ENOENT: + except OSError as err: + if err.errno == errno.ENOENT: pass else: self.fail("Could not rename {0}" "{{,.orig}}: {1}".format(self.commit_fixes_file, - excpt)) + err)) try: with open(self.commit_fixes_file, 'w') as cf_handle: cf_handle.write("DAOS-9999 test: Fixing DAOS-9999") - except Exception as excpt: # pylint: disable=broad-except + except Exception as err: # pylint: disable=broad-except self.fail("Could not create {0}: " - "{1}".format(self.commit_fixes_file, excpt)) + "{1}".format(self.commit_fixes_file, err)) super().setUp() @@ -52,17 +52,17 @@ def tearDown(self): """Put back the original commit_fixes file.""" try: os.unlink(self.commit_fixes_file) - except Exception as excpt: # pylint: disable=broad-except + except Exception as err: # pylint: disable=broad-except self.fail("Could not remove {0}: " - "{1}".format(self.commit_fixes_file, excpt)) + "{1}".format(self.commit_fixes_file, err)) try: os.rename(self.commit_fixes_file + '.orig', self.commit_fixes_file) - except OSError as excpt: - if excpt.errno == errno.ENOENT: + except OSError as err: + if err.errno == errno.ENOENT: pass - except Exception as excpt: # pylint: disable=broad-except + except Exception as err: # pylint: disable=broad-except self.fail("Could not rename {0}{{.orig,}}: " - "{1}".format(self.commit_fixes_file, excpt)) + "{1}".format(self.commit_fixes_file, err)) super().tearDown() diff --git a/src/tests/ftest/util/apricot/apricot/test.py b/src/tests/ftest/util/apricot/apricot/test.py index f998f9176e2..36bfb769804 100644 --- a/src/tests/ftest/util/apricot/apricot/test.py +++ b/src/tests/ftest/util/apricot/apricot/test.py @@ -195,8 +195,8 @@ def cancel_for_ticket(ticket, skip_list): try: with open(self.cancel_file) as skip_handle: skip_list = skip_handle.readlines() - except Exception as excpt: # pylint: disable=broad-except - skip_process_error("Unable to read skip list: {}".format(excpt)) + except Exception as err: # pylint: disable=broad-except + skip_process_error("Unable to read skip list: {}".format(err)) skip_list = [] for item in skip_list: @@ -218,9 +218,9 @@ def cancel_for_ticket(ticket, skip_list): return except exceptions.TestCancel: # pylint: disable=try-except-raise raise - except Exception as excpt: # pylint: disable=broad-except + except Exception as err: # pylint: disable=broad-except skip_process_error("Unable to read commit title: " - "{}".format(excpt)) + "{}".format(err)) # Nope, but there is a commit that fixes it # Maybe in this code base, maybe not... 
if len(vals) > 1: @@ -228,9 +228,9 @@ def cancel_for_ticket(ticket, skip_list): with open(os.path.join(os.sep, 'tmp', 'commit_list')) as commit_handle: commits = commit_handle.readlines() - except Exception as excpt: # pylint: disable=broad-except + except Exception as err: # pylint: disable=broad-except skip_process_error("Unable to read commit list: " - "{}".format(excpt)) + "{}".format(err)) return if commits and vals[1] in commits: # fix is in this code base diff --git a/src/tests/suite/daos_obj.c b/src/tests/suite/daos_obj.c index b82064cd9b4..5415d3fa9fa 100644 --- a/src/tests/suite/daos_obj.c +++ b/src/tests/suite/daos_obj.c @@ -605,7 +605,7 @@ int pool_storage_info(test_arg_t *arg, daos_pool_info_t *pinfo) } /** - * Enabled/Disabled Aggrgation strategy for Pool. + * Enabled/Disabled Aggregation strategy for Pool. */ static int set_pool_reclaim_strategy(test_arg_t *arg, char *strategy) @@ -703,7 +703,7 @@ io_overwrite_large(void **state, daos_obj_id_t oid) SMALL_POOL_SIZE, 0, NULL); assert_success(rc); - /* Disabled Pool Aggrgation */ + /* Disable Pool Aggregation */ rc = set_pool_reclaim_strategy(arg, aggr_disabled); assert_rc_equal(rc, 0); /** @@ -802,7 +802,7 @@ io_overwrite_large(void **state, daos_obj_id_t oid) nvme_initial_size = pinfo.pi_space.ps_space.s_free[1]; } - /* Enabled Pool Aggrgation */ + /* Enable Pool Aggregation */ rc = set_pool_reclaim_strategy(arg, aggr_set_time); assert_rc_equal(rc, 0); @@ -4748,7 +4748,7 @@ enum_recxs_with_aggregation_internal(void **state, bool incr) total_size += recxs[i].rx_nr; if (!enable_agg) { - /* Enabled Pool Aggrgation */ + /* Enable Pool Aggregation */ print_message("enable aggregation\n"); rc = set_pool_reclaim_strategy(arg, aggr_set_time); assert_rc_equal(rc, 0); diff --git a/src/utils/ctl/cart_ctl.c b/src/utils/ctl/cart_ctl.c index 9af2fc058c3..66bb4f80727 100644 --- a/src/utils/ctl/cart_ctl.c +++ b/src/utils/ctl/cart_ctl.c @@ -272,7 +272,7 @@ print_usage_msg(const char *msg) printf("\tReturn pids of the specified ranks\n"); printf("\nset_fi_attr\n"); printf("\tset fault injection attributes for a fault ID. This command\n" - "\tmust be acompanied by the option\n" + "\tmust be accompanied by the option\n" "\t--attr fault_id,max_faults,probability,err_code" "[,argument]\n"); printf("\noptions:\n"); diff --git a/src/vos/vos_aggregate.c b/src/vos/vos_aggregate.c index ea344eafa73..909893eade6 100644 --- a/src/vos/vos_aggregate.c +++ b/src/vos/vos_aggregate.c @@ -587,7 +587,7 @@ csum_prepare_ent(struct evt_entry_in *ent_in, unsigned int cs_type, * the verification checksum for the component (input) segments. * The full buffer is extended to hold checksums for entire merge window. * Currently, allocations for prior windows are retained until aggregation - * for an evtree is complete (in vos_agg_akey, and at end of agggregation). + * for an evtree is complete (in vos_agg_akey, and at end of aggregation). */ static int csum_prepare_buf(struct agg_lgc_seg *segs, unsigned int seg_cnt, diff --git a/src/vos/vos_csum_recalc.c b/src/vos/vos_csum_recalc.c index fcac19dcfd8..ce0efc52a36 100644 --- a/src/vos/vos_csum_recalc.c +++ b/src/vos/vos_csum_recalc.c @@ -38,7 +38,7 @@ * */ -/* Determine checksum parameters for verification of an input segemnt. */ +/* Determine checksum parameters for verification of an input segment. 
*/ static unsigned int calc_csum_params(struct dcs_csum_info *csum_info, struct csum_recalc *recalc, unsigned int prefix_len, unsigned int suffix_len, diff --git a/utils/cq/words.dict b/utils/cq/words.dict index 3d6eaffba4e..4fd4e0fb94d 100644 --- a/utils/cq/words.dict +++ b/utils/cq/words.dict @@ -135,7 +135,6 @@ debian debuginfo defusedxml del -dependances deps dereference dereferencing diff --git a/utils/scripts/install-leap15.sh b/utils/scripts/install-leap15.sh index f4c60e8c5e7..caa1ad992fb 100755 --- a/utils/scripts/install-leap15.sh +++ b/utils/scripts/install-leap15.sh @@ -6,7 +6,7 @@ # Switch to dnf as it seems a bit faster. # libatomic should be in this list, but can not for now due to CI # post provisioning issue. -# *** Keep these in as much alphbetical order as possible *** +# *** Keep these in as much alphabetical order as possible *** set -e From c2474aed00a573670b23a4348e8a65a39a8b3935 Mon Sep 17 00:00:00 2001 From: Alexander Oganezov Date: Mon, 9 Oct 2023 22:34:38 -0700 Subject: [PATCH 2/8] DAOS-14431 cart: Remove sockets support (#13090) - Remove support from sockets and gni providers - Change crt_na_dict[] table to no longer require string ordering of providers Required-githooks: true Signed-off-by: Alexander A Oganezov --- src/cart/crt_hg.c | 46 +++++++++++++++++++++++++++------------------ src/cart/crt_init.c | 2 +- 2 files changed, 29 insertions(+), 19 deletions(-) diff --git a/src/cart/crt_hg.c b/src/cart/crt_hg.c index e6c5fe70fb1..464afbb9554 100644 --- a/src/cart/crt_hg.c +++ b/src/cart/crt_hg.c @@ -11,8 +11,8 @@ #include "crt_internal.h" /* - * na_dict table should be in the same order of enum crt_provider_t, the last one - * is terminator with NULL nad_str. + * List of supported CaRT providers. The table is terminated with the last entry + * having nad_str = NULL. */ struct crt_na_dict crt_na_dict[] = { { @@ -20,23 +20,12 @@ struct crt_na_dict crt_na_dict[] = { .nad_str = "sm", .nad_contig_eps = false, .nad_port_bind = false, - }, { - .nad_type = CRT_PROV_OFI_SOCKETS, - .nad_str = "ofi+sockets", - .nad_alt_str = "ofi+socket", - .nad_contig_eps = true, - .nad_port_bind = true, }, { .nad_type = CRT_PROV_OFI_VERBS_RXM, .nad_str = "ofi+verbs;ofi_rxm", .nad_alt_str = "ofi+verbs", .nad_contig_eps = true, .nad_port_bind = true, - }, { - .nad_type = CRT_PROV_OFI_GNI, - .nad_str = "ofi+gni", - .nad_contig_eps = true, - .nad_port_bind = false, }, { .nad_type = CRT_PROV_OFI_TCP, .nad_str = "ofi+tcp", @@ -156,13 +145,14 @@ crt_hg_parse_uri(const char *uri, crt_provider_t *prov, char *addr) crt_provider_t crt_prov_str_to_prov(const char *prov_str) { - int i; + int i = 0; - for (i = 0; i < CRT_PROV_COUNT; i++) { + while (crt_na_dict[i].nad_str) { if (strcmp(prov_str, crt_na_dict[i].nad_str) == 0 || (crt_na_dict[i].nad_alt_str && strcmp(prov_str, crt_na_dict[i].nad_alt_str) == 0)) return crt_na_dict[i].nad_type; + i++; } return CRT_PROV_UNKNOWN; @@ -511,10 +501,26 @@ crt_provider_domain_get(bool primary, int provider) return prov_data->cpg_na_config.noc_domain; } +static struct crt_na_dict * +crt_get_na_dict_entry(int provider) +{ + struct crt_na_dict *entry = &crt_na_dict[0]; + + while (entry && entry->nad_str) { + if (entry->nad_type == provider) + return entry; + entry++; + } + + return NULL; +} + char * crt_provider_name_get(int provider) { - return crt_na_dict[provider].nad_str; + struct crt_na_dict *entry = crt_get_na_dict_entry(provider); + + return entry ? 
entry->nad_str : NULL; } static char* @@ -541,13 +547,17 @@ crt_provider_is_block_mode(int provider) bool crt_provider_is_contig_ep(int provider) { - return crt_na_dict[provider].nad_contig_eps; + struct crt_na_dict *entry = crt_get_na_dict_entry(provider); + + return entry ? entry->nad_contig_eps : false; } bool crt_provider_is_port_based(int provider) { - return crt_na_dict[provider].nad_port_bind; + struct crt_na_dict *entry = crt_get_na_dict_entry(provider); + + return entry ? entry->nad_port_bind : false; } bool diff --git a/src/cart/crt_init.c b/src/cart/crt_init.c index 7a4f83c57c8..179e2902247 100644 --- a/src/cart/crt_init.c +++ b/src/cart/crt_init.c @@ -42,7 +42,7 @@ crt_lib_init(void) crt_gdata.cg_refcount = 0; crt_gdata.cg_inited = 0; - crt_gdata.cg_primary_prov = CRT_PROV_OFI_SOCKETS; + crt_gdata.cg_primary_prov = CRT_PROV_OFI_TCP_RXM; d_srand(d_timeus_secdiff(0) + getpid()); start_rpcid = ((uint64_t)d_rand()) << 32; From e9aac2d3d0b40beaa918325dc0a8766a716bede7 Mon Sep 17 00:00:00 2001 From: Wang Shilong Date: Tue, 10 Oct 2023 14:25:31 +0800 Subject: [PATCH 3/8] DAOS-14421 vea: vea upgrade fixes (#13092) 1. don't upgrade vea if it is SCM only 2. recreate bitmap tree with different feats. 4. load hint for bitmap Required-githooks: true Signed-off-by: Wang Shilong --- src/vea/vea_api.c | 59 ++++++++++++++++++++++++++++++++++------------ src/vos/vos_pool.c | 5 ++-- 2 files changed, 47 insertions(+), 17 deletions(-) diff --git a/src/vea/vea_api.c b/src/vea/vea_api.c index a5530a8e5f2..9461add243c 100644 --- a/src/vea/vea_api.c +++ b/src/vea/vea_api.c @@ -44,8 +44,10 @@ vea_upgrade(struct vea_space_info *vsi, struct umem_instance *umem, uint64_t offset; d_iov_t key, val; struct vea_hint_df dummy; + struct umem_attr uma = {0}; + struct vea_hint_df *df; - if (version < 3) + if (md->vsd_compat & VEA_COMPAT_FEATURE_BITMAP) return 0; /* Start transaction to initialize allocation metadata */ @@ -53,6 +55,29 @@ vea_upgrade(struct vea_space_info *vsi, struct umem_instance *umem, if (rc != 0) return rc; + /* + * bitmap tree reused vec tree which was created with + * BTR_FEAT_DIRECT_KEY, recreate tree with BTR_FEAT_UINT_KEY + */ + if (daos_handle_is_valid(vsi->vsi_md_bitmap_btr)) { + dbtree_destroy(vsi->vsi_md_bitmap_btr, NULL); + vsi->vsi_md_bitmap_btr = DAOS_HDL_INVAL; + } + + /* Create bitmap tree */ + uma.uma_id = umem->umm_id; + uma.uma_pool = umem->umm_pool; + rc = dbtree_create_inplace(DBTREE_CLASS_IFV, BTR_FEAT_UINT_KEY, VEA_TREE_ODR, &uma, + &md->vsd_bitmap_tree, &vsi->vsi_md_bitmap_btr); + if (rc != 0) + goto out; + + /* Open bitmap tree */ + rc = dbtree_open_inplace(&md->vsd_bitmap_tree, &uma, + &vsi->vsi_md_bitmap_btr); + if (rc != 0) + goto out; + offset = VEA_BITMAP_CHUNK_HINT_KEY; d_iov_set(&key, &offset, sizeof(offset)); d_iov_set(&val, &dummy, sizeof(dummy)); @@ -68,9 +93,23 @@ vea_upgrade(struct vea_space_info *vsi, struct umem_instance *umem, if (rc != 0) goto out; - md->vsd_compat |= VEA_COMPAT_FEATURE_BITMAP; + d_iov_set(&val, NULL, 0); + rc = dbtree_fetch(vsi->vsi_md_bitmap_btr, BTR_PROBE_EQ, DAOS_INTENT_DEFAULT, + &key, NULL, &val); + if (rc) + goto out; + + df = (struct vea_hint_df *)val.iov_buf; + rc = vea_hint_load(df, &vsi->vsi_bitmap_hint_context); + if (rc) + goto out; + md->vsd_compat |= VEA_COMPAT_FEATURE_BITMAP; out: + if (rc && daos_handle_is_valid(vsi->vsi_md_bitmap_btr)) { + dbtree_close(vsi->vsi_md_bitmap_btr); + vsi->vsi_md_bitmap_btr = DAOS_HDL_INVAL; + } /* Commit/Abort transaction on success/error */ return rc ? 
umem_tx_abort(umem, rc) : umem_tx_commit(umem); } @@ -88,7 +127,7 @@ vea_format(struct umem_instance *umem, struct umem_tx_stage_data *txd, struct vea_free_extent free_ext; struct umem_attr uma; uint64_t tot_blks, offset; - daos_handle_t free_btr, bitmap_btr; + daos_handle_t free_btr; struct vea_hint_df dummy; d_iov_t key, val; daos_handle_t md_bitmap_btr = DAOS_HDL_INVAL; @@ -149,7 +188,7 @@ vea_format(struct umem_instance *umem, struct umem_tx_stage_data *txd, if (rc != 0) return rc; - free_btr = bitmap_btr = DAOS_HDL_INVAL; + free_btr = DAOS_HDL_INVAL; rc = umem_tx_add_ptr(umem, md, sizeof(*md)); if (rc != 0) @@ -185,15 +224,7 @@ vea_format(struct umem_instance *umem, struct umem_tx_stage_data *txd, /* Create bitmap tree */ rc = dbtree_create_inplace(DBTREE_CLASS_IFV, BTR_FEAT_UINT_KEY, VEA_TREE_ODR, &uma, - &md->vsd_bitmap_tree, &bitmap_btr); - if (rc != 0) - goto out; - - /* Open bitmap tree */ - uma.uma_id = umem->umm_id; - uma.uma_pool = umem->umm_pool; - rc = dbtree_open_inplace(&md->vsd_bitmap_tree, &uma, - &md_bitmap_btr); + &md->vsd_bitmap_tree, &md_bitmap_btr); if (rc != 0) goto out; @@ -207,8 +238,6 @@ vea_format(struct umem_instance *umem, struct umem_tx_stage_data *txd, out: if (daos_handle_is_valid(free_btr)) dbtree_close(free_btr); - if (daos_handle_is_valid(bitmap_btr)) - dbtree_close(bitmap_btr); if (daos_handle_is_valid(md_bitmap_btr)) dbtree_close(md_bitmap_btr); diff --git a/src/vos/vos_pool.c b/src/vos/vos_pool.c index 171235b7ceb..c0dbfc950e4 100644 --- a/src/vos/vos_pool.c +++ b/src/vos/vos_pool.c @@ -1424,8 +1424,9 @@ vos_pool_upgrade(daos_handle_t poh, uint32_t version) "Invalid pool upgrade version %d, current version is %d\n", version, pool_df->pd_version); - rc = vea_upgrade(pool->vp_vea_info, &pool->vp_umm, &pool_df->pd_vea_df, - pool_df->pd_version); + if (version >= VOS_POOL_DF_2_6 && pool_df->pd_version < VOS_POOL_DF_2_6 && + pool->vp_vea_info) + rc = vea_upgrade(pool->vp_vea_info, &pool->vp_umm, &pool_df->pd_vea_df, version); if (rc) return rc; From 30fa0b512d11822bd95b014fd9b07659697be0d6 Mon Sep 17 00:00:00 2001 From: wangdi Date: Tue, 10 Oct 2023 05:42:35 -0700 Subject: [PATCH 4/8] DAOS-14413 cart: Using inline buffer for IV operation (#13091) If IV buffer size < 19K, let's use inline buffer for IV operation, instead of bulk to avoid extra overhead. Signed-off-by: Di Wang --- src/cart/crt_hg_proc.c | 43 +++- src/cart/crt_init.c | 2 + src/cart/crt_internal.h | 2 + src/cart/crt_internal_types.h | 1 + src/cart/crt_iv.c | 404 +++++++++++++++++++++++----------- src/cart/crt_rpc.h | 14 +- src/common/misc.c | 11 +- src/engine/server_iv.c | 10 +- src/include/cart/api.h | 12 + src/include/cart/iv.h | 1 + src/include/gurt/common.h | 32 +++ src/object/obj_rpc.c | 46 ---- src/pipeline/pipeline_rpc.c | 46 ---- 13 files changed, 388 insertions(+), 236 deletions(-) diff --git a/src/cart/crt_hg_proc.c b/src/cart/crt_hg_proc.c index 382cc4b1507..47d003ee822 100644 --- a/src/cart/crt_hg_proc.c +++ b/src/cart/crt_hg_proc.c @@ -1,5 +1,5 @@ /* - * (C) Copyright 2016-2022 Intel Corporation. + * (C) Copyright 2016-2023 Intel Corporation. 
* * SPDX-License-Identifier: BSD-2-Clause-Patent */ @@ -254,6 +254,47 @@ crt_proc_d_iov_t(crt_proc_t proc, crt_proc_op_t proc_op, d_iov_t *div) return rc; } +int +crt_proc_d_sg_list_t(crt_proc_t proc, crt_proc_op_t proc_op, d_sg_list_t *p) +{ + int i; + int rc; + + if (FREEING(proc_op)) { + /* NB: don't need free in crt_proc_d_iov_t() */ + D_FREE(p->sg_iovs); + return 0; + } + + rc = crt_proc_uint32_t(proc, proc_op, &p->sg_nr); + if (unlikely(rc)) + return rc; + + rc = crt_proc_uint32_t(proc, proc_op, &p->sg_nr_out); + if (unlikely(rc)) + return rc; + + if (p->sg_nr == 0) + return 0; + + if (DECODING(proc_op)) { + D_ALLOC_ARRAY(p->sg_iovs, p->sg_nr); + if (p->sg_iovs == NULL) + return -DER_NOMEM; + } + + for (i = 0; i < p->sg_nr; i++) { + rc = crt_proc_d_iov_t(proc, proc_op, &p->sg_iovs[i]); + if (unlikely(rc)) { + if (DECODING(proc_op)) + D_FREE(p->sg_iovs); + return rc; + } + } + + return rc; +} + static inline int crt_proc_corpc_hdr(crt_proc_t proc, struct crt_corpc_hdr *hdr) { diff --git a/src/cart/crt_init.c b/src/cart/crt_init.c index 179e2902247..4a97a259bd5 100644 --- a/src/cart/crt_init.c +++ b/src/cart/crt_init.c @@ -49,6 +49,7 @@ crt_lib_init(void) crt_gdata.cg_rpcid = start_rpcid; crt_gdata.cg_num_cores = sysconf(_SC_NPROCESSORS_ONLN); + crt_gdata.cg_iv_inline_limit = 19456; /* 19KB */ } /* Library deinit */ @@ -815,6 +816,7 @@ crt_init_opt(crt_group_id_t grpid, uint32_t flags, crt_init_options_t *opt) crt_self_test_init(); + crt_iv_init(opt); rc = crt_opc_map_create(); if (rc != 0) { D_ERROR("crt_opc_map_create() failed, "DF_RC"\n", DP_RC(rc)); diff --git a/src/cart/crt_internal.h b/src/cart/crt_internal.h index 1067128249c..64392ee1c00 100644 --- a/src/cart/crt_internal.h +++ b/src/cart/crt_internal.h @@ -91,4 +91,6 @@ crt_hdlr_ctl_get_hostname(crt_rpc_t *rpc_req); void crt_hdlr_ctl_get_pid(crt_rpc_t *rpc_req); +void +crt_iv_init(crt_init_options_t *ops); #endif /* __CRT_INTERNAL_H__ */ diff --git a/src/cart/crt_internal_types.h b/src/cart/crt_internal_types.h index 1e4eaa28cfa..b83d30307de 100644 --- a/src/cart/crt_internal_types.h +++ b/src/cart/crt_internal_types.h @@ -100,6 +100,7 @@ struct crt_gdata { /** credits limitation for #in-flight RPCs per target EP CTX */ uint32_t cg_credit_ep_ctx; + uint32_t cg_iv_inline_limit; /** the global opcode map */ struct crt_opc_map *cg_opc_map; /** HG level global data */ diff --git a/src/cart/crt_iv.c b/src/cart/crt_iv.c index e998a195159..92890b84ca4 100644 --- a/src/cart/crt_iv.c +++ b/src/cart/crt_iv.c @@ -135,6 +135,14 @@ struct crt_ivns_internal { void *cii_user_priv; }; +void +crt_iv_init(crt_init_options_t *ops) +{ + if (ops != NULL && ops->cio_max_unexpected_size > 1024) + crt_gdata.cg_iv_inline_limit = ops->cio_max_expected_size - 1024; + D_INFO("max inline buf size is %u\n", crt_gdata.cg_iv_inline_limit); +} + static void handle_response_cb(const struct crt_cb_info *cb_info); @@ -381,14 +389,13 @@ crt_ivf_finalize(struct iv_fetch_cb_info *iv_info, crt_iv_key_t *iv_key, if (rpc) { /* If there is child to respond to - bulk transfer to it */ - if (output_rc == 0) { + if (output_rc == 0 && iv_info->ifc_child_bulk != CRT_BULK_NULL) { /* Note: function will increment ref count on 'rpc' */ rc = crt_ivf_bulk_transfer(iv_info->ifc_ivns_internal, iv_info->ifc_class_id, iv_key, iv_value, iv_info->ifc_child_bulk, - rpc, - iv_info->ifc_user_priv); + rpc, iv_info->ifc_user_priv); if (rc != 0) D_ERROR("Bulk transfer failed for key=%p\n", iv_key); @@ -399,7 +406,7 @@ crt_ivf_finalize(struct iv_fetch_cb_info *iv_info, crt_iv_key_t *iv_key, 
output = crt_reply_get(rpc); output->ifo_rc = output_rc; - + output->ifo_sgl = *iv_value; /* Reply can fail */ crt_reply_send(rpc); } @@ -434,7 +441,6 @@ crt_ivf_pending_reqs_process(struct crt_ivns_internal *ivns_internal, struct iv_fetch_cb_info *iv_info; struct crt_iv_fetch_out *output; int rc = 0; - bool put_needed = false; iv_ops = crt_iv_ops_get(ivns_internal, class_id); D_ASSERT(iv_ops != NULL); @@ -450,7 +456,8 @@ crt_ivf_pending_reqs_process(struct crt_ivns_internal *ivns_internal, &kip_entry->kip_pending_fetch_list, struct pending_fetch, pf_link))) { - d_sg_list_t tmp_iv_value = {0}; + d_sg_list_t tmp_iv_value = {0}; + bool put_needed = false; iv_info = pending_fetch->pf_cb_info; @@ -489,8 +496,6 @@ crt_ivf_pending_reqs_process(struct crt_ivns_internal *ivns_internal, CRT_IV_PERM_READ, &tmp_iv_value, &iv_info->ifc_user_priv); - - put_needed = false; if (rc == 0) { put_needed = true; rc = iv_ops->ivo_on_fetch(ivns_internal, @@ -501,13 +506,26 @@ crt_ivf_pending_reqs_process(struct crt_ivns_internal *ivns_internal, if (rc == 0) { /* Function will do IVNS_ADDREF if needed */ - rc = crt_ivf_bulk_transfer(ivns_internal, - class_id, - &iv_info->ifc_iv_key, - &tmp_iv_value, - iv_info->ifc_child_bulk, - iv_info->ifc_child_rpc, - iv_info->ifc_user_priv); + if (iv_info->ifc_child_bulk != CRT_BULK_NULL) { + rc = crt_ivf_bulk_transfer(ivns_internal, class_id, + &iv_info->ifc_iv_key, + &tmp_iv_value, + iv_info->ifc_child_bulk, + iv_info->ifc_child_rpc, + iv_info->ifc_user_priv); + /* bulk transfer callback will put if succeed */ + if (rc == 0) + put_needed = false; + } else { + /* inline reply */ + output = crt_reply_get(iv_info->ifc_child_rpc); + output->ifo_rc = 0; + output->ifo_sgl = tmp_iv_value; + rc = crt_reply_send(iv_info->ifc_child_rpc); + if (rc != 0) + D_ERROR("crt_reply_send(): "DF_RC"\n", + DP_RC(rc)); + } } else { D_ERROR("Failed to process pending request\n"); @@ -517,7 +535,7 @@ crt_ivf_pending_reqs_process(struct crt_ivns_internal *ivns_internal, crt_reply_send(iv_info->ifc_child_rpc); } - if (rc != 0 && put_needed) + if (put_needed) iv_ops->ivo_on_put(ivns_internal, &tmp_iv_value, iv_info->ifc_user_priv); @@ -547,12 +565,8 @@ crt_ivf_pending_reqs_process(struct crt_ivns_internal *ivns_internal, 0, CRT_IV_PERM_READ, &tmp_iv_value, &iv_info->ifc_user_priv); - - put_needed = false; - if (rc == 0) { put_needed = true; - rc = iv_ops->ivo_on_fetch(ivns_internal, &iv_info->ifc_iv_key, 0x0, @@ -968,8 +982,7 @@ crt_ivf_bulk_transfer(struct crt_ivns_internal *ivns_internal, crt_bulk_opid_t opid; crt_bulk_t bulk_hdl; struct crt_iv_fetch_out *output; - int size; - int i; + size_t size; int rc2; int rc = 0; @@ -987,10 +1000,7 @@ crt_ivf_bulk_transfer(struct crt_ivns_internal *ivns_internal, } /* Calculate total size of all iovs in sg list */ - size = 0; - for (i = 0; i < iv_value->sg_nr; i++) - size += iv_value->sg_iovs[i].iov_buf_len; - + size = d_sgl_buf_size(iv_value); /* crt_req_decref done in crt_ivf_bulk_transfer_done_cb */ RPC_PUB_ADDREF(rpc); @@ -1050,6 +1060,7 @@ handle_ivfetch_response(const struct crt_cb_info *cb_info) struct crt_iv_ops *iv_ops; struct crt_ivns_internal *ivns; struct ivf_key_in_progress *kip_entry; + d_sg_list_t *iv_value = NULL; uint32_t class_id; int rc; @@ -1066,11 +1077,17 @@ handle_ivfetch_response(const struct crt_cb_info *cb_info) IV_DBG(&input->ifi_key, "response received, rc = %d\n", rc); + if (rc == 0) { + /* Inline IV fetch */ + if (iv_info->ifc_bulk_hdl == NULL) + d_sgl_buf_copy(&iv_info->ifc_iv_value, &output->ifo_sgl); + + iv_value = 
&iv_info->ifc_iv_value; + } + /* In case of a failure, call on_refresh with NULL iv_value */ - iv_ops->ivo_on_refresh(ivns, &input->ifi_key, - 0, /* TODO: iv_ver */ - rc == 0 ? &iv_info->ifc_iv_value : NULL, - false, rc, iv_info->ifc_user_priv); + iv_ops->ivo_on_refresh(ivns, &input->ifi_key, 0, /* TODO: iv_ver */ + iv_value, false, rc, iv_info->ifc_user_priv); if (iv_info->ifc_bulk_hdl) crt_bulk_free(iv_info->ifc_bulk_hdl); @@ -1168,11 +1185,14 @@ crt_ivf_rpc_issue(d_rank_t dest_node, crt_iv_key_t *iv_key, D_MUTEX_UNLOCK(&entry->kip_lock); D_MUTEX_UNLOCK(&ivns_internal->cii_lock); - rc = crt_bulk_create(ivns_internal->cii_ctx, iv_value, CRT_BULK_RW, - &local_bulk); - if (rc != 0) { - D_ERROR("crt_bulk_create(): "DF_RC"\n", DP_RC(rc)); - D_GOTO(exit, rc); + if (d_sgl_buf_size(iv_value) > crt_gdata.cg_iv_inline_limit) { + IV_DBG(iv_key, "bulk transfer with size %zu\n", d_sgl_buf_size(iv_value)); + rc = crt_bulk_create(ivns_internal->cii_ctx, iv_value, CRT_BULK_RW, + &local_bulk); + if (rc != 0) { + D_ERROR("crt_bulk_create(): "DF_RC"\n", DP_RC(rc)); + D_GOTO(exit, rc); + } } /* Note: destination node is using global rank already */ @@ -1189,9 +1209,10 @@ crt_ivf_rpc_issue(d_rank_t dest_node, crt_iv_key_t *iv_key, input = crt_req_get(rpc); D_ASSERT(input != NULL); - input->ifi_value_bulk = local_bulk; - - cb_info->ifc_bulk_hdl = local_bulk; + if (local_bulk != CRT_BULK_NULL) { + input->ifi_value_bulk = local_bulk; + cb_info->ifc_bulk_hdl = local_bulk; + } d_iov_set(&input->ifi_key, iv_key->iov_buf, iv_key->iov_buf_len); input->ifi_class_id = cb_info->ifc_class_id; @@ -1307,12 +1328,11 @@ crt_hdlr_iv_fetch_aux(void *arg) d_sg_list_t iv_value = {0}; bool put_needed = false; void *user_priv = NULL; - crt_rpc_t *rpc_req; + crt_rpc_t *rpc_req = arg; uint32_t grp_ver_entry; uint32_t grp_ver_current; int rc = 0; - rpc_req = arg; input = crt_req_get(rpc_req); output = crt_reply_get(rpc_req); @@ -1323,7 +1343,7 @@ crt_hdlr_iv_fetch_aux(void *arg) ivns_internal = crt_ivns_internal_lookup(&ivns_id); if (ivns_internal == NULL) { D_ERROR("Failed to lookup ivns internal!\n"); - D_GOTO(send_error, rc = -DER_NONEXIST); + D_GOTO(reply_direct, rc = -DER_NONEXIST); } /* This function is called with ivns_internal ref count held. Since @@ -1346,13 +1366,13 @@ crt_hdlr_iv_fetch_aux(void *arg) "Group (%s) version mismatch. 
Local: %d Remote :%d\n", ivns_id.ii_group_name, grp_ver_entry, input->ifi_grp_ver); - D_GOTO(send_error, rc = -DER_GRPVER); + D_GOTO(reply_direct, rc = -DER_GRPVER); } iv_ops = crt_iv_ops_get(ivns_internal, input->ifi_class_id); if (iv_ops == NULL) { D_ERROR("Returned iv_ops were NULL\n"); - D_GOTO(send_error, rc = -DER_INVAL); + D_GOTO(reply_direct, rc = -DER_INVAL); } IV_DBG(&input->ifi_key, "fetch handler entered\n"); @@ -1360,23 +1380,28 @@ crt_hdlr_iv_fetch_aux(void *arg) 0, CRT_IV_PERM_READ, &iv_value, &user_priv); if (rc != 0) { D_ERROR("ivo_on_get(): "DF_RC"\n", DP_RC(rc)); - D_GOTO(send_error, rc); + D_GOTO(reply_direct, rc); } put_needed = true; - rc = iv_ops->ivo_on_fetch(ivns_internal, &input->ifi_key, 0, 0x0, &iv_value, user_priv); if (rc == 0) { /* Note: This increments ref count on 'rpc_req' and ivns */ - rc = crt_ivf_bulk_transfer(ivns_internal, - input->ifi_class_id, - &input->ifi_key, - &iv_value, input->ifi_value_bulk, - rpc_req, user_priv); - if (rc != 0) { - D_ERROR("bulk transfer failed; "DF_RC"\n", DP_RC(rc)); - D_GOTO(send_error, rc); + if (input->ifi_value_bulk != CRT_BULK_NULL) { + rc = crt_ivf_bulk_transfer(ivns_internal, + input->ifi_class_id, + &input->ifi_key, + &iv_value, input->ifi_value_bulk, + rpc_req, user_priv); + if (rc != 0) { + DL_ERROR(rc, "bulk transfer failed.\n"); + D_GOTO(reply_direct, rc); + } + put_needed = false; + } else { + output->ifo_sgl = iv_value; + D_GOTO(reply_direct, rc); } } else if (rc == -DER_IVCB_FORWARD) { /* Forward the request to the parent */ @@ -1386,7 +1411,7 @@ crt_hdlr_iv_fetch_aux(void *arg) if (ivns_internal->cii_grp_priv->gp_self == input->ifi_root_node) { D_ERROR("Forward requested for root node\n"); - D_GOTO(send_error, rc = -DER_INVAL); + D_GOTO(reply_direct, rc = -DER_INVAL); } iv_ops->ivo_on_put(ivns_internal, &iv_value, user_priv); @@ -1399,7 +1424,7 @@ crt_hdlr_iv_fetch_aux(void *arg) &user_priv); if (rc != 0) { D_ERROR("ivo_on_get(): "DF_RC"\n", DP_RC(rc)); - D_GOTO(send_error, rc); + D_GOTO(reply_direct, rc); } put_needed = true; @@ -1413,7 +1438,7 @@ crt_hdlr_iv_fetch_aux(void *arg) if (rc != 0) { D_DEBUG(DB_TRACE, "crt_iv_parent_get() returned %d\n", rc); - D_GOTO(send_error, rc = -DER_OOG); + D_GOTO(reply_direct, rc = -DER_OOG); } /* Check here for change in group */ @@ -1422,12 +1447,12 @@ crt_hdlr_iv_fetch_aux(void *arg) "On Entry: %d:: Changed To :%d\n", ivns_id.ii_group_name, grp_ver_entry, grp_ver_current); - D_GOTO(send_error, rc = -DER_GRPVER); + D_GOTO(reply_direct, rc = -DER_GRPVER); } D_ALLOC_PTR(cb_info); if (cb_info == NULL) - D_GOTO(send_error, rc = -DER_NOMEM); + D_GOTO(reply_direct, rc = -DER_NOMEM); cb_info->ifc_child_rpc = rpc_req; cb_info->ifc_child_bulk = input->ifi_value_bulk; @@ -1455,11 +1480,11 @@ crt_hdlr_iv_fetch_aux(void *arg) IVNS_DECREF(cb_info->ifc_ivns_internal); D_FREE(cb_info); - D_GOTO(send_error, rc); + D_GOTO(reply_direct, rc); } } else { D_ERROR("ERROR happened: "DF_RC"\n", DP_RC(rc)); - D_GOTO(send_error, rc); + D_GOTO(reply_direct, rc); } /* addref in crt_hdlr_iv_fetch */ @@ -1470,16 +1495,14 @@ crt_hdlr_iv_fetch_aux(void *arg) IVNS_DECREF(ivns_internal); return; -send_error: - if (put_needed && iv_ops) - iv_ops->ivo_on_put(ivns_internal, &iv_value, user_priv); +reply_direct: output->ifo_rc = rc; rc = crt_reply_send(rpc_req); - if (rc != DER_SUCCESS) { + if (rc != DER_SUCCESS) D_ERROR("crt_reply_send(opc: %#x): "DF_RC"\n", rpc_req->cr_opc, DP_RC(rc)); - } - + if (put_needed && iv_ops) + iv_ops->ivo_on_put(ivns_internal, &iv_value, user_priv); /* ADDREF done in lookup 
above */ if (ivns_internal) IVNS_DECREF(ivns_internal); @@ -1787,7 +1810,6 @@ crt_hdlr_iv_sync_aux(void *arg) struct crt_iv_ops *iv_ops = NULL; struct crt_ivns_id ivns_id; crt_iv_sync_t *sync_type; - d_sg_list_t iv_value = {0}; bool need_put = false; void *user_priv = NULL; crt_rpc_t *rpc_req; @@ -1838,7 +1860,7 @@ crt_hdlr_iv_sync_aux(void *arg) D_ASSERT(iv_ops != NULL); /* If bulk is not set, we issue invalidate call */ - if (rpc_req->cr_co_bulk_hdl == CRT_BULK_NULL) { + if (rpc_req->cr_co_bulk_hdl == CRT_BULK_NULL && input->ivs_sync_sgl.sg_nr == 0) { rc = iv_ops->ivo_on_refresh(ivns_internal, &input->ivs_key, 0, NULL, true, 0x0, NULL); D_GOTO(exit, rc); @@ -1849,10 +1871,10 @@ crt_hdlr_iv_sync_aux(void *arg) case CRT_IV_SYNC_EVENT_UPDATE: { d_sg_list_t tmp_iv; - d_iov_t *tmp_iovs; + d_iov_t tmp_iovs; rc = iv_ops->ivo_on_get(ivns_internal, &input->ivs_key, - 0, CRT_IV_PERM_READ, &iv_value, + 0, CRT_IV_PERM_READ | CRT_IV_NO_ALLOC, &tmp_iv, &user_priv); if (rc != 0) { D_ERROR("ivo_on_get(): "DF_RC"\n", DP_RC(rc)); @@ -1861,31 +1883,26 @@ crt_hdlr_iv_sync_aux(void *arg) need_put = true; - D_ALLOC_ARRAY(tmp_iovs, iv_value.sg_nr); - if (tmp_iovs == NULL) { - D_GOTO(exit, rc = -DER_NOMEM); - } - - tmp_iv.sg_nr = iv_value.sg_nr; - tmp_iv.sg_iovs = tmp_iovs; - - /* Populate tmp_iv.sg_iovs[0] to [sg_nr] */ - rc = crt_bulk_access(rpc_req->cr_co_bulk_hdl, &tmp_iv); - if (rc != 0) { - D_FREE(tmp_iovs); - D_ERROR("crt_bulk_access(): "DF_RC"\n", DP_RC(rc)); - D_GOTO(exit, rc); + if (rpc_req->cr_co_bulk_hdl != CRT_BULK_NULL) { + tmp_iv.sg_iovs = &tmp_iovs; + /* Populate tmp_iv.sg_iovs[0] to [sg_nr] */ + rc = crt_bulk_access(rpc_req->cr_co_bulk_hdl, &tmp_iv); + if (rc != 0) { + D_ERROR("crt_bulk_access(): "DF_RC"\n", DP_RC(rc)); + D_GOTO(exit, rc); + } + } else if (input->ivs_sync_sgl.sg_nr > 0) { + tmp_iv = input->ivs_sync_sgl; } rc = iv_ops->ivo_on_refresh(ivns_internal, &input->ivs_key, 0, &tmp_iv, false, 0, user_priv); - D_FREE(tmp_iovs); if (rc != 0) { D_ERROR("ivo_on_refresh(): "DF_RC"\n", DP_RC(rc)); D_GOTO(exit, rc); } - iv_ops->ivo_on_put(ivns_internal, &iv_value, user_priv); + iv_ops->ivo_on_put(ivns_internal, NULL, user_priv); need_put = false; break; @@ -1909,7 +1926,7 @@ crt_hdlr_iv_sync_aux(void *arg) exit: if (need_put && iv_ops) - iv_ops->ivo_on_put(ivns_internal, &iv_value, user_priv); + iv_ops->ivo_on_put(ivns_internal, NULL, user_priv); output->rc = rc; crt_reply_send(rpc_req); @@ -2057,6 +2074,8 @@ call_pre_sync_cb(struct crt_ivns_internal *ivns_internal, D_ERROR("crt_bulk_access(): "DF_RC"\n", DP_RC(rc)); D_GOTO(exit, rc); } + } else if (input->ivs_sync_sgl.sg_nr > 0) { + tmp_iv = input->ivs_sync_sgl; } D_DEBUG(DB_TRACE, "Executing ivo_pre_sync\n"); @@ -2249,8 +2268,9 @@ crt_ivsync_rpc_issue(struct crt_ivns_internal *ivns_internal, uint32_t class_id, } local_bulk = CRT_BULK_NULL; - if (iv_value != NULL) { - D_DEBUG(DB_TRACE, "Create Bulk\n"); + if (iv_value != NULL && d_sgl_buf_size(iv_value) > crt_gdata.cg_iv_inline_limit) { + IV_DBG(iv_key, "bulk transfer with size %zu\n", + d_sgl_buf_size(iv_value)); rc = crt_bulk_create(ivns_internal->cii_ctx, iv_value, CRT_BULK_RO, &local_bulk); if (rc != 0) { @@ -2275,9 +2295,8 @@ crt_ivsync_rpc_issue(struct crt_ivns_internal *ivns_internal, uint32_t class_id, D_ASSERT(input != NULL); D_ALLOC_PTR(iv_sync_cb); - if (iv_sync_cb == NULL) { + if (iv_sync_cb == NULL) D_GOTO(exit, rc = -DER_NOMEM); - } iv_sync_cb->isc_sync_type = *sync_type; input->ivs_ivns_id = ivns_internal->cii_gns.gn_ivns_id.ii_nsid; @@ -2289,6 +2308,10 @@ 
crt_ivsync_rpc_issue(struct crt_ivns_internal *ivns_internal, uint32_t class_id, d_iov_set(&input->ivs_sync_type, &iv_sync_cb->isc_sync_type, sizeof(crt_iv_sync_t)); + /* Set inline IV buffer if needed */ + if (iv_value != NULL && local_bulk == CRT_BULK_NULL) + input->ivs_sync_sgl = *iv_value; + input->ivs_class_id = class_id; iv_sync_cb->isc_bulk_hdl = local_bulk; @@ -2401,7 +2424,8 @@ finalize_transfer_back(struct update_cb_info *cb_info, int rc) crt_reply_send(cb_info->uci_child_rpc); /* ADDREF done in crt_hdlr_iv_update */ - crt_bulk_free(cb_info->uci_bulk_hdl); + if (cb_info->uci_bulk_hdl != CRT_BULK_NULL) + crt_bulk_free(cb_info->uci_bulk_hdl); iv_ops = crt_iv_ops_get(ivns, cb_info->uci_class_id); D_ASSERT(iv_ops != NULL); @@ -2426,16 +2450,16 @@ bulk_update_transfer_back_done(const struct crt_bulk_cb_info *info) } /* Helper function to transfer iv_value back to child */ -static -int transfer_back_to_child(crt_iv_key_t *key, struct update_cb_info *cb_info, - bool do_refresh, int update_rc) +static int +transfer_back_to_child(crt_iv_key_t *key, struct update_cb_info *cb_info, + int update_rc) { struct crt_bulk_desc bulk_desc = {0}; struct crt_iv_update_in *child_input; + struct crt_iv_update_out *child_output; struct crt_ivns_internal *ivns; struct crt_iv_ops *iv_ops; - int size = 0; - int i; + size_t size = 0; int rc = 0; ivns = cb_info->uci_ivns_internal; @@ -2443,11 +2467,6 @@ int transfer_back_to_child(crt_iv_key_t *key, struct update_cb_info *cb_info, iv_ops = crt_iv_ops_get(ivns, cb_info->uci_class_id); D_ASSERT(iv_ops != NULL); - if (do_refresh) - iv_ops->ivo_on_refresh(ivns, key, 0, - &cb_info->uci_iv_value, - false, update_rc, cb_info->uci_user_priv); - /* No more children -- we are the originator; call update_cb */ if (cb_info->uci_child_rpc == NULL) { cb_info->uci_comp_cb(ivns, cb_info->uci_class_id, key, NULL, @@ -2466,13 +2485,18 @@ int transfer_back_to_child(crt_iv_key_t *key, struct update_cb_info *cb_info, return 0; } - /* Perform bulk transfer back to the child */ child_input = crt_req_get(cb_info->uci_child_rpc); + if (child_input->ivu_iv_value_bulk == CRT_BULK_NULL) { + child_output = crt_reply_get(cb_info->uci_child_rpc); + D_ASSERT(update_rc != 0 || child_output->ivo_iv_sgl.sg_nr > 0); + finalize_transfer_back(cb_info, update_rc); + return 0; + } - /* Calculate size of iv value */ - for (i = 0; i < cb_info->uci_iv_value.sg_nr; i++) - size += cb_info->uci_iv_value.sg_iovs[i].iov_buf_len; + /* Perform bulk transfer back to the child */ + /* Calculate size of iv value */ + size = d_sgl_buf_size(&cb_info->uci_iv_value); bulk_desc.bd_rpc = cb_info->uci_child_rpc; bulk_desc.bd_bulk_op = CRT_BULK_PUT; bulk_desc.bd_remote_hdl = child_input->ivu_iv_value_bulk; @@ -2502,17 +2526,26 @@ handle_ivupdate_response(const struct crt_cb_info *cb_info) struct crt_iv_ops *iv_ops; int rc; + iv_ops = crt_iv_ops_get(iv_info->uci_ivns_internal, iv_info->uci_class_id); + D_ASSERT(iv_ops != NULL); + /* For bi-directional updates, transfer data back to child */ if (iv_info->uci_sync_type.ivs_flags & CRT_IV_SYNC_BIDIRECTIONAL) { - transfer_back_to_child(&input->ivu_key, iv_info, true, - cb_info->cci_rc ?: output->rc); + if (iv_info->uci_bulk_hdl == CRT_BULK_NULL) + d_sgl_buf_copy(&iv_info->uci_iv_value, &output->ivo_iv_sgl); + iv_ops->ivo_on_refresh(iv_info->uci_ivns_internal, &input->ivu_key, 0, + &iv_info->uci_iv_value, false, cb_info->cci_rc ?: output->rc, + iv_info->uci_user_priv); + + if (input->ivu_iv_value_bulk == CRT_BULK_NULL && + iv_info->uci_child_rpc != NULL) { + child_output = 
crt_reply_get(iv_info->uci_child_rpc); + child_output->ivo_iv_sgl = iv_info->uci_iv_value; + } + transfer_back_to_child(&input->ivu_key, iv_info, cb_info->cci_rc ?: output->rc); D_GOTO(exit, 0); } - iv_ops = crt_iv_ops_get(iv_info->uci_ivns_internal, - iv_info->uci_class_id); - D_ASSERT(iv_ops != NULL); - if (iv_info->uci_child_rpc) { child_output = crt_reply_get(iv_info->uci_child_rpc); @@ -2536,7 +2569,7 @@ handle_ivupdate_response(const struct crt_cb_info *cb_info) } else { d_sg_list_t *tmp_iv_value; - if (iv_info->uci_bulk_hdl == CRT_BULK_NULL) + if (iv_info->uci_iv_value.sg_nr == 0) tmp_iv_value = NULL; else tmp_iv_value = &iv_info->uci_iv_value; @@ -2604,9 +2637,14 @@ crt_ivu_rpc_issue(d_rank_t dest_rank, crt_iv_key_t *iv_key, /* Update with NULL value is invalidate call */ if (iv_value) { - rc = crt_bulk_create(ivns_internal->cii_ctx, iv_value, - CRT_BULK_RW, &local_bulk); - + if (d_sgl_buf_size(iv_value) > crt_gdata.cg_iv_inline_limit) { + IV_DBG(iv_key, "bulk transfer with size %zu\n", + d_sgl_buf_size(iv_value)); + rc = crt_bulk_create(ivns_internal->cii_ctx, iv_value, + CRT_BULK_RW, &local_bulk); + } else { + input->ivu_iv_sgl = *iv_value; + } if (rc != 0) { D_ERROR("crt_bulk_create(): "DF_RC"\n", DP_RC(rc)); D_GOTO(exit, rc); @@ -2857,9 +2895,8 @@ bulk_update_transfer_done_aux(const struct crt_bulk_cb_info *info) } else if (update_rc == 0) { /* If sync was bi-directional - transfer value back */ if (sync_type->ivs_flags & CRT_IV_SYNC_BIDIRECTIONAL) { - rc = transfer_back_to_child(&input->ivu_key, - update_cb_info, - false, update_rc); + rc = transfer_back_to_child(&input->ivu_key, update_cb_info, + update_rc); if (rc == 0) rc = update_rc; @@ -2993,6 +3030,120 @@ bulk_update_transfer_done(const struct crt_bulk_cb_info *info) return rc; } +static void +crt_iv_update_inline_hdlr(crt_rpc_t *rpc, struct crt_ivns_internal *ivns_internal) +{ + struct crt_iv_ops *iv_ops; + struct crt_iv_update_in *input; + struct crt_iv_update_out *output; + d_sg_list_t iv_value = {0}; + struct update_cb_info *update_cb_info = NULL; + int rc = 0; + d_rank_t next_rank; + crt_iv_sync_t *sync_type; + void *user_priv; + uint32_t grp_ver; + + input = crt_req_get(rpc); + output = crt_reply_get(rpc); + + D_ASSERT(input != NULL); + D_ASSERT(output != NULL); + + iv_ops = crt_iv_ops_get(ivns_internal, input->ivu_class_id); + D_ASSERT(iv_ops != NULL); + + rc = iv_ops->ivo_on_get(ivns_internal, &input->ivu_key, 0, + CRT_IV_PERM_WRITE, &iv_value, &user_priv); + if (rc != 0) { + D_ERROR("ivo_on_get(): "DF_RC"\n", DP_RC(rc)); + D_GOTO(send_error, rc); + } + + d_sgl_buf_copy(&iv_value, &input->ivu_iv_sgl); + + rc = iv_ops->ivo_on_update(ivns_internal, &input->ivu_key, 0, false, + &iv_value, user_priv); + + sync_type = input->ivu_sync_type.iov_buf; + + if (rc == -DER_IVCB_FORWARD || + (rc == 0 && sync_type->ivs_flags & CRT_IV_SYNC_BIDIRECTIONAL)) { + + D_ALLOC_PTR(update_cb_info); + if (update_cb_info == NULL) + D_GOTO(put_error, rc = -DER_NOMEM); + + update_cb_info->uci_child_rpc = rpc; + + update_cb_info->uci_ivns_internal = ivns_internal; + IVNS_ADDREF(ivns_internal); + + update_cb_info->uci_class_id = input->ivu_class_id; + update_cb_info->uci_caller_rank = input->ivu_caller_node; + update_cb_info->uci_sync_type = *sync_type; + update_cb_info->uci_user_priv = user_priv; + update_cb_info->uci_iv_value = iv_value; + if (rc == -DER_IVCB_FORWARD) { + /* + * Forward request to the parent + * Get group version to associate with next_rank. 
+ * Pass it down to crt_ivu_rpc_issue + */ + D_RWLOCK_RDLOCK(&ivns_internal->cii_grp_priv->gp_rwlock); + grp_ver = ivns_internal->cii_grp_priv->gp_membs_ver; + + rc = crt_iv_parent_get(ivns_internal, input->ivu_root_node, &next_rank); + D_RWLOCK_UNLOCK(&ivns_internal->cii_grp_priv->gp_rwlock); + + if (rc != 0) { + D_DEBUG(DB_TRACE, "crt_iv_parent_get() returned %d\n", rc); + D_GOTO(decref_error, rc = -DER_OOG); + } + + rc = crt_ivu_rpc_issue(next_rank, &input->ivu_key, + &iv_value, sync_type, + input->ivu_root_node, grp_ver, + update_cb_info); + if (rc != 0) { + D_ERROR("crt_ivu_rpc_issue(): "DF_RC"\n", DP_RC(rc)); + D_GOTO(decref_error, rc); + } + RPC_PUB_ADDREF(update_cb_info->uci_child_rpc); + } else { + int rc1; + + /* If sync was bi-directional - transfer value back */ + output->ivo_iv_sgl = iv_value; + RPC_PUB_ADDREF(update_cb_info->uci_child_rpc); + rc1 = transfer_back_to_child(&input->ivu_key, update_cb_info, rc); + if (rc == 0) + rc = rc1; + + D_GOTO(exit, rc); + } + } else if (rc == 0) { + output->rc = rc; + crt_reply_send(rpc); + iv_ops->ivo_on_put(ivns_internal, &iv_value, user_priv); + } else { + D_GOTO(put_error, rc); + } +exit: + return; + +decref_error: + IVNS_DECREF(ivns_internal); +put_error: + iv_ops->ivo_on_put(ivns_internal, &iv_value, &user_priv); + +send_error: + output->rc = rc; + crt_reply_send(rpc); + if (update_cb_info) + D_FREE(update_cb_info); +} + /* IV UPDATE RPC handler */ void crt_hdlr_iv_update(crt_rpc_t *rpc_req) @@ -3009,9 +3160,8 @@ crt_hdlr_iv_update(crt_rpc_t *rpc_req) crt_iv_sync_t *sync_type; d_rank_t next_rank; struct update_cb_info *update_cb_info; - int size; + size_t size; void *user_priv; - int i; uint32_t grp_ver_entry; uint32_t grp_ver_current; bool put_needed = false; @@ -3053,7 +3203,7 @@ crt_hdlr_iv_update(crt_rpc_t *rpc_req) D_GOTO(send_error, rc = -DER_INVAL); } - if (input->ivu_iv_value_bulk == CRT_BULK_NULL) { + if (input->ivu_iv_value_bulk == CRT_BULK_NULL && input->ivu_iv_sgl.sg_nr == 0) { rc = iv_ops->ivo_on_refresh(ivns_internal, &input->ivu_key, 0, NULL, true, 0, NULL); if (rc == -DER_IVCB_FORWARD) { @@ -3131,6 +3281,11 @@ crt_hdlr_iv_update(crt_rpc_t *rpc_req) D_GOTO(exit, rc = 0); } + if (input->ivu_iv_value_bulk == CRT_BULK_NULL) { + crt_iv_update_inline_hdlr(rpc_req, ivns_internal); + D_GOTO(exit, rc); + } + rc = iv_ops->ivo_on_get(ivns_internal, &input->ivu_key, 0, CRT_IV_PERM_WRITE, &iv_value, &user_priv); if (rc != 0) { @@ -3139,10 +3294,7 @@ crt_hdlr_iv_update(crt_rpc_t *rpc_req) } put_needed = true; - size = 0; - for (i = 0; i < iv_value.sg_nr; i++) - size += iv_value.sg_iovs[i].iov_buf_len; - + size = d_sgl_buf_size(&iv_value); rc = crt_bulk_create(rpc_req->cr_ctx, &iv_value, CRT_BULK_RW, &local_bulk_handle); if (rc != 0) { diff --git a/src/cart/crt_rpc.h b/src/cart/crt_rpc.h index ebda720c995..7bb33790047 100644 --- a/src/cart/crt_rpc.h +++ b/src/cart/crt_rpc.h @@ -216,7 +216,7 @@ crt_rpc_unlock(struct crt_rpc_priv *rpc_priv) D_MUTEX_UNLOCK(&rpc_priv->crp_mutex); } -#define CRT_PROTO_INTERNAL_VERSION 4 +#define CRT_PROTO_INTERNAL_VERSION 5 #define CRT_PROTO_FI_VERSION 3 #define CRT_PROTO_ST_VERSION 1 #define CRT_PROTO_CTL_VERSION 1 @@ -308,7 +308,7 @@ crt_rpc_unlock(struct crt_rpc_priv *rpc_priv) crt_hdlr_iv_update, NULL) \ X(CRT_OPC_IV_SYNC, \ 0, &CQF_crt_iv_sync, \ - crt_hdlr_iv_sync, &crt_iv_sync_co_ops) \ + crt_hdlr_iv_sync, &crt_iv_sync_co_ops) /* Define for RPC enum population below */ #define X(a, b, c, d, e) a, @@ -476,7 +476,8 @@ CRT_RPC_DECLARE(crt_st_status_req, ((d_rank_t) (ifi_root_node) CRT_VAR) #define 
CRT_OSEQ_IV_FETCH /* output fields */ \ - ((int32_t) (ifo_rc) CRT_VAR) + ((d_sg_list_t) (ifo_sgl) CRT_VAR) \ + ((int32_t) (ifo_rc) CRT_VAR) \ CRT_RPC_DECLARE(crt_iv_fetch, CRT_ISEQ_IV_FETCH, CRT_OSEQ_IV_FETCH) @@ -491,6 +492,7 @@ CRT_RPC_DECLARE(crt_iv_fetch, CRT_ISEQ_IV_FETCH, CRT_OSEQ_IV_FETCH) ((d_iov_t) (ivu_sync_type) CRT_VAR) \ /* Bulk handle for iv value */ \ ((crt_bulk_t) (ivu_iv_value_bulk) CRT_VAR) \ + ((d_sg_list_t) (ivu_iv_sgl) CRT_VAR) \ /* Root node for IV UPDATE */ \ ((d_rank_t) (ivu_root_node) CRT_VAR) \ /* Original node that issued crt_iv_update call */ \ @@ -500,7 +502,8 @@ CRT_RPC_DECLARE(crt_iv_fetch, CRT_ISEQ_IV_FETCH, CRT_OSEQ_IV_FETCH) ((uint32_t) (padding) CRT_VAR) #define CRT_OSEQ_IV_UPDATE /* output fields */ \ - ((uint64_t) (rc) CRT_VAR) + ((uint64_t) (rc) CRT_VAR) \ + ((d_sg_list_t) (ivo_iv_sgl) CRT_VAR) CRT_RPC_DECLARE(crt_iv_update, CRT_ISEQ_IV_UPDATE, CRT_OSEQ_IV_UPDATE) @@ -513,8 +516,9 @@ CRT_RPC_DECLARE(crt_iv_update, CRT_ISEQ_IV_UPDATE, CRT_OSEQ_IV_UPDATE) ((d_iov_t) (ivs_key) CRT_VAR) \ /* IOV for sync type */ \ ((d_iov_t) (ivs_sync_type) CRT_VAR) \ + ((d_sg_list_t) (ivs_sync_sgl) CRT_VAR) \ /* IV Class ID */ \ - ((uint32_t) (ivs_class_id) CRT_VAR) + ((uint32_t) (ivs_class_id) CRT_VAR) \ #define CRT_OSEQ_IV_SYNC /* output fields */ \ ((int32_t) (rc) CRT_VAR) diff --git a/src/common/misc.c b/src/common/misc.c index a3a8c7bfd6c..047e4b5e6ad 100644 --- a/src/common/misc.c +++ b/src/common/misc.c @@ -235,16 +235,9 @@ daos_sgl_data_len(d_sg_list_t *sgl) daos_size_t daos_sgl_buf_size(d_sg_list_t *sgl) { - daos_size_t size = 0; - int i; - - if (sgl == NULL || sgl->sg_iovs == NULL) + if (sgl == NULL) return 0; - - for (i = 0, size = 0; i < sgl->sg_nr; i++) - size += sgl->sg_iovs[i].iov_buf_len; - - return size; + return d_sgl_buf_size(sgl); } daos_size_t diff --git a/src/engine/server_iv.c b/src/engine/server_iv.c index c58acef36b7..a7d258705a3 100644 --- a/src/engine/server_iv.c +++ b/src/engine/server_iv.c @@ -598,9 +598,13 @@ ivc_on_get(crt_iv_namespace_t ivns, crt_iv_key_t *iv_key, class = entry->iv_class; if (iv_value) { - rc = class->iv_class_ops->ivc_value_alloc(entry, &key, iv_value); - if (rc) - D_GOTO(out, rc); + if (permission & CRT_IV_NO_ALLOC) { + iv_value->sg_nr = 1; + } else { + rc = class->iv_class_ops->ivc_value_alloc(entry, &key, iv_value); + if (rc) + D_GOTO(out, rc); + } } rc = class->iv_class_ops->ivc_ent_get(entry, &entry_priv_val); diff --git a/src/include/cart/api.h b/src/include/cart/api.h index 45ef8c67529..345d6b611c9 100644 --- a/src/include/cart/api.h +++ b/src/include/cart/api.h @@ -1630,6 +1630,18 @@ crt_proc_d_rank_list_t(crt_proc_t proc, crt_proc_op_t proc_op, int crt_proc_d_iov_t(crt_proc_t proc, crt_proc_op_t proc_op, d_iov_t *data); +/** + * Generic processing routine. + * + * \param[in,out] proc abstract processor object + * \param[in] proc_op proc operation type + * \param[in,out] data pointer to data + * + * \return DER_SUCCESS on success, negative value if error + */ +int +crt_proc_d_sg_list_t(crt_proc_t proc, crt_proc_op_t proc_op, d_sg_list_t *data); + /** * Create the processor object. 
* diff --git a/src/include/cart/iv.h b/src/include/cart/iv.h index 815382919bc..db5ab93d34e 100644 --- a/src/include/cart/iv.h +++ b/src/include/cart/iv.h @@ -239,6 +239,7 @@ typedef int (*crt_iv_on_hash_cb_t)(crt_iv_namespace_t ivns, typedef enum { CRT_IV_PERM_READ = 0x1, CRT_IV_PERM_WRITE = 0x2, + CRT_IV_NO_ALLOC = 0x4, } crt_iv_perm_t; /** diff --git a/src/include/gurt/common.h b/src/include/gurt/common.h index cfce1a490ec..fd4da07d485 100644 --- a/src/include/gurt/common.h +++ b/src/include/gurt/common.h @@ -499,6 +499,38 @@ d_sgl_fini(d_sg_list_t *sgl, bool free_iovs) sgl->sg_nr = 0; } +static inline size_t __attribute__((nonnull)) +d_sgl_buf_size(d_sg_list_t *sgl) +{ + size_t size = 0; + int i; + + if (sgl->sg_iovs == NULL) + return 0; + + for (i = 0, size = 0; i < sgl->sg_nr; i++) + size += sgl->sg_iovs[i].iov_buf_len; + + return size; +} + +static inline void +d_sgl_buf_copy(d_sg_list_t *dst_sgl, d_sg_list_t *src_sgl) +{ + int i; + + D_ASSERT(dst_sgl->sg_nr >= src_sgl->sg_nr); + for (i = 0; i < src_sgl->sg_nr; i++) { + D_ASSERT(dst_sgl->sg_iovs[i].iov_buf_len >= + src_sgl->sg_iovs[i].iov_buf_len); + + memcpy(dst_sgl->sg_iovs[i].iov_buf, src_sgl->sg_iovs[i].iov_buf, + src_sgl->sg_iovs[i].iov_buf_len); + dst_sgl->sg_iovs[i].iov_len = src_sgl->sg_iovs[i].iov_len; + dst_sgl->sg_iovs[i].iov_buf_len = src_sgl->sg_iovs[i].iov_buf_len; + } +} + void d_getenv_bool(const char *env, bool *bool_val); void d_getenv_char(const char *env, char *char_val); void d_getenv_int(const char *env, unsigned int *int_val); diff --git a/src/object/obj_rpc.c b/src/object/obj_rpc.c index 0137bc48dbc..bfff5544b40 100644 --- a/src/object/obj_rpc.c +++ b/src/object/obj_rpc.c @@ -491,52 +491,6 @@ crt_proc_struct_obj_iod_array(crt_proc_t proc, crt_proc_op_t proc_op, return rc; } -static int -crt_proc_d_sg_list_t(crt_proc_t proc, crt_proc_op_t proc_op, d_sg_list_t *p) -{ - int i; - int rc; - - if (FREEING(proc_op)) { - /* NB: don't need free in crt_proc_d_iov_t() */ - D_FREE(p->sg_iovs); - return 0; - } - - rc = crt_proc_uint32_t(proc, proc_op, &p->sg_nr); - if (unlikely(rc)) - return rc; - - rc = crt_proc_uint32_t(proc, proc_op, &p->sg_nr_out); - if (unlikely(rc)) - return rc; - - if (p->sg_nr == 0) - return 0; - - switch (proc_op) { - case CRT_PROC_DECODE: - D_ALLOC_ARRAY(p->sg_iovs, p->sg_nr); - if (p->sg_iovs == NULL) - return -DER_NOMEM; - /* fall through to fill sg_iovs */ - case CRT_PROC_ENCODE: - for (i = 0; i < p->sg_nr; i++) { - rc = crt_proc_d_iov_t(proc, proc_op, &p->sg_iovs[i]); - if (unlikely(rc)) { - if (DECODING(proc_op)) - D_FREE(p->sg_iovs); - return rc; - } - } - break; - default: - return -DER_INVAL; - } - - return rc; -} - static int crt_proc_struct_daos_shard_tgt(crt_proc_t proc, crt_proc_op_t proc_op, struct daos_shard_tgt *p) diff --git a/src/pipeline/pipeline_rpc.c b/src/pipeline/pipeline_rpc.c index b1ccb029635..547765ae630 100644 --- a/src/pipeline/pipeline_rpc.c +++ b/src/pipeline/pipeline_rpc.c @@ -28,52 +28,6 @@ crt_proc_daos_key_desc_t(crt_proc_t proc, crt_proc_op_t proc_op, daos_key_desc_t return 0; } -static int -crt_proc_d_sg_list_t(crt_proc_t proc, crt_proc_op_t proc_op, d_sg_list_t *p) -{ - int i; - int rc; - - if (FREEING(proc_op)) { - /* NB: don't need free in crt_proc_d_iov_t() */ - D_FREE(p->sg_iovs); - return 0; - } - - rc = crt_proc_uint32_t(proc, proc_op, &p->sg_nr); - if (unlikely(rc)) - return rc; - - rc = crt_proc_uint32_t(proc, proc_op, &p->sg_nr_out); - if (unlikely(rc)) - return rc; - - if (p->sg_nr == 0) - return 0; - - switch (proc_op) { - case CRT_PROC_DECODE: - 
D_ALLOC_ARRAY(p->sg_iovs, p->sg_nr); - if (p->sg_iovs == NULL) - return -DER_NOMEM; - /* fall through to fill sg_iovs */ - case CRT_PROC_ENCODE: - for (i = 0; i < p->sg_nr; i++) { - rc = crt_proc_d_iov_t(proc, proc_op, &p->sg_iovs[i]); - if (unlikely(rc)) { - if (DECODING(proc_op)) - D_FREE(p->sg_iovs); - return rc; - } - } - break; - default: - return -DER_INVAL; - } - - return rc; -} - static int crt_proc_daos_unit_oid_t(crt_proc_t proc, crt_proc_op_t proc_op, daos_unit_oid_t *p) { From 97a7c346a41a4d9ebb617ce922550726ede151f8 Mon Sep 17 00:00:00 2001 From: Mohamad Chaarawi Date: Tue, 10 Oct 2023 10:10:29 -0500 Subject: [PATCH 5/8] DAOS-14391 test: bump build parameters for IL VM build test (#13084) Signed-off-by: Mohamad Chaarawi --- src/client/dfuse/il/int_posix.c | 2 +- src/tests/ftest/dfuse/daos_build.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client/dfuse/il/int_posix.c b/src/client/dfuse/il/int_posix.c index 2afe06ee47a..e814031f6c7 100644 --- a/src/client/dfuse/il/int_posix.c +++ b/src/client/dfuse/il/int_posix.c @@ -799,7 +799,7 @@ child_hdlr(void) int rc; daos_dti_reset(); - ioil_eqh = DAOS_HDL_INVAL; + ioil_eqh = ioil_iog.iog_main_eqh = DAOS_HDL_INVAL; rc = daos_eq_create(&ioil_eqh); if (rc) DFUSE_LOG_WARNING("daos_eq_create() failed: "DF_RC, DP_RC(rc)); diff --git a/src/tests/ftest/dfuse/daos_build.py b/src/tests/ftest/dfuse/daos_build.py index d1afe8366b2..2b1f8af8381 100644 --- a/src/tests/ftest/dfuse/daos_build.py +++ b/src/tests/ftest/dfuse/daos_build.py @@ -138,8 +138,8 @@ def run_build_test(self, cache_mode, intercept=False, run_on_vms=False): remote_env = {} if run_on_vms: dfuse_namespace = dfuse_namespace = "/run/dfuse_vm/*" - build_jobs = 6 - remote_env['D_IL_MAX_EQ'] = '2' + build_jobs = 6 * 2 + remote_env['D_IL_MAX_EQ'] = '0' intercept_jobs = build_jobs if intercept: From 49a4580f475a909b507b29d8a7eacf93f7f03e9e Mon Sep 17 00:00:00 2001 From: Tom Nabarro Date: Tue, 10 Oct 2023 16:49:19 +0100 Subject: [PATCH 6/8] DAOS-14427 test: Bump system_ram_reserved for control/log_entry ftest (#13105) Signed-off-by: Tom Nabarro --- src/tests/ftest/control/log_entry.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ftest/control/log_entry.yaml b/src/tests/ftest/control/log_entry.yaml index f209ef1ea54..8feead0292f 100644 --- a/src/tests/ftest/control/log_entry.yaml +++ b/src/tests/ftest/control/log_entry.yaml @@ -15,7 +15,7 @@ server_config: 0: class: ram scm_mount: /mnt/daos0 - system_ram_reserved: 1 + system_ram_reserved: 2 pool: size: 1G From afdcbdfeb7c2029164751e07d21f7d7fcbd8ffc8 Mon Sep 17 00:00:00 2001 From: Dalton Bohning Date: Tue, 10 Oct 2023 12:47:49 -0700 Subject: [PATCH 7/8] DAOS-14482 cq: ignore cyclic import in pydaos (#13151) Ignore cyclic import in pydaos since it would require reworking the import structure. Signed-off-by: Dalton Bohning --- src/client/dfuse/ops/read.c | 2 +- src/client/pydaos/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/dfuse/ops/read.c b/src/client/dfuse/ops/read.c index 7a5bbedc0d2..ef119509eb8 100644 --- a/src/client/dfuse/ops/read.c +++ b/src/client/dfuse/ops/read.c @@ -83,7 +83,7 @@ dfuse_readahead_reply(fuse_req_t req, size_t len, off_t position, struct dfuse_o /* At this point there is a buffer of known length that contains the data, and a read * request. * If the attempted read is bigger than the data then it will be truncated. - * It the atttempted read is smaller than the buffer it will be met in full. 
+ * It the attempted read is smaller than the buffer it will be met in full. */ if (position + len < oh->doh_readahead->dra_ev->de_readahead_len) { diff --git a/src/client/pydaos/__init__.py b/src/client/pydaos/__init__.py index cca8bbceae6..c8e4e1c3359 100644 --- a/src/client/pydaos/__init__.py +++ b/src/client/pydaos/__init__.py @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: BSD-2-Clause-Patent # -# pylint: disable=consider-using-f-string +# pylint: disable=consider-using-f-string,cyclic-import """ PyDAOS Module allowing global access to the DAOS containers and objects. """ From bf5d36649384535d659a291f33a74b0da025ca4a Mon Sep 17 00:00:00 2001 From: Joseph Moore <26410038+jgmoore-or@users.noreply.github.com> Date: Wed, 11 Oct 2023 10:18:17 -0600 Subject: [PATCH 8/8] DAOS-14418 mercury: Retry am_send for UCX out of memory error. (#13139) - Update build.config to include latest mercury patch Signed-off-by: Joseph Moore --- utils/build.config | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/build.config b/utils/build.config index 3f695ac38cc..86e1624bede 100644 --- a/utils/build.config +++ b/utils/build.config @@ -15,3 +15,4 @@ UCX=v1.14.1 [patch_versions] spdk=https://github.com/spdk/spdk/commit/b0aba3fcd5aceceea530a702922153bc75664978.diff,https://github.com/spdk/spdk/commit/445a4c808badbad3942696ecf16fa60e8129a747.diff ofi=https://raw.githubusercontent.com/daos-stack/libfabric/06c3dce3f046f1869e87e840bd6fece81882e9af/prov_opx_u32_extended.patch,https://github.com/ofiwg/libfabric/commit/b65d274fd8ef3a37348ab0471020f047c64b0280.diff,https://github.com/ofiwg/libfabric/commit/15ea97c1bd2f5bcd594781cd311437fc2d26c835.diff,https://github.com/ofiwg/libfabric/commit/7c897a3ed934c2ff3820603bcb00255d63a5ca04.diff +mercury=https://raw.githubusercontent.com/daos-stack/mercury/857f1d5d2ca72d4c1b8d7be5e7fd26d6292b495f/na_ucx_am_send_retry.patch
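
Editor's note on the IV inline-transfer mechanism introduced by the CaRT changes above: the series embeds an IV value directly in the RPC body when its flattened scatter-gather size is within crt_gdata.cg_iv_inline_limit, and falls back to creating a bulk handle otherwise, relying on the new d_sgl_buf_size() and d_sgl_buf_copy() helpers added to gurt/common.h, presumably to spare small values a bulk transfer round trip. The standalone C sketch below only illustrates that decision; it is not part of any patch, and struct iov, struct sgl, sgl_buf_size(), sgl_buf_copy(), and the inline_limit constant are simplified stand-ins for the real d_iov_t/d_sg_list_t types, the gurt helpers, and crt_gdata.cg_iv_inline_limit.

/*
 * Standalone sketch: choose between inline and bulk transfer of an IV
 * value based on its flattened scatter-gather buffer size, mirroring the
 * check the series adds in crt_ivsync_rpc_issue()/crt_ivu_rpc_issue().
 * All names here are illustrative, not CaRT/gurt definitions.
 */
#include <stdio.h>
#include <string.h>

struct iov {
	void   *buf;
	size_t  buf_len;   /* allocated length */
	size_t  data_len;  /* valid data length */
};

struct sgl {
	unsigned int  nr;
	struct iov   *iovs;
};

/* Analogue of d_sgl_buf_size(): total allocated bytes across all iovs. */
static size_t
sgl_buf_size(const struct sgl *sgl)
{
	size_t size = 0;

	for (unsigned int i = 0; i < sgl->nr; i++)
		size += sgl->iovs[i].buf_len;
	return size;
}

/* Analogue of d_sgl_buf_copy(): copy src buffers into a pre-allocated dst
 * whose iovs are at least as large as the corresponding src iovs. */
static void
sgl_buf_copy(struct sgl *dst, const struct sgl *src)
{
	for (unsigned int i = 0; i < src->nr; i++) {
		memcpy(dst->iovs[i].buf, src->iovs[i].buf,
		       src->iovs[i].buf_len);
		dst->iovs[i].data_len = src->iovs[i].data_len;
	}
}

int
main(void)
{
	char	     buf[32] = "small iv value";
	struct iov   iov = { .buf = buf, .buf_len = sizeof(buf),
			     .data_len = strlen(buf) + 1 };
	struct sgl   value = { .nr = 1, .iovs = &iov };
	/* Illustrative threshold; the real limit is a CaRT global. */
	size_t	     inline_limit = 4096;

	if (sgl_buf_size(&value) <= inline_limit)
		printf("send inline: pack %zu bytes into the RPC body\n",
		       sgl_buf_size(&value));
	else
		printf("create a bulk handle and let the peer pull %zu bytes\n",
		       sgl_buf_size(&value));
	return 0;
}

On the receiving side the same idea applies in reverse: when the RPC carries an inline SGL (sg_nr > 0) the handler copies it into a locally obtained value with the copy helper, and only falls back to crt_bulk_access()/bulk transfer when a bulk handle was supplied, which is the pattern visible in the crt_hdlr_iv_sync_aux() and crt_hdlr_iv_update() hunks above.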