From db366ac9c59021f14d776f7eb78cf8c772294563 Mon Sep 17 00:00:00 2001
From: jmachowinski
Date: Wed, 3 Apr 2024 16:29:34 +0200
Subject: [PATCH] improved rcl_wait in the area of timeout computation and
 spurious wakeups (#1146)

Added special handling for timers with a clock that has the time override
enabled. For these timers we should not compute a timeout, as the wait set
is woken up by the associated guard condition. Before this change, the wait
set could wake up because of a timer that was expected to be ready but
actually was not, as the time update for ROS_TIME had not yet arrived.

Signed-off-by: Janosch Machowinski
---
 rcl/include/rcl/timer.h    |  26 ++++++-
 rcl/src/rcl/timer.c        |  18 ++++-
 rcl/src/rcl/wait.c         | 130 ++++++++++++++++++++++++++--------
 rcl/test/rcl/test_wait.cpp | 141 +++++++++++++++++++++++++++++++++++++
 4 files changed, 285 insertions(+), 30 deletions(-)

diff --git a/rcl/include/rcl/timer.h b/rcl/include/rcl/timer.h
index b60ebd952..5aa10b4a1 100644
--- a/rcl/include/rcl/timer.h
+++ b/rcl/include/rcl/timer.h
@@ -319,7 +319,7 @@ rcl_timer_call_with_info(rcl_timer_t * timer, rcl_timer_call_info_t * call_info)
 RCL_PUBLIC
 RCL_WARN_UNUSED
 rcl_ret_t
-rcl_timer_clock(rcl_timer_t * timer, rcl_clock_t ** clock);
+rcl_timer_clock(const rcl_timer_t * timer, rcl_clock_t ** clock);
 
 /// Calculates whether or not the timer should be called.
 /**
@@ -387,6 +387,30 @@ RCL_WARN_UNUSED
 rcl_ret_t
 rcl_timer_get_time_until_next_call(const rcl_timer_t * timer, int64_t * time_until_next_call);
 
+/// Retrieve the time when the next call to rcl_timer_call() shall occur.
+/**
+ *
+ * Attribute          | Adherence
+ * ------------------ | -------------
+ * Allocates Memory   | No
+ * Thread-Safe        | Yes
+ * Uses Atomics       | Yes
+ * Lock-Free          | Yes [1]
+ * [1] if `atomic_is_lock_free()` returns true for `atomic_int_least64_t`
+ *
+ * \param[in] timer the handle to the timer that is being queried
+ * \param[out] next_call_time the output variable for the result
+ * \return #RCL_RET_OK if the next call time was successfully retrieved, or
+ * \return #RCL_RET_INVALID_ARGUMENT if any arguments are invalid, or
+ * \return #RCL_RET_TIMER_INVALID if the timer->impl is invalid, or
+ * \return #RCL_RET_TIMER_CANCELED if the timer is canceled, or
+ * \return #RCL_RET_ERROR if an unspecified error occurs.
+ */
+RCL_PUBLIC
+RCL_WARN_UNUSED
+rcl_ret_t
+rcl_timer_get_next_call_time(const rcl_timer_t * timer, int64_t * next_call_time);
+
 /// Retrieve the time since the previous call to rcl_timer_call() occurred.
 /**
  * This function calculates the time since the last call and copies it into
diff --git a/rcl/src/rcl/timer.c b/rcl/src/rcl/timer.c
index 2fe7479fa..cf9acda5d 100644
--- a/rcl/src/rcl/timer.c
+++ b/rcl/src/rcl/timer.c
@@ -254,7 +254,7 @@ rcl_timer_fini(rcl_timer_t * timer)
 RCL_PUBLIC
 RCL_WARN_UNUSED
 rcl_ret_t
-rcl_timer_clock(rcl_timer_t * timer, rcl_clock_t ** clock)
+rcl_timer_clock(const rcl_timer_t * timer, rcl_clock_t ** clock)
 {
   RCL_CHECK_ARGUMENT_FOR_NULL(timer, RCL_RET_INVALID_ARGUMENT);
   RCL_CHECK_ARGUMENT_FOR_NULL(clock, RCL_RET_INVALID_ARGUMENT);
@@ -343,6 +343,22 @@ rcl_timer_is_ready(const rcl_timer_t * timer, bool * is_ready)
   return RCL_RET_OK;
 }
 
+rcl_ret_t
+rcl_timer_get_next_call_time(const rcl_timer_t * timer, int64_t * next_call_time)
+{
+  RCL_CHECK_ARGUMENT_FOR_NULL(timer, RCL_RET_INVALID_ARGUMENT);
+  RCL_CHECK_ARGUMENT_FOR_NULL(timer->impl, RCL_RET_TIMER_INVALID);
+  RCL_CHECK_ARGUMENT_FOR_NULL(next_call_time, RCL_RET_INVALID_ARGUMENT);
+
+  if (rcutils_atomic_load_bool(&timer->impl->canceled)) {
+    return RCL_RET_TIMER_CANCELED;
+  }
+
+  *next_call_time =
+    rcutils_atomic_load_int64_t(&timer->impl->next_call_time);
+  return RCL_RET_OK;
+}
+
 rcl_ret_t
 rcl_timer_get_time_until_next_call(const rcl_timer_t * timer, int64_t * time_until_next_call)
 {
diff --git a/rcl/src/rcl/wait.c b/rcl/src/rcl/wait.c
index 1f79a6a54..f7dea479e 100644
--- a/rcl/src/rcl/wait.c
+++ b/rcl/src/rcl/wait.c
@@ -541,54 +541,123 @@ rcl_wait(rcl_wait_set_t * wait_set, int64_t timeout)
   // By default, set the timer to block indefinitely if none of the below conditions are met.
   rmw_time_t * timeout_argument = NULL;
   rmw_time_t temporary_timeout_storage;
 
+  bool is_non_blocking = timeout == 0;
+
-  bool is_timer_timeout = false;
-  int64_t min_timeout = timeout > 0 ? timeout : INT64_MAX;
-  {  // scope to prevent i from colliding below
-    uint64_t i = 0;
-    for (i = 0; i < wait_set->impl->timer_index; ++i) {
-      if (!wait_set->timers[i]) {
+  for (uint64_t t_idx = 0; t_idx < wait_set->impl->timer_index; ++t_idx) {
+    if (!wait_set->timers[t_idx]) {
+      continue;  // Skip NULL timers.
+    }
+    rmw_guard_conditions_t * rmw_gcs = &(wait_set->impl->rmw_guard_conditions);
+    size_t gc_idx = wait_set->size_of_guard_conditions + t_idx;
+    if (NULL != rmw_gcs->guard_conditions[gc_idx]) {
+      // This timer has a guard condition, so move it to make a legal wait set.
+      rmw_gcs->guard_conditions[rmw_gcs->guard_condition_count] =
+        rmw_gcs->guard_conditions[gc_idx];
+      ++(rmw_gcs->guard_condition_count);
+    }
+  }
+
+  int64_t min_next_call_time[RCL_STEADY_TIME + 1];
+  rcl_clock_t * clocks[RCL_STEADY_TIME + 1] = {0, 0, 0, 0};
+
+  min_next_call_time[RCL_ROS_TIME] = INT64_MAX;
+  min_next_call_time[RCL_SYSTEM_TIME] = INT64_MAX;
+  min_next_call_time[RCL_STEADY_TIME] = INT64_MAX;
+
+  if (!is_non_blocking) {
+    for (size_t t_idx = 0; t_idx < wait_set->impl->timer_index; ++t_idx) {
+      if (!wait_set->timers[t_idx]) {
         continue;  // Skip NULL timers.
       }
-      rmw_guard_conditions_t * rmw_gcs = &(wait_set->impl->rmw_guard_conditions);
-      size_t gc_idx = wait_set->size_of_guard_conditions + i;
-      if (NULL != rmw_gcs->guard_conditions[gc_idx]) {
-        // This timer has a guard condition, so move it to make a legal wait set.
-        rmw_gcs->guard_conditions[rmw_gcs->guard_condition_count] =
-          rmw_gcs->guard_conditions[gc_idx];
-        ++(rmw_gcs->guard_condition_count);
+
+      rcl_clock_t * clock;
+      if (rcl_timer_clock(wait_set->timers[t_idx], &clock) != RCL_RET_OK) {
+        // should never happen
+        return RCL_RET_ERROR;
       }
-      // use timer time to to set the rmw_wait timeout
-      // TODO(sloretz) fix spurious wake-ups on ROS_TIME timers with ROS_TIME enabled
-      int64_t timer_timeout = INT64_MAX;
-      rcl_ret_t ret = rcl_timer_get_time_until_next_call(wait_set->timers[i], &timer_timeout);
+
+      if (clock->type == RCL_ROS_TIME) {
+        bool timer_override_active = false;
+        if (rcl_is_enabled_ros_time_override(clock, &timer_override_active) != RCL_RET_OK) {
+          // should never happen
+          return RCL_RET_ERROR;
+        }
+
+        if (timer_override_active) {
+          // we need to check if the timer is already ready
+          bool override_timer_is_ready = false;
+          if (rcl_timer_is_ready(wait_set->timers[t_idx], &override_timer_is_ready) != RCL_RET_OK) {
+            // should never happen
+            return RCL_RET_ERROR;
+          }
+
+          if (override_timer_is_ready) {
+            // no need to search further for the timeout, we need to wake up instantly
+            is_non_blocking = true;
+            break;
+          }
+
+          // if the timer override is active, there is no point in computing a wait time,
+          // as it might be on a totally wrong time basis. In case this timer becomes ready,
+          // the guard condition above will wake us.
+          continue;
+        }
+      }
+
+      // get the time of the next call to the timer
+      int64_t next_call_time = INT64_MAX;
+      rcl_ret_t ret = rcl_timer_get_next_call_time(wait_set->timers[t_idx], &next_call_time);
       if (ret == RCL_RET_TIMER_CANCELED) {
-        wait_set->timers[i] = NULL;
+        wait_set->timers[t_idx] = NULL;
         continue;
       }
       if (ret != RCL_RET_OK) {
         return ret;  // The rcl error state should already be set.
       }
-      if (timer_timeout < min_timeout) {
-        is_timer_timeout = true;
-        min_timeout = timer_timeout;
+      if (next_call_time < min_next_call_time[clock->type]) {
+        clocks[clock->type] = clock;
+        min_next_call_time[clock->type] = next_call_time;
       }
     }
   }
 
-  if (timeout == 0) {
-    // Then it is non-blocking, so set the temporary storage to 0, 0 and pass it.
+  if (is_non_blocking) {
     temporary_timeout_storage.sec = 0;
     temporary_timeout_storage.nsec = 0;
     timeout_argument = &temporary_timeout_storage;
-  } else if (timeout > 0 || is_timer_timeout) {
+  } else {
+    int64_t min_timeout = timeout > 0 ? timeout : INT64_MAX;
+    bool has_valid_timeout = timeout > 0;
+
+    // determine the min timeout of all clocks
+    for (size_t i = RCL_ROS_TIME; i <= RCL_STEADY_TIME; i++) {
+      if (clocks[i] == 0) {
+        continue;
+      }
+
+      int64_t cur_time;
+      rcl_ret_t ret = rcl_clock_get_now(clocks[i], &cur_time);
+      if (ret != RCL_RET_OK) {
+        return ret;  // The rcl error state should already be set.
+      }
+
+      int64_t timer_timeout = min_next_call_time[i] - cur_time;
+
+      if (timer_timeout <= min_timeout) {
+        has_valid_timeout = true;
+        min_timeout = timer_timeout;
+      }
+    }
+
     // If min_timeout was negative, we need to wake up immediately.
     if (min_timeout < 0) {
       min_timeout = 0;
     }
-    temporary_timeout_storage.sec = RCL_NS_TO_S(min_timeout);
-    temporary_timeout_storage.nsec = min_timeout % 1000000000;
-    timeout_argument = &temporary_timeout_storage;
+    if (has_valid_timeout) {
+      temporary_timeout_storage.sec = RCL_NS_TO_S(min_timeout);
+      temporary_timeout_storage.nsec = min_timeout % 1000000000;
+      timeout_argument = &temporary_timeout_storage;
+    }
   }
 
   // Wait.
@@ -604,6 +673,8 @@ rcl_wait(rcl_wait_set_t * wait_set, int64_t timeout)
   // Items that are not ready will have been set to NULL by rmw_wait.
   // We now update our handles accordingly.
 
+  bool timer_is_ready = false;
+
   // Check for ready timers
   // and set not ready timers (which includes canceled timers) to NULL.
   size_t i;
@@ -611,6 +682,7 @@ rcl_wait(rcl_wait_set_t * wait_set, int64_t timeout)
     if (!wait_set->timers[i]) {
       continue;
     }
+
     bool is_ready = false;
     rcl_ret_t ret = rcl_timer_is_ready(wait_set->timers[i], &is_ready);
     if (ret != RCL_RET_OK) {
@@ -618,6 +690,8 @@ rcl_wait(rcl_wait_set_t * wait_set, int64_t timeout)
     }
     if (!is_ready) {
       wait_set->timers[i] = NULL;
+    } else {
+      timer_is_ready = true;
     }
   }
   // Check for timeout, return RCL_RET_TIMEOUT only if it wasn't a timer.
@@ -661,7 +735,7 @@ rcl_wait(rcl_wait_set_t * wait_set, int64_t timeout)
     }
   }
 
-  if (RMW_RET_TIMEOUT == ret && !is_timer_timeout) {
+  if (RMW_RET_TIMEOUT == ret && !timer_is_ready) {
     return RCL_RET_TIMEOUT;
   }
   return RCL_RET_OK;
diff --git a/rcl/test/rcl/test_wait.cpp b/rcl/test/rcl/test_wait.cpp
index 626d047dc..b7c44840b 100644
--- a/rcl/test/rcl/test_wait.cpp
+++ b/rcl/test/rcl/test_wait.cpp
@@ -321,6 +321,147 @@ TEST_F(WaitSetTestFixture, zero_timeout_overrun_timer) {
   EXPECT_LE(diff, RCL_MS_TO_NS(50));
 }
 
+// Test that a ROS_TIME timer with an active time override does not wake up rcl_wait
+TEST_F(WaitSetTestFixture, no_wakeup_on_override_timer) {
+  rcl_wait_set_t wait_set = rcl_get_zero_initialized_wait_set();
+  rcl_ret_t ret =
+    rcl_wait_set_init(&wait_set, 0, 0, 1, 0, 0, 0, context_ptr, rcl_get_default_allocator());
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
+  {
+    ret = rcl_wait_set_fini(&wait_set);
+    EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  });
+  rcl_clock_t clock;
+  rcl_allocator_t allocator = rcl_get_default_allocator();
+  ret = rcl_clock_init(RCL_ROS_TIME, &clock, &allocator);
+  OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
+  {
+    ret = rcl_clock_fini(&clock);
+    EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  });
+  ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  ret = rcl_enable_ros_time_override(&clock);
+  ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  rcl_timer_t timer = rcl_get_zero_initialized_timer();
+  ret = rcl_timer_init2(
+    &timer, &clock, this->context_ptr, RCL_MS_TO_NS(10), nullptr, rcl_get_default_allocator(),
+    true);
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
+  {
+    ret = rcl_timer_fini(&timer);
+    EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  });
+  ret = rcl_wait_set_add_timer(&wait_set, &timer, NULL);
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  // The wait should block for the full timeout, since no time update arrives to ready the timer
+  std::chrono::steady_clock::time_point before_sc = std::chrono::steady_clock::now();
+  ret = rcl_wait(&wait_set, RCL_MS_TO_NS(100));
+  std::chrono::steady_clock::time_point after_sc = std::chrono::steady_clock::now();
+  // We expect a timeout here, as the override timer must not be reported ready without a time update
+  ASSERT_EQ(RCL_RET_TIMEOUT, ret) << rcl_get_error_string().str;
+  int64_t diff = std::chrono::duration_cast<std::chrono::nanoseconds>(after_sc - before_sc).count();
+  EXPECT_GE(diff, RCL_MS_TO_NS(100));
+}
+
+// Test that rcl_wait wakes up once a time override update makes a ROS_TIME timer ready
+TEST_F(WaitSetTestFixture, wakeup_on_override_timer) {
+  rcl_wait_set_t wait_set = rcl_get_zero_initialized_wait_set();
+  rcl_ret_t ret =
+    rcl_wait_set_init(&wait_set, 0, 0, 1, 0, 0, 0, context_ptr, rcl_get_default_allocator());
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
+  {
+    ret = rcl_wait_set_fini(&wait_set);
+    EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  });
+  rcl_clock_t clock;
+  rcl_allocator_t allocator = rcl_get_default_allocator();
+  ret = rcl_clock_init(RCL_ROS_TIME, &clock, &allocator);
+  OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
+  {
+    ret = rcl_clock_fini(&clock);
+    EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  });
+  ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  ret = rcl_enable_ros_time_override(&clock);
+  ASSERT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  rcl_timer_t timer = rcl_get_zero_initialized_timer();
+  ret = rcl_timer_init2(
+    &timer, &clock, this->context_ptr, RCL_MS_TO_NS(10), nullptr, rcl_get_default_allocator(),
+    true);
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  OSRF_TESTING_TOOLS_CPP_SCOPE_EXIT(
+  {
+    ret = rcl_timer_fini(&timer);
+    EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  });
+  ret = rcl_wait_set_add_timer(&wait_set, &timer, NULL);
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  // Total time spent waiting should be well below the given timeout of 100 ms
+  std::chrono::steady_clock::time_point before_sc = std::chrono::steady_clock::now();
+
+  rcl_ret_t wait_ret = RCL_RET_ERROR;
+  std::atomic_bool wait_done = false;
+  std::thread t([&wait_ret, &wait_set, &wait_done]() {
+      wait_ret = rcl_wait(&wait_set, RCL_MS_TO_NS(100));
+      wait_done = true;
+    });
+
+  ret = rcl_set_ros_time_override(&clock, RCL_MS_TO_NS(5));
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(15));
+  EXPECT_EQ(wait_done, false) << "Error, wait terminated too early";
+
+  ret = rcl_set_ros_time_override(&clock, RCL_MS_TO_NS(15));
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(5));
+
+  EXPECT_EQ(wait_done, true) << "Error, wait was not woken up by the time jump";
+
+  EXPECT_NE(wait_ret, RCL_RET_TIMEOUT);
+
+  t.join();
+
+  wait_done = false;
+  wait_ret = RCL_RET_ERROR;
+
+  ret = rcl_wait_set_clear(&wait_set);
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+  ret = rcl_wait_set_add_timer(&wait_set, &timer, NULL);
+  EXPECT_EQ(RCL_RET_OK, ret) << rcl_get_error_string().str;
+
+  // now all guard conditions are cleared; if we wait again, we should still
+  // wake up instantly, as the next call time was not advanced yet
+  // and therefore the timer should still be ready
+  t = std::thread(
+    [&wait_ret, &wait_set, &wait_done]() {
+      wait_ret = rcl_wait(&wait_set, RCL_MS_TO_NS(100));
+      wait_done = true;
+    });
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(5));
+
+  EXPECT_EQ(wait_done, true) << "Error, wait did not wake up for the still ready timer";
+
+  EXPECT_NE(wait_ret, RCL_RET_TIMEOUT);
+
+  t.join();
+
+  std::chrono::steady_clock::time_point after_sc = std::chrono::steady_clock::now();
+  int64_t diff = std::chrono::duration_cast<std::chrono::nanoseconds>(after_sc - before_sc).count();
+  EXPECT_LT(diff, RCL_MS_TO_NS(100));
+}
+
 // Check that a canceled timer doesn't wake up rcl_wait
 TEST_F(WaitSetTestFixture, canceled_timer) {
   rcl_wait_set_t wait_set = rcl_get_zero_initialized_wait_set();
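
Usage sketch (illustrative, not part of this patch): the snippet below shows how the changed rcl_wait() behavior is exercised from plain rcl code, mirroring the wakeup_on_override_timer test above. wait_on_override_timer is a hypothetical helper name, the rcl_context_t is assumed to be initialized elsewhere, and cleanup and error handling are shortened.

#include <rcl/rcl.h>
#include <rcl/time.h>
#include <rcl/timer.h>
#include <rcl/wait.h>

static rcl_ret_t wait_on_override_timer(rcl_context_t * context)
{
  rcl_allocator_t allocator = rcl_get_default_allocator();

  // A ROS_TIME clock with the time override enabled, as a /clock-driven clock would be.
  rcl_clock_t clock;
  rcl_ret_t ret = rcl_clock_init(RCL_ROS_TIME, &clock, &allocator);
  if (ret != RCL_RET_OK) {return ret;}
  ret = rcl_enable_ros_time_override(&clock);
  if (ret != RCL_RET_OK) {return ret;}

  // 10 ms period, no callback, autostarted (same parameters as in the tests above).
  rcl_timer_t timer = rcl_get_zero_initialized_timer();
  ret = rcl_timer_init2(&timer, &clock, context, RCL_MS_TO_NS(10), NULL, allocator, true);
  if (ret != RCL_RET_OK) {return ret;}

  rcl_wait_set_t wait_set = rcl_get_zero_initialized_wait_set();
  ret = rcl_wait_set_init(&wait_set, 0, 0, 1, 0, 0, 0, context, allocator);
  if (ret != RCL_RET_OK) {return ret;}
  ret = rcl_wait_set_add_timer(&wait_set, &timer, NULL);
  if (ret != RCL_RET_OK) {return ret;}

  // With the override active, rcl_wait() no longer derives its wake-up timeout from this
  // timer. Advancing the override time past the timer period (normally done by the time
  // source, e.g. a /clock subscription) makes the timer ready: if the update arrives while
  // rcl_wait() is blocked, the timer's guard condition wakes it; if it arrived beforehand,
  // as here, rcl_wait() returns immediately. Either way it returns before the 100 ms timeout.
  ret = rcl_set_ros_time_override(&clock, RCL_MS_TO_NS(15));
  if (ret != RCL_RET_OK) {return ret;}
  ret = rcl_wait(&wait_set, RCL_MS_TO_NS(100));

  // Cleanup (rcl_wait_set_fini, rcl_timer_fini, rcl_clock_fini) omitted for brevity.
  return ret;
}

Design note: only timers whose ROS_TIME clock actively uses the time override skip the timeout computation; SYSTEM_TIME and STEADY_TIME timers, and ROS_TIME timers without an active override, still contribute a per-clock minimum next call time that rcl_wait() converts into the rmw_wait timeout.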