From cda1436bdbaa6e65efb2400ddbe11875773dc738 Mon Sep 17 00:00:00 2001
From: Ben Clayton
Date: Fri, 5 Jun 2020 20:44:16 +0100
Subject: [PATCH] HACK: Log the affinity masks.

Something isn't working with the kokoro bot. Maybe this will tell us why.
---
 src/scheduler_test.cpp | 44 +++++++++++++++++++++----------------------
 src/thread.cpp         | 22 +++++++++++++++++++++-
 2 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/src/scheduler_test.cpp b/src/scheduler_test.cpp
index 3bf8849..e3ae260 100644
--- a/src/scheduler_test.cpp
+++ b/src/scheduler_test.cpp
@@ -143,28 +143,28 @@ TEST_P(WithBoundScheduler, FibersResumeOnSameStdThread) {
 }
 
 TEST_F(WithoutBoundScheduler, TasksOnlyScheduledOnWorkerThreads) {
-  marl::Scheduler::Config cfg;
-  cfg.setWorkerThreadCount(8);
-
-  auto scheduler = std::unique_ptr<marl::Scheduler>(new marl::Scheduler(cfg));
-  scheduler->bind();
-  defer(scheduler->unbind());
-
-  std::mutex mutex;
-  marl::containers::unordered_set<std::thread::id> threads(allocator);
-  marl::WaitGroup wg;
-  for (int i = 0; i < 10000; i++) {
-    wg.add(1);
-    marl::schedule([&mutex, &threads, wg] {
-      defer(wg.done());
-      std::unique_lock<std::mutex> lock(mutex);
-      threads.emplace(std::this_thread::get_id());
-    });
-  }
-  wg.wait();
-
-  ASSERT_LE(threads.size(), 8U);
-  ASSERT_EQ(threads.count(std::this_thread::get_id()), 0U);
+printf("**%d\n", __LINE__);  marl::Scheduler::Config cfg;
+printf("**%d\n", __LINE__);  cfg.setWorkerThreadCount(8);
+printf("**%d\n", __LINE__);
+printf("**%d\n", __LINE__);  auto scheduler = std::unique_ptr<marl::Scheduler>(new marl::Scheduler(cfg));
+printf("**%d\n", __LINE__);  scheduler->bind();
+printf("**%d\n", __LINE__);  defer(scheduler->unbind());
+printf("**%d\n", __LINE__);
+printf("**%d\n", __LINE__);  std::mutex mutex;
+printf("**%d\n", __LINE__);  marl::containers::unordered_set<std::thread::id> threads(allocator);
+printf("**%d\n", __LINE__);  marl::WaitGroup wg;
+printf("**%d\n", __LINE__);  for (int i = 0; i < 10; i++) {
+printf("**%d\n", __LINE__);    wg.add(1);
+printf("**%d\n", __LINE__);    marl::schedule([&mutex, &threads, wg] {
+printf("**%d\n", __LINE__);      defer(wg.done());
+printf("**%d\n", __LINE__);      std::unique_lock<std::mutex> lock(mutex);
+printf("**%d\n", __LINE__);      threads.emplace(std::this_thread::get_id());
+printf("**%d\n", __LINE__);    });
+printf("**%d\n", __LINE__);  }
+printf("**%d\n", __LINE__);  wg.wait();
+printf("**%d\n", __LINE__);
+printf("**%d\n", __LINE__);  ASSERT_LE(threads.size(), 8U);
+printf("**%d\n", __LINE__);  ASSERT_EQ(threads.count(std::this_thread::get_id()), 0U);
 }
 
 // Test that a marl::Scheduler *with dedicated worker threads* can be used
diff --git a/src/thread.cpp b/src/thread.cpp
index 80ce77f..5c97b9c 100644
--- a/src/thread.cpp
+++ b/src/thread.cpp
@@ -103,6 +103,12 @@ const ProcessorGroups& getProcessorGroups() {
         }
       }
     }
+    printf("*** Processor groups ***\n");
+    for (size_t i = 0; i < out.count; i++) {
+      printf("** Group %d\n", (int)i);
+      printf("   Count: %d\n", (int)out.groups[i].count);
+      printf("   Mask: 0x%8llx\n", (uint64_t)out.groups[i].affinity);
+    }
     return out;
   }();
   return groups;
@@ -138,13 +144,19 @@ Thread::Affinity Thread::Affinity::all(
     const auto& group = groups.groups[groupIdx];
     Core core;
     core.windows.group = static_cast<decltype(core.windows.group)>(groupIdx);
-    for (size_t coreIdx = 0; coreIdx < sizeof(KAFFINITY) * 8; coreIdx++) {
+    for (unsigned int coreIdx = 0; coreIdx < group.count; coreIdx++) {
       if ((group.affinity >> coreIdx) & 1) {
         core.windows.index = static_cast<decltype(core.windows.index)>(coreIdx);
         affinity.cores.emplace_back(std::move(core));
       }
     }
   }
+
+  printf("*** all() - count: %d ***\n", (int)affinity.cores.size());
+  for (auto core : affinity.cores) {
+    printf("- g:%d i:%d\n", (int)core.windows.group, (int)core.windows.index);
+  }
+
 #elif defined(__linux__)
   auto thread = pthread_self();
   cpu_set_t cpuset;
@@ -199,6 +211,14 @@ std::shared_ptr<Thread::Affinity::Policy> Thread::Affinity::Policy::anyOf(
           out.cores.push_back(core);
         }
       }
+
+      printf("*** anyOf(threadId: %d, affinity: %d) ***\n", (int)threadId,
+             (int)affinity.cores.size());
+      for (auto core : out.cores) {
+        printf("- g:%d i:%d\n", (int)core.windows.group,
+               (int)core.windows.index);
+      }
+
       return out;
 #else
       return Affinity(affinity, allocator);