Rename thread pool executor and wait on tokens
Ali ihsan Cengiz committed Oct 27, 2023
1 parent eef3b2d commit 4758c9f
Showing 5 changed files with 21 additions and 15 deletions.
2 changes: 1 addition & 1 deletion example/ExampleTasks.hpp
@@ -5,7 +5,7 @@
 #include <memory>
 #include <mutex>
 #include "TaskListener.hpp"
-#include "executor/ThreadPool.hpp"
+#include "executor/ThreadPoolExecutor.hpp"
 
 std::mutex m;
 
24 changes: 15 additions & 9 deletions example/main.cpp
@@ -50,17 +50,23 @@ struct DeviceEventListener : TaskListenerBase
 int main()
 {
     DefaultSchedulerPolicy policy;
-    std::shared_ptr<ThreadPool> wg = std::make_shared<ThreadPool>(policy);
-    TaskChannel ch(wg);
-    DeviceFwUploadTaskListener listener(ch);
+    auto executor = std::make_shared<ThreadPoolExecutor>(policy);
+    TaskChannel ch(executor);
+    DeviceFwUploadTaskListener deviceListener(ch);
     DeviceEventListener evListener(ch);
+    std::vector<TokenPtr> listOfToken;
 
-    listener.get_channel().push(std::make_shared<UploadTask>("my_server_url:/upload/image/", "firmware.bin"));
-    listener.get_channel().push(std::make_shared<DownloadTask>("my_server_url:/download/image/", "firmware.bin"));
-    evListener.get_channel().push(std::make_shared<OpenedEvent>());
-    evListener.get_channel().push(std::make_shared<ClosedEvent>());
-    evListener.get_channel().push(std::make_shared<DataReceivedEvent>());
-    evListener.get_channel().push(std::make_shared<DataTransmittedEvent>());
+    listOfToken.push_back(deviceListener.get_channel().push(std::make_shared<UploadTask>("my_server_url:/upload/image/", "firmware.bin")));
+    listOfToken.push_back(deviceListener.get_channel().push(std::make_shared<DownloadTask>("my_server_url:/download/image/", "firmware.bin")));
+    listOfToken.push_back(evListener.get_channel().push(std::make_shared<OpenedEvent>()));
+    listOfToken.push_back(evListener.get_channel().push(std::make_shared<ClosedEvent>()));
+    listOfToken.push_back(evListener.get_channel().push(std::make_shared<DataReceivedEvent>()));
+    listOfToken.push_back(evListener.get_channel().push(std::make_shared<DataTransmittedEvent>()));
+
+    for (size_t i = 0; i < listOfToken.size(); i++)
+    {
+        while (!listOfToken[i]->is_set()) { }
+    }
 
     return 0;
 }
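
Note: main() now blocks until every pushed task signals completion through its token, but it does so by spinning on while (!listOfToken[i]->is_set()) { }, which keeps a core at 100% while tasks drain. The repo's Token type is not shown in this diff; below is a minimal sketch of a token with a blocking wait(), assuming only the is_set()/TokenPtr surface used above — the set() and wait() members and the Token definition here are illustrative, not the library's actual API.

#include <condition_variable>
#include <memory>
#include <mutex>

// Sketch of a token with the same is_set() surface the example uses,
// plus set() for the worker side and a blocking wait() so callers need not spin.
struct Token
{
    void set()
    {
        {
            std::lock_guard<std::mutex> lock(_m);
            _set = true;
        }
        _cv.notify_all();
    }

    bool is_set() const
    {
        std::lock_guard<std::mutex> lock(_m);
        return _set;
    }

    void wait()
    {
        std::unique_lock<std::mutex> lock(_m);
        _cv.wait(lock, [this] { return _set; });
    }

private:
    mutable std::mutex _m;
    std::condition_variable _cv;
    bool _set = false;
};

using TokenPtr = std::shared_ptr<Token>;

With a wait() like this, the loop in main() reduces to for (auto& token : listOfToken) token->wait(); and no CPU time is burned while waiting.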
2 changes: 1 addition & 1 deletion include/TaskDispatcher.hpp
@@ -2,7 +2,7 @@
 
 #include <map>
 #include <typeindex>
-#include "executor/ThreadPool.hpp"
+#include "executor/ThreadPoolExecutor.hpp"
 #include "detail/TaskMapping.hpp"
 
 struct TaskDispatcher
6 changes: 3 additions & 3 deletions include/executor/ThreadPool.hpp → include/executor/ThreadPoolExecutor.hpp
@@ -86,9 +86,9 @@ struct Worker
 
 } // namespace detail
 
-struct ThreadPool final : public executor::detail::ExecutorInterface
+struct ThreadPoolExecutor final : public executor::detail::ExecutorInterface
 {
-    ThreadPool(SchedulePolicy& policy, std::size_t worker_size = 4) : number_of_workers(worker_size), _policy(policy)
+    ThreadPoolExecutor(SchedulePolicy& policy, std::size_t worker_size = 4) : number_of_workers(worker_size), _policy(policy)
     {
         for (size_t i = 0; i < number_of_workers; i++)
         {
@@ -103,7 +103,7 @@ struct ThreadPool final : public executor::detail::ExecutorInterface
         }
         _policy.set_worker_id_list(wids);
     }
-    virtual ~ThreadPool() override
+    virtual ~ThreadPoolExecutor() override
    {
    }
 
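
Note: the constructor takes the policy as SchedulePolicy& and stores that reference in _policy, so the policy object must outlive the executor. A minimal construction sketch under that assumption, with includes as the example files use them; the worker count 8 is arbitrary:

#include <memory>
#include "executor/ThreadPoolExecutor.hpp"

int main()
{
    DefaultSchedulerPolicy policy;  // must outlive the executor; ThreadPoolExecutor keeps a reference to it
    auto executor = std::make_shared<ThreadPoolExecutor>(policy, 8);  // second argument overrides the default of 4 workers
    return 0;
}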
2 changes: 1 addition & 1 deletion tests/task_tests.cpp
@@ -46,7 +46,7 @@ struct DummyTaskListener : TaskListenerBase
 TEST(TaskTestFixture, canDispatchTasks)
 {
     DefaultSchedulerPolicy policy;
-    std::shared_ptr<ThreadPool> wg = std::make_shared<ThreadPool>(policy);
+    std::shared_ptr<ThreadPoolExecutor> wg = std::make_shared<ThreadPoolExecutor>(policy);
     TaskChannel ch(wg);
     DummyTaskListener listener(ch);
 
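
Note: the updated test only swaps the executor type; it does not capture the token that push() returns. A sketch of a test that also waits on the token, assuming push() returns a TokenPtr as in example/main.cpp and a hypothetical DummyTask type that DummyTaskListener dispatches:

TEST(TaskTestFixture, canWaitOnTaskCompletionToken)
{
    DefaultSchedulerPolicy policy;
    auto executor = std::make_shared<ThreadPoolExecutor>(policy);
    TaskChannel ch(executor);
    DummyTaskListener listener(ch);

    // push() returns a token that is set once the task has executed
    TokenPtr token = listener.get_channel().push(std::make_shared<DummyTask>());
    while (!token->is_set()) { }  // same busy-wait as example/main.cpp

    EXPECT_TRUE(token->is_set());
}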
