Skip to content

Commit

Permalink
Merge branch 'main' into fix/make_runnable_zenoh_router_in_launch
Browse files Browse the repository at this point in the history
  • Loading branch information
CihatAltiparmak authored Aug 15, 2024
2 parents 6cd5673 + a6e71c4 commit 6597fa7
Show file tree
Hide file tree
Showing 21 changed files with 691 additions and 24 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/create_docker_image.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ jobs:
build_and_push_docker_image:
name: build
runs-on: ubuntu-latest
strategy:
fail-fast: false
permissions:
contents: read
packages: write
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/industrial_ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ jobs:
industrial_ci:
name: ROS ${{ matrix.ROS_DISTRO }} (${{ matrix.ROS_REPO }})
strategy:
fail-fast: false
matrix:
ROS_DISTRO: [rolling]
ROS_REPO: [testing, main]
Expand Down
66 changes: 56 additions & 10 deletions .github/workflows/run_benchmarks.yml
Original file line number Diff line number Diff line change
@@ -1,36 +1,82 @@
name: Run MoveIt Middleware Benchmarks and Push Results
name: Run MoveIt Middleware Benchmarks

on: [push]
on: [push, pull_request, workflow_dispatch]

jobs:
run_middleware_benchmarks:
name: run_benchmarks
run_all_benchmarks:
name: run_all_benchmarks
runs-on: ubuntu-latest
strategy:
fail-fast: false
permissions:
contents: write
deployments: write
container:
image: ghcr.io/cihataltiparmak/moveit_middleware_benchmark:latest
steps:
- name: run perception benchmark
- name: run benchmarks for rmw_fastrtps
run: |
cd /ws
cd ${ROS_UNDERLAY}/..
. /opt/ros/rolling/setup.sh
. install/setup.sh
ros2 launch moveit_middleware_benchmark scenario_perception_pipeline_benchmark.launch.py
sh src/moveit_middleware_benchmark/scripts/run_all_benchmarks.sh -i ./src/moveit_middleware_benchmark/middleware_configurations/rmw_fastrtps/config_rmw_fastrtps.sh -d /benchmark_results -m rmw_fastrtps_cpp
- name: run benchmarks for rmw_cyclonedds
run: |
cd ${ROS_UNDERLAY}/..
. /opt/ros/rolling/setup.sh
. install/setup.sh
sh src/moveit_middleware_benchmark/scripts/run_all_benchmarks.sh -i ./src/moveit_middleware_benchmark/middleware_configurations/rmw_cyclonedds/config_rmw_cyclonedds.sh -d /benchmark_results -m rmw_cyclonedds_cpp
- name: clone repo
uses: actions/checkout@v3
- name: add to safe directory
run: |
git config --global --add safe.directory /__w/moveit_middleware_benchmark/moveit_middleware_benchmark
- name: push perception benchmark results to github pages
- name: push perception benchmark results for rmw_fastrtps to github pages
uses: benchmark-action/github-action-benchmark@v1
with:
name: Perception Pipeline Benchmark
tool: 'googlecpp'
output-file-path: /benchmark_results/scenario_perception_pipeline/rmw_fastrtps_cpp.json
# Access token to deploy GitHub Pages branch
github-token: ${{ secrets.GITHUB_TOKEN }}
# Push and deploy GitHub pages branch automatically
auto-push: true
gh-pages-branch: "gh-pages"
benchmark-data-dir-path: "rmw_fastrtps"
- name: push simple service client benchmark results for rmw_fastrtps to github pages
uses: benchmark-action/github-action-benchmark@v1
with:
name: Basic Service Client Benchmark
tool: 'googlecpp'
output-file-path: /benchmark_results/scenario_basic_service_client/rmw_fastrtps_cpp.json
# Access token to deploy GitHub Pages branch
github-token: ${{ secrets.GITHUB_TOKEN }}
# Push and deploy GitHub pages branch automatically
auto-push: true
gh-pages-branch: "gh-pages"
benchmark-data-dir-path: "rmw_fastrtps"

- name: push perception benchmark results for rmw_cyclonedds to github pages
uses: benchmark-action/github-action-benchmark@v1
with:
name: Perception Pipeline Benchmark
tool: 'googlecpp'
output-file-path: /benchmark_results/scenario_perception_pipeline/rmw_cyclonedds_cpp.json
# Access token to deploy GitHub Pages branch
github-token: ${{ secrets.GITHUB_TOKEN }}
# Push and deploy GitHub pages branch automatically
auto-push: true
gh-pages-branch: "gh-pages"
benchmark-data-dir-path: "rmw_cyclonedds"
- name: push simple service client benchmark results for rmw_cyclonedds to github pages
uses: benchmark-action/github-action-benchmark@v1
with:
name: Movet Middleware Benchmark Project Perception Pipeline Benchmark
name: Basic Service Client Benchmark
tool: 'googlecpp'
output-file-path: /ws/middleware_benchmark_results.json
output-file-path: /benchmark_results/scenario_basic_service_client/rmw_cyclonedds_cpp.json
# Access token to deploy GitHub Pages branch
github-token: ${{ secrets.GITHUB_TOKEN }}
# Push and deploy GitHub pages branch automatically
auto-push: true
gh-pages-branch: "gh-pages"
benchmark-data-dir-path: "rmw_cyclonedds"
2 changes: 2 additions & 0 deletions .github/workflows/test_moveit_middleware_benchmark_action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ jobs:
moveit_middleware_benchmark_github_action_test:
name: build
runs-on: ubuntu-latest
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v2
- uses: ./
28 changes: 27 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ find_package(dynmsg REQUIRED)
find_package(nav_msgs REQUIRED)
find_package(ament_index_cpp REQUIRED)
find_package(yaml-cpp REQUIRED)
find_package(std_msgs REQUIRED)
find_package(example_interfaces REQUIRED)

add_executable(
scenario_perception_pipeline_benchmark_main
Expand All @@ -28,7 +30,8 @@ ament_target_dependencies(
"benchmark"
"dynmsg"
"nav_msgs"
"yaml-cpp")
"yaml-cpp"
"example_interfaces")

target_include_directories(
scenario_perception_pipeline_benchmark_main
Expand All @@ -38,7 +41,30 @@ target_include_directories(
target_link_libraries(scenario_perception_pipeline_benchmark_main
PUBLIC "benchmark::benchmark" ${YAML_CPP_LIBRARIES})

add_executable(
scenario_basic_service_client_benchmark_main
src/scenario_basic_service_client_benchmark_main.cpp
src/scenarios/scenario_basic_service_client.cpp)

ament_target_dependencies(
scenario_basic_service_client_benchmark_main
PUBLIC
"moveit_ros_planning_interface"
"rclcpp"
"benchmark"
"std_msgs"
"example_interfaces")

target_include_directories(
scenario_basic_service_client_benchmark_main
PUBLIC $<INSTALL_INTERFACE:include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)

target_link_libraries(scenario_basic_service_client_benchmark_main
PUBLIC "benchmark::benchmark" ${YAML_CPP_LIBRARIES})

install(TARGETS scenario_perception_pipeline_benchmark_main
scenario_basic_service_client_benchmark_main
DESTINATION lib/moveit_middleware_benchmark)

install(DIRECTORY launch config DESTINATION share/moveit_middleware_benchmark)
Expand Down
15 changes: 6 additions & 9 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,25 +1,22 @@
FROM ros:rolling
FROM moveit/moveit2:rolling-source

RUN apt-get update && \
apt install wget -y

RUN mkdir ws/src -p

RUN . /opt/ros/rolling/setup.sh && \
cd ws/src && \
git clone https://github.com/CihatAltiparmak/moveit_middleware_benchmark.git -b development && \
cd ${ROS_UNDERLAY}/../src && \
git clone https://github.com/CihatAltiparmak/moveit_middleware_benchmark.git && \
vcs import < moveit_middleware_benchmark/moveit_middleware_benchmark.repos --recursive

RUN cd ws/src && \
# git clone https://github.com/ros2/rmw_zenoh.git && \
git clone https://github.com/ros2/rmw_cyclonedds.git

RUN . /opt/ros/rolling/setup.sh && \
cd ws && \
cd ${ROS_UNDERLAY}/.. && \
rosdep update --rosdistro=$ROS_DISTRO && \
apt-get update && \
apt upgrade -y && \
rosdep install --from-paths src --ignore-src -r -y

RUN . /opt/ros/rolling/setup.sh && \
cd ws && \
cd ${ROS_UNDERLAY}/.. && \
colcon build --mixin release --packages-skip test_dynmsg dynmsg_demo
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ Under Construction
This middleware benchmark tool aims to measure middleware effects on various scenarios like perception pipeline in MoveIt. There is a following list to see scenarios and how to measure the effects of middleware.

* [Perception Pipeline](./docs/scenarios/perception_pipeline_benchmark.md)
* [Basic Service Client Works](./docs/scenarios/basic_service_client_benchmark.md)

## Getting Started

Expand Down
87 changes: 87 additions & 0 deletions docs/how_to_run.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,84 @@
## Run All Benchmarks
Before running the following benchmarks, please read and apply the instructions outlined in the [How To Install](how_to_install.md) section for providing the necessary requirements.

To run all benchmarks, select your middleware implementation and run the `run_all_benchmarks.sh` bash script. The script first executes the initialization script you chose; in this example, the default config script, which applies the necessary middleware configuration, is used. This mechanism lets users freely apply custom configurations.

```shell
# go to workspace this repository is built
cd ws
source /opt/ros/rolling/setup.sh
source install/setup.sh
# conduct all benchmarks (run from the workspace root)
sh src/moveit_middleware_benchmark/scripts/run_all_benchmarks.sh -i ./src/moveit_middleware_benchmark/middleware_configurations/rmw_cyclonedds/config.sh -d /benchmark_results
```

Let's explain all the operations in `run_all_benchmarks.sh`.

#### selection of initial script
```
-i ./src/moveit_middleware_benchmark/middleware_configurations/rmw_cyclonedds/config.sh
```

This argument is for selecting the initial scripts to be run. These initial scripts are used for configuring middleware-specific settings for improved performance. For example, you can use the initial script to configure TCP settings for rmw_zenoh like below.

```shell
echo "The configurations for rmw_zenoh_cpp is started!"
export RMW_IMPLEMENTATION=rmw_zenoh_cpp
sudo sysctl -w "net.ipv4.tcp_rmem=4096 4096 4096"
sudo sysctl -w "net.ipv4.tcp_wmem=4096 4096 4096"
sudo sysctl -w "net.ipv4.tcp_mem=4096 4096 4096"
echo "The configurations for rmw_zenoh_cpp is finished!"
```

#### directory selection to save benchmark results
```
-d /benchmark_results
```

This argument indicates where the benchmark results are saved. For scenario_perception_pipeline and scenario_basic_service_client, the results are written following the directory tree below. Note that the `run_all_benchmarks.sh` script saves the benchmark results in JSON format.

```
benchmark_results/
├── scenario_basic_service_client
│   ├── rmw_cyclonedds_cpp.json
│   ├── rmw_fastrtps_cpp.json
│   └── rmw_zenoh_cpp.json
└── scenario_perception_pipeline
├── rmw_cyclonedds_cpp.json
├── rmw_fastrtps_cpp.json
└── rmw_zenoh_cpp.json
```

## Plot Visualization of Benchmark Results

After running `run_all_benchmarks.sh`, you can also visualize the benchmark results as box plots. Suppose you have benchmark results stored in the directory tree below, and that the directory named `benchmark_results` is located in the `ws` directory in which this repository is built.


```
benchmark_results/
├── scenario_basic_service_client
│   ├── rmw_cyclonedds_cpp.json
│   ├── rmw_fastrtps_cpp.json
│   └── rmw_zenoh_cpp.json
└── scenario_perception_pipeline
├── rmw_cyclonedds_cpp.json
├── rmw_fastrtps_cpp.json
└── rmw_zenoh_cpp.json
```

Just give the directory of benchmark results as argument and then visualize the benchmark results in plot.
```shell
cd ws
python3 src/moveit_middleware_benchmark/scripts/box_plot_visualizer.py benchmark_results

```

**NOTE THAT THE BELOW PICTURE DOESN'T PRESENT REAL RESULTS. IT'S JUST FOR SHOWCASE**

![](./pictures/box_plot_example.png)

## Scenarios

### [Perception Pipeline Benchmark](scenarios/perception_pipeline_benchmark.md)
Expand All @@ -7,3 +88,9 @@ This benchmark measures the elapsed time by which the determined path is sent fo
Firstly, `node` and `move_group_interface` are created in SetUp before each benchmark. The `poses` inside `nav_msgs/msg/Path` are sent one by one to plan a trajectory for the robot. If planning fails, only `failure_rate` is increased. If planning succeeds, the trajectory plan produced by `move_group_server` is sent via `move_group_interface` to start executing the planned trajectory, and then `success_number` is increased.

For instance, suppose the selected test_case includes 20 goal poses. These 20 goals are sent one by one to `move_group_server`. If 5 of the 20 goal poses fail, `success_number` equals 15 and `failure_number` equals 5. `success_number` and `failure_number` are important for observing the middlewares' behaviours.

### [Basic Service Client Works Benchmark](scenarios/basic_service_client_benchmark.md)

This benchmark measures the total elapsed time based on the interval between the client sending a request to the server and receiving the server's response. This benchmark utilizes the [ros2/demos](https://github.com/ros2/demos) packages' [example server](https://github.com/ros2/demos/blob/rolling/demo_nodes_cpp/src/services/add_two_ints_server.cpp).

In this benchmark scenario, the benchmarker node only has a client interface. The necessary server for this client is run in [the launch file of this benchmark scenario](../launch/scenario_basic_service_client_benchmark.launch.py). The client sends a request to the server and waits for the response. The client sends a second request once it receives the response to the first request from the server. These actions are repeated `sending_request_number` times. You can configure the `sending_request_number` parameter in [this scenario's launch file](../launch/scenario_basic_service_client_benchmark.launch.py).
Binary file added docs/pictures/box_plot_example.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
38 changes: 38 additions & 0 deletions docs/scenarios/basic_service_client_benchmark.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
## How To Run Basic Service Client Benchmark

Firstly, source your ros version. It's suggested to test with rolling version of ROS 2.

For instance, to test with rmw_zenoh, start the zenoh router using the following command in the terminal.
```sh
# go to your workspace
cd ws
# Be sure that ros2 daemon is killed.
pkill -9 -f ros && ros2 daemon stop
# Then start zenoh router
source /opt/ros/rolling/setup.bash
source install/setup.bash
export RMW_IMPLEMENTATION=rmw_zenoh_cpp
ros2 run rmw_zenoh_cpp rmw_zenohd
```

Select your rmw_implementation as `rmw_zenoh_cpp` and run the basic service client benchmark launch file in another terminal.
```sh
# go to your workspace
cd ws
source /opt/ros/rolling/setup.bash
source install/setup.bash
export RMW_IMPLEMENTATION=rmw_zenoh_cpp # select your rmw_implementation to benchmark
ros2 launch moveit_middleware_benchmark scenario_basic_service_client_benchmark.launch.py
```

By default, the benchmark runs with 6 repetitions. After the benchmark code finishes executing, a JSON file named `middleware_benchmark_results.json` is created with the benchmarking results. You can see the benchmark results in more detail inside this JSON file.

If you want to customize your benchmark arguments or select different test case, you can use below command.

```shell
ros2 launch moveit_middleware_benchmark scenario_basic_service_client_benchmark.launch.py benchmark_command_args:="--benchmark_out=middleware_benchmark_results.json --benchmark_out_format=json --benchmark_repetitions=1" sending_request_number:=50000
```

## How to benchmark the basic service client execution

The main idea here is to send some client request in `example_interfaces::srv::AddTwoInts` format to `add_two_ints_server` which is one of the examples of [ros2/demos package](https://github.com/ros2/demos) and then to measure the elapsed time by waiting response from server. This logic helps us to measure elapsed time between sending request and receiving response.
Loading

0 comments on commit 6597fa7

Please sign in to comment.