Commit e57a3c82 authored by 戈振鹏

use bool arg use_internal_executor instead of use_default_callback_group


Signed-off-by: zhenpeng ge <zhenpeng.ge@qq.com>
parent 881dbb01
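Note on the change (this paragraph and sketch are editorial, not part of the commit): with use_internal_executor = true (the new default) the client owns a SingleThreadedExecutor and invoke() spins it until the response arrives, so nothing else needs to spin the node; with false, invoke() only waits on the future, so the node must be spun externally (e.g. by a MultiThreadedExecutor), which is what allows calling invoke() from inside another callback of the same node. A minimal usage sketch under those assumptions; node and service names are illustrative, and the header path and nav2_util namespace are inferred from the include guard in the diff:

#include <chrono>
#include <memory>
#include "rclcpp/rclcpp.hpp"
#include "std_srvs/srv/empty.hpp"
#include "nav2_util/service_client.hpp"

int main(int argc, char ** argv)
{
  rclcpp::init(argc, argv);
  auto node = rclcpp::Node::make_shared("client_node");

  // Default (use_internal_executor = true): invoke() spins the client's own
  // SingleThreadedExecutor, so no external executor is required. A matching
  // "empty_srv" server is assumed to be running elsewhere.
  nav2_util::ServiceClient<std_srvs::srv::Empty> client("empty_srv", node);
  auto req = std::make_shared<std_srvs::srv::Empty::Request>();
  auto res = client.invoke(req, std::chrono::seconds(2));
  (void)res;

  // use_internal_executor = false: invoke() merely waits on the future, so the
  // node must be spun by an external (multi-threaded) executor; this is the
  // mode exercised by the new tests when invoking from a subscription callback.
  nav2_util::ServiceClient<std_srvs::srv::Empty> cb_client("empty_srv", node, false);

  rclcpp::shutdown();
  return 0;
}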
@@ -16,7 +16,7 @@
#define NAV2_UTIL__SERVICE_CLIENT_HPP_
#include <string>
#include <memory>
#include "rclcpp/rclcpp.hpp"
namespace nav2_util
@@ -34,32 +34,31 @@ public:
* @brief A constructor
* @param service_name name of the service to call
* @param provided_node Node to create the service client off of
* @param use_default_callback_group Whether to use default callback group. when true,
* ServiceClient can't be invoked in ros callback.
* @param use_internal_executor Whether to create and use an internal SingleThreadedExecutor, default true.
*/
explicit ServiceClient(
const std::string & service_name,
const rclcpp::Node::SharedPtr & provided_node,
bool use_default_callback_group = false)
bool use_internal_executor = true)
: service_name_(service_name), node_(provided_node),
use_default_callback_group_(use_default_callback_group)
use_internal_executor_(use_internal_executor)
{
if (use_default_callback_group_) {
client_ = node_->create_client<ServiceT>(
service_name,
rmw_qos_profile_services_default);
} else {
if (use_internal_executor_) {
callback_group_ = node_->create_callback_group(
rclcpp::CallbackGroupType::MutuallyExclusive,
false);
callback_group_executor_.add_callback_group(
callback_group_executor_ = std::make_unique<rclcpp::executors::SingleThreadedExecutor>();
callback_group_executor_->add_callback_group(
callback_group_,
node_->get_node_base_interface());
client_ = node_->create_client<ServiceT>(
service_name,
rmw_qos_profile_services_default,
callback_group_);
} else {
callback_group_ = node_->create_callback_group(
rclcpp::CallbackGroupType::MutuallyExclusive);
}
client_ = node_->create_client<ServiceT>(
service_name,
rmw_qos_profile_services_default,
callback_group_);
}
using RequestType = typename ServiceT::Request;
@@ -91,7 +90,10 @@ public:
auto future_result = client_->async_send_request(request);
bool success = true;
if (use_default_callback_group_) {
if (use_internal_executor_) {
auto status = callback_group_executor_->spin_until_future_complete(future_result, timeout);
success = (status == rclcpp::FutureReturnCode::SUCCESS);
} else {
std::future_status status;
if (timeout < std::chrono::nanoseconds::zero()) {
// Block forever until future completed
@@ -102,9 +104,6 @@ public:
status = future_result.wait_for(timeout);
}
success = (status == std::future_status::ready);
} else {
auto status = callback_group_executor_.spin_until_future_complete(future_result, timeout);
success = (status == rclcpp::FutureReturnCode::SUCCESS);
}
if (!success) {
throw std::runtime_error(service_name_ + " service client: async_send_request failed");
@@ -139,15 +138,15 @@ public:
auto future_result = client_->async_send_request(request);
bool success = true;
if (use_default_callback_group_) {
if (use_internal_executor_) {
auto status = callback_group_executor_->spin_until_future_complete(future_result);
success = (status == rclcpp::FutureReturnCode::SUCCESS);
} else {
std::future_status status;
// Block forever until future completed
do {
status = future_result.wait_for(std::chrono::seconds(1));
} while (status != std::future_status::ready);
} else {
auto status = callback_group_executor_.spin_until_future_complete(future_result);
success = (status == rclcpp::FutureReturnCode::SUCCESS);
}
if (!success) {
@@ -172,8 +171,8 @@ protected:
std::string service_name_;
rclcpp::Node::SharedPtr node_;
rclcpp::CallbackGroup::SharedPtr callback_group_;
rclcpp::executors::SingleThreadedExecutor callback_group_executor_;
bool use_default_callback_group_;
std::unique_ptr<rclcpp::executors::SingleThreadedExecutor> callback_group_executor_;
bool use_internal_executor_;
typename rclcpp::Client<ServiceT>::SharedPtr client_;
};
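For clarity, the pattern the new constructor path relies on (editorial sketch, not part of the commit): a callback group created with automatically_add_to_executor_with_node = false is serviced only by the client's privately owned SingleThreadedExecutor, so spin_until_future_complete() can run without competing with whatever executor spins the rest of the node. A standalone sketch of that pattern using the same rclcpp calls as the diff; node and service names are illustrative:

#include <chrono>
#include <memory>
#include "rclcpp/rclcpp.hpp"
#include "std_srvs/srv/empty.hpp"

int main(int argc, char ** argv)
{
  rclcpp::init(argc, argv);
  auto node = rclcpp::Node::make_shared("demo_node");

  // The 'false' argument keeps this group out of any executor the node is
  // later added to; only the private executor below will service it.
  auto group = node->create_callback_group(
    rclcpp::CallbackGroupType::MutuallyExclusive, false);

  auto executor = std::make_unique<rclcpp::executors::SingleThreadedExecutor>();
  executor->add_callback_group(group, node->get_node_base_interface());

  auto client = node->create_client<std_srvs::srv::Empty>(
    "empty_srv", rmw_qos_profile_services_default, group);

  auto request = std::make_shared<std_srvs::srv::Empty::Request>();
  auto future = client->async_send_request(request);

  // Spinning only the private executor delivers the response callback even if
  // another executor is busy (or blocked) spinning the node's other callbacks.
  if (executor->spin_until_future_complete(future, std::chrono::seconds(2)) ==
    rclcpp::FutureReturnCode::SUCCESS)
  {
    auto response = future.get();
    (void)response;
  }

  rclcpp::shutdown();
  return 0;
}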
@@ -87,7 +87,50 @@ TEST(ServiceClient, can_ServiceClient_invoke_in_callback)
ASSERT_EQ(a, 1);
}
TEST(ServiceClient, can_ServiceClient_use_default_callback_group)
TEST(ServiceClient, can_ServiceClient_not_use_internal_executor1)
{
rclcpp::init(0, nullptr);
int a = 0;
auto service_node = rclcpp::Node::make_shared("service_node");
auto service = service_node->create_service<std_srvs::srv::Empty>(
"empty_srv",
[&a](std_srvs::srv::Empty::Request::SharedPtr, std_srvs::srv::Empty::Response::SharedPtr) {
a = 1;
});
auto srv_thread = std::thread([&]() {rclcpp::spin(service_node);});
auto pub_node = rclcpp::Node::make_shared("pub_node");
auto pub = pub_node->create_publisher<std_msgs::msg::Empty>(
"empty_topic",
rclcpp::QoS(1).transient_local());
auto pub_thread = std::thread([&]() {rclcpp::spin(pub_node);});
auto sub_node = rclcpp::Node::make_shared("sub_node");
ServiceClient<std_srvs::srv::Empty> client("empty_srv", sub_node, false);
auto sub = sub_node->create_subscription<std_msgs::msg::Empty>(
"empty_topic",
rclcpp::QoS(1),
[&client](std_msgs::msg::Empty::SharedPtr) {
auto req = std::make_shared<std_srvs::srv::Empty::Request>();
auto res = client.invoke(req);
});
auto sub_thread = std::thread(
[&]() {
rclcpp::executors::MultiThreadedExecutor exec(rclcpp::ExecutorOptions(), 2);
exec.add_node(sub_node);
exec.spin();
exec.remove_node(sub_node);
});
pub->publish(std_msgs::msg::Empty());
sleep(1);
rclcpp::shutdown();
srv_thread.join();
pub_thread.join();
sub_thread.join();
ASSERT_EQ(a, 1);
}
TEST(ServiceClient, can_ServiceClient_not_use_internal_executor2)
{
rclcpp::init(0, nullptr);
int a = 0;
@@ -100,7 +143,7 @@ TEST(ServiceClient, can_ServiceClient_use_default_callback_group)
auto srv_thread = std::thread([&]() {rclcpp::spin(service_node);});
auto client_node = rclcpp::Node::make_shared("client_node");
ServiceClient<std_srvs::srv::Empty> client("empty_srv", client_node, true);
ServiceClient<std_srvs::srv::Empty> client("empty_srv", client_node, false);
auto client_thread = std::thread([&]() {rclcpp::spin(client_node);});
auto req = std::make_shared<std_srvs::srv::Empty::Request>();