-
Notifications
You must be signed in to change notification settings - Fork 407
/
executor.cpp
914 lines (849 loc) · 30.6 KB
/
executor.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
// Copyright 2015 Open Source Robotics Foundation, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <memory>
#include <map>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "rcl/allocator.h"
#include "rcl/error_handling.h"
#include "rclcpp/exceptions.hpp"
#include "rclcpp/executor.hpp"
#include "rclcpp/guard_condition.hpp"
#include "rclcpp/memory_strategy.hpp"
#include "rclcpp/node.hpp"
#include "rclcpp/scope_exit.hpp"
#include "rclcpp/utilities.hpp"
#include "rcutils/logging_macros.h"
using namespace std::chrono_literals;
using rclcpp::exceptions::throw_from_rcl_error;
using rclcpp::AnyExecutable;
using rclcpp::Executor;
using rclcpp::ExecutorOptions;
using rclcpp::FutureReturnCode;
Executor::Executor(const rclcpp::ExecutorOptions & options)
: spinning(false),
  shutdown_guard_condition_(std::make_shared<rclcpp::GuardCondition>(options.context)),
  memory_strategy_(options.memory_strategy)
{
  // Store the context for later use.
  context_ = options.context;
  // Guard condition used to interrupt a blocked rcl_wait() when entities are
  // added/removed or the executor is cancelled.
  rcl_guard_condition_options_t guard_condition_options = rcl_guard_condition_get_default_options();
  rcl_ret_t ret = rcl_guard_condition_init(
    &interrupt_guard_condition_, context_->get_rcl_context().get(), guard_condition_options);
  if (RCL_RET_OK != ret) {
    throw_from_rcl_error(ret, "Failed to create interrupt guard condition in Executor constructor");
  }
  // On context shutdown (e.g. ctrl-c), trigger the shutdown guard condition so
  // a thread blocked in rcl_wait() wakes up. Capturing a weak_ptr keeps the
  // context's shutdown-callback list from extending the guard condition's life.
  context_->on_shutdown(
    [weak_gc = std::weak_ptr<rclcpp::GuardCondition>{shutdown_guard_condition_}]() {
      auto strong_gc = weak_gc.lock();
      if (strong_gc) {
        strong_gc->trigger();
      }
    });
  // The number of guard conditions is always at least 2: 1 for the ctrl-c guard cond,
  // and one for the executor's guard cond (interrupt_guard_condition_)
  memory_strategy_->add_guard_condition(&shutdown_guard_condition_->get_rcl_guard_condition());
  // Put the executor's guard condition in
  memory_strategy_->add_guard_condition(&interrupt_guard_condition_);
  rcl_allocator_t allocator = memory_strategy_->get_allocator();
  // Initialize the wait set sized for just the two guard conditions; it is
  // cleared and resized for the actual entities in wait_for_work().
  ret = rcl_wait_set_init(
    &wait_set_,
    0, 2, 0, 0, 0, 0,
    context_->get_rcl_context().get(),
    allocator);
  if (RCL_RET_OK != ret) {
    RCUTILS_LOG_ERROR_NAMED(
      "rclcpp",
      "failed to create wait set: %s", rcl_get_error_string().str);
    rcl_reset_error();
    // Roll back the already-initialized interrupt guard condition before throwing.
    if (rcl_guard_condition_fini(&interrupt_guard_condition_) != RCL_RET_OK) {
      RCUTILS_LOG_ERROR_NAMED(
        "rclcpp",
        "failed to destroy guard condition: %s", rcl_get_error_string().str);
      rcl_reset_error();
    }
    throw_from_rcl_error(ret, "Failed to create wait set in Executor constructor");
  }
}
Executor::~Executor()
{
  // Disassociate all callback groups, so they may be re-added to another
  // executor later.
  for (auto & pair : weak_groups_to_nodes_) {
    auto group = pair.first.lock();
    if (group) {
      std::atomic_bool & has_executor = group->get_associated_with_executor_atomic();
      has_executor.store(false);
    }
  }
  // Disassociate all nodes.
  std::for_each(
    weak_nodes_.begin(), weak_nodes_.end(), []
    (rclcpp::node_interfaces::NodeBaseInterface::WeakPtr weak_node_ptr) {
      auto shared_node_ptr = weak_node_ptr.lock();
      if (shared_node_ptr) {
        std::atomic_bool & has_executor = shared_node_ptr->get_associated_with_executor_atomic();
        has_executor.store(false);
      }
    });
  weak_nodes_.clear();
  weak_groups_associated_with_executor_to_nodes_.clear();
  weak_groups_to_nodes_associated_with_executor_.clear();
  weak_groups_to_nodes_.clear();
  // Unregister every remaining node notify guard condition from the memory
  // strategy before dropping the bookkeeping map.
  for (const auto & pair : weak_nodes_to_guard_conditions_) {
    auto & guard_condition = pair.second;
    memory_strategy_->remove_guard_condition(guard_condition);
  }
  weak_nodes_to_guard_conditions_.clear();
  // Finalize the wait set.
  if (rcl_wait_set_fini(&wait_set_) != RCL_RET_OK) {
    RCUTILS_LOG_ERROR_NAMED(
      "rclcpp",
      "failed to destroy wait set: %s", rcl_get_error_string().str);
    rcl_reset_error();
  }
  // Finalize the interrupt guard condition.
  if (rcl_guard_condition_fini(&interrupt_guard_condition_) != RCL_RET_OK) {
    RCUTILS_LOG_ERROR_NAMED(
      "rclcpp",
      "failed to destroy guard condition: %s", rcl_get_error_string().str);
    rcl_reset_error();
  }
  // Remove and release the sigint guard condition
  memory_strategy_->remove_guard_condition(&shutdown_guard_condition_->get_rcl_guard_condition());
}
std::vector<rclcpp::CallbackGroup::WeakPtr>
Executor::get_all_callback_groups()
{
std::vector<rclcpp::CallbackGroup::WeakPtr> groups;
std::lock_guard<std::mutex> guard{mutex_};
for (const auto & group_node_ptr : weak_groups_associated_with_executor_to_nodes_) {
groups.push_back(group_node_ptr.first);
}
for (auto const & group_node_ptr : weak_groups_to_nodes_associated_with_executor_) {
groups.push_back(group_node_ptr.first);
}
return groups;
}
std::vector<rclcpp::CallbackGroup::WeakPtr>
Executor::get_manually_added_callback_groups()
{
std::vector<rclcpp::CallbackGroup::WeakPtr> groups;
std::lock_guard<std::mutex> guard{mutex_};
for (auto const & group_node_ptr : weak_groups_associated_with_executor_to_nodes_) {
groups.push_back(group_node_ptr.first);
}
return groups;
}
std::vector<rclcpp::CallbackGroup::WeakPtr>
Executor::get_automatically_added_callback_groups_from_nodes()
{
std::vector<rclcpp::CallbackGroup::WeakPtr> groups;
std::lock_guard<std::mutex> guard{mutex_};
for (auto const & group_node_ptr : weak_groups_to_nodes_associated_with_executor_) {
groups.push_back(group_node_ptr.first);
}
return groups;
}
void
Executor::add_callback_groups_from_nodes_associated_to_executor()
{
  // For every node added via add_node(), adopt any callback group that has
  // opted into automatic association and is not yet claimed by an executor.
  // Caller must hold mutex_.
  for (auto & weak_node : weak_nodes_) {
    auto node = weak_node.lock();
    if (!node) {
      continue;
    }
    for (auto & weak_group : node->get_callback_groups()) {
      auto group = weak_group.lock();
      if (group && group->automatically_add_to_executor_with_node() &&
        !group->get_associated_with_executor_atomic().load())
      {
        this->add_callback_group_to_map(
          group,
          node,
          weak_groups_to_nodes_associated_with_executor_,
          true);
      }
    }
  }
}
// Insert a callback group (and, if new, its node's notify guard condition)
// into the given group->node map. Throws if the group is already claimed by
// any executor. Caller must hold mutex_.
void
Executor::add_callback_group_to_map(
  rclcpp::CallbackGroup::SharedPtr group_ptr,
  rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr,
  rclcpp::memory_strategy::MemoryStrategy::WeakCallbackGroupsToNodesMap & weak_groups_to_nodes,
  bool notify)
{
  // If the callback_group already has an executor
  // (the exchange also atomically claims the group for this executor).
  std::atomic_bool & has_executor = group_ptr->get_associated_with_executor_atomic();
  if (has_executor.exchange(true)) {
    throw std::runtime_error("Callback group has already been added to an executor.");
  }
  // The node counts as new only if no group of it is tracked in either map yet.
  bool is_new_node = !has_node(node_ptr, weak_groups_to_nodes_associated_with_executor_) &&
    !has_node(node_ptr, weak_groups_associated_with_executor_to_nodes_);
  rclcpp::CallbackGroup::WeakPtr weak_group_ptr = group_ptr;
  auto insert_info =
    weak_groups_to_nodes.insert(std::make_pair(weak_group_ptr, node_ptr));
  bool was_inserted = insert_info.second;
  if (!was_inserted) {
    throw std::runtime_error("Callback group was already added to executor.");
  }
  // Also add to the map that contains all callback groups
  weak_groups_to_nodes_.insert(std::make_pair(weak_group_ptr, node_ptr));
  if (is_new_node) {
    rclcpp::node_interfaces::NodeBaseInterface::WeakPtr node_weak_ptr(node_ptr);
    weak_nodes_to_guard_conditions_[node_weak_ptr] = node_ptr->get_notify_guard_condition();
    if (notify) {
      // Interrupt waiting to handle new node
      rcl_ret_t ret = rcl_trigger_guard_condition(&interrupt_guard_condition_);
      if (ret != RCL_RET_OK) {
        throw_from_rcl_error(ret, "Failed to trigger guard condition on callback group add");
      }
    }
    // Add the node's notify condition to the guard condition handles
    memory_strategy_->add_guard_condition(node_ptr->get_notify_guard_condition());
  }
}
void
Executor::add_callback_group(
  rclcpp::CallbackGroup::SharedPtr group_ptr,
  rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr,
  bool notify)
{
  // Public entry point for manual group registration; record the group in the
  // manually-added map under the lock.
  std::lock_guard<std::mutex> guard{mutex_};
  this->add_callback_group_to_map(
    group_ptr, node_ptr, weak_groups_associated_with_executor_to_nodes_, notify);
}
// Associate a node with this executor and adopt all of its callback groups
// that are flagged for automatic association. Throws if the node is already
// owned by an executor.
void
Executor::add_node(rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, bool notify)
{
  // If the node already has an executor
  // (the exchange also atomically claims the node for this executor).
  std::atomic_bool & has_executor = node_ptr->get_associated_with_executor_atomic();
  if (has_executor.exchange(true)) {
    throw std::runtime_error("Node has already been added to an executor.");
  }
  std::lock_guard<std::mutex> guard{mutex_};
  for (auto & weak_group : node_ptr->get_callback_groups()) {
    auto group_ptr = weak_group.lock();
    // Skip groups that are gone, already claimed, or opted out of
    // automatic association.
    if (group_ptr != nullptr && !group_ptr->get_associated_with_executor_atomic().load() &&
      group_ptr->automatically_add_to_executor_with_node())
    {
      this->add_callback_group_to_map(
        group_ptr,
        node_ptr,
        weak_groups_to_nodes_associated_with_executor_,
        notify);
    }
  }
  // Track the node itself so later-created groups can be picked up in
  // add_callback_groups_from_nodes_associated_to_executor().
  weak_nodes_.push_back(node_ptr);
}
// Remove a callback group from the given map (and the all-groups map),
// releasing the group for other executors. If this was the node's last group
// known to this executor, also drop the node's notify guard condition.
// Caller must hold mutex_.
void
Executor::remove_callback_group_from_map(
  rclcpp::CallbackGroup::SharedPtr group_ptr,
  rclcpp::memory_strategy::MemoryStrategy::WeakCallbackGroupsToNodesMap & weak_groups_to_nodes,
  bool notify)
{
  rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr;
  rclcpp::CallbackGroup::WeakPtr weak_group_ptr = group_ptr;
  auto iter = weak_groups_to_nodes.find(weak_group_ptr);
  if (iter != weak_groups_to_nodes.end()) {
    node_ptr = iter->second.lock();
    if (node_ptr == nullptr) {
      throw std::runtime_error("Node must not be deleted before its callback group(s).");
    }
    weak_groups_to_nodes.erase(iter);
    weak_groups_to_nodes_.erase(group_ptr);
    // Release the group so it may be added to another executor.
    std::atomic_bool & has_executor = group_ptr->get_associated_with_executor_atomic();
    has_executor.store(false);
  } else {
    throw std::runtime_error("Callback group needs to be associated with executor.");
  }
  // If the node was matched and removed, interrupt waiting.
  // (No group of this node remains in either map.)
  if (!has_node(node_ptr, weak_groups_to_nodes_associated_with_executor_) &&
    !has_node(node_ptr, weak_groups_associated_with_executor_to_nodes_))
  {
    rclcpp::node_interfaces::NodeBaseInterface::WeakPtr node_weak_ptr(node_ptr);
    weak_nodes_to_guard_conditions_.erase(node_weak_ptr);
    if (notify) {
      rcl_ret_t ret = rcl_trigger_guard_condition(&interrupt_guard_condition_);
      if (ret != RCL_RET_OK) {
        throw_from_rcl_error(ret, "Failed to trigger guard condition on callback group remove");
      }
    }
    memory_strategy_->remove_guard_condition(node_ptr->get_notify_guard_condition());
  }
}
void
Executor::remove_callback_group(
  rclcpp::CallbackGroup::SharedPtr group_ptr,
  bool notify)
{
  // Public entry point: manually added groups live in the
  // "associated with executor" map.
  std::lock_guard<std::mutex> guard{mutex_};
  this->remove_callback_group_from_map(
    group_ptr, weak_groups_associated_with_executor_to_nodes_, notify);
}
void
Executor::add_node(std::shared_ptr<rclcpp::Node> node_ptr, bool notify)
{
  // Convenience overload: forward the node's base interface.
  auto node_base = node_ptr->get_node_base_interface();
  this->add_node(node_base, notify);
}
// Detach a node from this executor: drop it from weak_nodes_, remove all of
// its automatically associated callback groups, and release the node for
// other executors. Throws if the node is not associated with any executor, or
// not with this one.
void
Executor::remove_node(rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, bool notify)
{
  if (!node_ptr->get_associated_with_executor_atomic().load()) {
    throw std::runtime_error("Node needs to be associated with an executor.");
  }
  std::lock_guard<std::mutex> guard{mutex_};
  bool found_node = false;
  // Erase every weak_nodes_ entry that resolves to this node (also prunes
  // expired entries that happen to compare equal).
  auto node_it = weak_nodes_.begin();
  while (node_it != weak_nodes_.end()) {
    bool matched = (node_it->lock() == node_ptr);
    if (matched) {
      found_node = true;
      node_it = weak_nodes_.erase(node_it);
    } else {
      ++node_it;
    }
  }
  if (!found_node) {
    throw std::runtime_error("Node needs to be associated with this executor.");
  }
  // Remove every automatically associated group that belongs to this node.
  for (auto it = weak_groups_to_nodes_associated_with_executor_.begin();
    it != weak_groups_to_nodes_associated_with_executor_.end(); )
  {
    auto weak_node_ptr = it->second;
    auto shared_node_ptr = weak_node_ptr.lock();
    auto group_ptr = it->first.lock();
    // Increment iterator before removing in case it's invalidated
    it++;
    if (shared_node_ptr == node_ptr) {
      remove_callback_group_from_map(
        group_ptr,
        weak_groups_to_nodes_associated_with_executor_,
        notify);
    }
  }
  // Release the node so it may be added to another executor.
  std::atomic_bool & has_executor = node_ptr->get_associated_with_executor_atomic();
  has_executor.store(false);
}
void
Executor::remove_node(std::shared_ptr<rclcpp::Node> node_ptr, bool notify)
{
  // Convenience overload: forward the node's base interface.
  auto node_base = node_ptr->get_node_base_interface();
  this->remove_node(node_base, notify);
}
void
Executor::spin_node_once_nanoseconds(
  rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node,
  std::chrono::nanoseconds timeout)
{
  // Temporarily attach the node (without notifying a concurrent wait), run at
  // most one ready callback within the timeout, then detach it again.
  this->add_node(node, false);
  spin_once(timeout);
  this->remove_node(node, false);
}
void
Executor::spin_node_some(rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node)
{
  // Temporarily attach the node, drain the work that is currently ready,
  // then detach it again.
  this->add_node(node, false);
  spin_some();
  this->remove_node(node, false);
}
void
Executor::spin_node_some(std::shared_ptr<rclcpp::Node> node)
{
  // Convenience overload: forward the node's base interface.
  auto node_base = node->get_node_base_interface();
  this->spin_node_some(node_base);
}
void Executor::spin_some(std::chrono::nanoseconds max_duration)
{
  // Non-exhaustive variant: only handle work that was ready on entry.
  this->spin_some_impl(max_duration, false);
}
void Executor::spin_all(std::chrono::nanoseconds max_duration)
{
  // Exhaustive variant: keep collecting newly ready work until nothing is
  // left or max_duration elapses. A non-positive duration is rejected.
  if (max_duration <= std::chrono::nanoseconds::zero()) {
    throw std::invalid_argument("max_duration must be positive");
  }
  this->spin_some_impl(max_duration, true);
}
// Shared implementation for spin_some()/spin_all(). Executes ready work until
// none remains (or, when exhaustive, until repeated waits yield nothing new),
// the duration elapses, spinning is cancelled, or the context shuts down.
// A max_duration of 0 means "no time limit".
void
Executor::spin_some_impl(std::chrono::nanoseconds max_duration, bool exhaustive)
{
  auto start = std::chrono::steady_clock::now();
  auto max_duration_not_elapsed = [max_duration, start]() {
      if (std::chrono::nanoseconds(0) == max_duration) {
        // told to spin forever if need be
        return true;
      } else if (std::chrono::steady_clock::now() - start < max_duration) {
        // told to spin only for some maximum amount of time
        return true;
      }
      // spun too long
      return false;
    };
  // Only one spin variant may run at a time.
  if (spinning.exchange(true)) {
    throw std::runtime_error("spin_some() called while already spinning");
  }
  // Clear the flag on every exit path, including exceptions from callbacks.
  RCLCPP_SCOPE_EXIT(this->spinning.store(false); );
  bool work_available = false;
  while (rclcpp::ok(context_) && spinning.load() && max_duration_not_elapsed()) {
    AnyExecutable any_exec;
    // Re-poll the middleware (with a zero timeout) only when the previous
    // iteration found nothing, so already-collected work drains first.
    if (!work_available) {
      wait_for_work(std::chrono::milliseconds::zero());
    }
    if (get_next_ready_executable(any_exec)) {
      execute_any_executable(any_exec);
      work_available = true;
    } else {
      // Nothing ready: spin_some() stops here; spin_all() re-polls once more
      // in case executing callbacks made new work ready.
      if (!work_available || !exhaustive) {
        break;
      }
      work_available = false;
    }
  }
}
void
Executor::spin_once_impl(std::chrono::nanoseconds timeout)
{
AnyExecutable any_exec;
if (get_next_executable(any_exec, timeout)) {
execute_any_executable(any_exec);
}
}
void
Executor::spin_once(std::chrono::nanoseconds timeout)
{
  // Only one spin variant may run at a time.
  if (spinning.exchange(true)) {
    throw std::runtime_error("spin_once() called while already spinning");
  }
  // Clear the flag even if the executed callback throws.
  RCLCPP_SCOPE_EXIT(this->spinning.store(false); );
  spin_once_impl(timeout);
}
void
Executor::cancel()
{
  // Stop spinning and wake any thread currently blocked in rcl_wait().
  spinning.store(false);
  rcl_ret_t ret = rcl_trigger_guard_condition(&interrupt_guard_condition_);
  if (RCL_RET_OK != ret) {
    throw_from_rcl_error(ret, "Failed to trigger guard condition in cancel");
  }
}
void
Executor::set_memory_strategy(rclcpp::memory_strategy::MemoryStrategy::SharedPtr memory_strategy)
{
  // Swap in a new strategy; rejecting null keeps later unchecked uses safe.
  if (nullptr == memory_strategy) {
    throw std::runtime_error("Received NULL memory strategy in executor.");
  }
  std::lock_guard<std::mutex> guard{mutex_};
  memory_strategy_ = memory_strategy;
}
// Dispatch whichever entity the AnyExecutable carries (timer, subscription,
// service, client, or waitable), then release its callback group and wake the
// wait set so readiness can be recomputed.
void
Executor::execute_any_executable(AnyExecutable & any_exec)
{
  if (!spinning.load()) {
    return;
  }
  if (any_exec.timer) {
    execute_timer(any_exec.timer);
  }
  if (any_exec.subscription) {
    execute_subscription(any_exec.subscription);
  }
  if (any_exec.service) {
    execute_service(any_exec.service);
  }
  if (any_exec.client) {
    execute_client(any_exec.client);
  }
  if (any_exec.waitable) {
    any_exec.waitable->execute(any_exec.data);
  }
  // Reset the callback_group, regardless of type.
  // Guard against a null group: an executable may be produced without one
  // (e.g. by derived executors handling waitables), and dereferencing it
  // unconditionally would crash.
  if (any_exec.callback_group) {
    any_exec.callback_group->can_be_taken_from().store(true);
  }
  // Wake the wait, because it may need to be recalculated or work that
  // was previously blocked is now available.
  rcl_ret_t ret = rcl_trigger_guard_condition(&interrupt_guard_condition_);
  if (ret != RCL_RET_OK) {
    throw_from_rcl_error(ret, "Failed to trigger guard condition from execute_any_executable");
  }
}
// Run take_action(); if it reports data was taken, run handle_action().
// An RCLError from the take is logged (not propagated), and a failed take is
// treated as a normal spurious wakeup and logged at debug level only.
static
void
take_and_do_error_handling(
  const char * action_description,
  const char * topic_or_service_name,
  std::function<bool()> take_action,
  std::function<void()> handle_action)
{
  bool succeeded = false;
  try {
    succeeded = take_action();
  } catch (const rclcpp::exceptions::RCLError & rcl_error) {
    RCLCPP_ERROR(
      rclcpp::get_logger("rclcpp"),
      "executor %s '%s' unexpectedly failed: %s",
      action_description,
      topic_or_service_name,
      rcl_error.what());
  }
  if (!succeeded) {
    // Message or Service was not taken for some reason.
    // Note that this can be normal, if the underlying middleware needs to
    // interrupt wait spuriously it is allowed.
    // So in that case the executor cannot tell the difference in a
    // spurious wake up and an entity actually having data until trying
    // to take the data.
    RCLCPP_DEBUG(
      rclcpp::get_logger("rclcpp"),
      "executor %s '%s' failed to take anything",
      action_description,
      topic_or_service_name);
    return;
  }
  handle_action();
}
// Take one message for the given subscription and invoke its callback.
// Three mutually exclusive take paths: serialized message, loaned message,
// or a plain copied message.
void
Executor::execute_subscription(rclcpp::SubscriptionBase::SharedPtr subscription)
{
  rclcpp::MessageInfo message_info;
  // All three paths below go through inter-process take functions.
  message_info.get_rmw_message_info().from_intra_process = false;
  if (subscription->is_serialized()) {
    // This is the case where a copy of the serialized message is taken from
    // the middleware via inter-process communication.
    std::shared_ptr<SerializedMessage> serialized_msg = subscription->create_serialized_message();
    take_and_do_error_handling(
      "taking a serialized message from topic",
      subscription->get_topic_name(),
      [&]() {return subscription->take_serialized(*serialized_msg.get(), message_info);},
      [&]()
      {
        auto void_serialized_msg = std::static_pointer_cast<void>(serialized_msg);
        subscription->handle_message(void_serialized_msg, message_info);
      });
    // Give the buffer back to the subscription for reuse.
    subscription->return_serialized_message(serialized_msg);
  } else if (subscription->can_loan_messages()) {
    // This is the case where a loaned message is taken from the middleware via
    // inter-process communication, given to the user for their callback,
    // and then returned.
    void * loaned_msg = nullptr;
    // TODO(wjwwood): refactor this into methods on subscription when LoanedMessage
    // is extened to support subscriptions as well.
    take_and_do_error_handling(
      "taking a loaned message from topic",
      subscription->get_topic_name(),
      [&]()
      {
        rcl_ret_t ret = rcl_take_loaned_message(
          subscription->get_subscription_handle().get(),
          &loaned_msg,
          &message_info.get_rmw_message_info(),
          nullptr);
        if (RCL_RET_SUBSCRIPTION_TAKE_FAILED == ret) {
          // Not an error: nothing was available to take.
          return false;
        } else if (RCL_RET_OK != ret) {
          rclcpp::exceptions::throw_from_rcl_error(ret);
        }
        return true;
      },
      [&]() {subscription->handle_loaned_message(loaned_msg, message_info);});
    // Always return the loan to the middleware; failure here is logged only,
    // since the callback has already run.
    rcl_ret_t ret = rcl_return_loaned_message_from_subscription(
      subscription->get_subscription_handle().get(),
      loaned_msg);
    if (RCL_RET_OK != ret) {
      RCLCPP_ERROR(
        rclcpp::get_logger("rclcpp"),
        "rcl_return_loaned_message_from_subscription() failed for subscription on topic '%s': %s",
        subscription->get_topic_name(), rcl_get_error_string().str);
    }
    loaned_msg = nullptr;
  } else {
    // This case is taking a copy of the message data from the middleware via
    // inter-process communication.
    std::shared_ptr<void> message = subscription->create_message();
    take_and_do_error_handling(
      "taking a message from topic",
      subscription->get_topic_name(),
      [&]() {return subscription->take_type_erased(message.get(), message_info);},
      [&]() {subscription->handle_message(message, message_info);});
    // Give the message buffer back to the subscription for reuse.
    subscription->return_message(message);
  }
}
void
Executor::execute_timer(rclcpp::TimerBase::SharedPtr timer)
{
  // Fire the user's timer callback.
  timer->execute_callback();
}
void
Executor::execute_service(rclcpp::ServiceBase::SharedPtr service)
{
  // Take one pending request and hand it to the service's user callback.
  std::shared_ptr<void> request = service->create_request();
  auto request_header = service->create_request_header();
  take_and_do_error_handling(
    "taking a service server request from service",
    service->get_service_name(),
    [&]() {return service->take_type_erased_request(request.get(), *request_header);},
    [&]() {service->handle_request(request_header, request);});
}
void
Executor::execute_client(
  rclcpp::ClientBase::SharedPtr client)
{
  // Take one pending response and hand it to the client's user callback.
  std::shared_ptr<void> response = client->create_response();
  auto request_header = client->create_request_header();
  take_and_do_error_handling(
    "taking a service client response from service",
    client->get_service_name(),
    [&]() {return client->take_type_erased_response(response.get(), *request_header);},
    [&]() {client->handle_response(request_header, response);});
}
// Rebuild the wait set from all tracked entities, prune expired groups/nodes,
// and block in rcl_wait() for up to timeout (negative means wait forever,
// per rcl semantics — TODO confirm against rcl_wait docs).
void
Executor::wait_for_work(std::chrono::nanoseconds timeout)
{
  {
    std::lock_guard<std::mutex> guard(mutex_);
    // Check weak_nodes_ to find any callback group that is not owned
    // by an executor and add it to the list of callbackgroups for
    // collect entities. Also exchange to false so it is not
    // allowed to add to another executor
    add_callback_groups_from_nodes_associated_to_executor();
    // Collect the subscriptions and timers to be waited on
    memory_strategy_->clear_handles();
    bool has_invalid_weak_groups_or_nodes =
      memory_strategy_->collect_entities(weak_groups_to_nodes_);
    // Prune bookkeeping for any group or node that has been destroyed:
    // drop its guard condition and remove it from all three maps.
    if (has_invalid_weak_groups_or_nodes) {
      std::vector<rclcpp::CallbackGroup::WeakPtr> invalid_group_ptrs;
      for (auto pair : weak_groups_to_nodes_) {
        auto weak_group_ptr = pair.first;
        auto weak_node_ptr = pair.second;
        if (weak_group_ptr.expired() || weak_node_ptr.expired()) {
          invalid_group_ptrs.push_back(weak_group_ptr);
          auto node_guard_pair = weak_nodes_to_guard_conditions_.find(weak_node_ptr);
          if (node_guard_pair != weak_nodes_to_guard_conditions_.end()) {
            auto guard_condition = node_guard_pair->second;
            weak_nodes_to_guard_conditions_.erase(weak_node_ptr);
            memory_strategy_->remove_guard_condition(guard_condition);
          }
        }
      }
      // Erase the collected groups outside the iteration above to avoid
      // invalidating the iterator being traversed.
      std::for_each(
        invalid_group_ptrs.begin(), invalid_group_ptrs.end(),
        [this](rclcpp::CallbackGroup::WeakPtr group_ptr) {
          if (weak_groups_to_nodes_associated_with_executor_.find(group_ptr) !=
          weak_groups_to_nodes_associated_with_executor_.end())
          {
            weak_groups_to_nodes_associated_with_executor_.erase(group_ptr);
          }
          if (weak_groups_associated_with_executor_to_nodes_.find(group_ptr) !=
          weak_groups_associated_with_executor_to_nodes_.end())
          {
            weak_groups_associated_with_executor_to_nodes_.erase(group_ptr);
          }
          weak_groups_to_nodes_.erase(group_ptr);
        });
    }
    // clear wait set
    rcl_ret_t ret = rcl_wait_set_clear(&wait_set_);
    if (ret != RCL_RET_OK) {
      throw_from_rcl_error(ret, "Couldn't clear wait set");
    }
    // The size of waitables are accounted for in size of the other entities
    ret = rcl_wait_set_resize(
      &wait_set_, memory_strategy_->number_of_ready_subscriptions(),
      memory_strategy_->number_of_guard_conditions(), memory_strategy_->number_of_ready_timers(),
      memory_strategy_->number_of_ready_clients(), memory_strategy_->number_of_ready_services(),
      memory_strategy_->number_of_ready_events());
    if (RCL_RET_OK != ret) {
      throw_from_rcl_error(ret, "Couldn't resize the wait set");
    }
    if (!memory_strategy_->add_handles_to_wait_set(&wait_set_)) {
      throw std::runtime_error("Couldn't fill wait set");
    }
  }
  // Block without holding the lock, so entities can be added/removed
  // concurrently (they wake us via the interrupt guard condition).
  rcl_ret_t status =
    rcl_wait(&wait_set_, std::chrono::duration_cast<std::chrono::nanoseconds>(timeout).count());
  if (status == RCL_RET_WAIT_SET_EMPTY) {
    RCUTILS_LOG_WARN_NAMED(
      "rclcpp",
      "empty wait set received in rcl_wait(). This should never happen.");
  } else if (status != RCL_RET_OK && status != RCL_RET_TIMEOUT) {
    using rclcpp::exceptions::throw_from_rcl_error;
    throw_from_rcl_error(status, "rcl_wait() failed");
  }
  // check the null handles in the wait set and remove them from the handles in memory strategy
  // for callback-based entities
  std::lock_guard<std::mutex> guard(mutex_);
  memory_strategy_->remove_null_handles(&wait_set_);
}
rclcpp::node_interfaces::NodeBaseInterface::SharedPtr
Executor::get_node_by_group(
  const rclcpp::memory_strategy::MemoryStrategy::WeakCallbackGroupsToNodesMap & weak_groups_to_nodes,
  rclcpp::CallbackGroup::SharedPtr group)
{
  // Look up the node a callback group belongs to; nullptr when the group is
  // null, unknown, or its node has been destroyed.
  if (!group) {
    return nullptr;
  }
  auto entry = weak_groups_to_nodes.find(rclcpp::CallbackGroup::WeakPtr(group));
  if (entry == weak_groups_to_nodes.end()) {
    return nullptr;
  }
  return entry->second.lock();
}
// Find the callback group that owns the given timer, searching the manually
// added groups first and then the node-associated ones. Returns nullptr if no
// live group contains the timer.
rclcpp::CallbackGroup::SharedPtr
Executor::get_group_by_timer(rclcpp::TimerBase::SharedPtr timer)
{
  std::lock_guard<std::mutex> guard{mutex_};
  // Shared search over one group->node map; previously this loop was
  // duplicated verbatim for each map.
  auto find_group_with_timer =
    [&timer](const auto & weak_groups_to_nodes) -> rclcpp::CallbackGroup::SharedPtr {
      for (const auto & pair : weak_groups_to_nodes) {
        auto group = pair.first.lock();
        if (!group) {
          continue;
        }
        auto timer_ref = group->find_timer_ptrs_if(
          [&timer](const rclcpp::TimerBase::SharedPtr & timer_ptr) -> bool {
            return timer_ptr == timer;
          });
        if (timer_ref) {
          return group;
        }
      }
      return nullptr;
    };
  auto group = find_group_with_timer(weak_groups_associated_with_executor_to_nodes_);
  if (group) {
    return group;
  }
  return find_group_with_timer(weak_groups_to_nodes_associated_with_executor_);
}
bool
Executor::get_next_ready_executable(AnyExecutable & any_executable)
{
  // Consider every callback group known to this executor.
  return get_next_ready_executable_from_map(any_executable, weak_groups_to_nodes_);
}
// Pull the next ready executable out of the memory strategy, restricted to
// the given group->node map. Fixed priority order: timers, subscriptions,
// services, clients, waitables. On success the executable's callback group
// is claimed (for mutually exclusive groups) before returning.
bool
Executor::get_next_ready_executable_from_map(
  AnyExecutable & any_executable,
  const rclcpp::memory_strategy::MemoryStrategy::WeakCallbackGroupsToNodesMap & weak_groups_to_nodes)
{
  bool success = false;
  std::lock_guard<std::mutex> guard{mutex_};
  // Check the timers to see if there are any that are ready
  memory_strategy_->get_next_timer(any_executable, weak_groups_to_nodes);
  if (any_executable.timer) {
    success = true;
  }
  if (!success) {
    // Check the subscriptions to see if there are any that are ready
    memory_strategy_->get_next_subscription(any_executable, weak_groups_to_nodes);
    if (any_executable.subscription) {
      success = true;
    }
  }
  if (!success) {
    // Check the services to see if there are any that are ready
    memory_strategy_->get_next_service(any_executable, weak_groups_to_nodes);
    if (any_executable.service) {
      success = true;
    }
  }
  if (!success) {
    // Check the clients to see if there are any that are ready
    memory_strategy_->get_next_client(any_executable, weak_groups_to_nodes);
    if (any_executable.client) {
      success = true;
    }
  }
  if (!success) {
    // Check the waitables to see if there are any that are ready
    memory_strategy_->get_next_waitable(any_executable, weak_groups_to_nodes);
    if (any_executable.waitable) {
      // Waitables take their data here, so execution later sees a consistent
      // snapshot even if the entity changes in between.
      any_executable.data = any_executable.waitable->take_data();
      success = true;
    }
  }
  // At this point any_executable should be valid with either a valid subscription
  // or a valid timer, or it should be a null shared_ptr
  if (success) {
    // Reject executables whose group is not in the map we were restricted to.
    rclcpp::CallbackGroup::WeakPtr weak_group_ptr = any_executable.callback_group;
    auto iter = weak_groups_to_nodes.find(weak_group_ptr);
    if (iter == weak_groups_to_nodes.end()) {
      success = false;
    }
  }
  if (success) {
    // If it is valid, check to see if the group is mutually exclusive or
    // not, then mark it accordingly ..Check if the callback_group belongs to this executor
    if (any_executable.callback_group && any_executable.callback_group->type() == \
      CallbackGroupType::MutuallyExclusive)
    {
      // It should not have been taken otherwise
      assert(any_executable.callback_group->can_be_taken_from().load());
      // Set to false to indicate something is being run from this group
      // This is reset to true either when the any_executable is executed or when the
      // any_executable is destructued
      any_executable.callback_group->can_be_taken_from().store(false);
    }
  }
  // If there is no ready executable, return false
  return success;
}
bool
Executor::get_next_executable(AnyExecutable & any_executable, std::chrono::nanoseconds timeout)
{
  // Fast path: work may already be ready from a previous wait.
  // TODO(wjwwood): improve run to run efficiency of this function
  if (get_next_ready_executable(any_executable)) {
    return true;
  }
  // Nothing ready: block until work arrives, the timeout expires, or the
  // wait is interrupted.
  wait_for_work(timeout);
  if (!spinning.load()) {
    return false;
  }
  // Re-check after the wait.
  return get_next_ready_executable(any_executable);
}
// Returns true iff the weak_groups_to_nodes map has node_ptr as the value in any of its entry.
bool
Executor::has_node(
  const rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr,
  const rclcpp::memory_strategy::MemoryStrategy::WeakCallbackGroupsToNodesMap & weak_groups_to_nodes) const
{
  // An entry matches when its node weak_ptr still resolves to node_ptr.
  return std::any_of(
    weak_groups_to_nodes.begin(),
    weak_groups_to_nodes.end(),
    [&node_ptr](const WeakCallbackGroupsToNodesMap::value_type & entry) -> bool {
      return entry.second.lock() == node_ptr;
    });
}