//
// Copyright (c) 2022 Klemens Morgenstern ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_COBALT_DETAIL_JOIN_HPP
#define BOOST_COBALT_DETAIL_JOIN_HPP

#include <boost/cobalt/detail/await_result_helper.hpp>
#include <boost/cobalt/detail/exception.hpp>
#include <boost/cobalt/detail/fork.hpp>
#include <boost/cobalt/detail/forward_cancellation.hpp>
#include <boost/cobalt/detail/util.hpp>
#include <boost/cobalt/detail/wrapper.hpp>
#include <boost/cobalt/task.hpp>
#include <boost/cobalt/this_thread.hpp>

#include <boost/asio/associated_cancellation_slot.hpp>
#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_signal.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/system/result.hpp>
#include <boost/variant2/variant.hpp>

#include <array>
#include <coroutine>
#include <algorithm>

namespace boost::cobalt::detail
{
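
// Implements the variadic form of join: every awaitable passed as an argument
// is started eagerly, and co_await yields a std::tuple of all values (or void
// if every child yields void). The first exception thrown by any child is
// stored and the remaining children are cancelled.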
template<typename ... Args>
struct join_variadic_impl
{
  using tuple_type = std::tuple<decltype(get_awaitable_type(std::declval<Args&&>()))...>;

  join_variadic_impl(Args && ... args)
      : args{std::forward<Args>(args)...}
  {
  }

  std::tuple<Args...> args;

  constexpr static std::size_t tuple_size = sizeof...(Args);
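
  // The awaitable returned from operator co_await. The shared-state base
  // provides the memory (256 bytes per joined awaitable) that the fork
  // coroutines below are allocated from.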
  struct awaitable : fork::static_shared_state<256 * tuple_size>
  {
    template<std::size_t ... Idx>
    awaitable(std::tuple<Args...> & args, std::index_sequence<Idx...>) :
        aws(awaitable_type_getter<Args>(std::get<Idx>(args))...)
    {
    }

    tuple_type aws;

    std::array<asio::cancellation_signal, tuple_size> cancel_;

    template<typename > constexpr static auto make_null() {return nullptr;};
    std::array<asio::cancellation_signal*, tuple_size> cancel = {make_null<Args>()...};

    constexpr static bool all_void = (std::is_void_v<co_await_result_t<Args>> && ...);

    template<typename T>
    using result_store_part =
        std::optional<void_as_monostate<co_await_result_t<T>>>;

    std::conditional_t<all_void,
                       variant2::monostate,
                       std::tuple<result_store_part<Args>...>> result;
    std::exception_ptr error;
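
    // Emit a cancellation for child Idx if it is still outstanding; cancel_all
    // does so for every child and is used when one of them fails.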
    template<std::size_t Idx>
    void cancel_step()
    {
      auto &r = cancel[Idx];
      if (r)
        std::exchange(r, nullptr)->emit(asio::cancellation_type::all);
    }

    void cancel_all()
    {
      mp11::mp_for_each<mp11::mp_iota_c<sizeof...(Args)>>
          ([&](auto idx)
           {
             cancel_step<idx>();
           });
    }

    template<std::size_t Idx>
    void interrupt_await_step()
    {
      using type = std::tuple_element_t<Idx, tuple_type>;
      using t = std::conditional_t<std::is_reference_v<std::tuple_element_t<Idx, std::tuple<Args...>>>,
          type &,
          type &&>;

      if constexpr (interruptible<t>)
        if (this->cancel[Idx] != nullptr)
          static_cast<t>(std::get<Idx>(aws)).interrupt_await();
    }

    void interrupt_await()
    {
      mp11::mp_for_each<mp11::mp_iota_c<sizeof...(Args)>>
          ([&](auto idx)
           {
             interrupt_await_step<idx>();
           });
    }
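
    // One fork coroutine per joined awaitable: it registers the child's
    // cancellation slot, waits until the parent is wired up, then awaits the
    // child and stores its value. Exceptions are recorded (first one wins)
    // and trigger cancellation of the remaining children.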
    // GCC doesn't like member funs
    template<std::size_t Idx>
    static detail::fork await_impl(awaitable & this_)
    try
    {
      auto & aw = std::get<Idx>(this_.aws);
      // check manually if we're ready
      auto rd = aw.await_ready();
      if (!rd)
      {
        this_.cancel[Idx] = &this_.cancel_[Idx];
        co_await this_.cancel[Idx]->slot();
        // make sure the executor is set
        co_await detail::fork::wired_up;

        // do the await - this doesn't call await-ready again
        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
        {
          co_await aw;
          if constexpr (!all_void)
            std::get<Idx>(this_.result).emplace();
        }
        else
          std::get<Idx>(this_.result).emplace(co_await aw);
      }
      else
      {
        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
        {
          aw.await_resume();
          if constexpr (!all_void)
            std::get<Idx>(this_.result).emplace();
        }
        else
          std::get<Idx>(this_.result).emplace(aw.await_resume());
      }
    }
    catch(...)
    {
      if (!this_.error)
        this_.error = std::current_exception();
      this_.cancel_all();
    }

    std::array<detail::fork(*)(awaitable&), tuple_size> impls {
        []<std::size_t ... Idx>(std::index_sequence<Idx...>)
        {
          return std::array<detail::fork(*)(awaitable&), tuple_size>{&await_impl<Idx>...};
        }(std::make_index_sequence<tuple_size>{})
    };

    detail::fork last_forked;
    std::size_t last_index = 0u;
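
    // Launch the forks one by one; if every child completes synchronously the
    // join is already done and the parent never suspends.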
    bool await_ready()
    {
      while (last_index < tuple_size)
      {
        last_forked = impls[last_index++](*this);
        if (!last_forked.done())
          return false; // one coro didn't immediately complete!
      }
      last_forked.release();
      return true;
    }
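
    // Resume the fork that caused the suspension, launch the remaining forks,
    // then hook the parent's cancellation slot up to all still-outstanding
    // children. Returns false if everything finished synchronously after all.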
    template<typename H>
    auto await_suspend(
        std::coroutine_handle<H> h
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
        , const boost::source_location & loc = BOOST_CURRENT_LOCATION
#endif
        )
    {
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
      this->loc = loc;
#endif
      this->exec = &detail::get_executor(h);
      last_forked.release().resume();
      while (last_index < tuple_size)
        impls[last_index++](*this).release();

      if (error)
        cancel_all();

      if (!this->outstanding_work()) // already done, resume right away.
        return false;

      // arm the cancel
      assign_cancellation(
          h,
          [&](asio::cancellation_type ct)
          {
            for (auto cs : cancel)
              if (cs)
                cs->emit(ct);
          });

      this->coro.reset(h.address());
      return true;
    }
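
    // Rethrows the first stored exception; otherwise returns the tuple of all
    // results (nothing at all when every child is void).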
#if _MSC_VER
    BOOST_NOINLINE
#endif
    auto await_resume()
    {
      if (error)
        std::rethrow_exception(error);

      if constexpr(!all_void)
        return mp11::tuple_transform(
            []<typename T>(std::optional<T> & var)
                -> T
            {
              BOOST_ASSERT(var.has_value());
              return std::move(*var);
            }, result);
    }
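
    // as_tuple: never throws, returns the exception_ptr (empty on success)
    // alongside the gathered results.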
    auto await_resume(const as_tuple_tag &)
    {
      using t = decltype(await_resume());
      if constexpr(!all_void)
      {
        if (error)
          return std::make_tuple(error, t{});
        else
          return std::make_tuple(std::current_exception(),
                                 mp11::tuple_transform(
                                     []<typename T>(std::optional<T> & var)
                                         -> T
                                     {
                                       BOOST_ASSERT(var.has_value());
                                       return std::move(*var);
                                     }, result));
      }
      else
        return std::make_tuple(error);
    }
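
    // as_result: wraps the outcome in a boost::system::result, carrying either
    // the tuple of values or the stored exception_ptr.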
    auto await_resume(const as_result_tag &)
    {
      using t = decltype(await_resume());
      using rt = system::result<t, std::exception_ptr>;
      if (error)
        return rt(system::in_place_error, error);

      if constexpr(!all_void)
        return rt(system::in_place_value,
                  mp11::tuple_transform(
                      []<typename T>(std::optional<T> & var)
                          -> T
                      {
                        BOOST_ASSERT(var.has_value());
                        return std::move(*var);
                      }, result));
      else
        return rt{system::in_place_value};
    }
  };

  awaitable operator co_await() &&
  {
    return awaitable(args, std::make_index_sequence<sizeof...(Args)>{});
  }
};
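
// Implements join over a range of awaitables. co_await yields a vector of the
// children's values (or void if the element type is void); bookkeeping and
// fork frames are allocated from the shared state's memory resource.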
template<typename Range>
struct join_ranged_impl
{
  Range aws;

  using result_type = co_await_result_t<std::decay_t<decltype(*std::begin(std::declval<Range>()))>>;

  constexpr static std::size_t result_size =
      sizeof(std::conditional_t<std::is_void_v<result_type>, variant2::monostate, result_type>);

  struct awaitable : fork::shared_state
  {
    struct dummy
    {
      template<typename ... Args>
      dummy(Args && ...) {}
    };

    using type = std::decay_t<decltype(*std::begin(std::declval<Range>()))>;

#if !defined(BOOST_COBALT_NO_PMR)
    pmr::polymorphic_allocator<void> alloc{&resource};

    std::conditional_t<awaitable_type<type>, Range &,
                       pmr::vector<co_awaitable_type<type>>> aws;

    pmr::vector<bool> ready{std::size(aws), alloc};
    pmr::vector<asio::cancellation_signal> cancel_{std::size(aws), alloc};
    pmr::vector<asio::cancellation_signal*> cancel{std::size(aws), alloc};

    std::conditional_t<
        std::is_void_v<result_type>,
        dummy,
        pmr::vector<std::optional<void_as_monostate<result_type>>>>
          result{
            cancel.size(),
            alloc};
#else
    std::allocator<void> alloc;
    std::conditional_t<awaitable_type<type>, Range &, std::vector<co_awaitable_type<type>>> aws;

    std::vector<bool> ready{std::size(aws), alloc};
    std::vector<asio::cancellation_signal> cancel_{std::size(aws), alloc};
    std::vector<asio::cancellation_signal*> cancel{std::size(aws), alloc};

    std::conditional_t<
        std::is_void_v<result_type>,
        dummy,
        std::vector<std::optional<void_as_monostate<result_type>>>>
          result{
            cancel.size(),
            alloc};
#endif
    std::exception_ptr error;
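
    // Two construction paths: if the range elements still need conversion via
    // operator co_await they are turned into a vector of awaitables first;
    // otherwise the range is referenced directly. Either way the readiness of
    // every element is sampled up front.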
    awaitable(Range & aws_, std::false_type /* needs operator co_await */)
        : fork::shared_state((512 + sizeof(co_awaitable_type<type>) + result_size) * std::size(aws_))
        , aws{alloc}
        , ready{std::size(aws_), alloc}
        , cancel_{std::size(aws_), alloc}
        , cancel{std::size(aws_), alloc}
    {
      aws.reserve(std::size(aws_));
      for (auto && a : aws_)
      {
        using a_0 = std::decay_t<decltype(a)>;
        using a_t = std::conditional_t<
            std::is_lvalue_reference_v<Range>, a_0 &, a_0 &&>;
        aws.emplace_back(awaitable_type_getter<a_t>(static_cast<a_t>(a)));
      }

      std::transform(std::begin(this->aws),
                     std::end(this->aws),
                     std::begin(ready),
                     [](auto & aw) {return aw.await_ready();});
    }

    awaitable(Range & aws, std::true_type /* needs operator co_await */)
        : fork::shared_state((512 + sizeof(co_awaitable_type<type>) + result_size) * std::size(aws))
        , aws(aws)
    {
      std::transform(std::begin(aws), std::end(aws), std::begin(ready), [](auto & aw) {return aw.await_ready();});
    }

    awaitable(Range & aws)
        : awaitable(aws, std::bool_constant<awaitable_type<type>>{})
    {
    }
    void cancel_all()
    {
      for (auto & r : cancel)
        if (r)
          std::exchange(r, nullptr)->emit(asio::cancellation_type::all);
    }

    void interrupt_await()
    {
      using t = std::conditional_t<std::is_reference_v<Range>,
          co_awaitable_type<type> &,
          co_awaitable_type<type> &&>;
      if constexpr (interruptible<t>)
      {
        std::size_t idx = 0u;
        for (auto & aw : aws)
          if (cancel[idx++])
            static_cast<t>(aw).interrupt_await();
      }
    }
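
    // Fork coroutine for element idx; mirrors the variadic await_impl above.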
    static detail::fork await_impl(awaitable & this_, std::size_t idx)
    try
    {
      auto & aw = *std::next(std::begin(this_.aws), idx);
      auto rd = aw.await_ready();
      if (!rd)
      {
        this_.cancel[idx] = &this_.cancel_[idx];
        co_await this_.cancel[idx]->slot();
        co_await detail::fork::wired_up;

        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
          co_await aw;
        else
          this_.result[idx].emplace(co_await aw);
      }
      else
      {
        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
          aw.await_resume();
        else
          this_.result[idx].emplace(aw.await_resume());
      }
    }
    catch(...)
    {
      if (!this_.error)
        this_.error = std::current_exception();
      this_.cancel_all();
    }

    detail::fork last_forked;
    std::size_t last_index = 0u;
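
    // Same eager-start protocol as the variadic awaitable: launch forks until
    // one suspends, finish launching in await_suspend, then arm cancellation.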
    bool await_ready()
    {
      while (last_index < cancel.size())
      {
        last_forked = await_impl(*this, last_index++);
        if (!last_forked.done())
          return false; // one coro didn't immediately complete!
      }
      last_forked.release();
      return true;
    }

    template<typename H>
    auto await_suspend(
        std::coroutine_handle<H> h
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
        , const boost::source_location & loc = BOOST_CURRENT_LOCATION
#endif
        )
    {
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
      this->loc = loc;
#endif
      exec = &detail::get_executor(h);
      last_forked.release().resume();
      while (last_index < cancel.size())
        await_impl(*this, last_index++).release();

      if (error)
        cancel_all();

      if (!this->outstanding_work()) // already done, resume right away.
        return false;

      // arm the cancel
      assign_cancellation(
          h,
          [&](asio::cancellation_type ct)
          {
            for (auto cs : cancel)
              if (cs)
                cs->emit(ct);
          });

      this->coro.reset(h.address());
      return true;
    }
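
    // Result gathering: the plain overload rethrows on error and returns a
    // vector of values; as_tuple and as_result deliver the error through the
    // return value instead of throwing.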
    auto await_resume(const as_tuple_tag & )
    {
#if defined(BOOST_COBALT_NO_PMR)
      std::vector<result_type> rr;
#else
      pmr::vector<result_type> rr{this_thread::get_allocator()};
#endif

      if (error)
        return std::make_tuple(error, rr);

      if constexpr (!std::is_void_v<result_type>)
      {
        rr.reserve(result.size());
        for (auto & t : result)
          rr.push_back(*std::move(t));
        return std::make_tuple(std::exception_ptr(), std::move(rr));
      }
    }

    auto await_resume(const as_result_tag & )
    {
#if defined(BOOST_COBALT_NO_PMR)
      std::vector<result_type> rr;
#else
      pmr::vector<result_type> rr{this_thread::get_allocator()};
#endif

      if (error)
        return system::result<decltype(rr), std::exception_ptr>(error);

      if constexpr (!std::is_void_v<result_type>)
      {
        rr.reserve(result.size());
        for (auto & t : result)
          rr.push_back(*std::move(t));
        return system::result<decltype(rr), std::exception_ptr>(std::move(rr));
      }
    }

#if _MSC_VER
    BOOST_NOINLINE
#endif
    auto await_resume()
    {
      if (error)
        std::rethrow_exception(error);

      if constexpr (!std::is_void_v<result_type>)
      {
#if defined(BOOST_COBALT_NO_PMR)
        std::vector<result_type> rr;
#else
        pmr::vector<result_type> rr{this_thread::get_allocator()};
#endif
        rr.reserve(result.size());
        for (auto & t : result)
          rr.push_back(*std::move(t));
        return rr;
      }
    }
  };

  awaitable operator co_await() && {return awaitable{aws};}
};

}

#endif //BOOST_COBALT_DETAIL_JOIN_HPP