connection_base.hpp 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964
  1. /* Copyright (c) 2018-2023 Marcelo Zimbres Silva ([email protected])
  2. *
  3. * Distributed under the Boost Software License, Version 1.0. (See
  4. * accompanying file LICENSE.txt)
  5. */
  6. #ifndef BOOST_REDIS_CONNECTION_BASE_HPP
  7. #define BOOST_REDIS_CONNECTION_BASE_HPP
  8. #include <boost/redis/adapter/adapt.hpp>
  9. #include <boost/redis/detail/helper.hpp>
  10. #include <boost/redis/error.hpp>
  11. #include <boost/redis/operation.hpp>
  12. #include <boost/redis/request.hpp>
  13. #include <boost/redis/resp3/type.hpp>
  14. #include <boost/redis/config.hpp>
  15. #include <boost/redis/detail/runner.hpp>
  16. #include <boost/redis/usage.hpp>
  17. #include <boost/system.hpp>
  18. #include <boost/asio/basic_stream_socket.hpp>
  19. #include <boost/asio/bind_executor.hpp>
  20. #include <boost/asio/experimental/parallel_group.hpp>
  21. #include <boost/asio/ip/tcp.hpp>
  22. #include <boost/asio/steady_timer.hpp>
  23. #include <boost/asio/write.hpp>
  24. #include <boost/assert.hpp>
  25. #include <boost/core/ignore_unused.hpp>
  26. #include <boost/asio/ssl/stream.hpp>
  27. #include <boost/asio/read_until.hpp>
  28. #include <boost/asio/buffer.hpp>
  29. #include <boost/asio/experimental/channel.hpp>
  30. #include <algorithm>
  31. #include <array>
  32. #include <chrono>
  33. #include <deque>
  34. #include <memory>
  35. #include <string_view>
  36. #include <type_traits>
  37. #include <functional>
  38. namespace boost::redis::detail
  39. {
  40. template <class DynamicBuffer>
  41. std::string_view buffer_view(DynamicBuffer buf) noexcept
  42. {
  43. char const* start = static_cast<char const*>(buf.data(0, buf.size()).data());
  44. return std::string_view{start, std::size(buf)};
  45. }
/* Composed operation that grows a dynamic buffer by `size` bytes,
 * reads some data from the stream directly into the newly grown
 * region and then shrinks the buffer so it contains exactly the
 * bytes that were actually read. Completes with (error, bytes_read).
 */
template <class AsyncReadStream, class DynamicBuffer>
class append_some_op {
private:
   AsyncReadStream& stream_;
   DynamicBuffer buf_;
   std::size_t size_ = 0; // How many bytes to grow the buffer by.
   std::size_t tmp_ = 0;  // Buffer size before growing, i.e. offset where new data starts.
   asio::coroutine coro_{};

public:
   append_some_op(AsyncReadStream& stream, DynamicBuffer buf, std::size_t size)
   : stream_ {stream}
   , buf_ {std::move(buf)}
   , size_{size}
   { }

   template <class Self>
   void operator()( Self& self
                  , system::error_code ec = {}
                  , std::size_t n = 0)
   {
      BOOST_ASIO_CORO_REENTER (coro_)
      {
         // Remember where the freshly grown region begins.
         tmp_ = buf_.size();
         buf_.grow(size_);

         BOOST_ASIO_CORO_YIELD
         stream_.async_read_some(buf_.data(tmp_, size_), std::move(self));
         if (ec) {
            self.complete(ec, 0);
            return;
         }

         // Drop the tail of the grown region that the read did not fill.
         buf_.shrink(buf_.size() - tmp_ - n);
         self.complete({}, n);
      }
   }
};
  80. template <class AsyncReadStream, class DynamicBuffer, class CompletionToken>
  81. auto
  82. async_append_some(
  83. AsyncReadStream& stream,
  84. DynamicBuffer buffer,
  85. std::size_t size,
  86. CompletionToken&& token)
  87. {
  88. return asio::async_compose
  89. < CompletionToken
  90. , void(system::error_code, std::size_t)
  91. >(append_some_op<AsyncReadStream, DynamicBuffer> {stream, buffer, size}, token, stream);
  92. }
/* Composed operation implementing connection_base::async_exec: queues
 * the request on the connection and waits on the request's notifier
 * channel until the response has been read, the request is canceled,
 * or the connection is lost. Completes with (error, bytes_read).
 */
template <class Conn>
struct exec_op {
   using req_info_type = typename Conn::req_info;
   using adapter_type = typename Conn::adapter_type;

   Conn* conn_ = nullptr;
   std::shared_ptr<req_info_type> info_ = nullptr;
   asio::coroutine coro{};

   template <class Self>
   void operator()(Self& self , system::error_code ec = {}, std::size_t = 0)
   {
      BOOST_ASIO_CORO_REENTER (coro)
      {
         // Check whether the user wants to wait for the connection to
         // be stablished.
         if (info_->req_->get_config().cancel_if_not_connected && !conn_->is_open()) {
            BOOST_ASIO_CORO_YIELD
            asio::post(std::move(self));
            return self.complete(error::not_connected, 0);
         }

         conn_->add_request_info(info_);
EXEC_OP_WAIT:
         BOOST_ASIO_CORO_YIELD
         info_->async_wait(std::move(self));

         // A parse error was stored on the request by the reader.
         if (info_->ec_) {
            self.complete(info_->ec_, 0);
            return;
         }

         if (info_->stop_requested()) {
            // Don't have to call remove_request as it has already
            // been by cancel(exec).
            return self.complete(asio::error::operation_aborted, 0);
         }

         if (is_cancelled(self)) {
            if (!info_->is_waiting()) {
               // The request has already been written (or staged), so
               // it can't simply be removed from the queue.
               using c_t = asio::cancellation_type;
               auto const c = self.get_cancellation_state().cancelled();
               if ((c & c_t::terminal) != c_t::none) {
                  // Cancellation requires closing the connection
                  // otherwise it stays in inconsistent state.
                  conn_->cancel(operation::run);
                  return self.complete(asio::error::operation_aborted, 0);
               } else {
                  // Can't implement other cancelation types, ignoring.
                  self.get_cancellation_state().clear();

                  // TODO: Find out a better way to ignore
                  // cancelation.
                  goto EXEC_OP_WAIT;
               }
            } else {
               // Cancelation can be honored.
               conn_->remove_request(info_);
               self.complete(asio::error::operation_aborted, 0);
               return;
            }
         }

         self.complete(info_->ec_, info_->read_size_);
      }
   }
};
/* Composed operation that runs the reader and writer operations in
 * parallel and completes when the first of the two finishes, at which
 * point the other one is canceled (wait_for_one). Completes with the
 * error code of whichever operation finished first.
 */
template <class Conn, class Logger>
struct run_op {
   Conn* conn = nullptr;
   Logger logger_;
   asio::coroutine coro{};

   template <class Self>
   void operator()( Self& self
                  , std::array<std::size_t, 2> order = {}
                  , system::error_code ec0 = {}
                  , system::error_code ec1 = {})
   {
      BOOST_ASIO_CORO_REENTER (coro)
      {
         // Clear any state left over from a previous run.
         conn->reset();

         BOOST_ASIO_CORO_YIELD
         asio::experimental::make_parallel_group(
            [this](auto token) { return conn->reader(logger_, token);},
            [this](auto token) { return conn->writer(logger_, token);}
         ).async_wait(
            asio::experimental::wait_for_one(),
            std::move(self));

         if (is_cancelled(self)) {
            logger_.trace("run-op: canceled. Exiting ...");
            self.complete(asio::error::operation_aborted);
            return;
         }

         logger_.on_run(ec0, ec1);

         // order[0] holds the index of the operation that completed
         // first; report its error code.
         switch (order[0]) {
            case 0: self.complete(ec0); break;
            case 1: self.complete(ec1); break;
            default: BOOST_ASSERT(false);
         }
      }
   }
};
/* Composed operation that writes coalesced requests to the socket in
 * a loop. When there is nothing to write it suspends on the writer
 * timer, which the connection uses as a condition variable: the timer
 * is canceled whenever a new request is added, waking this op up.
 */
template <class Conn, class Logger>
struct writer_op {
   Conn* conn_;
   Logger logger_;
   asio::coroutine coro{};

   template <class Self>
   void operator()( Self& self
                  , system::error_code ec = {}
                  , std::size_t n = 0)
   {
      ignore_unused(n);

      BOOST_ASIO_CORO_REENTER (coro) for (;;)
      {
         // Keep writing while there are staged requests in the queue.
         while (conn_->coalesce_requests()) {
            if (conn_->use_ssl())
               BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));
            else
               BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer().next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));

            logger_.on_write(ec, conn_->write_buffer_);

            if (ec) {
               logger_.trace("writer-op: error. Exiting ...");
               conn_->cancel(operation::run);
               self.complete(ec);
               return;
            }

            if (is_cancelled(self)) {
               logger_.trace("writer-op: canceled. Exiting ...");
               self.complete(asio::error::operation_aborted);
               return;
            }

            conn_->on_write();

            // A socket.close() may have been called while a
            // successful write might had already been queued, so we
            // have to check here before proceeding.
            if (!conn_->is_open()) {
               logger_.trace("writer-op: canceled (2). Exiting ...");
               self.complete({});
               return;
            }
         }

         // Nothing to write: park on the timer until new requests arrive.
         BOOST_ASIO_CORO_YIELD
         conn_->writer_timer_.async_wait(std::move(self));
         if (!conn_->is_open() || is_cancelled(self)) {
            logger_.trace("writer-op: canceled (3). Exiting ...");
            // Notice this is not an error of the op, stoping was
            // requested from the outside, so we complete with
            // success.
            self.complete({});
            return;
         }
      }
   }
};
/* Composed operation that reads from the socket in a loop, feeds the
 * data to the RESP3 parser and delivers server pushes to the receive
 * channel. Responses to commands are routed to the per-request
 * adapters by Conn::on_read.
 */
template <class Conn, class Logger>
struct reader_op {
   using parse_result = typename Conn::parse_result;
   using parse_ret_type = typename Conn::parse_ret_type;

   Conn* conn_;
   Logger logger_;
   parse_ret_type res_{parse_result::resp, 0};
   asio::coroutine coro{};

   template <class Self>
   void operator()( Self& self
                  , system::error_code ec = {}
                  , std::size_t n = 0)
   {
      ignore_unused(n);

      BOOST_ASIO_CORO_REENTER (coro) for (;;)
      {
         // Appends some data to the buffer if necessary.
         if ((res_.first == parse_result::needs_more) || std::empty(conn_->read_buffer_)) {
            if (conn_->use_ssl()) {
               BOOST_ASIO_CORO_YIELD
               async_append_some(
                  conn_->next_layer(),
                  conn_->dbuf_,
                  conn_->get_suggested_buffer_growth(),
                  std::move(self));
            } else {
               BOOST_ASIO_CORO_YIELD
               async_append_some(
                  conn_->next_layer().next_layer(),
                  conn_->dbuf_,
                  conn_->get_suggested_buffer_growth(),
                  std::move(self));
            }

            logger_.on_read(ec, n);

            // EOF is not treated as error.
            if (ec == asio::error::eof) {
               logger_.trace("reader-op: EOF received. Exiting ...");
               conn_->cancel(operation::run);
               return self.complete({}); // EOFINAE: EOF is not an error.
            }

            // The connection is not viable after an error.
            if (ec) {
               logger_.trace("reader-op: error. Exiting ...");
               conn_->cancel(operation::run);
               self.complete(ec);
               return;
            }

            // Somebody might have canceled implicitly or explicitly
            // while we were suspended and after queueing so we have to
            // check.
            if (!conn_->is_open() || is_cancelled(self)) {
               logger_.trace("reader-op: canceled. Exiting ...");
               self.complete(ec);
               return;
            }
         }

         // Parse whatever is in the buffer; may report needs_more, a
         // push or a regular response.
         res_ = conn_->on_read(buffer_view(conn_->dbuf_), ec);
         if (ec) {
            logger_.trace("reader-op: parse error. Exiting ...");
            conn_->cancel(operation::run);
            self.complete(ec);
            return;
         }

         if (res_.first == parse_result::push) {
            // Fast path: deliver without suspending when the channel
            // has capacity, otherwise do an async send.
            if (!conn_->receive_channel_.try_send(ec, res_.second)) {
               BOOST_ASIO_CORO_YIELD
               conn_->receive_channel_.async_send(ec, res_.second, std::move(self));
            }

            if (ec) {
               logger_.trace("reader-op: error. Exiting ...");
               conn_->cancel(operation::run);
               self.complete(ec);
               return;
            }

            if (!conn_->is_open() || is_cancelled(self)) {
               logger_.trace("reader-op: canceled (2). Exiting ...");
               self.complete(asio::error::operation_aborted);
               return;
            }
         }
      }
   }
};
  323. /** @brief Base class for high level Redis asynchronous connections.
  324. * @ingroup high-level-api
  325. *
  326. * @tparam Executor The executor type.
  327. *
  328. */
  329. template <class Executor>
  330. class connection_base {
  331. public:
  332. /// Executor type
  333. using executor_type = Executor;
  334. /// Type of the next layer
  335. using next_layer_type = asio::ssl::stream<asio::basic_stream_socket<asio::ip::tcp, Executor>>;
  336. using clock_type = std::chrono::steady_clock;
  337. using clock_traits_type = asio::wait_traits<clock_type>;
  338. using timer_type = asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
  339. using this_type = connection_base<Executor>;
  340. /// Constructs from an executor.
  341. connection_base(
  342. executor_type ex,
  343. asio::ssl::context ctx,
  344. std::size_t max_read_size)
  345. : ctx_{std::move(ctx)}
  346. , stream_{std::make_unique<next_layer_type>(ex, ctx_)}
  347. , writer_timer_{ex}
  348. , receive_channel_{ex, 256}
  349. , runner_{ex, {}}
  350. , dbuf_{read_buffer_, max_read_size}
  351. {
  352. set_receive_response(ignore);
  353. writer_timer_.expires_at((std::chrono::steady_clock::time_point::max)());
  354. }
  355. /// Returns the ssl context.
  356. auto const& get_ssl_context() const noexcept
  357. { return ctx_;}
  358. /// Resets the underlying stream.
  359. void reset_stream()
  360. {
  361. stream_ = std::make_unique<next_layer_type>(writer_timer_.get_executor(), ctx_);
  362. }
  363. /// Returns a reference to the next layer.
  364. auto& next_layer() noexcept { return *stream_; }
  365. /// Returns a const reference to the next layer.
  366. auto const& next_layer() const noexcept { return *stream_; }
  367. /// Returns the associated executor.
  368. auto get_executor() {return writer_timer_.get_executor();}
  369. /// Cancels specific operations.
  370. void cancel(operation op)
  371. {
  372. runner_.cancel(op);
  373. if (op == operation::all) {
  374. cancel_impl(operation::run);
  375. cancel_impl(operation::receive);
  376. cancel_impl(operation::exec);
  377. return;
  378. }
  379. cancel_impl(op);
  380. }
  381. template <class Response, class CompletionToken>
  382. auto async_exec(request const& req, Response& resp, CompletionToken token)
  383. {
  384. using namespace boost::redis::adapter;
  385. auto f = boost_redis_adapt(resp);
  386. BOOST_ASSERT_MSG(req.get_expected_responses() <= f.get_supported_response_size(), "Request and response have incompatible sizes.");
  387. auto info = std::make_shared<req_info>(req, f, get_executor());
  388. return asio::async_compose
  389. < CompletionToken
  390. , void(system::error_code, std::size_t)
  391. >(exec_op<this_type>{this, info}, token, writer_timer_);
  392. }
  393. template <class Response, class CompletionToken>
  394. [[deprecated("Set the response with set_receive_response and use the other overload.")]]
  395. auto async_receive(Response& response, CompletionToken token)
  396. {
  397. set_receive_response(response);
  398. return receive_channel_.async_receive(std::move(token));
  399. }
  400. template <class CompletionToken>
  401. auto async_receive(CompletionToken token)
  402. { return receive_channel_.async_receive(std::move(token)); }
  403. std::size_t receive(system::error_code& ec)
  404. {
  405. std::size_t size = 0;
  406. auto f = [&](system::error_code const& ec2, std::size_t n)
  407. {
  408. ec = ec2;
  409. size = n;
  410. };
  411. auto const res = receive_channel_.try_receive(f);
  412. if (ec)
  413. return 0;
  414. if (!res)
  415. ec = error::sync_receive_push_failed;
  416. return size;
  417. }
  418. template <class Logger, class CompletionToken>
  419. auto async_run(config const& cfg, Logger l, CompletionToken token)
  420. {
  421. runner_.set_config(cfg);
  422. l.set_prefix(runner_.get_config().log_prefix);
  423. return runner_.async_run(*this, l, std::move(token));
  424. }
  425. template <class Response>
  426. void set_receive_response(Response& response)
  427. {
  428. using namespace boost::redis::adapter;
  429. auto g = boost_redis_adapt(response);
  430. receive_adapter_ = adapter::detail::make_adapter_wrapper(g);
  431. }
  432. usage get_usage() const noexcept
  433. { return usage_; }
  434. auto run_is_canceled() const noexcept
  435. { return cancel_run_called_; }
  436. private:
  437. using receive_channel_type = asio::experimental::channel<executor_type, void(system::error_code, std::size_t)>;
  438. using runner_type = runner<executor_type>;
  439. using adapter_type = std::function<void(std::size_t, resp3::basic_node<std::string_view> const&, system::error_code&)>;
  440. using receiver_adapter_type = std::function<void(resp3::basic_node<std::string_view> const&, system::error_code&)>;
  441. using exec_notifier_type = receive_channel_type;
  442. auto use_ssl() const noexcept
  443. { return runner_.get_config().use_ssl;}
  444. auto cancel_on_conn_lost() -> std::size_t
  445. {
  446. // Must return false if the request should be removed.
  447. auto cond = [](auto const& ptr)
  448. {
  449. BOOST_ASSERT(ptr != nullptr);
  450. if (ptr->is_waiting()) {
  451. return !ptr->req_->get_config().cancel_on_connection_lost;
  452. } else {
  453. return !ptr->req_->get_config().cancel_if_unresponded;
  454. }
  455. };
  456. auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), cond);
  457. auto const ret = std::distance(point, std::end(reqs_));
  458. std::for_each(point, std::end(reqs_), [](auto const& ptr) {
  459. ptr->stop();
  460. });
  461. reqs_.erase(point, std::end(reqs_));
  462. std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
  463. return ptr->mark_waiting();
  464. });
  465. return ret;
  466. }
  467. auto cancel_unwritten_requests() -> std::size_t
  468. {
  469. auto f = [](auto const& ptr)
  470. {
  471. BOOST_ASSERT(ptr != nullptr);
  472. return !ptr->is_waiting();
  473. };
  474. auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), f);
  475. auto const ret = std::distance(point, std::end(reqs_));
  476. std::for_each(point, std::end(reqs_), [](auto const& ptr) {
  477. ptr->stop();
  478. });
  479. reqs_.erase(point, std::end(reqs_));
  480. return ret;
  481. }
  482. void cancel_impl(operation op)
  483. {
  484. switch (op) {
  485. case operation::exec:
  486. {
  487. cancel_unwritten_requests();
  488. } break;
  489. case operation::run:
  490. {
  491. // Protects the code below from being called more than
  492. // once, see https://github.com/boostorg/redis/issues/181
  493. if (std::exchange(cancel_run_called_, true)) {
  494. return;
  495. }
  496. close();
  497. writer_timer_.cancel();
  498. receive_channel_.cancel();
  499. cancel_on_conn_lost();
  500. } break;
  501. case operation::receive:
  502. {
  503. receive_channel_.cancel();
  504. } break;
  505. default: /* ignore */;
  506. }
  507. }
  508. void on_write()
  509. {
  510. // We have to clear the payload right after writing it to use it
  511. // as a flag that informs there is no ongoing write.
  512. write_buffer_.clear();
  513. // Notice this must come before the for-each below.
  514. cancel_push_requests();
  515. // There is small optimization possible here: traverse only the
  516. // partition of unwritten requests instead of them all.
  517. std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
  518. BOOST_ASSERT_MSG(ptr != nullptr, "Expects non-null pointer.");
  519. if (ptr->is_staged()) {
  520. ptr->mark_written();
  521. }
  522. });
  523. }
  524. struct req_info {
  525. public:
  526. using node_type = resp3::basic_node<std::string_view>;
  527. using wrapped_adapter_type = std::function<void(node_type const&, system::error_code&)>;
  528. explicit req_info(request const& req, adapter_type adapter, executor_type ex)
  529. : notifier_{ex, 1}
  530. , req_{&req}
  531. , adapter_{}
  532. , expected_responses_{req.get_expected_responses()}
  533. , status_{status::waiting}
  534. , ec_{{}}
  535. , read_size_{0}
  536. {
  537. adapter_ = [this, adapter](node_type const& nd, system::error_code& ec)
  538. {
  539. auto const i = req_->get_expected_responses() - expected_responses_;
  540. adapter(i, nd, ec);
  541. };
  542. }
  543. auto proceed()
  544. {
  545. notifier_.try_send(std::error_code{}, 0);
  546. }
  547. void stop()
  548. {
  549. notifier_.close();
  550. }
  551. [[nodiscard]] auto is_waiting() const noexcept
  552. { return status_ == status::waiting; }
  553. [[nodiscard]] auto is_written() const noexcept
  554. { return status_ == status::written; }
  555. [[nodiscard]] auto is_staged() const noexcept
  556. { return status_ == status::staged; }
  557. void mark_written() noexcept
  558. { status_ = status::written; }
  559. void mark_staged() noexcept
  560. { status_ = status::staged; }
  561. void mark_waiting() noexcept
  562. { status_ = status::waiting; }
  563. [[nodiscard]] auto stop_requested() const noexcept
  564. { return !notifier_.is_open();}
  565. template <class CompletionToken>
  566. auto async_wait(CompletionToken token)
  567. {
  568. return notifier_.async_receive(std::move(token));
  569. }
  570. //private:
  571. enum class status
  572. { waiting
  573. , staged
  574. , written
  575. };
  576. exec_notifier_type notifier_;
  577. request const* req_;
  578. wrapped_adapter_type adapter_;
  579. // Contains the number of commands that haven't been read yet.
  580. std::size_t expected_responses_;
  581. status status_;
  582. system::error_code ec_;
  583. std::size_t read_size_;
  584. };
  585. void remove_request(std::shared_ptr<req_info> const& info)
  586. {
  587. reqs_.erase(std::remove(std::begin(reqs_), std::end(reqs_), info));
  588. }
  589. using reqs_type = std::deque<std::shared_ptr<req_info>>;
  590. template <class, class> friend struct reader_op;
  591. template <class, class> friend struct writer_op;
  592. template <class, class> friend struct run_op;
  593. template <class> friend struct exec_op;
  594. template <class, class, class> friend struct run_all_op;
  595. void cancel_push_requests()
  596. {
  597. auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
  598. return !(ptr->is_staged() && ptr->req_->get_expected_responses() == 0);
  599. });
  600. std::for_each(point, std::end(reqs_), [](auto const& ptr) {
  601. ptr->proceed();
  602. });
  603. reqs_.erase(point, std::end(reqs_));
  604. }
  605. [[nodiscard]] bool is_writing() const noexcept
  606. {
  607. return !write_buffer_.empty();
  608. }
  609. void add_request_info(std::shared_ptr<req_info> const& info)
  610. {
  611. reqs_.push_back(info);
  612. if (info->req_->has_hello_priority()) {
  613. auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
  614. return e->is_waiting();
  615. });
  616. std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
  617. }
  618. if (is_open() && !is_writing())
  619. writer_timer_.cancel();
  620. }
  621. template <class CompletionToken, class Logger>
  622. auto reader(Logger l, CompletionToken&& token)
  623. {
  624. return asio::async_compose
  625. < CompletionToken
  626. , void(system::error_code)
  627. >(reader_op<this_type, Logger>{this, l}, token, writer_timer_);
  628. }
  629. template <class CompletionToken, class Logger>
  630. auto writer(Logger l, CompletionToken&& token)
  631. {
  632. return asio::async_compose
  633. < CompletionToken
  634. , void(system::error_code)
  635. >(writer_op<this_type, Logger>{this, l}, token, writer_timer_);
  636. }
  637. template <class Logger, class CompletionToken>
  638. auto async_run_lean(config const& cfg, Logger l, CompletionToken token)
  639. {
  640. runner_.set_config(cfg);
  641. l.set_prefix(runner_.get_config().log_prefix);
  642. return asio::async_compose
  643. < CompletionToken
  644. , void(system::error_code)
  645. >(run_op<this_type, Logger>{this, l}, token, writer_timer_);
  646. }
  647. [[nodiscard]] bool coalesce_requests()
  648. {
  649. // Coalesces the requests and marks them staged. After a
  650. // successful write staged requests will be marked as written.
  651. auto const point = std::partition_point(std::cbegin(reqs_), std::cend(reqs_), [](auto const& ri) {
  652. return !ri->is_waiting();
  653. });
  654. std::for_each(point, std::cend(reqs_), [this](auto const& ri) {
  655. // Stage the request.
  656. write_buffer_ += ri->req_->payload();
  657. ri->mark_staged();
  658. usage_.commands_sent += ri->expected_responses_;
  659. });
  660. usage_.bytes_sent += std::size(write_buffer_);
  661. return point != std::cend(reqs_);
  662. }
  663. bool is_waiting_response() const noexcept
  664. {
  665. if (std::empty(reqs_))
  666. return false;
  667. // Under load and on low-latency networks we might start
  668. // receiving responses before the write operation completed and
  669. // the request is still maked as staged and not written. See
  670. // https://github.com/boostorg/redis/issues/170
  671. return !reqs_.front()->is_waiting();
  672. }
  673. void close()
  674. {
  675. if (stream_->next_layer().is_open()) {
  676. system::error_code ec;
  677. stream_->next_layer().close(ec);
  678. }
  679. }
  680. auto is_open() const noexcept { return stream_->next_layer().is_open(); }
  681. auto& lowest_layer() noexcept { return stream_->lowest_layer(); }
  682. auto is_next_push()
  683. {
  684. BOOST_ASSERT(!read_buffer_.empty());
  685. // Useful links to understand the heuristics below.
  686. //
  687. // - https://github.com/redis/redis/issues/11784
  688. // - https://github.com/redis/redis/issues/6426
  689. // - https://github.com/boostorg/redis/issues/170
  690. // The message's resp3 type is a push.
  691. if (resp3::to_type(read_buffer_.front()) == resp3::type::push)
  692. return true;
  693. // This is non-push type and the requests queue is empty. I have
  694. // noticed this is possible, for example with -MISCONF. I don't
  695. // know why they are not sent with a push type so we can
  696. // distinguish them from responses to commands. If we are lucky
  697. // enough to receive them when the command queue is empty they
  698. // can be treated as server pushes, otherwise it is impossible
  699. // to handle them properly
  700. if (reqs_.empty())
  701. return true;
  702. // The request does not expect any response but we got one. This
  703. // may happen if for example, subscribe with wrong syntax.
  704. if (reqs_.front()->expected_responses_ == 0)
  705. return true;
  706. // Added to deal with MONITOR and also to fix PR170 which
  707. // happens under load and on low-latency networks, where we
  708. // might start receiving responses before the write operation
  709. // completed and the request is still maked as staged and not
  710. // written.
  711. return reqs_.front()->is_waiting();
  712. }
  713. auto get_suggested_buffer_growth() const noexcept
  714. {
  715. return parser_.get_suggested_buffer_growth(4096);
  716. }
  717. enum class parse_result { needs_more, push, resp };
  718. using parse_ret_type = std::pair<parse_result, std::size_t>;
  719. parse_ret_type on_finish_parsing(parse_result t)
  720. {
  721. if (t == parse_result::push) {
  722. usage_.pushes_received += 1;
  723. usage_.push_bytes_received += parser_.get_consumed();
  724. } else {
  725. usage_.responses_received += 1;
  726. usage_.response_bytes_received += parser_.get_consumed();
  727. }
  728. on_push_ = false;
  729. dbuf_.consume(parser_.get_consumed());
  730. auto const res = std::make_pair(t, parser_.get_consumed());
  731. parser_.reset();
  732. return res;
  733. }
  734. parse_ret_type on_read(std::string_view data, system::error_code& ec)
  735. {
  736. // We arrive here in two states:
  737. //
  738. // 1. While we are parsing a message. In this case we
  739. // don't want to determine the type of the message in the
  740. // buffer (i.e. response vs push) but leave it untouched
  741. // until the parsing of a complete message ends.
  742. //
  743. // 2. On a new message, in which case we have to determine
  744. // whether the next messag is a push or a response.
  745. //
  746. if (!on_push_) // Prepare for new message.
  747. on_push_ = is_next_push();
  748. if (on_push_) {
  749. if (!resp3::parse(parser_, data, receive_adapter_, ec))
  750. return std::make_pair(parse_result::needs_more, 0);
  751. if (ec)
  752. return std::make_pair(parse_result::push, 0);
  753. return on_finish_parsing(parse_result::push);
  754. }
  755. BOOST_ASSERT_MSG(is_waiting_response(), "Not waiting for a response (using MONITOR command perhaps?)");
  756. BOOST_ASSERT(!reqs_.empty());
  757. BOOST_ASSERT(reqs_.front() != nullptr);
  758. BOOST_ASSERT(reqs_.front()->expected_responses_ != 0);
  759. if (!resp3::parse(parser_, data, reqs_.front()->adapter_, ec))
  760. return std::make_pair(parse_result::needs_more, 0);
  761. if (ec) {
  762. reqs_.front()->ec_ = ec;
  763. reqs_.front()->proceed();
  764. return std::make_pair(parse_result::resp, 0);
  765. }
  766. reqs_.front()->read_size_ += parser_.get_consumed();
  767. if (--reqs_.front()->expected_responses_ == 0) {
  768. // Done with this request.
  769. reqs_.front()->proceed();
  770. reqs_.pop_front();
  771. }
  772. return on_finish_parsing(parse_result::resp);
  773. }
  774. void reset()
  775. {
  776. write_buffer_.clear();
  777. read_buffer_.clear();
  778. parser_.reset();
  779. on_push_ = false;
  780. cancel_run_called_ = false;
  781. }
  782. asio::ssl::context ctx_;
  783. std::unique_ptr<next_layer_type> stream_;
  784. // Notice we use a timer to simulate a condition-variable. It is
  785. // also more suitable than a channel and the notify operation does
  786. // not suspend.
  787. timer_type writer_timer_;
  788. receive_channel_type receive_channel_;
  789. runner_type runner_;
  790. receiver_adapter_type receive_adapter_;
  791. using dyn_buffer_type = asio::dynamic_string_buffer<char, std::char_traits<char>, std::allocator<char>>;
  792. std::string read_buffer_;
  793. dyn_buffer_type dbuf_;
  794. std::string write_buffer_;
  795. reqs_type reqs_;
  796. resp3::parser parser_{};
  797. bool on_push_ = false;
  798. bool cancel_run_called_ = false;
  799. usage usage_;
  800. };
  801. } // boost::redis::detail
  802. #endif // BOOST_REDIS_CONNECTION_BASE_HPP