//  lock-free single-producer/single-consumer ringbuffer
//  this algorithm is implemented in various projects (linux kernel)
//
//  Copyright (C) 2009-2013 Tim Blechmann
//
//  Distributed under the Boost Software License, Version 1.0. (See
//  accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
#define BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED

#include <algorithm>
#include <memory>

#include <boost/aligned_storage.hpp>
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
#include <boost/core/allocator_access.hpp>
#include <boost/utility.hpp>
#include <boost/next_prior.hpp>
#include <boost/utility/enable_if.hpp>
#include <boost/config.hpp> // for BOOST_LIKELY

#include <boost/type_traits/has_trivial_destructor.hpp>
#include <boost/type_traits/is_convertible.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/prefix.hpp>

#include <boost/lockfree/lockfree_forward.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace lockfree {
namespace detail {

typedef parameter::parameters<boost::parameter::optional<tag::capacity>,
                              boost::parameter::optional<tag::allocator>
                             > ringbuffer_signature;

template <typename T>
class ringbuffer_base
{
#ifndef BOOST_DOXYGEN_INVOKED
protected:
    typedef std::size_t size_t;
    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(size_t);
    atomic<size_t> write_index_;
    char padding1[padding_size]; /* force read_index and write_index to different cache lines */
    atomic<size_t> read_index_;

    BOOST_DELETED_FUNCTION(ringbuffer_base(ringbuffer_base const&))
    BOOST_DELETED_FUNCTION(ringbuffer_base& operator= (ringbuffer_base const&))

protected:
    ringbuffer_base(void):
        write_index_(0), read_index_(0)
    {}

    static size_t next_index(size_t arg, size_t max_size)
    {
        size_t ret = arg + 1;
        while (BOOST_UNLIKELY(ret >= max_size))
            ret -= max_size;
        return ret;
    }
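
    // Editorial note (not part of the original sources): the subtraction loop
    // stands in for `(arg + 1) % max_size`. Every caller passes an index with
    // arg < max_size, so ret never exceeds max_size and the branch is taken at
    // most once; this sidesteps an integer division on the hot path.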

    static size_t read_available(size_t write_index, size_t read_index, size_t max_size)
    {
        if (write_index >= read_index)
            return write_index - read_index;

        const size_t ret = write_index + max_size - read_index;
        return ret;
    }

    static size_t write_available(size_t write_index, size_t read_index, size_t max_size)
    {
        size_t ret = read_index - write_index - 1;
        if (write_index >= read_index)
            ret += max_size;
        return ret;
    }
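
    // Editorial note (not part of the original sources): one slot is deliberately
    // kept empty so that a full buffer can be told apart from an empty one.
    // Worked example with max_size == 4: read_index == 1 and write_index == 0
    // gives write_available == 0 (full, holding 3 elements), while
    // read_index == write_index gives read_available == 0 (empty). The usable
    // capacity is therefore max_size - 1.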

    size_t read_available(size_t max_size) const
    {
        size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index = read_index_.load(memory_order_relaxed);
        return read_available(write_index, read_index, max_size);
    }

    size_t write_available(size_t max_size) const
    {
        size_t write_index = write_index_.load(memory_order_relaxed);
        const size_t read_index = read_index_.load(memory_order_acquire);
        return write_available(write_index, read_index, max_size);
    }

    bool push(T const & t, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
        const size_t next = next_index(write_index, max_size);

        if (next == read_index_.load(memory_order_acquire))
            return false; /* ringbuffer is full */

        new (buffer + write_index) T(t); // copy-construct

        write_index_.store(next, memory_order_release);

        return true;
    }

    size_t push(const T * input_buffer, size_t input_count, T * internal_buffer, size_t max_size)
    {
        return push(input_buffer, input_buffer + input_count, internal_buffer, max_size) - input_buffer;
    }

    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end, T * internal_buffer, size_t max_size)
    {
        // FIXME: avoid std::distance

        const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
        const size_t read_index  = read_index_.load(memory_order_acquire);
        const size_t avail = write_available(write_index, read_index, max_size);

        if (avail == 0)
            return begin;

        size_t input_count = std::distance(begin, end);
        input_count = (std::min)(input_count, avail);

        size_t new_write_index = write_index + input_count;

        const ConstIterator last = boost::next(begin, input_count);

        if (write_index + input_count > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - write_index;
            const ConstIterator midpoint = boost::next(begin, count0);

            std::uninitialized_copy(begin, midpoint, internal_buffer + write_index);
            std::uninitialized_copy(midpoint, last, internal_buffer);
            new_write_index -= max_size;
        } else {
            std::uninitialized_copy(begin, last, internal_buffer + write_index);

            if (new_write_index == max_size)
                new_write_index = 0;
        }

        write_index_.store(new_write_index, memory_order_release);
        return last;
    }
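
    // Editorial note (not part of the original sources): a worked example of the
    // wrap-around branch above, with max_size == 8, write_index == 6 and
    // input_count == 5: count0 == 2 elements land in slots 6..7, the remaining
    // 3 in slots 0..2, and new_write_index becomes 11 - 8 == 3. The single
    // release store then publishes both segments to the consumer at once.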

    template <typename Functor>
    bool consume_one(Functor & functor, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread
        if ( empty(write_index, read_index) )
            return false;

        T & object_to_consume = buffer[read_index];
        functor( object_to_consume );
        object_to_consume.~T();

        size_t next = next_index(read_index, max_size);
        read_index_.store(next, memory_order_release);
        return true;
    }

    template <typename Functor>
    bool consume_one(Functor const & functor, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread
        if ( empty(write_index, read_index) )
            return false;

        T & object_to_consume = buffer[read_index];
        functor( object_to_consume );
        object_to_consume.~T();

        size_t next = next_index(read_index, max_size);
        read_index_.store(next, memory_order_release);
        return true;
    }

    template <typename Functor>
    size_t consume_all (Functor const & functor, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        const size_t output_count = avail;

        size_t new_read_index = read_index + output_count;

        if (read_index + output_count > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = output_count - count0;

            run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
            run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);

            new_read_index -= max_size;
        } else {
            run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);

            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return output_count;
    }

    template <typename Functor>
    size_t consume_all (Functor & functor, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        const size_t output_count = avail;

        size_t new_read_index = read_index + output_count;

        if (read_index + output_count > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = output_count - count0;

            run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
            run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);

            new_read_index -= max_size;
        } else {
            run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);

            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return output_count;
    }

    size_t pop (T * output_buffer, size_t output_count, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        output_count = (std::min)(output_count, avail);

        size_t new_read_index = read_index + output_count;

        if (read_index + output_count > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = output_count - count0;

            copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, output_buffer);
            copy_and_delete(internal_buffer, internal_buffer + count1, output_buffer + count0);

            new_read_index -= max_size;
        } else {
            copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, output_buffer);

            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return output_count;
    }

    template <typename OutputIterator>
    size_t pop_to_output_iterator (OutputIterator it, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        size_t new_read_index = read_index + avail;

        if (read_index + avail > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = avail - count0;

            it = copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, it);
            copy_and_delete(internal_buffer, internal_buffer + count1, it);

            new_read_index -= max_size;
        } else {
            copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + avail, it);

            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return avail;
    }

    const T& front(const T * internal_buffer) const
    {
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
        return *(internal_buffer + read_index);
    }

    T& front(T * internal_buffer)
    {
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
        return *(internal_buffer + read_index);
    }
#endif

public:
    /** reset the ringbuffer
     *
     * \note Not thread-safe
     * */
    void reset(void)
    {
        if ( !boost::has_trivial_destructor<T>::value ) {
            // make sure to call all destructors!
            detail::consume_noop consume_functor;
            (void)consume_all( consume_functor );
        } else {
            write_index_.store(0, memory_order_relaxed);
            read_index_.store(0, memory_order_release);
        }
    }

    /** Check if the ringbuffer is empty
     *
     * \return true, if the ringbuffer is empty, false otherwise
     * \note Due to the concurrent nature of the ringbuffer the result may be inaccurate.
     * */
    bool empty(void)
    {
        return empty(write_index_.load(memory_order_relaxed), read_index_.load(memory_order_relaxed));
    }

    /**
     * \return true, if implementation is lock-free.
     *
     * */
    bool is_lock_free(void) const
    {
        return write_index_.is_lock_free() && read_index_.is_lock_free();
    }

private:
    bool empty(size_t write_index, size_t read_index)
    {
        return write_index == read_index;
    }

    template< class OutputIterator >
    OutputIterator copy_and_delete( T * first, T * last, OutputIterator out )
    {
        if (boost::has_trivial_destructor<T>::value) {
            return std::copy(first, last, out); // will use memcpy if possible
        } else {
            for (; first != last; ++first, ++out) {
                *out = *first;
                first->~T();
            }
            return out;
        }
    }

    template< class Functor >
    void run_functor_and_delete( T * first, T * last, Functor & functor )
    {
        for (; first != last; ++first) {
            functor(*first);
            first->~T();
        }
    }

    template< class Functor >
    void run_functor_and_delete( T * first, T * last, Functor const & functor )
    {
        for (; first != last; ++first) {
            functor(*first);
            first->~T();
        }
    }
};

template <typename T, std::size_t MaxSize>
class compile_time_sized_ringbuffer:
    public ringbuffer_base<T>
{
    typedef std::size_t size_type;
    static const std::size_t max_size = MaxSize + 1;

    typedef typename boost::aligned_storage<max_size * sizeof(T),
                                            boost::alignment_of<T>::value
                                           >::type storage_type;

    storage_type storage_;

    T * data()
    {
        return static_cast<T*>(storage_.address());
    }

    const T * data() const
    {
        return static_cast<const T*>(storage_.address());
    }

protected:
    size_type max_number_of_elements() const
    {
        return max_size;
    }

    ~compile_time_sized_ringbuffer(void)
    {
        // destroy all remaining items
        detail::consume_noop consume_functor;
        (void)consume_all(consume_functor);
    }

public:
    bool push(T const & t)
    {
        return ringbuffer_base<T>::push(t, data(), max_size);
    }

    template <typename Functor>
    bool consume_one(Functor & f)
    {
        return ringbuffer_base<T>::consume_one(f, data(), max_size);
    }

    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        return ringbuffer_base<T>::consume_one(f, data(), max_size);
    }

    template <typename Functor>
    size_type consume_all(Functor & f)
    {
        return ringbuffer_base<T>::consume_all(f, data(), max_size);
    }

    template <typename Functor>
    size_type consume_all(Functor const & f)
    {
        return ringbuffer_base<T>::consume_all(f, data(), max_size);
    }

    size_type push(T const * t, size_type size)
    {
        return ringbuffer_base<T>::push(t, size, data(), max_size);
    }

    template <size_type size>
    size_type push(T const (&t)[size])
    {
        return push(t, size);
    }

    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return ringbuffer_base<T>::push(begin, end, data(), max_size);
    }

    size_type pop(T * ret, size_type size)
    {
        return ringbuffer_base<T>::pop(ret, size, data(), max_size);
    }

    template <typename OutputIterator>
    size_type pop_to_output_iterator(OutputIterator it)
    {
        return ringbuffer_base<T>::pop_to_output_iterator(it, data(), max_size);
    }

    const T& front(void) const
    {
        return ringbuffer_base<T>::front(data());
    }

    T& front(void)
    {
        return ringbuffer_base<T>::front(data());
    }
};
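
// Editorial note (not part of the original sources): the inline storage above
// holds MaxSize + 1 elements because one slot always stays empty to distinguish
// a full buffer from an empty one (see write_available); for example,
// boost::lockfree::capacity<1024> reserves storage for 1025 elements.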

template <typename T, typename Alloc>
class runtime_sized_ringbuffer:
    public ringbuffer_base<T>,
    private Alloc
{
    typedef std::size_t size_type;
    size_type max_elements_;
#ifdef BOOST_NO_CXX11_ALLOCATOR
    typedef typename Alloc::pointer pointer;
#else
    typedef std::allocator_traits<Alloc> allocator_traits;
    typedef typename allocator_traits::pointer pointer;
#endif
    pointer array_;

protected:
    size_type max_number_of_elements() const
    {
        return max_elements_;
    }

public:
    explicit runtime_sized_ringbuffer(size_type max_elements):
        max_elements_(max_elements + 1)
    {
#ifdef BOOST_NO_CXX11_ALLOCATOR
        array_ = Alloc::allocate(max_elements_);
#else
        Alloc& alloc = *this;
        array_ = allocator_traits::allocate(alloc, max_elements_);
#endif
    }

    template <typename U>
    runtime_sized_ringbuffer(typename boost::allocator_rebind<Alloc, U>::type const & alloc, size_type max_elements):
        Alloc(alloc), max_elements_(max_elements + 1)
    {
#ifdef BOOST_NO_CXX11_ALLOCATOR
        array_ = Alloc::allocate(max_elements_);
#else
        Alloc& allocator = *this;
        array_ = allocator_traits::allocate(allocator, max_elements_);
#endif
    }

    runtime_sized_ringbuffer(Alloc const & alloc, size_type max_elements):
        Alloc(alloc), max_elements_(max_elements + 1)
    {
#ifdef BOOST_NO_CXX11_ALLOCATOR
        array_ = Alloc::allocate(max_elements_);
#else
        Alloc& allocator = *this;
        array_ = allocator_traits::allocate(allocator, max_elements_);
#endif
    }

    ~runtime_sized_ringbuffer(void)
    {
        // destroy all remaining items
        detail::consume_noop consume_functor;
        (void)consume_all(consume_functor);

#ifdef BOOST_NO_CXX11_ALLOCATOR
        Alloc::deallocate(array_, max_elements_);
#else
        Alloc& allocator = *this;
        allocator_traits::deallocate(allocator, array_, max_elements_);
#endif
    }

    bool push(T const & t)
    {
        return ringbuffer_base<T>::push(t, &*array_, max_elements_);
    }

    template <typename Functor>
    bool consume_one(Functor & f)
    {
        return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
    }

    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
    }

    template <typename Functor>
    size_type consume_all(Functor & f)
    {
        return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
    }

    template <typename Functor>
    size_type consume_all(Functor const & f)
    {
        return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
    }

    size_type push(T const * t, size_type size)
    {
        return ringbuffer_base<T>::push(t, size, &*array_, max_elements_);
    }

    template <size_type size>
    size_type push(T const (&t)[size])
    {
        return push(t, size);
    }

    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return ringbuffer_base<T>::push(begin, end, &*array_, max_elements_);
    }

    size_type pop(T * ret, size_type size)
    {
        return ringbuffer_base<T>::pop(ret, size, &*array_, max_elements_);
    }

    template <typename OutputIterator>
    size_type pop_to_output_iterator(OutputIterator it)
    {
        return ringbuffer_base<T>::pop_to_output_iterator(it, &*array_, max_elements_);
    }

    const T& front(void) const
    {
        return ringbuffer_base<T>::front(&*array_);
    }

    T& front(void)
    {
        return ringbuffer_base<T>::front(&*array_);
    }
};

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, typename A0, typename A1>
#else
template <typename T, typename ...Options>
#endif
struct make_ringbuffer
{
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename ringbuffer_signature::bind<A0, A1>::type bound_args;
#else
    typedef typename ringbuffer_signature::bind<Options...>::type bound_args;
#endif

    typedef extract_capacity<bound_args> extract_capacity_t;

    static const bool runtime_sized = !extract_capacity_t::has_capacity;
    static const size_t capacity = extract_capacity_t::capacity;

    typedef extract_allocator<bound_args, T> extract_allocator_t;
    typedef typename extract_allocator_t::type allocator;

    // an allocator argument is only sane for run-time sized ringbuffers
    BOOST_STATIC_ASSERT((mpl::if_<mpl::bool_<!runtime_sized>,
                                  mpl::bool_<!extract_allocator_t::has_allocator>,
                                  mpl::true_
                                 >::type::value));

    typedef typename mpl::if_c<runtime_sized,
                               runtime_sized_ringbuffer<T, allocator>,
                               compile_time_sized_ringbuffer<T, capacity>
                              >::type ringbuffer_type;
};
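
// Editorial note (illustrative, not part of the original sources): this is how
// the selection above resolves. spsc_queue<int, boost::lockfree::capacity<1024> >
// derives from compile_time_sized_ringbuffer<int, 1024> and allocates nothing,
// while a plain spsc_queue<int> derives from
// runtime_sized_ringbuffer<int, std::allocator<int> > and takes its capacity as
// a constructor argument.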

} /* namespace detail */


/** The spsc_queue class provides a single-writer/single-reader fifo queue; pushing and popping is wait-free.
 *
 *  \b Policies:
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the ringbuffer is set at compile-time.
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<T>> <br>
 *    Specifies the allocator that is used to allocate the ringbuffer. This option is only valid if the ringbuffer is configured
 *    to be sized at run-time.
 *
 *  \b Requirements:
 *  - T must have a default constructor
 *  - T must be copyable
 * */
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1>
#else
template <typename T, typename ...Options>
#endif
class spsc_queue:
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    public detail::make_ringbuffer<T, A0, A1>::ringbuffer_type
#else
    public detail::make_ringbuffer<T, Options...>::ringbuffer_type
#endif
{
private:
#ifndef BOOST_DOXYGEN_INVOKED

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::make_ringbuffer<T, A0, A1>::ringbuffer_type base_type;
    static const bool runtime_sized = detail::make_ringbuffer<T, A0, A1>::runtime_sized;
    typedef typename detail::make_ringbuffer<T, A0, A1>::allocator allocator_arg;
#else
    typedef typename detail::make_ringbuffer<T, Options...>::ringbuffer_type base_type;
    static const bool runtime_sized = detail::make_ringbuffer<T, Options...>::runtime_sized;
    typedef typename detail::make_ringbuffer<T, Options...>::allocator allocator_arg;
#endif

    struct implementation_defined
    {
        typedef allocator_arg allocator;
        typedef std::size_t size_type;
    };
#endif

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /** Constructs a spsc_queue
     *
     *  \pre spsc_queue must be configured to be sized at compile-time
     */
    spsc_queue(void)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!runtime_sized);
    }

    /** Constructs a spsc_queue with a custom allocator
     *
     *  \pre spsc_queue must be configured to be sized at compile-time
     *
     *  \note This is just for API compatibility: an allocator isn't actually needed
     */
    template <typename U>
    explicit spsc_queue(typename boost::allocator_rebind<allocator, U>::type const &)
    {
        BOOST_STATIC_ASSERT(!runtime_sized);
    }

    /** Constructs a spsc_queue with a custom allocator
     *
     *  \pre spsc_queue must be configured to be sized at compile-time
     *
     *  \note This is just for API compatibility: an allocator isn't actually needed
     */
    explicit spsc_queue(allocator const &)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!runtime_sized);
    }

    /** Constructs a spsc_queue for element_count elements
     *
     *  \pre spsc_queue must be configured to be sized at run-time
     */
    explicit spsc_queue(size_type element_count):
        base_type(element_count)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(runtime_sized);
    }

    /** Constructs a spsc_queue for element_count elements with a custom allocator
     *
     *  \pre spsc_queue must be configured to be sized at run-time
     */
    template <typename U>
    spsc_queue(size_type element_count, typename boost::allocator_rebind<allocator, U>::type const & alloc):
        base_type(alloc, element_count)
    {
        BOOST_STATIC_ASSERT(runtime_sized);
    }

    /** Constructs a spsc_queue for element_count elements with a custom allocator
     *
     *  \pre spsc_queue must be configured to be sized at run-time
     */
    spsc_queue(size_type element_count, allocator_arg const & alloc):
        base_type(alloc, element_count)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(runtime_sized);
    }

    /** Pushes object t to the ringbuffer.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \post object will be pushed to the spsc_queue, unless it is full.
     * \return true, if the push operation is successful.
     *
     * \note Thread-safe and wait-free
     * */
    bool push(T const & t)
    {
        return base_type::push(t);
    }

    /** Pops one object from ringbuffer.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \post if ringbuffer is not empty, object will be discarded.
     * \return true, if the pop operation is successful, false if ringbuffer was empty.
     *
     * \note Thread-safe and wait-free
     */
    bool pop ()
    {
        detail::consume_noop consume_functor;
        return consume_one( consume_functor );
    }

    /** Pops one object from ringbuffer.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \post if ringbuffer is not empty, object will be copied to ret.
     * \return true, if the pop operation is successful, false if ringbuffer was empty.
     *
     * \note Thread-safe and wait-free
     */
    template <typename U>
    typename boost::enable_if<typename is_convertible<T, U>::type, bool>::type
    pop (U & ret)
    {
        detail::consume_via_copy<U> consume_functor(ret);
        return consume_one( consume_functor );
    }

    /** Pushes as many objects from the array t as there is space.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \return number of pushed items
     *
     * \note Thread-safe and wait-free
     */
    size_type push(T const * t, size_type size)
    {
        return base_type::push(t, size);
    }

    /** Pushes as many objects from the array t as there is space available.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \return number of pushed items
     *
     * \note Thread-safe and wait-free
     */
    template <size_type size>
    size_type push(T const (&t)[size])
    {
        return push(t, size);
    }

    /** Pushes as many objects from the range [begin, end) as there is space.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \return iterator to the first element, which has not been pushed
     *
     * \note Thread-safe and wait-free
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return base_type::push(begin, end);
    }
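
    // Editorial sketch (illustrative; `queue` and `data` are assumptions of the
    // example, not part of the header): pushing a whole range may take several
    // calls, because push(begin, end) stops once the buffer is full and returns
    // an iterator to the first element that was not pushed.
    //
    //   std::vector<int>::const_iterator pos = data.begin();
    //   while (pos != data.end())
    //       pos = queue.push(pos, data.end());   // busy-waits on a full queue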

    /** Pops a maximum of size objects from ringbuffer.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \return number of popped items
     *
     * \note Thread-safe and wait-free
     * */
    size_type pop(T * ret, size_type size)
    {
        return base_type::pop(ret, size);
    }

    /** Pops a maximum of size objects from spsc_queue.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \return number of popped items
     *
     * \note Thread-safe and wait-free
     * */
    template <size_type size>
    size_type pop(T (&ret)[size])
    {
        return pop(ret, size);
    }

    /** Pops objects to the output iterator it
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \return number of popped items
     *
     * \note Thread-safe and wait-free
     * */
    template <typename OutputIterator>
    typename boost::disable_if<typename is_convertible<T, OutputIterator>::type, size_type>::type
    pop(OutputIterator it)
    {
        return base_type::pop_to_output_iterator(it);
    }

    /** consumes one element via a functor
     *
     *  pops one element from the queue and applies the functor on this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        return base_type::consume_one(f);
    }

    /// \copydoc boost::lockfree::spsc_queue::consume_one(Functor & rhs)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        return base_type::consume_one(f);
    }

    /** consumes all elements via a functor
     *
     *  sequentially pops all elements from the queue and applies the functor on each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_type consume_all(Functor & f)
    {
        return base_type::consume_all(f);
    }

    /// \copydoc boost::lockfree::spsc_queue::consume_all(Functor & rhs)
    template <typename Functor>
    size_type consume_all(Functor const & f)
    {
        return base_type::consume_all(f);
    }
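
    // Editorial sketch (illustrative; assumes a C++11 compiler for the lambda
    // and a spsc_queue<int> named `queue`): consume_all applies the functor to
    // every element that is currently readable, destroys each element in place,
    // and returns how many were consumed.
    //
    //   long long sum = 0;
    //   std::size_t consumed = queue.consume_all([&sum](int value) { sum += value; });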

    /** get number of elements that are available for read
     *
     * \return number of available elements that can be popped from the spsc_queue
     *
     * \note Thread-safe and wait-free, should only be called from the consumer thread
     * */
    size_type read_available() const
    {
        return base_type::read_available(base_type::max_number_of_elements());
    }

    /** get write space to write elements
     *
     * \return number of elements that can be pushed to the spsc_queue
     *
     * \note Thread-safe and wait-free, should only be called from the producer thread
     * */
    size_type write_available() const
    {
        return base_type::write_available(base_type::max_number_of_elements());
    }
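
    // Editorial sketch (illustrative; `queue` and `block` are assumptions of the
    // example): since only the consumer frees slots, the space reported by
    // write_available() cannot shrink before the producer's next push, so a
    // checked batch push transfers the whole block.
    //
    //   if (queue.write_available() >= block.size())
    //       queue.push(block.data(), block.size());   // pushes all of block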

    /** get reference to element in the front of the queue
     *
     * Availability of front element can be checked using read_available().
     *
     * \pre only a consuming thread is allowed to check front element
     * \pre read_available() > 0. If ringbuffer is empty, it's undefined behaviour to invoke this method.
     * \return reference to the first element in the queue
     *
     * \note Thread-safe and wait-free
     */
    const T& front() const
    {
        BOOST_ASSERT(read_available() > 0);
        return base_type::front();
    }

    /// \copydoc boost::lockfree::spsc_queue::front() const
    T& front()
    {
        BOOST_ASSERT(read_available() > 0);
        return base_type::front();
    }
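
    // Editorial sketch (illustrative; `queue` and process() are assumptions of
    // the example): front() gives in-place access to the oldest element without
    // copying it out, and pop() then destroys the inspected element.
    //
    //   while (queue.read_available() > 0) {
    //       process(queue.front());
    //       queue.pop();
    //   }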

    /** reset the ringbuffer
     *
     * \note Not thread-safe
     * */
    void reset(void)
    {
        if ( !boost::has_trivial_destructor<T>::value ) {
            // make sure to call all destructors!
            detail::consume_noop consume_functor;
            (void)consume_all(consume_functor);
        } else {
            base_type::write_index_.store(0, memory_order_relaxed);
            base_type::read_index_.store(0, memory_order_release);
        }
    }
};
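
// Editorial end-to-end sketch (illustrative, not part of the original header).
// Exactly one thread may push and exactly one thread may pop; the element
// count, the queue name and the spin loops are all assumptions of the example.
//
//   #include <boost/lockfree/spsc_queue.hpp>
//   #include <thread>
//
//   int main()
//   {
//       boost::lockfree::spsc_queue<int, boost::lockfree::capacity<1024> > queue;
//
//       std::thread producer([&queue] {
//           for (int i = 0; i != 100000; ++i)
//               while (!queue.push(i)) {}     // spin while the buffer is full
//       });
//
//       long long sum = 0;
//       for (int popped = 0; popped != 100000; ++popped) {
//           int value;
//           while (!queue.pop(value)) {}      // spin while the buffer is empty
//           sum += value;
//       }
//
//       producer.join();
//       // sum now equals 0 + 1 + ... + 99999
//       return sum == 99999LL * 100000 / 2 ? 0 : 1;
//   }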

} /* namespace lockfree */
} /* namespace boost */


#endif /* BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED */