// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED

#include <boost/config.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#    pragma once
#endif

#include <boost/assert.hpp>
#include <boost/core/allocator_access.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/core/span.hpp>
#include <boost/parameter/optional.hpp>
#include <boost/parameter/parameters.hpp>
#include <boost/static_assert.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
#include <boost/lockfree/detail/uses_optional.hpp>
#include <boost/lockfree/lockfree_forward.hpp>

#include <cstdint>
#include <limits> // std::numeric_limits, used by the compile-time capacity check
#include <tuple>
#include <type_traits>
  29. namespace boost { namespace lockfree {
  30. namespace detail {
  31. typedef parameter::parameters< boost::parameter::optional< tag::allocator >, boost::parameter::optional< tag::capacity > >
  32. stack_signature;
  33. } // namespace detail
/** The stack class provides a multi-writer/multi-reader stack, pushing and popping is lock-free,
 *  construction/destruction has to be synchronized. It uses a freelist for memory management,
 *  freed nodes are pushed to the freelist and not returned to the OS before the stack is destroyed.
 *
 *  \b Policies:
 *
 *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br>
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are
 *    addressed by array indexing. This limits the possible size of the stack to the number of elements that can be
 *    addressed by the index type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange
 *    instructions, this is the best way to achieve lock-freedom.
 *
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
 *    This option implies \c fixed_sized<true>
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
 *    Specifies the allocator that is used for the internal freelist
 *
 *  \b Requirements:
 *  - T must have a copy constructor or a move constructor
 * */
  57. template < typename T, typename... Options >
  58. #if !defined( BOOST_NO_CXX20_HDR_CONCEPTS )
  59. requires( std::is_copy_assignable_v< T > || std::is_move_assignable_v< T > )
  60. #endif
  61. class stack
  62. {
  63. private:
  64. #ifndef BOOST_DOXYGEN_INVOKED
  65. BOOST_STATIC_ASSERT( std::is_copy_constructible< T >::value || std::is_move_constructible< T >::value );
  66. typedef typename detail::stack_signature::bind< Options... >::type bound_args;
  67. static const bool has_capacity = detail::extract_capacity< bound_args >::has_capacity;
  68. static const size_t capacity = detail::extract_capacity< bound_args >::capacity;
  69. static const bool fixed_sized = detail::extract_fixed_sized< bound_args >::value;
  70. static const bool node_based = !( has_capacity || fixed_sized );
  71. static const bool compile_time_sized = has_capacity;
  72. struct node
  73. {
  74. node( const T& val ) :
  75. v( val )
  76. {}
  77. node( T&& val ) :
  78. v( std::forward< T >( val ) )
  79. {}
  80. typedef typename detail::select_tagged_handle< node, node_based >::handle_type handle_t;
  81. handle_t next;
  82. T v;
  83. };
  84. typedef typename detail::extract_allocator< bound_args, node >::type node_allocator;
  85. typedef
  86. typename detail::select_freelist< node, node_allocator, compile_time_sized, fixed_sized, capacity >::type pool_t;
  87. typedef typename pool_t::tagged_node_handle tagged_node_handle;
  88. // check compile-time capacity
  89. static constexpr bool capacity_is_valid = has_capacity ? capacity - 1 < std::numeric_limits< std::uint16_t >::max()
  90. : true;
  91. BOOST_STATIC_ASSERT( capacity_is_valid );
  92. struct implementation_defined
  93. {
  94. typedef node_allocator allocator;
  95. typedef std::size_t size_type;
  96. };
  97. #endif
  98. public:
  99. typedef T value_type;
  100. typedef typename implementation_defined::allocator allocator;
  101. typedef typename implementation_defined::size_type size_type;
  102. /**
  103. * \return true, if implementation is lock-free.
  104. *
  105. * \warning It only checks, if the top stack node and the freelist can be modified in a lock-free manner.
  106. * On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics,
  107. * there is no possibility to provide a completely accurate implementation, because one would need to test
  108. * every internal node, which is impossible if further nodes will be allocated from the operating system.
  109. *
  110. * */
  111. bool is_lock_free( void ) const
  112. {
  113. return tos.is_lock_free() && pool.is_lock_free();
  114. }
  115. /** Construct a fixed-sized stack
  116. *
  117. * \pre Must specify a capacity<> argument
  118. * */
  119. explicit stack( void )
  120. #if !defined( BOOST_NO_CXX20_HDR_CONCEPTS )
  121. requires( has_capacity )
  122. #endif
  123. :
  124. pool( node_allocator(), capacity )
  125. {
  126. // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
  127. // this function and this function may be compiled even when it isn't being used.
  128. BOOST_ASSERT( has_capacity );
  129. initialize();
  130. }
  131. /** Construct a fixed-sized stack with a custom allocator
  132. *
  133. * \pre Must specify a capacity<> argument
  134. * */
  135. template < typename U, typename Enabler = std::enable_if< has_capacity > >
  136. explicit stack( typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) :
  137. pool( alloc, capacity )
  138. {
  139. initialize();
  140. }
  141. /** Construct a fixed-sized stack with a custom allocator
  142. *
  143. * \pre Must specify a capacity<> argument
  144. * */
  145. template < typename Enabler = std::enable_if< has_capacity > >
  146. explicit stack( allocator const& alloc ) :
  147. pool( alloc, capacity )
  148. {
  149. initialize();
  150. }
  151. /** Construct a variable-sized stack
  152. *
  153. * Allocate n nodes initially for the freelist
  154. *
  155. * \pre Must \b not specify a capacity<> argument
  156. * */
  157. template < typename Enabler = std::enable_if< !has_capacity > >
  158. explicit stack( size_type n ) :
  159. pool( node_allocator(), n )
  160. {
  161. initialize();
  162. }
  163. stack( const stack& ) = delete;
  164. stack& operator=( const stack& ) = delete;
  165. stack( stack&& ) = delete;
  166. stack& operator=( stack&& ) = delete;
  167. /** Construct a variable-sized stack with a custom allocator
  168. *
  169. * Allocate n nodes initially for the freelist
  170. *
  171. * \pre Must \b not specify a capacity<> argument
  172. * */
  173. template < typename U, typename Enabler = std::enable_if< !has_capacity > >
  174. stack( size_type n, typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) :
  175. pool( alloc, n )
  176. {
  177. initialize();
  178. }
  179. /** Construct a variable-sized stack with a custom allocator
  180. *
  181. * Allocate n nodes initially for the freelist
  182. *
  183. * \pre Must \b not specify a capacity<> argument
  184. * */
  185. template < typename Enabler = std::enable_if< !has_capacity > >
  186. stack( size_type n, node_allocator const& alloc ) :
  187. pool( alloc, n )
  188. {
  189. initialize();
  190. }
  191. /** Allocate n nodes for freelist
  192. *
  193. * \pre only valid if no capacity<> argument given
  194. * \note thread-safe, may block if memory allocator blocks
  195. *
  196. * */
  197. template < typename Enabler = std::enable_if< !has_capacity > >
  198. void reserve( size_type n )
  199. {
  200. pool.template reserve< true >( n );
  201. }
  202. /** Allocate n nodes for freelist
  203. *
  204. * \pre only valid if no capacity<> argument given
  205. * \note not thread-safe, may block if memory allocator blocks
  206. *
  207. * */
  208. template < typename Enabler = std::enable_if< !has_capacity > >
  209. void reserve_unsafe( size_type n )
  210. {
  211. pool.template reserve< false >( n );
  212. }
  213. /** Destroys stack, free all nodes from freelist.
  214. *
  215. * \note not thread-safe
  216. *
  217. * */
  218. ~stack( void )
  219. {
  220. consume_all( []( const T& ) {} );
  221. }
  222. private:
  223. #ifndef BOOST_DOXYGEN_INVOKED
  224. void initialize( void )
  225. {
  226. tos.store( tagged_node_handle( pool.null_handle(), 0 ) );
  227. }
  228. void link_nodes_atomic( node* new_top_node, node* end_node )
  229. {
  230. tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed );
  231. for ( ;; ) {
  232. tagged_node_handle new_tos( pool.get_handle( new_top_node ), old_tos.get_tag() );
  233. end_node->next = pool.get_handle( old_tos );
  234. if ( tos.compare_exchange_weak( old_tos, new_tos ) )
  235. break;
  236. }
  237. }
  238. void link_nodes_unsafe( node* new_top_node, node* end_node )
  239. {
  240. tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed );
  241. tagged_node_handle new_tos( pool.get_handle( new_top_node ), old_tos.get_tag() );
  242. end_node->next = pool.get_handle( old_tos );
  243. tos.store( new_tos, memory_order_relaxed );
  244. }
  245. template < bool Threadsafe, bool Bounded, typename ConstIterator >
  246. std::tuple< node*, node* > prepare_node_list( ConstIterator begin, ConstIterator end, ConstIterator& ret )
  247. {
  248. ConstIterator it = begin;
  249. node* end_node = pool.template construct< Threadsafe, Bounded >( *it++ );
  250. if ( end_node == NULL ) {
  251. ret = begin;
  252. return std::make_tuple< node*, node* >( NULL, NULL );
  253. }
  254. node* new_top_node = end_node;
  255. end_node->next = NULL;
  256. BOOST_TRY
  257. {
  258. /* link nodes */
  259. for ( ; it != end; ++it ) {
  260. node* newnode = pool.template construct< Threadsafe, Bounded >( *it );
  261. if ( newnode == NULL )
  262. break;
  263. newnode->next = new_top_node;
  264. new_top_node = newnode;
  265. }
  266. }
  267. BOOST_CATCH( ... )
  268. {
  269. for ( node* current_node = new_top_node; current_node != NULL; ) {
  270. node* next = current_node->next;
  271. pool.template destruct< Threadsafe >( current_node );
  272. current_node = next;
  273. }
  274. BOOST_RETHROW;
  275. }
  276. BOOST_CATCH_END
  277. ret = it;
  278. return std::make_tuple( new_top_node, end_node );
  279. }
  280. template < bool Bounded >
  281. bool do_push( T&& v )
  282. {
  283. node* newnode = pool.template construct< true, Bounded >( std::forward< T >( v ) );
  284. if ( newnode == 0 )
  285. return false;
  286. link_nodes_atomic( newnode, newnode );
  287. return true;
  288. }
  289. template < bool Bounded >
  290. bool do_push( T const& v )
  291. {
  292. node* newnode = pool.template construct< true, Bounded >( v );
  293. if ( newnode == 0 )
  294. return false;
  295. link_nodes_atomic( newnode, newnode );
  296. return true;
  297. }
  298. template < bool Bounded, typename ConstIterator >
  299. ConstIterator do_push( ConstIterator begin, ConstIterator end )
  300. {
  301. node* new_top_node;
  302. node* end_node;
  303. ConstIterator ret;
  304. std::tie( new_top_node, end_node ) = prepare_node_list< true, Bounded >( begin, end, ret );
  305. if ( new_top_node )
  306. link_nodes_atomic( new_top_node, end_node );
  307. return ret;
  308. }
  309. #endif
  310. public:
  311. /** Pushes object t to the stack.
  312. *
  313. * \post object will be pushed to the stack, if internal node can be allocated
  314. * \returns true, if the push operation is successful.
  315. *
  316. * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will
  317. * be allocated from the OS. This may not be lock-free. \throws if memory allocator throws
  318. * */
  319. bool push( const T& v )
  320. {
  321. return do_push< false >( v );
  322. }
  323. /// \copydoc boost::lockfree::stack::push(const T& t)
  324. bool push( T&& v )
  325. {
  326. return do_push< false >( std::forward< T >( v ) );
  327. }
  328. /** Pushes object t to the stack.
  329. *
  330. * \post object will be pushed to the stack, if internal node can be allocated
  331. * \returns true, if the push operation is successful.
  332. *
  333. * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
  334. * */
  335. bool bounded_push( const T& v )
  336. {
  337. return do_push< true >( v );
  338. }
  339. /// \copydoc boost::lockfree::stack::bounded_push(const T& t)
  340. bool bounded_push( T&& v )
  341. {
  342. return do_push< true >( std::forward< T >( v ) );
  343. }
  344. /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
  345. *
  346. * \return iterator to the first element, which has not been pushed
  347. *
  348. * \note Operation is applied atomically
  349. * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will
  350. * be allocated from the OS. This may not be lock-free.
  351. *
  352. * \throws if memory allocator throws
  353. */
  354. template < typename ConstIterator >
  355. ConstIterator push( ConstIterator begin, ConstIterator end )
  356. {
  357. return do_push< false, ConstIterator >( begin, end );
  358. }
  359. /** Pushes as many objects from the span as freelist node can be allocated.
  360. *
  361. * \return Number of elements pushed
  362. *
  363. * \note Operation is applied atomically
  364. * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will
  365. * be allocated from the OS. This may not be lock-free.
  366. *
  367. * \throws if memory allocator throws
  368. */
  369. template < std::size_t Extent >
  370. size_type push( boost::span< const T, Extent > t )
  371. {
  372. const T* end_pushed = push( t.begin(), t.end() );
  373. return std::distance( t.begin(), end_pushed );
  374. }
  375. /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
  376. *
  377. * \return iterator to the first element, which has not been pushed
  378. *
  379. * \note Operation is applied atomically
  380. * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
  381. * \throws if memory allocator throws
  382. */
  383. template < typename ConstIterator >
  384. ConstIterator bounded_push( ConstIterator begin, ConstIterator end )
  385. {
  386. return do_push< true, ConstIterator >( begin, end );
  387. }
  388. /** Pushes as many objects from the span as freelist node can be allocated.
  389. *
  390. * \return Number of elements pushed
  391. *
  392. * \note Operation is applied atomically
  393. * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
  394. * \throws if memory allocator throws
  395. */
  396. template < std::size_t Extent >
  397. size_type bounded_push( boost::span< const T, Extent > t )
  398. {
  399. const T* end_pushed = bounded_push( t.begin(), t.end() );
  400. return std::distance( t.begin(), end_pushed );
  401. }
  402. /** Pushes object t to the stack.
  403. *
  404. * \post object will be pushed to the stack, if internal node can be allocated
  405. * \returns true, if the push operation is successful.
  406. *
  407. * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node
  408. * will be allocated from the OS. This may not be lock-free.
  409. * \throws if memory allocator throws
  410. * */
  411. bool unsynchronized_push( const T& v )
  412. {
  413. node* newnode = pool.template construct< false, false >( v );
  414. if ( newnode == 0 )
  415. return false;
  416. link_nodes_unsafe( newnode, newnode );
  417. return true;
  418. }
  419. /// \copydoc boost::lockfree::stack::unsynchronized_push(const T& t)
  420. bool unsynchronized_push( T&& v )
  421. {
  422. node* newnode = pool.template construct< false, false >( std::forward< T >( v ) );
  423. if ( newnode == 0 )
  424. return false;
  425. link_nodes_unsafe( newnode, newnode );
  426. return true;
  427. }
  428. /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
  429. *
  430. * \return iterator to the first element, which has not been pushed
  431. *
  432. * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node
  433. * will be allocated from the OS. This may not be lock-free.
  434. * \throws if memory allocator throws
  435. */
  436. template < typename ConstIterator >
  437. ConstIterator unsynchronized_push( ConstIterator begin, ConstIterator end )
  438. {
  439. node* new_top_node;
  440. node* end_node;
  441. ConstIterator ret;
  442. std::tie( new_top_node, end_node ) = prepare_node_list< false, false >( begin, end, ret );
  443. if ( new_top_node )
  444. link_nodes_unsafe( new_top_node, end_node );
  445. return ret;
  446. }
  447. /** Pushes as many objects from the span as freelist node can be allocated.
  448. *
  449. * \return iterator to the first element, which has not been pushed
  450. *
  451. * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node
  452. * will be allocated from the OS. This may not be lock-free. \throws if memory allocator throws
  453. */
  454. template < std::size_t Extent >
  455. size_type unsynchronized_push( boost::span< const T, Extent > t )
  456. {
  457. const T* end_pushed = unsynchronized_push( t.begin(), t.end() );
  458. return std::distance( t.begin(), end_pushed );
  459. }
  460. /** Pops object from stack.
  461. *
  462. * \post if pop operation is successful, object will be copied to ret.
  463. * \returns true, if the pop operation is successful, false if stack was empty.
  464. *
  465. * \note Thread-safe and non-blocking
  466. *
  467. * */
  468. bool pop( T& ret )
  469. {
  470. return pop< T >( ret );
  471. }
  472. /** Pops object from stack.
  473. *
  474. * \pre type T must be convertible to U
  475. * \post if pop operation is successful, object will be copied to ret.
  476. * \returns true, if the pop operation is successful, false if stack was empty.
  477. *
  478. * \note Thread-safe and non-blocking
  479. *
  480. * */
  481. template < typename U, typename Enabler = std::enable_if< std::is_convertible< T, U >::value > >
  482. bool pop( U& ret )
  483. {
  484. return consume_one( [ & ]( T&& arg ) {
  485. ret = std::forward< T >( arg );
  486. } );
  487. }
  488. #if !defined( BOOST_NO_CXX17_HDR_OPTIONAL ) || defined( BOOST_DOXYGEN_INVOKED )
  489. /** Pops object from stack, returning a std::optional<>
  490. *
  491. * \returns `std::optional` with value if successful, `std::nullopt` if stack is empty.
  492. *
  493. * \note Thread-safe and non-blocking
  494. *
  495. * */
  496. std::optional< T > pop( uses_optional_t )
  497. {
  498. T to_dequeue;
  499. if ( pop( to_dequeue ) )
  500. return to_dequeue;
  501. else
  502. return std::nullopt;
  503. }
  504. /** Pops object from stack, returning a std::optional<>
  505. *
  506. * \pre type T must be convertible to U
  507. * \returns `std::optional` with value if successful, `std::nullopt` if stack is empty.
  508. *
  509. * \note Thread-safe and non-blocking
  510. *
  511. * */
  512. template < typename U >
  513. std::optional< U > pop( uses_optional_t )
  514. {
  515. U to_dequeue;
  516. if ( pop( to_dequeue ) )
  517. return to_dequeue;
  518. else
  519. return std::nullopt;
  520. }
  521. #endif
  522. /** Pops object from stack.
  523. *
  524. * \post if pop operation is successful, object will be copied to ret.
  525. * \returns true, if the pop operation is successful, false if stack was empty.
  526. *
  527. * \note Not thread-safe, but non-blocking
  528. *
  529. * */
  530. bool unsynchronized_pop( T& ret )
  531. {
  532. return unsynchronized_pop< T >( ret );
  533. }
  534. /** Pops object from stack.
  535. *
  536. * \pre type T must be convertible to U
  537. * \post if pop operation is successful, object will be copied to ret.
  538. * \returns true, if the pop operation is successful, false if stack was empty.
  539. *
  540. * \note Not thread-safe, but non-blocking
  541. *
  542. * */
  543. template < typename U, typename Enabler = std::enable_if< std::is_convertible< T, U >::value > >
  544. bool unsynchronized_pop( U& ret )
  545. {
  546. tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed );
  547. node* old_tos_pointer = pool.get_pointer( old_tos );
  548. if ( !pool.get_pointer( old_tos ) )
  549. return false;
  550. node* new_tos_ptr = pool.get_pointer( old_tos_pointer->next );
  551. tagged_node_handle new_tos( pool.get_handle( new_tos_ptr ), old_tos.get_next_tag() );
  552. tos.store( new_tos, memory_order_relaxed );
  553. ret = std::move( old_tos_pointer->v );
  554. pool.template destruct< false >( old_tos );
  555. return true;
  556. }
  557. /** consumes one element via a functor
  558. *
  559. * pops one element from the stack and applies the functor on this object
  560. *
  561. * \returns true, if one element was consumed
  562. *
  563. * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
  564. * */
  565. template < typename Functor >
  566. bool consume_one( Functor&& f )
  567. {
  568. tagged_node_handle old_tos = tos.load( detail::memory_order_consume );
  569. for ( ;; ) {
  570. node* old_tos_pointer = pool.get_pointer( old_tos );
  571. if ( !old_tos_pointer )
  572. return false;
  573. tagged_node_handle new_tos( old_tos_pointer->next, old_tos.get_next_tag() );
  574. if ( tos.compare_exchange_weak( old_tos, new_tos ) ) {
  575. f( std::move( old_tos_pointer->v ) );
  576. pool.template destruct< true >( old_tos );
  577. return true;
  578. }
  579. }
  580. }
  581. /** consumes all elements via a functor
  582. *
  583. * sequentially pops all elements from the stack and applies the functor on each object
  584. *
  585. * \returns number of elements that are consumed
  586. *
  587. * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
  588. * */
  589. template < typename Functor >
  590. size_t consume_all( Functor&& f )
  591. {
  592. size_t element_count = 0;
  593. while ( consume_one( f ) )
  594. element_count += 1;
  595. return element_count;
  596. }
  597. /** consumes all elements via a functor
  598. *
  599. * atomically pops all elements from the stack and applies the functor on each object
  600. *
  601. * \returns number of elements that are consumed
  602. *
  603. * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
  604. * */
  605. template < typename Functor >
  606. size_t consume_all_atomic( Functor&& f )
  607. {
  608. size_t element_count = 0;
  609. tagged_node_handle old_tos = tos.load( detail::memory_order_consume );
  610. for ( ;; ) {
  611. node* old_tos_pointer = pool.get_pointer( old_tos );
  612. if ( !old_tos_pointer )
  613. return 0;
  614. tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() );
  615. if ( tos.compare_exchange_weak( old_tos, new_tos ) )
  616. break;
  617. }
  618. tagged_node_handle nodes_to_consume = old_tos;
  619. for ( ;; ) {
  620. node* node_pointer = pool.get_pointer( nodes_to_consume );
  621. f( std::move( node_pointer->v ) );
  622. element_count += 1;
  623. node* next_node = pool.get_pointer( node_pointer->next );
  624. if ( !next_node ) {
  625. pool.template destruct< true >( nodes_to_consume );
  626. break;
  627. }
  628. tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() );
  629. pool.template destruct< true >( nodes_to_consume );
  630. nodes_to_consume = next;
  631. }
  632. return element_count;
  633. }
  634. /** consumes all elements via a functor
  635. *
  636. * atomically pops all elements from the stack and applies the functor on each object in reversed order
  637. *
  638. * \returns number of elements that are consumed
  639. *
  640. * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
  641. * */
  642. template < typename Functor >
  643. size_t consume_all_atomic_reversed( Functor&& f )
  644. {
  645. size_t element_count = 0;
  646. tagged_node_handle old_tos = tos.load( detail::memory_order_consume );
  647. for ( ;; ) {
  648. node* old_tos_pointer = pool.get_pointer( old_tos );
  649. if ( !old_tos_pointer )
  650. return 0;
  651. tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() );
  652. if ( tos.compare_exchange_weak( old_tos, new_tos ) )
  653. break;
  654. }
  655. tagged_node_handle nodes_to_consume = old_tos;
  656. node* last_node_pointer = NULL;
  657. tagged_node_handle nodes_in_reversed_order;
  658. for ( ;; ) {
  659. node* node_pointer = pool.get_pointer( nodes_to_consume );
  660. node* next_node = pool.get_pointer( node_pointer->next );
  661. node_pointer->next = pool.get_handle( last_node_pointer );
  662. last_node_pointer = node_pointer;
  663. if ( !next_node ) {
  664. nodes_in_reversed_order = nodes_to_consume;
  665. break;
  666. }
  667. tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() );
  668. nodes_to_consume = next;
  669. }
  670. for ( ;; ) {
  671. node* node_pointer = pool.get_pointer( nodes_in_reversed_order );
  672. f( std::move( node_pointer->v ) );
  673. element_count += 1;
  674. node* next_node = pool.get_pointer( node_pointer->next );
  675. if ( !next_node ) {
  676. pool.template destruct< true >( nodes_in_reversed_order );
  677. break;
  678. }
  679. tagged_node_handle next( pool.get_handle( next_node ), nodes_in_reversed_order.get_next_tag() );
  680. pool.template destruct< true >( nodes_in_reversed_order );
  681. nodes_in_reversed_order = next;
  682. }
  683. return element_count;
  684. }
  685. /**
  686. * \return true, if stack is empty.
  687. *
  688. * \note It only guarantees that at some point during the execution of the function the stack has been empty.
  689. * It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
  690. * */
  691. bool empty( void ) const
  692. {
  693. return pool.get_pointer( tos.load() ) == NULL;
  694. }
  695. private:
  696. #ifndef BOOST_DOXYGEN_INVOKED
  697. detail::atomic< tagged_node_handle > tos;
  698. static const int padding_size = detail::cacheline_bytes - sizeof( tagged_node_handle );
  699. char padding[ padding_size ];
  700. pool_t pool;
  701. #endif
  702. };
  703. }} // namespace boost::lockfree
  704. #endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */