// lock-free queue from
// Michael, M. M. and Scott, M. L.,
// "Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue Algorithms"
//
// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_FIFO_HPP_INCLUDED
#define BOOST_LOCKFREE_FIFO_HPP_INCLUDED

#include <boost/config.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
# pragma once
#endif

#include <boost/assert.hpp>
#include <boost/core/allocator_access.hpp>
#include <boost/parameter/optional.hpp>
#include <boost/parameter/parameters.hpp>
#include <boost/static_assert.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
#include <boost/lockfree/detail/uses_optional.hpp>

#include <boost/lockfree/lockfree_forward.hpp>

#if defined( _MSC_VER )
# pragma warning( push )
# pragma warning( disable : 4324 ) // structure was padded due to __declspec(align())
#endif

#if defined( BOOST_INTEL ) && ( BOOST_INTEL_CXX_VERSION > 1000 )
# pragma warning( push )
# pragma warning( disable : 488 ) // template parameter unused in declaring parameter types,
                                  // gets erroneously triggered by the queue constructor, which
                                  // takes an allocator of another type and rebinds it
#endif
namespace boost { namespace lockfree {

#ifndef BOOST_DOXYGEN_INVOKED
namespace detail {

typedef parameter::parameters< boost::parameter::optional< tag::allocator >, boost::parameter::optional< tag::capacity > >
    queue_signature;

} /* namespace detail */
#endif
/** The queue class provides a multi-writer/multi-reader queue; pushing and popping are lock-free,
 *  but construction and destruction have to be synchronized. It uses a freelist for memory management:
 *  freed nodes are pushed to the freelist and are not returned to the OS before the queue is destroyed.
 *
 *  \b Policies:
 *  - \ref boost::lockfree::fixed_sized, defaults to \c boost::lockfree::fixed_sized<false> \n
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. \n
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are
 *    addressed by array indexing. This limits the possible size of the queue to the number of elements that can be
 *    addressed by the index type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange
 *    instructions, this is the best way to achieve lock-freedom.
 *
 *  - \ref boost::lockfree::capacity, optional \n
 *    If this template argument is passed to the options, the size of the queue is set at compile-time. \n
 *    This option implies \c fixed_sized<true>
 *
 *  - \ref boost::lockfree::allocator, defaults to \c boost::lockfree::allocator<std::allocator<void>> \n
 *    Specifies the allocator that is used for the internal freelist
 *
 *  \b Requirements:
 *  - T must have a copy constructor
 *  - T must have a trivial copy assignment operator
 *  - T must have a trivial destructor
 *
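 * A minimal usage sketch (illustrative only; the sizes and the element type are arbitrary):
 * \code
 * #include <boost/lockfree/queue.hpp>
 *
 * // compile-time sized queue holding up to 1024 ints (implies fixed_sized<true>):
 * boost::lockfree::queue< int, boost::lockfree::capacity< 1024 > > fixed_queue;
 *
 * // run-time sized queue, pre-allocating 128 nodes on the freelist:
 * boost::lockfree::queue< int > dynamic_queue( 128 );
 * \endcode
 *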
 * */
template < typename T, typename... Options >
#if !defined( BOOST_NO_CXX20_HDR_CONCEPTS )
    requires( std::is_copy_assignable_v< T > && std::is_trivially_copy_assignable_v< T >
              && std::is_trivially_destructible_v< T > )
#endif
class queue
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    BOOST_STATIC_ASSERT( ( std::is_trivially_destructible< T >::value ) );
    BOOST_STATIC_ASSERT( ( std::is_trivially_copy_assignable< T >::value ) );

    typedef typename detail::queue_signature::bind< Options... >::type bound_args;

    static constexpr bool   has_capacity = detail::extract_capacity< bound_args >::has_capacity;
    static constexpr size_t capacity
        = detail::extract_capacity< bound_args >::capacity + 1; // the queue uses one dummy node
    static constexpr bool fixed_sized        = detail::extract_fixed_sized< bound_args >::value;
    static constexpr bool node_based         = !( has_capacity || fixed_sized );
    static constexpr bool compile_time_sized = has_capacity;

    struct alignas( detail::cacheline_bytes ) node
    {
        typedef typename detail::select_tagged_handle< node, node_based >::tagged_handle_type tagged_node_handle;
        typedef typename detail::select_tagged_handle< node, node_based >::handle_type        handle_type;

        node( T const& v, handle_type null_handle ) :
            data( v )
        {
            /* increment tag to avoid ABA problem */
            tagged_node_handle old_next = next.load( memory_order_relaxed );
            tagged_node_handle new_next( null_handle, old_next.get_next_tag() );
            next.store( new_next, memory_order_release );
        }

        node( handle_type null_handle ) :
            next( tagged_node_handle( null_handle, 0 ) )
        {}

        node( void )
        {}

        atomic< tagged_node_handle > next;
        T                            data;
    };

    typedef detail::extract_allocator_t< bound_args, node > node_allocator;

    typedef detail::select_freelist_t< node, node_allocator, compile_time_sized, fixed_sized, capacity > pool_t;
    typedef typename pool_t::tagged_node_handle                                    tagged_node_handle;
    typedef typename detail::select_tagged_handle< node, node_based >::handle_type handle_type;

    void initialize( void )
    {
        node*              n = pool.template construct< true, false >( pool.null_handle() );
        tagged_node_handle dummy_node( pool.get_handle( n ), 0 );
        head_.store( dummy_node, memory_order_relaxed );
        tail_.store( dummy_node, memory_order_release );
    }

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t    size_type;
    };

#endif
public:
    typedef T                                          value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;
    /**
     * \return true, if implementation is lock-free.
     *
     * \warning It only checks whether the queue head and tail nodes and the freelist can be modified in a lock-free
     *          manner. On most platforms, the whole implementation is lock-free if this is true. Using C++11-style
     *          atomics, there is no way to provide a completely accurate implementation, because one would need to
     *          test every internal node, which is impossible if further nodes will be allocated from the operating
     *          system.
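     *
     * A minimal sketch of the intended check (the queue size is arbitrary):
     * \code
     * boost::lockfree::queue< int > q( 128 );
     * if ( !q.is_lock_free() )
     *     std::cerr << "queue is not lock-free on this platform\n";
     * \endcode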
     * */
    bool is_lock_free( void ) const
    {
        return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free();
    }
    /** Construct a fixed-sized queue
     *
     * \pre Must specify a capacity<> argument
     * */
    queue( void )
#if !defined( BOOST_NO_CXX20_HDR_CONCEPTS )
        requires( has_capacity )
#endif
        :
        head_( tagged_node_handle( 0, 0 ) ),
        tail_( tagged_node_handle( 0, 0 ) ),
        pool( node_allocator(), capacity )
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT( has_capacity );
        initialize();
    }

    /** Construct a fixed-sized queue with a custom allocator
     *
     * \pre Must specify a capacity<> argument
     * */
    template < typename U, typename Enabler = typename std::enable_if< has_capacity >::type >
    explicit queue( typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) :
        head_( tagged_node_handle( 0, 0 ) ),
        tail_( tagged_node_handle( 0, 0 ) ),
        pool( alloc, capacity )
    {
        initialize();
    }

    /** Construct a fixed-sized queue with a custom allocator
     *
     * \pre Must specify a capacity<> argument
     * */
    template < typename Enabler = typename std::enable_if< has_capacity >::type >
    explicit queue( allocator const& alloc ) :
        head_( tagged_node_handle( 0, 0 ) ),
        tail_( tagged_node_handle( 0, 0 ) ),
        pool( alloc, capacity )
    {
        initialize();
    }

    /** Construct a variable-sized queue
     *
     * Allocate n nodes initially for the freelist
     *
     * \pre Must \b not specify a capacity<> argument
     * */
    template < typename Enabler = typename std::enable_if< !has_capacity >::type >
    explicit queue( size_type n ) :
        head_( tagged_node_handle( 0, 0 ) ),
        tail_( tagged_node_handle( 0, 0 ) ),
        pool( node_allocator(), n + 1 )
    {
        initialize();
    }

    /** Construct a variable-sized queue with a custom allocator
     *
     * Allocate n nodes initially for the freelist
     *
     * \pre Must \b not specify a capacity<> argument
     * */
    template < typename U, typename Enabler = typename std::enable_if< !has_capacity >::type >
    queue( size_type n, typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) :
        head_( tagged_node_handle( 0, 0 ) ),
        tail_( tagged_node_handle( 0, 0 ) ),
        pool( alloc, n + 1 )
    {
        initialize();
    }

    /** Construct a variable-sized queue with a custom allocator
     *
     * Allocate n nodes initially for the freelist
     *
     * \pre Must \b not specify a capacity<> argument
     * */
    template < typename Enabler = typename std::enable_if< !has_capacity >::type >
    queue( size_type n, allocator const& alloc ) :
        head_( tagged_node_handle( 0, 0 ) ),
        tail_( tagged_node_handle( 0, 0 ) ),
        pool( alloc, n + 1 )
    {
        initialize();
    }

    queue( const queue& )            = delete;
    queue& operator=( const queue& ) = delete;
    queue( queue&& )                 = delete;
    queue& operator=( queue&& )      = delete;
    /** \copydoc boost::lockfree::stack::reserve
     * */
    void reserve( size_type n )
    {
        pool.template reserve< true >( n );
    }

    /** \copydoc boost::lockfree::stack::reserve_unsafe
     * */
    void reserve_unsafe( size_type n )
    {
        pool.template reserve< false >( n );
    }
    /** Destroys the queue, freeing all nodes from the freelist.
     * */
    ~queue( void )
    {
        consume_all( []( const T& ) {} );
        pool.template destruct< false >( head_.load( memory_order_relaxed ) );
    }
    /** Check if the queue is empty
     *
     * \return true, if the queue is empty, false otherwise
     * \note The result is only accurate if no other thread modifies the queue. Therefore it is rarely practical to use
     *       this value in program logic.
     * */
    bool empty( void ) const
    {
        return pool.get_handle( head_.load() ) == pool.get_handle( tail_.load() );
    }
    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node
     *       will be allocated from the OS. This may not be lock-free.
     * */
    bool push( const T& t )
    {
        return do_push< false >( t );
    }

    /// \copydoc boost::lockfree::queue::push(const T & t)
    bool push( T&& t )
    {
        return do_push< false >( std::forward< T >( t ) );
    }
    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the operation will fail.
     * \throws if memory allocator throws
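     *
     * A short sketch contrasting \c bounded_push with \c push (sizes and values are illustrative):
     * \code
     * boost::lockfree::queue< int > q( 4 );  // freelist initially sized for 4 elements
     * while ( q.bounded_push( 0 ) )
     *     ;                                  // eventually fails: bounded_push never grows the pool
     * bool grown = q.push( 0 );              // push may instead allocate a new node from the OS
     * \endcode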
     * */
    bool bounded_push( const T& t )
    {
        return do_push< true >( t );
    }

    /// \copydoc boost::lockfree::queue::bounded_push(const T & t)
    bool bounded_push( T&& t )
    {
        return do_push< true >( std::forward< T >( t ) );
    }
private:
#ifndef BOOST_DOXYGEN_INVOKED
    template < bool Bounded >
    bool do_push( T&& t )
    {
        node* n = pool.template construct< true, Bounded >( std::forward< T >( t ), pool.null_handle() );
        return do_push_node( n );
    }

    template < bool Bounded >
    bool do_push( T const& t )
    {
        node* n = pool.template construct< true, Bounded >( t, pool.null_handle() );
        return do_push_node( n );
    }

    bool do_push_node( node* n )
    {
        handle_type node_handle = pool.get_handle( n );
        if ( n == NULL )
            return false;

        for ( ;; ) {
            tagged_node_handle tail = tail_.load( memory_order_acquire );
            node*              tail_node = pool.get_pointer( tail );
            tagged_node_handle next = tail_node->next.load( memory_order_acquire );
            node*              next_ptr = pool.get_pointer( next );

            tagged_node_handle tail2 = tail_.load( memory_order_acquire );
            if ( BOOST_LIKELY( tail == tail2 ) ) {
                if ( next_ptr == 0 ) {
                    tagged_node_handle new_tail_next( node_handle, next.get_next_tag() );
                    if ( tail_node->next.compare_exchange_weak( next, new_tail_next ) ) {
                        tagged_node_handle new_tail( node_handle, tail.get_next_tag() );
                        tail_.compare_exchange_strong( tail, new_tail );
                        return true;
                    }
                } else {
                    tagged_node_handle new_tail( pool.get_handle( next_ptr ), tail.get_next_tag() );
                    tail_.compare_exchange_strong( tail, new_tail );
                }
            }
        }
    }
#endif
public:
    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new
     *       node will be allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push( T&& t )
    {
        node* n = pool.template construct< false, false >( std::forward< T >( t ), pool.null_handle() );

        if ( n == NULL )
            return false;

        for ( ;; ) {
            tagged_node_handle tail = tail_.load( memory_order_relaxed );
            node*              tail_node = pool.get_pointer( tail );
            tagged_node_handle next = tail_node->next.load( memory_order_relaxed );
            node*              next_ptr = pool.get_pointer( next );

            if ( next_ptr == 0 ) {
                tail_node->next.store( tagged_node_handle( pool.get_handle( n ), next.get_next_tag() ),
                                       memory_order_relaxed );
                tail_.store( tagged_node_handle( pool.get_handle( n ), tail.get_next_tag() ), memory_order_relaxed );
                return true;
            } else
                tail_.store( tagged_node_handle( pool.get_handle( next_ptr ), tail.get_next_tag() ),
                             memory_order_relaxed );
        }
    }
    /** Pops object from queue.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Thread-safe and non-blocking. Might modify the return argument even if the operation fails.
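     *
     * A minimal single-threaded round trip (values are illustrative):
     * \code
     * boost::lockfree::queue< int > q( 128 );
     * q.push( 42 );
     * int out;
     * if ( q.pop( out ) )
     *     assert( out == 42 );  // holds here only because no other thread is popping
     * \endcode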
     * */
    bool pop( T& ret )
    {
        return pop< T >( ret );
    }
    /** Pops object from queue.
     *
     * \pre type U must be constructible from T and copyable, or T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Thread-safe and non-blocking. Might modify the return argument even if the operation fails.
     * */
    template < typename U >
    bool pop( U& ret )
    {
        for ( ;; ) {
            tagged_node_handle head = head_.load( memory_order_acquire );
            node*              head_ptr = pool.get_pointer( head );

            tagged_node_handle tail = tail_.load( memory_order_acquire );
            tagged_node_handle next = head_ptr->next.load( memory_order_acquire );
            node*              next_ptr = pool.get_pointer( next );

            tagged_node_handle head2 = head_.load( memory_order_acquire );
            if ( BOOST_LIKELY( head == head2 ) ) {
                if ( pool.get_handle( head ) == pool.get_handle( tail ) ) {
                    if ( next_ptr == 0 )
                        return false;

                    tagged_node_handle new_tail( pool.get_handle( next ), tail.get_next_tag() );
                    tail_.compare_exchange_strong( tail, new_tail );
                } else {
                    if ( next_ptr == 0 )
                        /* this check is not part of the original algorithm as published by michael and scott
                         *
                         * however we reuse the tagged_ptr part for the freelist and clear the next part during node
                         * allocation. we can observe a null-pointer here.
                         * */
                        continue;
                    detail::copy_payload( next_ptr->data, ret );

                    tagged_node_handle new_head( pool.get_handle( next ), head.get_next_tag() );
                    if ( head_.compare_exchange_weak( head, new_head ) ) {
                        pool.template destruct< true >( head );
                        return true;
                    }
                }
            }
        }
    }
#if !defined( BOOST_NO_CXX17_HDR_OPTIONAL ) || defined( BOOST_DOXYGEN_INVOKED )
    /** Pops object from queue, returning a std::optional<>
     *
     * \returns `std::optional` with value if successful, `std::nullopt` if queue is empty.
     *
     * \note Thread-safe and non-blocking
     *
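     * A minimal sketch, assuming C++17 and the \c boost::lockfree::uses_optional tag object
     * (\c process() is a hypothetical consumer):
     * \code
     * boost::lockfree::queue< int > q( 128 );
     * q.push( 1 );
     * if ( std::optional< int > v = q.pop( boost::lockfree::uses_optional ) )
     *     process( *v );
     * \endcode
     *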
     * */
    std::optional< T > pop( uses_optional_t )
    {
        T to_dequeue;
        if ( pop( to_dequeue ) )
            return to_dequeue;
        else
            return std::nullopt;
    }
    /** Pops object from queue, returning a std::optional<>
     *
     * \pre type T must be convertible to U
     * \returns `std::optional` with value if successful, `std::nullopt` if queue is empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template < typename U >
    std::optional< U > pop( uses_optional_t )
    {
        U to_dequeue;
        if ( pop( to_dequeue ) )
            return to_dequeue;
        else
            return std::nullopt;
    }
#endif
    /** Pops object from queue.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Not thread-safe, but non-blocking. Might modify the return argument even if the operation fails.
     *
     * */
    bool unsynchronized_pop( T& ret )
    {
        return unsynchronized_pop< T >( ret );
    }
    /** Pops object from queue.
     *
     * \pre type U must be constructible from T and copyable, or T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     *
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Not thread-safe, but non-blocking. Might modify the return argument even if the operation fails.
     *
     * */
    template < typename U >
    bool unsynchronized_pop( U& ret )
    {
        for ( ;; ) {
            tagged_node_handle head = head_.load( memory_order_relaxed );
            node*              head_ptr = pool.get_pointer( head );
            tagged_node_handle tail = tail_.load( memory_order_relaxed );
            tagged_node_handle next = head_ptr->next.load( memory_order_relaxed );
            node*              next_ptr = pool.get_pointer( next );

            if ( pool.get_handle( head ) == pool.get_handle( tail ) ) {
                if ( next_ptr == 0 )
                    return false;

                tagged_node_handle new_tail( pool.get_handle( next ), tail.get_next_tag() );
                tail_.store( new_tail );
            } else {
                if ( next_ptr == 0 )
                    /* this check is not part of the original algorithm as published by michael and scott
                     *
                     * however we reuse the tagged_ptr part for the freelist and clear the next part during node
                     * allocation. we can observe a null-pointer here.
                     * */
                    continue;
                detail::copy_payload( next_ptr->data, ret );

                tagged_node_handle new_head( pool.get_handle( next ), head.get_next_tag() );
                head_.store( new_head );
                pool.template destruct< false >( head );
                return true;
            }
        }
    }
    /** consumes one element via a functor
     *
     *  pops one element from the queue and applies the functor to this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template < typename Functor >
    bool consume_one( Functor&& f )
    {
        T    element;
        bool success = pop( element );
        if ( success )
            f( std::move( element ) );

        return success;
    }
    /** consumes all elements via a functor
     *
     *  sequentially pops all elements from the queue and applies the functor to each object
     *
     * \returns number of elements that were consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
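     *
     * A minimal sketch draining the queue into a sum (values are illustrative):
     * \code
     * boost::lockfree::queue< int > q( 128 );
     * q.push( 1 );
     * q.push( 2 );
     * int    sum = 0;
     * size_t n   = q.consume_all( [&]( int v ) { sum += v; } );
     * // with no concurrent consumers, n == 2 and sum == 3
     * \endcode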
     * */
    template < typename Functor >
    size_t consume_all( Functor&& f )
    {
        size_t element_count = 0;
        while ( consume_one( f ) )
            element_count += 1;

        return element_count;
    }
private:
#ifndef BOOST_DOXYGEN_INVOKED
    atomic< tagged_node_handle > head_;
    static constexpr int         padding_size = detail::cacheline_bytes - sizeof( tagged_node_handle );
    char                         padding1[ padding_size ];
    atomic< tagged_node_handle > tail_;
    char                         padding2[ padding_size ];

    pool_t pool;
#endif
};
}} // namespace boost::lockfree

#if defined( BOOST_INTEL ) && ( BOOST_INTEL_CXX_VERSION > 1000 )
# pragma warning( pop )
#endif

#if defined( _MSC_VER )
# pragma warning( pop )
#endif

#endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */