// unbuffered_channel.hpp
  1. // Copyright Oliver Kowalke 2016.
  2. // Distributed under the Boost Software License, Version 1.0.
  3. // (See accompanying file LICENSE_1_0.txt or copy at
  4. // http://www.boost.org/LICENSE_1_0.txt)
  5. #ifndef BOOST_FIBERS_UNBUFFERED_CHANNEL_H
  6. #define BOOST_FIBERS_UNBUFFERED_CHANNEL_H
  7. #include <atomic>
  8. #include <chrono>
  9. #include <cstddef>
  10. #include <cstdint>
  11. #include <memory>
  12. #include <vector>
  13. #include <boost/config.hpp>
  14. #include <boost/fiber/channel_op_status.hpp>
  15. #include <boost/fiber/context.hpp>
  16. #include <boost/fiber/detail/config.hpp>
  17. #include <boost/fiber/detail/convert.hpp>
  18. #include <boost/fiber/detail/spinlock.hpp>
  19. #include <boost/fiber/exceptions.hpp>
  20. #ifdef BOOST_HAS_ABI_HEADERS
  21. # include BOOST_ABI_PREFIX
  22. #endif
  23. namespace boost {
  24. namespace fibers {
  25. template< typename T >
  26. class unbuffered_channel {
  27. public:
  28. typedef typename std::remove_reference< T >::type value_type;
  29. private:
  30. typedef context::wait_queue_t wait_queue_type;
  31. struct slot {
  32. value_type value;
  33. context * ctx;
  34. slot( value_type const& value_, context * ctx_) :
  35. value{ value_ },
  36. ctx{ ctx_ } {
  37. }
  38. slot( value_type && value_, context * ctx_) :
  39. value{ std::move( value_) },
  40. ctx{ ctx_ } {
  41. }
  42. };
  43. // shared cacheline
  44. std::atomic< slot * > slot_{ nullptr };
  45. // shared cacheline
  46. std::atomic_bool closed_{ false };
  47. mutable detail::spinlock splk_producers_{};
  48. wait_queue_type waiting_producers_{};
  49. mutable detail::spinlock splk_consumers_{};
  50. wait_queue_type waiting_consumers_{};
  51. char pad_[cacheline_length];
  52. bool is_empty_() {
  53. return nullptr == slot_.load( std::memory_order_acquire);
  54. }
  55. bool try_push_( slot * own_slot) {
  56. for (;;) {
  57. slot * s = slot_.load( std::memory_order_acquire);
  58. if ( nullptr == s) {
  59. if ( ! slot_.compare_exchange_strong( s, own_slot, std::memory_order_acq_rel) ) {
  60. continue;
  61. }
  62. return true;
  63. } else {
  64. return false;
  65. }
  66. }
  67. }
  68. slot * try_pop_() {
  69. slot * nil_slot = nullptr;
  70. for (;;) {
  71. slot * s = slot_.load( std::memory_order_acquire);
  72. if ( nullptr != s) {
  73. if ( ! slot_.compare_exchange_strong( s, nil_slot, std::memory_order_acq_rel) ) {
  74. continue;}
  75. }
  76. return s;
  77. }
  78. }
  79. public:
  80. unbuffered_channel() {
  81. }
  82. ~unbuffered_channel() {
  83. close();
  84. }
  85. unbuffered_channel( unbuffered_channel const&) = delete;
  86. unbuffered_channel & operator=( unbuffered_channel const&) = delete;
  87. bool is_closed() const noexcept {
  88. return closed_.load( std::memory_order_acquire);
  89. }
  90. void close() noexcept {
  91. context * active_ctx = context::active();
  92. // notify all waiting producers
  93. closed_.store( true, std::memory_order_release);
  94. detail::spinlock_lock lk1{ splk_producers_ };
  95. while ( ! waiting_producers_.empty() ) {
  96. context * producer_ctx = & waiting_producers_.front();
  97. waiting_producers_.pop_front();
  98. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  99. if ( producer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  100. // notify context
  101. active_ctx->schedule( producer_ctx);
  102. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  103. // no timed-wait op.
  104. // notify context
  105. active_ctx->schedule( producer_ctx);
  106. }
  107. }
  108. // notify all waiting consumers
  109. detail::spinlock_lock lk2{ splk_consumers_ };
  110. while ( ! waiting_consumers_.empty() ) {
  111. context * consumer_ctx = & waiting_consumers_.front();
  112. waiting_consumers_.pop_front();
  113. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  114. if ( consumer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  115. // notify context
  116. active_ctx->schedule( consumer_ctx);
  117. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  118. // no timed-wait op.
  119. // notify context
  120. active_ctx->schedule( consumer_ctx);
  121. }
  122. }
  123. }
  124. channel_op_status push( value_type const& value) {
  125. context * active_ctx = context::active();
  126. slot s{ value, active_ctx };
  127. for (;;) {
  128. if ( BOOST_UNLIKELY( is_closed() ) ) {
  129. return channel_op_status::closed;
  130. }
  131. if ( try_push_( & s) ) {
  132. detail::spinlock_lock lk{ splk_consumers_ };
  133. // notify one waiting consumer
  134. while ( ! waiting_consumers_.empty() ) {
  135. context * consumer_ctx = & waiting_consumers_.front();
  136. waiting_consumers_.pop_front();
  137. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  138. if ( consumer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  139. // notify context
  140. active_ctx->schedule( consumer_ctx);
  141. break;
  142. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  143. // no timed-wait op.
  144. // notify context
  145. active_ctx->schedule( consumer_ctx);
  146. break;
  147. }
  148. }
  149. // suspend till value has been consumed
  150. active_ctx->suspend( lk);
  151. // resumed, value has been consumed
  152. return channel_op_status::success;
  153. } else {
  154. detail::spinlock_lock lk{ splk_producers_ };
  155. if ( BOOST_UNLIKELY( is_closed() ) ) {
  156. return channel_op_status::closed;
  157. }
  158. if ( is_empty_() ) {
  159. continue;
  160. }
  161. active_ctx->wait_link( waiting_producers_);
  162. active_ctx->twstatus.store( static_cast< std::intptr_t >( 0), std::memory_order_release);
  163. // suspend this producer
  164. active_ctx->suspend( lk);
  165. // resumed, slot mabye free
  166. }
  167. }
  168. }
  169. channel_op_status push( value_type && value) {
  170. context * active_ctx = context::active();
  171. slot s{ std::move( value), active_ctx };
  172. for (;;) {
  173. if ( BOOST_UNLIKELY( is_closed() ) ) {
  174. return channel_op_status::closed;
  175. }
  176. if ( try_push_( & s) ) {
  177. detail::spinlock_lock lk{ splk_consumers_ };
  178. // notify one waiting consumer
  179. while ( ! waiting_consumers_.empty() ) {
  180. context * consumer_ctx = & waiting_consumers_.front();
  181. waiting_consumers_.pop_front();
  182. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  183. if ( consumer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  184. // notify context
  185. active_ctx->schedule( consumer_ctx);
  186. break;
  187. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  188. // no timed-wait op.
  189. // notify context
  190. active_ctx->schedule( consumer_ctx);
  191. break;
  192. }
  193. }
  194. // suspend till value has been consumed
  195. active_ctx->suspend( lk);
  196. // resumed, value has been consumed
  197. return channel_op_status::success;
  198. } else {
  199. detail::spinlock_lock lk{ splk_producers_ };
  200. if ( BOOST_UNLIKELY( is_closed() ) ) {
  201. return channel_op_status::closed;
  202. }
  203. if ( is_empty_() ) {
  204. continue;
  205. }
  206. active_ctx->wait_link( waiting_producers_);
  207. active_ctx->twstatus.store( static_cast< std::intptr_t >( 0), std::memory_order_release);
  208. // suspend this producer
  209. active_ctx->suspend( lk);
  210. // resumed, slot mabye free
  211. }
  212. }
  213. }
  214. template< typename Rep, typename Period >
  215. channel_op_status push_wait_for( value_type const& value,
  216. std::chrono::duration< Rep, Period > const& timeout_duration) {
  217. return push_wait_until( value,
  218. std::chrono::steady_clock::now() + timeout_duration);
  219. }
  220. template< typename Rep, typename Period >
  221. channel_op_status push_wait_for( value_type && value,
  222. std::chrono::duration< Rep, Period > const& timeout_duration) {
  223. return push_wait_until( std::forward< value_type >( value),
  224. std::chrono::steady_clock::now() + timeout_duration);
  225. }
  226. template< typename Clock, typename Duration >
  227. channel_op_status push_wait_until( value_type const& value,
  228. std::chrono::time_point< Clock, Duration > const& timeout_time_) {
  229. context * active_ctx = context::active();
  230. slot s{ value, active_ctx };
  231. std::chrono::steady_clock::time_point timeout_time = detail::convert( timeout_time_);
  232. for (;;) {
  233. if ( BOOST_UNLIKELY( is_closed() ) ) {
  234. return channel_op_status::closed;
  235. }
  236. if ( try_push_( & s) ) {
  237. detail::spinlock_lock lk{ splk_consumers_ };
  238. // notify one waiting consumer
  239. while ( ! waiting_consumers_.empty() ) {
  240. context * consumer_ctx = & waiting_consumers_.front();
  241. waiting_consumers_.pop_front();
  242. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  243. if ( consumer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  244. // notify context
  245. active_ctx->schedule( consumer_ctx);
  246. break;
  247. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  248. // no timed-wait op.
  249. // notify context
  250. active_ctx->schedule( consumer_ctx);
  251. break;
  252. }
  253. }
  254. // suspend this producer
  255. active_ctx->twstatus.store( reinterpret_cast< std::intptr_t >( this), std::memory_order_release);
  256. if ( ! active_ctx->wait_until( timeout_time, lk) ) {
  257. // clear slot
  258. slot * nil_slot = nullptr, * own_slot = & s;
  259. slot_.compare_exchange_strong( own_slot, nil_slot, std::memory_order_acq_rel);
  260. // resumed, value has not been consumed
  261. return channel_op_status::timeout;
  262. }
  263. // resumed, value has been consumed
  264. return channel_op_status::success;
  265. } else {
  266. detail::spinlock_lock lk{ splk_producers_ };
  267. if ( BOOST_UNLIKELY( is_closed() ) ) {
  268. return channel_op_status::closed;
  269. }
  270. if ( is_empty_() ) {
  271. continue;
  272. }
  273. active_ctx->wait_link( waiting_producers_);
  274. active_ctx->twstatus.store( reinterpret_cast< std::intptr_t >( this), std::memory_order_release);
  275. // suspend this producer
  276. if ( ! active_ctx->wait_until( timeout_time, lk) ) {
  277. // relock local lk
  278. lk.lock();
  279. // remove from waiting-queue
  280. waiting_producers_.remove( * active_ctx);
  281. return channel_op_status::timeout;
  282. }
  283. // resumed, slot maybe free
  284. }
  285. }
  286. }
  287. template< typename Clock, typename Duration >
  288. channel_op_status push_wait_until( value_type && value,
  289. std::chrono::time_point< Clock, Duration > const& timeout_time_) {
  290. context * active_ctx = context::active();
  291. slot s{ std::move( value), active_ctx };
  292. std::chrono::steady_clock::time_point timeout_time = detail::convert( timeout_time_);
  293. for (;;) {
  294. if ( BOOST_UNLIKELY( is_closed() ) ) {
  295. return channel_op_status::closed;
  296. }
  297. if ( try_push_( & s) ) {
  298. detail::spinlock_lock lk{ splk_consumers_ };
  299. // notify one waiting consumer
  300. while ( ! waiting_consumers_.empty() ) {
  301. context * consumer_ctx = & waiting_consumers_.front();
  302. waiting_consumers_.pop_front();
  303. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  304. if ( consumer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  305. // notify context
  306. active_ctx->schedule( consumer_ctx);
  307. break;
  308. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  309. // no timed-wait op.
  310. // notify context
  311. active_ctx->schedule( consumer_ctx);
  312. break;
  313. }
  314. }
  315. // suspend this producer
  316. active_ctx->twstatus.store( reinterpret_cast< std::intptr_t >( this), std::memory_order_release);
  317. if ( ! active_ctx->wait_until( timeout_time, lk) ) {
  318. // clear slot
  319. slot * nil_slot = nullptr, * own_slot = & s;
  320. slot_.compare_exchange_strong( own_slot, nil_slot, std::memory_order_acq_rel);
  321. // resumed, value has not been consumed
  322. return channel_op_status::timeout;
  323. }
  324. // resumed, value has been consumed
  325. return channel_op_status::success;
  326. } else {
  327. detail::spinlock_lock lk{ splk_producers_ };
  328. if ( BOOST_UNLIKELY( is_closed() ) ) {
  329. return channel_op_status::closed;
  330. }
  331. if ( is_empty_() ) {
  332. continue;
  333. }
  334. active_ctx->wait_link( waiting_producers_);
  335. active_ctx->twstatus.store( reinterpret_cast< std::intptr_t >( this), std::memory_order_release);
  336. // suspend this producer
  337. if ( ! active_ctx->wait_until( timeout_time, lk) ) {
  338. // relock local lk
  339. lk.lock();
  340. // remove from waiting-queue
  341. waiting_producers_.remove( * active_ctx);
  342. return channel_op_status::timeout;
  343. }
  344. // resumed, slot maybe free
  345. }
  346. }
  347. }
  348. channel_op_status pop( value_type & value) {
  349. context * active_ctx = context::active();
  350. slot * s = nullptr;
  351. for (;;) {
  352. if ( nullptr != ( s = try_pop_() ) ) {
  353. {
  354. detail::spinlock_lock lk{ splk_producers_ };
  355. // notify one waiting producer
  356. while ( ! waiting_producers_.empty() ) {
  357. context * producer_ctx = & waiting_producers_.front();
  358. waiting_producers_.pop_front();
  359. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  360. if ( producer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  361. lk.unlock();
  362. // notify context
  363. active_ctx->schedule( producer_ctx);
  364. break;
  365. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  366. lk.unlock();
  367. // no timed-wait op.
  368. // notify context
  369. active_ctx->schedule( producer_ctx);
  370. break;
  371. }
  372. }
  373. }
  374. value = std::move( s->value);
  375. // notify context
  376. active_ctx->schedule( s->ctx);
  377. return channel_op_status::success;
  378. } else {
  379. detail::spinlock_lock lk{ splk_consumers_ };
  380. if ( BOOST_UNLIKELY( is_closed() ) ) {
  381. return channel_op_status::closed;
  382. }
  383. if ( ! is_empty_() ) {
  384. continue;
  385. }
  386. active_ctx->wait_link( waiting_consumers_);
  387. active_ctx->twstatus.store( static_cast< std::intptr_t >( 0), std::memory_order_release);
  388. // suspend this consumer
  389. active_ctx->suspend( lk);
  390. // resumed, slot mabye set
  391. }
  392. }
  393. }
  394. value_type value_pop() {
  395. context * active_ctx = context::active();
  396. slot * s = nullptr;
  397. for (;;) {
  398. if ( nullptr != ( s = try_pop_() ) ) {
  399. {
  400. detail::spinlock_lock lk{ splk_producers_ };
  401. // notify one waiting producer
  402. while ( ! waiting_producers_.empty() ) {
  403. context * producer_ctx = & waiting_producers_.front();
  404. waiting_producers_.pop_front();
  405. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  406. if ( producer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  407. lk.unlock();
  408. // notify context
  409. active_ctx->schedule( producer_ctx);
  410. break;
  411. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  412. lk.unlock();
  413. // no timed-wait op.
  414. // notify context
  415. active_ctx->schedule( producer_ctx);
  416. break;
  417. }
  418. }
  419. }
  420. // consume value
  421. value_type value = std::move( s->value);
  422. // notify context
  423. active_ctx->schedule( s->ctx);
  424. return std::move( value);
  425. } else {
  426. detail::spinlock_lock lk{ splk_consumers_ };
  427. if ( BOOST_UNLIKELY( is_closed() ) ) {
  428. throw fiber_error{
  429. std::make_error_code( std::errc::operation_not_permitted),
  430. "boost fiber: channel is closed" };
  431. }
  432. if ( ! is_empty_() ) {
  433. continue;
  434. }
  435. active_ctx->wait_link( waiting_consumers_);
  436. active_ctx->twstatus.store( static_cast< std::intptr_t >( 0), std::memory_order_release);
  437. // suspend this consumer
  438. active_ctx->suspend( lk);
  439. // resumed, slot mabye set
  440. }
  441. }
  442. }
  443. template< typename Rep, typename Period >
  444. channel_op_status pop_wait_for( value_type & value,
  445. std::chrono::duration< Rep, Period > const& timeout_duration) {
  446. return pop_wait_until( value,
  447. std::chrono::steady_clock::now() + timeout_duration);
  448. }
  449. template< typename Clock, typename Duration >
  450. channel_op_status pop_wait_until( value_type & value,
  451. std::chrono::time_point< Clock, Duration > const& timeout_time_) {
  452. context * active_ctx = context::active();
  453. slot * s = nullptr;
  454. std::chrono::steady_clock::time_point timeout_time = detail::convert( timeout_time_);
  455. for (;;) {
  456. if ( nullptr != ( s = try_pop_() ) ) {
  457. {
  458. detail::spinlock_lock lk{ splk_producers_ };
  459. // notify one waiting producer
  460. while ( ! waiting_producers_.empty() ) {
  461. context * producer_ctx = & waiting_producers_.front();
  462. waiting_producers_.pop_front();
  463. std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
  464. if ( producer_ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
  465. lk.unlock();
  466. // notify context
  467. active_ctx->schedule( producer_ctx);
  468. break;
  469. } else if ( static_cast< std::intptr_t >( 0) == expected) {
  470. lk.unlock();
  471. // no timed-wait op.
  472. // notify context
  473. active_ctx->schedule( producer_ctx);
  474. break;
  475. }
  476. }
  477. }
  478. // consume value
  479. value = std::move( s->value);
  480. // notify context
  481. active_ctx->schedule( s->ctx);
  482. return channel_op_status::success;
  483. } else {
  484. detail::spinlock_lock lk{ splk_consumers_ };
  485. if ( BOOST_UNLIKELY( is_closed() ) ) {
  486. return channel_op_status::closed;
  487. }
  488. if ( ! is_empty_() ) {
  489. continue;
  490. }
  491. active_ctx->wait_link( waiting_consumers_);
  492. active_ctx->twstatus.store( reinterpret_cast< std::intptr_t >( this), std::memory_order_release);
  493. // suspend this consumer
  494. if ( ! active_ctx->wait_until( timeout_time, lk) ) {
  495. // relock local lk
  496. lk.lock();
  497. // remove from waiting-queue
  498. waiting_consumers_.remove( * active_ctx);
  499. return channel_op_status::timeout;
  500. }
  501. }
  502. }
  503. }
  504. class iterator {
  505. private:
  506. typedef typename std::aligned_storage< sizeof( value_type), alignof( value_type) >::type storage_type;
  507. unbuffered_channel * chan_{ nullptr };
  508. storage_type storage_;
  509. void increment_() {
  510. BOOST_ASSERT( nullptr != chan_);
  511. try {
  512. ::new ( static_cast< void * >( std::addressof( storage_) ) ) value_type{ chan_->value_pop() };
  513. } catch ( fiber_error const&) {
  514. chan_ = nullptr;
  515. }
  516. }
  517. public:
  518. typedef std::input_iterator_tag iterator_category;
  519. typedef std::ptrdiff_t difference_type;
  520. typedef value_type * pointer;
  521. typedef value_type & reference;
  522. typedef pointer pointer_t;
  523. typedef reference reference_t;
  524. iterator() noexcept = default;
  525. explicit iterator( unbuffered_channel< T > * chan) noexcept :
  526. chan_{ chan } {
  527. increment_();
  528. }
  529. iterator( iterator const& other) noexcept :
  530. chan_{ other.chan_ } {
  531. }
  532. iterator & operator=( iterator const& other) noexcept {
  533. if ( this == & other) return * this;
  534. chan_ = other.chan_;
  535. return * this;
  536. }
  537. bool operator==( iterator const& other) const noexcept {
  538. return other.chan_ == chan_;
  539. }
  540. bool operator!=( iterator const& other) const noexcept {
  541. return other.chan_ != chan_;
  542. }
  543. iterator & operator++() {
  544. increment_();
  545. return * this;
  546. }
  547. iterator operator++( int) = delete;
  548. reference_t operator*() noexcept {
  549. return * reinterpret_cast< value_type * >( std::addressof( storage_) );
  550. }
  551. pointer_t operator->() noexcept {
  552. return reinterpret_cast< value_type * >( std::addressof( storage_) );
  553. }
  554. };
  555. friend class iterator;
  556. };
  557. template< typename T >
  558. typename unbuffered_channel< T >::iterator
  559. begin( unbuffered_channel< T > & chan) {
  560. return typename unbuffered_channel< T >::iterator( & chan);
  561. }
  562. template< typename T >
  563. typename unbuffered_channel< T >::iterator
  564. end( unbuffered_channel< T > &) {
  565. return typename unbuffered_channel< T >::iterator();
  566. }
  567. }}
  568. #ifdef BOOST_HAS_ABI_HEADERS
  569. # include BOOST_ABI_SUFFIX
  570. #endif
  571. #endif // BOOST_FIBERS_UNBUFFERED_CHANNEL_H