  1. // <mutex> -*- C++ -*-
  2. // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
  3. // Free Software Foundation, Inc.
  4. //
  5. // This file is part of the GNU ISO C++ Library. This library is free
  6. // software; you can redistribute it and/or modify it under the
  7. // terms of the GNU General Public License as published by the
  8. // Free Software Foundation; either version 3, or (at your option)
  9. // any later version.
  10. // This library is distributed in the hope that it will be useful,
  11. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. // GNU General Public License for more details.
  14. // Under Section 7 of GPL version 3, you are granted additional
  15. // permissions described in the GCC Runtime Library Exception, version
  16. // 3.1, as published by the Free Software Foundation.
  17. // You should have received a copy of the GNU General Public License and
  18. // a copy of the GCC Runtime Library Exception along with this program;
  19. // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  20. // <http://www.gnu.org/licenses/>.
  21. /** @file mutex
  22. * This is a Standard C++ Library header.
  23. */
  24. #ifndef _GLIBCXX_MUTEX
  25. #define _GLIBCXX_MUTEX 1
  26. #pragma GCC system_header
  27. #ifndef __GXX_EXPERIMENTAL_CXX0X__
  28. # include <c++0x_warning.h>
  29. #else
  30. #include <tuple>
  31. #include <cstddef>
  32. #include <chrono>
  33. #include <exception>
  34. #include <type_traits>
  35. #include <functional>
  36. #include <system_error>
  37. #include <bits/functexcept.h>
  38. #include <bits/gthr.h>
  39. #include <bits/move.h> // for std::swap
  40. #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
  41. namespace std
  42. {
  43. /**
  44. * @defgroup mutexes Mutexes
  45. * @ingroup concurrency
  46. *
  47. * Classes for mutex support.
  48. * @{
  49. */
  /// mutex
  ///
  /// Basic non-recursive mutual-exclusion device; a thin wrapper over
  /// the gthreads mutex primitive.  Not copyable.
  class mutex
  {
    typedef __gthread_mutex_t __native_type;
    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_MUTEX_INIT
      // A static initializer exists: copy it into the member.
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer on this target: initialize at runtime.
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    /// Block until the mutex is acquired.
    /// Throws system_error carrying the gthreads error code on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    /// Non-blocking acquisition attempt.  Returns true on success;
    /// any gthreads error is folded into a false return.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Release the mutex.  The gthreads return value is ignored:
    /// unlock does not report errors.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
  /// recursive_mutex
  ///
  /// Mutual-exclusion device built on the gthreads recursive mutex
  /// primitive.  Not copyable.
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;
    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      // A static initializer exists: copy it into the member.
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer on this target: initialize at runtime.
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    /// Acquire the mutex; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    /// Non-blocking acquisition attempt; errors map to a false return.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Release the mutex; the gthreads return value is ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
  /// timed_mutex
  ///
  /// Non-recursive mutex additionally supporting timed lock attempts
  /// via __gthread_mutex_timedlock.
  class timed_mutex
  {
    typedef __gthread_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    // Reference clock used to turn relative waits into absolute ones.
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    timed_mutex()
    {
#ifdef __GTHREAD_MUTEX_INIT
      // A static initializer exists: copy it into the member.
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer on this target: initialize at runtime.
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    /// Block until acquired; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    /// Non-blocking acquisition attempt; errors map to a false return.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire for the relative duration __rtime.  Dispatches on
    /// whether __clock_t can represent _Period without truncation; see
    /// the two __try_lock_for_impl overloads below.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    /// Try to acquire until the absolute time __atime.  The time point
    /// is split into whole seconds plus the nanosecond remainder and
    /// handed to __gthread_mutex_timedlock as a timespec-like value.
    /// NOTE(review): this assumes _Clock's epoch matches the epoch the
    /// gthreads timedlock expects — confirm for non-default clocks.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    /// Release the mutex; the gthreads return value is ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Case 1: the clock's tick is no coarser than _Period, so the
    // duration_cast below is exact — no rounding needed.
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    // Case 2: _Period is finer than the clock's tick; duration_cast
    // truncates toward zero, so the pre-increment rounds the converted
    // wait up by one clock tick to avoid timing out early.
    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
  /// recursive_timed_mutex
  ///
  /// Recursive mutex additionally supporting timed lock attempts via
  /// __gthread_recursive_mutex_timedlock.
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    // Reference clock used to turn relative waits into absolute ones.
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      // A static initializer exists: copy it into the member.
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer on this target: initialize at runtime.
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    /// Acquire the mutex; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    /// Non-blocking acquisition attempt; errors map to a false return.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire for the relative duration __rtime.  Dispatches on
    /// whether __clock_t can represent _Period without truncation.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    /// Try to acquire until the absolute time __atime; the time point
    /// is split into seconds plus the nanosecond remainder.
    /// NOTE(review): assumes _Clock's epoch matches the epoch the
    /// gthreads timedlock expects — confirm for non-default clocks.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    /// Release one level of ownership; the return value is ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Case 1: the clock's tick is no coarser than _Period, so the
    // duration_cast below is exact — no rounding needed.
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    // Case 2: _Period is finer than the clock's tick; duration_cast
    // truncates toward zero, so the pre-increment rounds the converted
    // wait up by one clock tick to avoid timing out early.
    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  // Tag objects selecting the locking policy at construction time.
  // Declared extern: the single definitions live in the library.
  extern const defer_lock_t defer_lock;
  extern const try_to_lock_t try_to_lock;
  extern const adopt_lock_t adopt_lock;
  /**
   * @brief Thrown to indicate errors with lock operations.
   *
   * @ingroup exceptions
   */
  class lock_error : public exception
  {
  public:
    // Out-of-line in the library; returns a descriptive message.
    virtual const char*
    what() const throw();
  };
  326. /// @brief Scoped lock idiom.
  327. // Acquire the mutex here with a constructor call, then release with
  328. // the destructor call in accordance with RAII style.
  329. template<typename _Mutex>
  330. class lock_guard
  331. {
  332. public:
  333. typedef _Mutex mutex_type;
  334. explicit lock_guard(mutex_type& __m) : _M_device(__m)
  335. { _M_device.lock(); }
  336. lock_guard(mutex_type& __m, adopt_lock_t __a) : _M_device(__m)
  337. { _M_device.lock(); }
  338. ~lock_guard()
  339. { _M_device.unlock(); }
  340. lock_guard(const lock_guard&) = delete;
  341. lock_guard& operator=(const lock_guard&) = delete;
  342. private:
  343. mutex_type& _M_device;
  344. };
  /// unique_lock
  ///
  /// Movable mutex-ownership wrapper.  May reference no mutex at all,
  /// or reference one without owning its lock; _M_owns records whether
  /// this object currently holds the lock on *_M_device.
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      /// Default: references no mutex, owns nothing.
      unique_lock()
      : _M_device(0), _M_owns(false)
      { }

      /// Lock __m immediately (blocks until acquired).
      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
        lock();
        _M_owns = true;
      }

      /// Reference __m but defer locking.
      unique_lock(mutex_type& __m, defer_lock_t)
      : _M_device(&__m), _M_owns(false)
      { }

      /// Non-blocking attempt on __m; ownership reflects success.
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      /// Adopt a lock the calling thread already holds on __m.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
        // XXX calling thread owns mutex
      }

      /// Timed attempt on __m until the absolute time __atime.
      template<typename _Clock, typename _Duration>
        unique_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __atime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
        { }

      /// Timed attempt on __m for the relative duration __rtime.
      template<typename _Rep, typename _Period>
        unique_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rtime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
        { }

      /// Release the lock on destruction only if currently owned.
      ~unique_lock()
      {
        if (_M_owns)
          unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      /// Move construction: steal __u's mutex reference and ownership,
      /// leaving __u empty.
      unique_lock(unique_lock&& __u)
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
        __u._M_device = 0;
        __u._M_owns = false;
      }

      /// Move assignment: drop any lock we hold, then take over __u's
      /// state via a temporary and swap, leaving __u empty.
      unique_lock& operator=(unique_lock&& __u)
      {
        if(_M_owns)
          unlock();

        unique_lock(std::move(__u)).swap(*this);

        __u._M_device = 0;
        __u._M_owns = false;

        return *this;
      }

      /// Lock the referenced mutex.  Throws system_error with
      /// operation_not_permitted if no mutex is referenced, or with
      /// resource_deadlock_would_occur if the lock is already owned.
      void
      lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_device->lock();
            _M_owns = true;
          }
      }

      /// Non-blocking lock attempt; same precondition errors as lock().
      bool
      try_lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_owns = _M_device->try_lock();
            return _M_owns;
          }
      }

      /// Timed lock attempt until __atime; same precondition errors.
      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_until(__atime);
              return _M_owns;
            }
        }

      /// Timed lock attempt for __rtime; same precondition errors.
      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_for(__rtime);
              return _M_owns;
            }
        }

      /// Unlock the mutex.  Throws operation_not_permitted if not
      /// owned; silently does nothing if no mutex is referenced.
      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_device)
          {
            _M_device->unlock();
            _M_owns = false;
          }
      }

      /// Exchange state with __u.
      /// NOTE(review): rvalue-reference parameter in the old draft
      /// style — relies on pre-N2844 lvalue-to-&& binding; confirm
      /// against the compiler version this header targets.
      void
      swap(unique_lock&& __u)
      {
        std::swap(_M_device, __u._M_device);
        std::swap(_M_owns, __u._M_owns);
      }

      /// Disassociate from the mutex WITHOUT unlocking it; returns the
      /// mutex pointer and leaves this object empty.
      mutex_type*
      release()
      {
        mutex_type* __ret = _M_device;
        _M_device = 0;
        _M_owns = false;
        return __ret;
      }

      /// True if this object currently owns the lock.
      bool
      owns_lock() const
      { return _M_owns; }

      // Conversion kept implicit here (explicit commented out).
      /* explicit */ operator bool () const
      { return owns_lock(); }

      /// The referenced mutex, or null.
      mutex_type*
      mutex() const
      { return _M_device; }

    private:
      mutex_type* _M_device; // referenced mutex (may be null)
      bool _M_owns; // XXX use atomic_bool
    };
  /// Swap the mutex references and ownership flags of two unique_locks.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
    { __x.swap(__y); }

  // Overloads taking rvalue unique_locks (old draft rvalue-swap style).
  // NOTE(review): forwarding an lvalue __y into the && member swap
  // relies on pre-N2844 binding rules; confirm for the targeted
  // compiler version.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>&& __x, unique_lock<_Mutex>& __y)
    { __x.swap(__y); }

  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>&& __y)
    { __x.swap(__y); }
  // Helper: unlock elements _Idx, _Idx - 1, ..., 0 of a lock tuple.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
        static void
        __do_unlock(std::tuple<_Lock&...>& __locks)
        {
          std::get<_Idx>(__locks).unlock();
          __unlock_impl<_Idx - 1>::__do_unlock(__locks);
        }
    };

  // Base case: index -1 means nothing (more) to unlock.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
        static void
        __do_unlock(std::tuple<_Lock&...>&)
        { }
    };

  // Helper: try_lock element _Idx, then recurse to _Idx + 1.
  // _Continue stays true while _Idx + 1 is not the last element,
  // i.e. while _Idx + 2 < sizeof...(_Lock).
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      // Returns -1 if every element locked, else the failing index.
      // On failure only elements [0, _Idx - 1] were acquired, so roll
      // back with __unlock_impl<_Idx - 1>: element _Idx itself was
      // never locked and must not be unlocked.
      template<typename... _Lock>
        static int
        __do_try_lock(std::tuple<_Lock&...>& __locks)
        {
          if(std::get<_Idx>(__locks).try_lock())
            {
              return __try_lock_impl<_Idx + 1,
                _Idx + 2 < sizeof...(_Lock)>::__do_try_lock(__locks);
            }
          else
            {
              __unlock_impl<_Idx - 1>::__do_unlock(__locks);
              return _Idx;
            }
        }
    };

  // Terminal case: _Idx is the last element.
  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static int
        __do_try_lock(std::tuple<_Lock&...>& __locks)
        {
          if(std::get<_Idx>(__locks).try_lock())
            return -1;
          else
            {
              // As above: element _Idx was not acquired, so unlock
              // only the preceding elements.
              __unlock_impl<_Idx - 1>::__do_unlock(__locks);
              return _Idx;
            }
        }
    };
  560. /** @brief Generic try_lock.
  561. * @param __l1 Meets Mutex requirements (try_lock() may throw).
  562. * @param __l2 Meets Mutex requirements (try_lock() may throw).
  563. * @param __l3 Meets Mutex requirements (try_lock() may throw).
  564. * @return Returns -1 if all try_lock() calls return true. Otherwise returns
  565. * a 0-based index corresponding to the argument that returned false.
  566. * @post Either all arguments are locked, or none will be.
  567. *
  568. * Sequentially calls try_lock() on each argument.
  569. */
  570. template<typename _Lock1, typename _Lock2, typename... _Lock3>
  571. int
  572. try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
  573. {
  574. tuple<_Lock1&, _Lock2&, _Lock3&...> __locks(__l1, __l2, __l3...);
  575. return __try_lock_impl<0>::__do_try_lock(__locks);
  576. }
  /// lock
  ///
  /// Generic lock of multiple lockables.  Declaration only here — the
  /// definition is not visible in this header.
  /// NOTE(review): presumably implements the deadlock-avoiding lock
  /// algorithm; confirm against the library source.
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1&, _L2&, _L3&...);
  /// once_flag
  ///
  /// Flag object passed to call_once; wraps a gthreads once-control
  /// object.  Not copyable.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type _M_once;

  public:
    once_flag()
    {
      // Copy the static initializer into the member.
      __native_type __tmp = __GTHREAD_ONCE_INIT;
      _M_once = __tmp;
    }

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable __f, _Args&&... __args);
  };
  599. #ifdef _GLIBCXX_HAVE_TLS
  600. extern __thread void* __once_callable;
  601. extern __thread void (*__once_call)();
  602. template<typename _Callable>
  603. inline void
  604. __once_call_impl()
  605. {
  606. (*(_Callable*)__once_callable)();
  607. }
  608. #else
  609. extern function<void()> __once_functor;
  610. extern void
  611. __set_once_functor_lock_ptr(unique_lock<mutex>*);
  612. extern mutex&
  613. __get_once_mutex();
  614. #endif
  615. extern "C" void __once_proxy();
  /// call_once
  ///
  /// Arrange for __f(__args...) to be invoked exactly once for the
  /// given once_flag, via __gthread_once and the __once_proxy thunk.
  /// Throws system_error if the underlying __gthread_once call fails.
  ///
  /// NOTE(review): the arguments are passed through bind without
  /// std::forward, so they are copied despite the _Args&&... signature
  /// — confirm whether perfect forwarding was intended.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // Stash the bound call in TLS where __once_proxy can find it.
      auto __bound_functor = bind(__f, __args...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: publish the functor through a mutex-protected global,
      // and hand the proxy a pointer to our lock.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = bind(__f, __args...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If we still own the lock here, clear the stored pointer;
      // presumably __once_proxy releases it otherwise — verify in the
      // library source.
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
        __throw_system_error(__e);
    }
  638. // @} group mutexes
  639. }
  640. #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
  641. #endif // __GXX_EXPERIMENTAL_CXX0X__
  642. #endif // _GLIBCXX_MUTEX