  1. /*
  2. * Distributed under the Boost Software License, Version 1.0.
  3. * (See accompanying file LICENSE_1_0.txt or copy at
  4. * http://www.boost.org/LICENSE_1_0.txt)
  5. *
  6. * Copyright (c) 2020-2025 Andrey Semashev
  7. */
  8. /*!
  9. * \file atomic/detail/wait_ops_freebsd_umtx.hpp
  10. *
  11. * This header contains implementation of the waiting/notifying atomic operations based on FreeBSD _umtx_op syscall.
  12. * https://man.freebsd.org/cgi/man.cgi?query=_umtx_op&apropos=0&sektion=2&manpath=FreeBSD+11.0-RELEASE&arch=default&format=html
  13. */
  14. #ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_FREEBSD_UMTX_HPP_INCLUDED_
  15. #define BOOST_ATOMIC_DETAIL_WAIT_OPS_FREEBSD_UMTX_HPP_INCLUDED_
  16. #include <sys/umtx.h>
  17. #include <time.h>
  18. #include <cstdint>
  19. #include <cerrno>
  20. #include <limits>
  21. #include <chrono>
  22. #include <type_traits>
  23. #include <boost/memory_order.hpp>
  24. #include <boost/atomic/posix_clock_traits_fwd.hpp>
  25. #include <boost/atomic/detail/config.hpp>
  26. #if defined(UMTX_ABSTIME)
  27. #include <boost/atomic/detail/intptr.hpp>
  28. #endif
  29. #include <boost/atomic/detail/chrono.hpp>
  30. #include <boost/atomic/detail/int_sizes.hpp>
  31. #include <boost/atomic/detail/has_posix_clock_traits.hpp>
  32. #include <boost/atomic/detail/wait_operations_fwd.hpp>
  33. #include <boost/atomic/detail/header.hpp>
  34. #ifdef BOOST_HAS_PRAGMA_ONCE
  35. #pragma once
  36. #endif
  37. namespace boost {
  38. namespace atomics {
  39. namespace detail {
  40. // Brief evolution of _umtx_op in FreeBSD:
  41. //
  42. // * FreeBSD 6.0.
  43. // Initial version that supports UMTX_OP_WAIT and UMTX_OP_WAKE for long-sized futexes. Supports timed waits, where initially the timeout was absolute
  44. // against CLOCK_REALTIME (https://github.com/freebsd/freebsd-src/commit/cc1000ac5b235516dd312340fca34e8847add3d0), but later was changed to relative
  45. // timeouts that count against CLOCK_MONOTONIC (https://github.com/freebsd/freebsd-src/commit/b7be40d612b794ea9165b71a8afe07777dc9d31a). Presumably,
  46. // the initial version with absolute timeouts was not released, so we can ignore it.
  47. // * FreeBSD 8.0.
  48. // Added UMTX_OP_WAIT_UINT (https://github.com/freebsd/freebsd-src/commit/110de0cf17923839e434ead831b7a7c74a7ce102) for int-sized futexes, as well as
  49. // UMTX_OP_WAIT_UINT_PRIVATE and UMTX_OP_WAKE_PRIVATE (https://github.com/freebsd/freebsd-src/commit/727158f6f64df04094d41ca5ee4b0641308c39d0) for
  50. // process-local futexes.
  51. // * FreeBSD 10.0.
  52. // Added UMTX_ABSTIME (https://github.com/freebsd/freebsd-src/commit/df1f1bae9eac5f3f838c8939e4de2c5458aba001). By default, relative timeouts are now
  53. // counted against CLOCK_REALTIME (a breaking change), but the caller can now supply the timeout in the form of struct _umtx_time, which allows for
  54. // specifying flags (where UMTX_ABSTIME means absolute timeout) and the clock id. Presumably, all clocks supported by clock_gettime are supported
  55. // by this new API. Whether the caller is using this new API or the legacy API with a relative timeout in struct timespec is indicated by the uaddr
  56. // argument of _umtx_op, which must be the size of the timeout struct casted to a pointer. Previous FreeBSD releases ignored this argument, so
  57. // new binaries running on old FreeBSD versions will silently misbehave (and old binaries on new FreeBSD as well, due to the CLOCK_REALTIME change).
  58. #if defined(UMTX_OP_WAIT_UINT) || defined(UMTX_OP_WAIT)
//! Common implementation of the waiting/notifying atomic operations on top of the FreeBSD _umtx_op syscall.
//!
//! \tparam Base     The underlying atomic operations implementation; provides \c load() and \c storage_type.
//! \tparam UmtxOps  Traits type providing the \c wait_op and \c wake_op operation codes to pass to _umtx_op.
template< typename Base, typename UmtxOps >
struct wait_operations_freebsd_umtx_common :
    public Base
{
    using base_type = Base;
    using storage_type = typename base_type::storage_type;

    //! _umtx_op is always available at runtime when this header is selected
    static constexpr bool always_has_native_wait_notify = true;

private:
    using umtx_ops = UmtxOps;

public:
    static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) noexcept
    {
        return true;
    }

    //! Blocks until the value in \a storage differs from \a old_val and returns the new value.
    //! The kernel may return from the wait spuriously (or because the value changed back),
    //! so the loop re-loads and re-checks the value after every wakeup.
    static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) noexcept
    {
        storage_type new_val = base_type::load(storage, order);
        while (new_val == old_val)
        {
            // A null uaddr/uaddr2 pair means an untimed (indefinite) wait
            _umtx_op(const_cast< storage_type* >(&storage), umtx_ops::wait_op, old_val, nullptr, nullptr);
            new_val = base_type::load(storage, order);
        }
        return new_val;
    }

#if defined(UMTX_ABSTIME)

private:
    //! Timed wait fallback for clocks that have no posix_clock_traits specialization.
    //! Repeatedly waits with a relative timeout counted against CLOCK_MONOTONIC, re-reading
    //! \c Clock::now() after each wakeup, until the value changes or \a timeout is reached.
    //!
    //! \param[out] timed_out  Set to \c true if the deadline expired before the value changed.
    template< typename Clock >
    static BOOST_FORCEINLINE storage_type wait_until_fallback
    (
        storage_type const volatile& storage,
        storage_type old_val,
        typename Clock::time_point timeout,
        typename Clock::time_point now,
        memory_order order,
        bool& timed_out
    ) noexcept(noexcept(Clock::now()))
    {
        _umtx_time umt{};
        // _flags is left zero-initialized, which makes the timeout relative (UMTX_ABSTIME not set)
        umt._clockid = CLOCK_MONOTONIC;
        storage_type new_val = base_type::load(storage, order);
        while (new_val == old_val)
        {
            const std::int64_t nsec = atomics::detail::chrono::ceil< std::chrono::nanoseconds >(timeout - now).count();
            if (nsec <= 0)
            {
                timed_out = true;
                break;
            }
            const std::int64_t sec = nsec / 1000000000;
            if (BOOST_LIKELY(sec <= (std::numeric_limits< decltype(umt._timeout.tv_sec) >::max)()))
            {
                umt._timeout.tv_sec = static_cast< decltype(umt._timeout.tv_sec) >(sec);
                umt._timeout.tv_nsec = static_cast< decltype(umt._timeout.tv_nsec) >(nsec % 1000000000);
            }
            else
            {
                // The remaining time does not fit in timespec::tv_sec; saturate the timeout
                // and let the outer loop re-evaluate the remaining time after the wait returns
                umt._timeout.tv_sec = (std::numeric_limits< decltype(umt._timeout.tv_sec) >::max)();
                umt._timeout.tv_nsec = static_cast< decltype(umt._timeout.tv_nsec) >(999999999);
            }
            // Passing sizeof(_umtx_time) as uaddr selects the struct _umtx_time timeout API
            // (as opposed to the legacy relative struct timespec); see the notes at the top of the file
            _umtx_op
            (
                const_cast< storage_type* >(&storage),
                umtx_ops::wait_op,
                old_val,
                reinterpret_cast< void* >(static_cast< uintptr_t >(sizeof(umt))),
                &umt
            );
            now = Clock::now();
            new_val = base_type::load(storage, order);
        }
        return new_val;
    }

    //! Timed wait with an absolute timeout prepared in \a umt (UMTX_ABSTIME set, clock id filled in).
    //! The kernel tracks the deadline, so no user-side time arithmetic is needed in the loop.
    //!
    //! \param[out] timed_out  Set to \c true only if _umtx_op reported ETIMEDOUT and the value
    //!                        still equals \a old_val (a concurrent change wins over the timeout).
    static BOOST_FORCEINLINE storage_type wait_until_abs_timeout
    (
        storage_type const volatile& storage,
        storage_type old_val,
        _umtx_time const& umt,
        memory_order order,
        bool& timed_out
    ) noexcept
    {
        storage_type new_val = base_type::load(storage, order);
        while (new_val == old_val)
        {
            int err = _umtx_op
            (
                const_cast< storage_type* >(&storage),
                umtx_ops::wait_op,
                old_val,
                reinterpret_cast< void* >(static_cast< uintptr_t >(sizeof(umt))),
                const_cast< _umtx_time* >(&umt)
            );
            if (err < 0)
            {
                err = errno;
                if (err == ETIMEDOUT)
                {
                    new_val = base_type::load(storage, order);
                    timed_out = new_val == old_val;
                    break;
                }
            }
            new_val = base_type::load(storage, order);
        }
        return new_val;
    }

    //! Dispatch for clocks without posix_clock_traits: falls back to the relative-timeout loop
    template< typename Clock >
    static BOOST_FORCEINLINE storage_type wait_until_dispatch
    (
        storage_type const volatile& storage,
        storage_type old_val,
        typename Clock::time_point timeout,
        memory_order order,
        bool& timed_out,
        std::false_type
    ) noexcept(noexcept(Clock::now()))
    {
        return wait_until_fallback< Clock >(storage, old_val, timeout, Clock::now(), order, timed_out);
    }

    //! Dispatch for clocks with posix_clock_traits: converts the time point to an absolute
    //! timespec against the clock's POSIX clock id and lets the kernel handle the deadline
    template< typename Clock >
    static BOOST_FORCEINLINE storage_type wait_until_dispatch
    (
        storage_type const volatile& storage,
        storage_type old_val,
        typename Clock::time_point timeout,
        memory_order order,
        bool& timed_out,
        std::true_type
    ) noexcept
    {
        _umtx_time umt{};
        umt._timeout = posix_clock_traits< Clock >::to_timespec(timeout);
        if (BOOST_LIKELY(umt._timeout.tv_sec >= 0))
        {
            umt._flags = UMTX_ABSTIME;
            umt._clockid = posix_clock_traits< Clock >::clock_id;
            return wait_until_abs_timeout(storage, old_val, umt, order, timed_out);
        }
        else
        {
            // A negative tv_sec means the deadline precedes the clock's epoch, i.e. it has
            // already passed; report a timeout without entering the kernel, unless the value changed
            storage_type new_val = base_type::load(storage, order);
            timed_out = new_val == old_val;
            return new_val;
        }
    }

public:
    //! Blocks until the value in \a storage differs from \a old_val or \a timeout is reached.
    //! Selects the kernel-side absolute timeout path when the clock has posix_clock_traits,
    //! otherwise the user-side relative timeout loop.
    template< typename Clock, typename Duration >
    static BOOST_FORCEINLINE storage_type wait_until
    (
        storage_type const volatile& storage,
        storage_type old_val,
        std::chrono::time_point< Clock, Duration > timeout,
        memory_order order,
        bool& timed_out
    ) noexcept(noexcept(wait_until_dispatch< Clock >(
        storage, old_val, timeout, order, timed_out, std::integral_constant< bool, has_posix_clock_traits< Clock >::value >())))
    {
        return wait_until_dispatch< Clock >(storage, old_val, timeout, order, timed_out, std::integral_constant< bool, has_posix_clock_traits< Clock >::value >());
    }

    //! Blocks until the value in \a storage differs from \a old_val or the relative \a timeout elapses.
    //! Converts the relative timeout to an absolute CLOCK_MONOTONIC deadline so that restarted waits
    //! (after spurious or unrelated wakeups) do not extend the total waiting time.
    template< typename Rep, typename Period >
    static BOOST_FORCEINLINE storage_type wait_for
    (
        storage_type const volatile& storage,
        storage_type old_val,
        std::chrono::duration< Rep, Period > timeout,
        memory_order order,
        bool& timed_out
    ) noexcept
    {
        if (BOOST_LIKELY(timeout.count() >= 0))
        {
            _umtx_time umt{};
            if (BOOST_LIKELY(clock_gettime(CLOCK_MONOTONIC, &umt._timeout) == 0))
            {
                // Compute now + timeout in 64-bit arithmetic to detect tv_sec overflow below
                const std::int64_t nsec = static_cast< std::int64_t >(umt._timeout.tv_nsec) + atomics::detail::chrono::ceil< std::chrono::nanoseconds >(timeout).count();
                const std::int64_t sec = static_cast< std::int64_t >(umt._timeout.tv_sec) + nsec / 1000000000;
                if (BOOST_LIKELY(sec <= (std::numeric_limits< decltype(timespec::tv_sec) >::max)()))
                {
                    umt._timeout.tv_sec = static_cast< decltype(umt._timeout.tv_sec) >(sec);
                    umt._timeout.tv_nsec = static_cast< decltype(umt._timeout.tv_nsec) >(nsec % 1000000000);
                    umt._flags = UMTX_ABSTIME;
                    umt._clockid = CLOCK_MONOTONIC;
                    return wait_until_abs_timeout(storage, old_val, umt, order, timed_out);
                }
            }
        }
        // clock_gettime failed, the deadline overflowed tv_sec, or the timeout is negative:
        // fall back to the user-side relative timeout loop driven by std::chrono::steady_clock
        const std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
        return wait_until_fallback< std::chrono::steady_clock >(storage, old_val, now + timeout, now, order, timed_out);
    }

#else // defined(UMTX_ABSTIME)

private:
    //! Timed wait for pre-10.0 FreeBSD, where _umtx_op only accepts a relative struct timespec
    //! timeout. Recomputes the remaining time against \c Clock after every wakeup.
    //!
    //! \param[out] timed_out  Set to \c true if the deadline expired before the value changed.
    template< typename Clock >
    static BOOST_FORCEINLINE storage_type wait_until_impl
    (
        storage_type const volatile& storage,
        storage_type old_val,
        typename Clock::time_point timeout,
        typename Clock::time_point now,
        memory_order order,
        bool& timed_out
    ) noexcept(noexcept(Clock::now()))
    {
        timespec ts{};
        storage_type new_val = base_type::load(storage, order);
        while (new_val == old_val)
        {
            const std::int64_t nsec = atomics::detail::chrono::ceil< std::chrono::nanoseconds >(timeout - now).count();
            if (nsec <= 0)
            {
                timed_out = true;
                break;
            }
            const std::int64_t sec = nsec / 1000000000;
            if (BOOST_LIKELY(sec <= (std::numeric_limits< decltype(ts.tv_sec) >::max)()))
            {
                ts.tv_sec = static_cast< decltype(ts.tv_sec) >(sec);
                ts.tv_nsec = static_cast< decltype(ts.tv_nsec) >(nsec % 1000000000);
            }
            else
            {
                // Remaining time does not fit in timespec; saturate and let the loop re-check
                ts.tv_sec = (std::numeric_limits< decltype(ts.tv_sec) >::max)();
                ts.tv_nsec = static_cast< decltype(ts.tv_nsec) >(999999999);
            }
            _umtx_op(const_cast< storage_type* >(&storage), umtx_ops::wait_op, old_val, nullptr, &ts);
            now = Clock::now();
            new_val = base_type::load(storage, order);
        }
        return new_val;
    }

public:
    //! Blocks until the value in \a storage differs from \a old_val or \a timeout is reached
    template< typename Clock, typename Duration >
    static BOOST_FORCEINLINE storage_type wait_until
    (
        storage_type const volatile& storage,
        storage_type old_val,
        std::chrono::time_point< Clock, Duration > timeout,
        memory_order order,
        bool& timed_out
    ) noexcept(noexcept(Clock::now()))
    {
        return wait_until_impl< Clock >(storage, old_val, timeout, Clock::now(), order, timed_out);
    }

    //! Blocks until the value in \a storage differs from \a old_val or the relative \a timeout elapses
    template< typename Rep, typename Period >
    static BOOST_FORCEINLINE storage_type wait_for
    (
        storage_type const volatile& storage,
        storage_type old_val,
        std::chrono::duration< Rep, Period > timeout,
        memory_order order,
        bool& timed_out
    ) noexcept
    {
        const std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
        return wait_until_impl< std::chrono::steady_clock >(storage, old_val, now + timeout, now, order, timed_out);
    }

#endif // defined(UMTX_ABSTIME)

    //! Wakes at most one thread blocked in a wait on \a storage
    static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) noexcept
    {
        _umtx_op(const_cast< storage_type* >(&storage), umtx_ops::wake_op, 1u, nullptr, nullptr);
    }

    //! Wakes all threads blocked in a wait on \a storage. The wake count is INT_MAX
    //! ((~0u) >> 1), i.e. effectively unlimited.
    static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) noexcept
    {
        _umtx_op(const_cast< storage_type* >(&storage), umtx_ops::wake_op, (~static_cast< unsigned int >(0u)) >> 1u, nullptr, nullptr);
    }
};
  324. #if defined(UMTX_OP_WAIT_UINT)
  325. template< bool Interprocess >
  326. struct uint_umtx_ops
  327. {
  328. #if defined(UMTX_OP_WAIT_UINT_PRIVATE) && defined(UMTX_OP_WAKE_PRIVATE)
  329. static constexpr int wait_op = Interprocess ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE;
  330. static constexpr int wake_op = Interprocess ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE;
  331. #else
  332. static constexpr int wait_op = UMTX_OP_WAIT_UINT;
  333. static constexpr int wake_op = UMTX_OP_WAKE;
  334. #endif
  335. };
//! Waiting/notifying operations specialization for int-sized atomics, backed by _umtx_op
template< typename Base, bool Interprocess >
struct wait_operations< Base, sizeof(unsigned int), true, Interprocess > :
    public wait_operations_freebsd_umtx_common< Base, uint_umtx_ops< Interprocess > >
{
};
  341. #endif // defined(UMTX_OP_WAIT_UINT)
  342. #if defined(UMTX_OP_WAIT) && (!defined(UMTX_OP_WAIT_UINT) || BOOST_ATOMIC_DETAIL_SIZEOF_INT < BOOST_ATOMIC_DETAIL_SIZEOF_LONG)
//! _umtx_op operation codes for long-sized futexes.
//! Note: there are no process-private variants of UMTX_OP_WAIT, so the shared
//! operations are used regardless of the Interprocess flag.
struct ulong_umtx_ops
{
    static constexpr int wait_op = UMTX_OP_WAIT;
    static constexpr int wake_op = UMTX_OP_WAKE;
};
//! Waiting/notifying operations specialization for long-sized atomics, backed by _umtx_op.
//! Only defined when long is wider than int (or UMTX_OP_WAIT_UINT is unavailable),
//! so it does not clash with the int-sized specialization above.
template< typename Base, bool Interprocess >
struct wait_operations< Base, sizeof(unsigned long), true, Interprocess > :
    public wait_operations_freebsd_umtx_common< Base, ulong_umtx_ops >
{
};
  353. #endif // defined(UMTX_OP_WAIT) && (!defined(UMTX_OP_WAIT_UINT) || BOOST_ATOMIC_DETAIL_SIZEOF_INT < BOOST_ATOMIC_DETAIL_SIZEOF_LONG)
  354. #endif // defined(UMTX_OP_WAIT_UINT) || defined(UMTX_OP_WAIT)
  355. } // namespace detail
  356. } // namespace atomics
  357. } // namespace boost
  358. #include <boost/atomic/detail/footer.hpp>
  359. #endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_FREEBSD_UMTX_HPP_INCLUDED_