fca.hpp 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927
  1. // Copyright (C) 2022-2025 Joaquin M Lopez Munoz.
  2. // Copyright (C) 2022 Christian Mazakas
  3. //
  4. // Distributed under the Boost Software License, Version 1.0. (See accompanying
  5. // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. #ifndef BOOST_UNORDERED_DETAIL_FCA_HPP
  7. #define BOOST_UNORDERED_DETAIL_FCA_HPP
  8. /*
  9. The general structure of the fast closed addressing implementation is that we
  10. use straight-forward separate chaining (i.e. each bucket contains its own linked
  11. list) and then improve iteration time by adding an array of "bucket groups".
  12. A bucket group is a constant-width view into a subsection of the buckets array,
  13. containing a bitmask that indicates which one of the buckets in the subsection
  14. contains a list of nodes. This allows the code to test N buckets for occupancy
  15. in a single operation. Additional speed can be found by inter-linking occupied
  16. bucket groups with one another in a doubly-linked list. To this end, large
  17. swathes of the bucket groups array no longer need to be iterated and have their
  18. bitmasks examined for occupancy.
  19. A bucket group iterator contains a pointer to a bucket group along with a
  20. pointer into the buckets array. The iterator's bucket pointer is guaranteed to
  21. point to a bucket within the bucket group's view of the array. To advance the
  22. iterator, we need to determine if we need to skip to the next bucket group or
  23. simply move to the next occupied bucket as denoted by the bitmask.
  24. To accomplish this, we perform something roughly equivalent to this:
  25. ```
  26. bucket_iterator itb = ...
  27. bucket_pointer p = itb.p
  28. bucket_group_pointer pbg = itb.pbg
  29. offset = p - pbg->buckets
  30. // because we wish to see if the _next_ bit in the mask is occupied, we'll
  31. // generate a testing mask from the current offset + 1
  32. //
  33. testing_mask = reset_first_bits(offset + 1)
  34. n = ctz(pbg->bitmask & testing_mask)
  35. if (n < N) {
  36. p = pbg->buckets + n
  37. } else {
  38. pbg = pbg->next
  39. p = pbg->buckets + ctz(pbg->bitmask)
  40. }
  41. ```
  42. `reset_first_bits` yields an unsigned integral with the first n bits set to 0
  43. and then by counting the number of trailing zeroes when AND'd against the bucket
  44. group's bitmask, we can derive the offset into the buckets array. When the
  45. calculated offset is equal to N, we know we've reached the end of a bucket group
  46. and we can advance to the next one.
  47. This is a rough explanation for how iterator incrementation should work for a
  48. fixed width size of N as 3 for the bucket groups
  49. ```
  50. N = 3
  51. p = buckets
  52. pbg->bitmask = 0b101
  53. pbg->buckets = buckets
  54. offset = p - pbg->buckets // => 0
  55. testing_mask = reset_first_bits(offset + 1) // reset_first_bits(1) => 0b110
  56. x = bitmask & testing_mask // => 0b101 & 0b110 => 0b100
  57. ctz(x) // ctz(0b100) => 2
  58. // 2 < 3
  59. => p = pbg->buckets + 2
  60. // increment again...
  61. offset = p - pbg->buckets // => 2
  62. testing_mask = reset_first_bits(offset + 1) // reset_first_bits(3) => 0b000
  63. bitmask & testing_mask // 0b101 & 0b000 => 0b000
  64. ctz(0b000) => 3
  65. // 3 < 3 is false now
  66. pbg = pbg->next
  67. initial_offset = ctz(pbg->bitmask)
  68. p = pbg->buckets + initial_offset
  69. ```
  70. For `size_` number of buckets, there are `1 + (size_ / N)` bucket groups where
  71. `N` is the width of a bucket group, determined at compile-time.
  72. We allocate space for `size_ + 1` buckets, using the last one as a dummy bucket
  73. which is kept permanently empty so it can act as a sentinel value in the
  74. implementation of `iterator end();`. We set the last bucket group to act as a
  75. sentinel.
  76. ```
  77. num_groups = size_ / N + 1
  78. groups = allocate(num_groups)
  79. pbg = groups + (num_groups - 1)
  80. // not guaranteed to point to exactly N buckets
  81. pbg->buckets = buckets + N * (size_ / N)
  82. // this marks the true end of the bucket array
pbg->bitmask = set_bit(size_ % N)
  84. // links in on itself
  85. pbg->next = pbg->prev = pbg
  86. ```
  87. To this end, we can devise a safe iteration scheme while also creating a useful
  88. sentinel to use as the end iterator.
  89. Otherwise, usage of the data structure is relatively straight-forward compared
  90. to normal separate chaining implementations.
  91. */
  92. #include <boost/unordered/detail/prime_fmod.hpp>
  93. #include <boost/unordered/detail/serialize_tracked_address.hpp>
  94. #include <boost/unordered/detail/opt_storage.hpp>
  95. #include <boost/assert.hpp>
  96. #include <boost/core/allocator_access.hpp>
  97. #include <boost/core/bit.hpp>
  98. #include <boost/core/empty_value.hpp>
  99. #include <boost/core/invoke_swap.hpp>
  100. #include <boost/core/no_exceptions_support.hpp>
  101. #include <boost/core/serialization.hpp>
  102. #include <boost/cstdint.hpp>
  103. #include <boost/config.hpp>
  104. #include <iterator>
  105. namespace boost {
  106. namespace unordered {
  107. namespace detail {
template <class ValueType, class VoidPtr> struct node;

// Traits giving access to node::value_type and node::pointer while
// node<ValueType, VoidPtr> is still an incomplete type (the node struct
// itself needs these aliases before its own definition is complete).
template<class Node> struct node_value_type_impl;

template <class ValueType, class VoidPtr>
struct node_value_type_impl<node<ValueType, VoidPtr>>
{
  typedef ValueType type;
};

// Convenience alias: the value type stored by a given node type.
template<class Node> using node_value_type =
  typename node_value_type_impl<Node>::type;

template<class Node> struct node_pointer_impl;

template <class ValueType, class VoidPtr>
struct node_pointer_impl<node<ValueType, VoidPtr>>
{
  // Rebind the allocator's void pointer to a pointer-to-node, so fancy
  // pointers (e.g. offset pointers) are supported throughout.
  typedef typename boost::pointer_traits<VoidPtr>::template rebind_to<
    node<ValueType, VoidPtr>>::type type;
};

// Convenience alias: the (possibly fancy) pointer type to a given node.
template<class Node> using node_pointer =
  typename node_pointer_impl<Node>::type;
  127. template <class ValueType, class VoidPtr> struct node
  128. {
  129. typedef node_value_type<node> value_type;
  130. typedef detail::node_pointer<node> node_pointer;
  131. node_pointer next;
  132. opt_storage<value_type> buf;
  133. node() noexcept : next(), buf() {}
  134. value_type* value_ptr() noexcept
  135. {
  136. return buf.address();
  137. }
  138. value_type& value() noexcept
  139. {
  140. return *buf.address();
  141. }
  142. };
  143. template <class Node, class VoidPtr> struct bucket
  144. {
  145. typedef typename boost::pointer_traits<VoidPtr>::template rebind_to<
  146. Node>::type node_pointer;
  147. typedef typename boost::pointer_traits<VoidPtr>::template rebind_to<
  148. bucket>::type bucket_pointer;
  149. node_pointer next;
  150. bucket() noexcept : next() {}
  151. };
  152. template <class Bucket> struct bucket_group
  153. {
  154. typedef typename Bucket::bucket_pointer bucket_pointer;
  155. typedef
  156. typename boost::pointer_traits<bucket_pointer>::template rebind_to<
  157. bucket_group>::type bucket_group_pointer;
  158. BOOST_STATIC_CONSTANT(std::size_t, N = sizeof(std::size_t) * CHAR_BIT);
  159. bucket_pointer buckets;
  160. std::size_t bitmask;
  161. bucket_group_pointer next, prev;
  162. bucket_group() noexcept : buckets(), bitmask(0), next(), prev() {}
  163. ~bucket_group() {}
  164. };
  165. inline std::size_t set_bit(std::size_t n) { return std::size_t(1) << n; }
  166. inline std::size_t reset_bit(std::size_t n)
  167. {
  168. return ~(std::size_t(1) << n);
  169. }
  170. inline std::size_t reset_first_bits(std::size_t n) // n>0
  171. {
  172. return ~(~(std::size_t(0)) >> (sizeof(std::size_t) * 8 - n));
  173. }
// Iterator over occupied buckets. Carries both a pointer to the current
// bucket and a pointer to the bucket group whose view contains it; the
// group's occupancy bitmask lets increment() skip unoccupied buckets a
// word at a time instead of probing each bucket individually.
template <class Bucket> struct grouped_bucket_iterator
{
public:
  typedef typename Bucket::bucket_pointer bucket_pointer;
  typedef
    typename boost::pointer_traits<bucket_pointer>::template rebind_to<
      bucket_group<Bucket> >::type bucket_group_pointer;

  typedef Bucket value_type;
  typedef typename boost::pointer_traits<bucket_pointer>::difference_type
    difference_type;
  typedef Bucket& reference;
  typedef Bucket* pointer;
  typedef std::forward_iterator_tag iterator_category;

private:
  bucket_pointer p;         // current bucket (invariant: within pbg's view)
  bucket_group_pointer pbg; // group viewing p; may be unset for end()

public:
  grouped_bucket_iterator() : p(), pbg() {}

  reference operator*() const noexcept { return dereference(); }
  pointer operator->() const noexcept { return boost::to_address(p); }

  grouped_bucket_iterator& operator++() noexcept
  {
    increment();
    return *this;
  }

  grouped_bucket_iterator operator++(int) noexcept
  {
    grouped_bucket_iterator old = *this;
    increment();
    return old;
  }

  bool operator==(grouped_bucket_iterator const& other) const noexcept
  {
    return equal(other);
  }

  bool operator!=(grouped_bucket_iterator const& other) const noexcept
  {
    return !equal(other);
  }

private:
  template <typename, typename, typename>
  friend class grouped_bucket_array;

  // Group width (bits per bitmask word).
  BOOST_STATIC_CONSTANT(std::size_t, N = bucket_group<Bucket>::N);

  grouped_bucket_iterator(bucket_pointer p_, bucket_group_pointer pbg_)
    : p(p_), pbg(pbg_)
  {
  }

  Bucket& dereference() const noexcept { return *p; }

  // Equality compares only the bucket pointer; pbg is derived state.
  bool equal(const grouped_bucket_iterator& x) const noexcept
  {
    return p == x.p;
  }

  // Advance to the next occupied bucket. Mask off bits at or below the
  // current offset and count trailing zeros to find the next set bit;
  // if none remains in this group (n == N), hop to the next occupied
  // group via the intrusive list and take its first set bit.
  void increment() noexcept
  {
    std::size_t const offset = static_cast<std::size_t>(p - pbg->buckets);
    std::size_t n = std::size_t(boost::core::countr_zero(
      pbg->bitmask & reset_first_bits(offset + 1)));
    if (n < N) {
      p = pbg->buckets + static_cast<difference_type>(n);
    } else {
      pbg = pbg->next;
      std::ptrdiff_t x = boost::core::countr_zero(pbg->bitmask);
      p = pbg->buckets + x;
    }
  }

  // Serialization support: register both pointers so tracked addresses
  // can be restored on load.
  template <typename Archive>
  friend void serialization_track(
    Archive& ar, grouped_bucket_iterator const& x)
  {
    // requires: not at end() position
    track_address(ar, x.p);
    track_address(ar, x.pbg);
  }

  friend class boost::serialization::access;

  template <typename Archive> void serialize(Archive& ar, unsigned int)
  {
    // requires: not at end() position
    serialize_tracked_address(ar, p);
    serialize_tracked_address(ar, pbg);
  }
};
  255. template <class Node> struct const_grouped_local_bucket_iterator;
  256. template <class Node> struct grouped_local_bucket_iterator
  257. {
  258. typedef detail::node_pointer<Node> node_pointer;
  259. public:
  260. typedef detail::node_value_type<Node> value_type;
  261. typedef value_type element_type;
  262. typedef value_type* pointer;
  263. typedef value_type& reference;
  264. typedef std::ptrdiff_t difference_type;
  265. typedef std::forward_iterator_tag iterator_category;
  266. grouped_local_bucket_iterator() : p() {}
  267. reference operator*() const noexcept { return dereference(); }
  268. pointer operator->() const noexcept
  269. {
  270. return std::addressof(dereference());
  271. }
  272. grouped_local_bucket_iterator& operator++() noexcept
  273. {
  274. increment();
  275. return *this;
  276. }
  277. grouped_local_bucket_iterator operator++(int) noexcept
  278. {
  279. grouped_local_bucket_iterator old = *this;
  280. increment();
  281. return old;
  282. }
  283. bool operator==(
  284. grouped_local_bucket_iterator const& other) const noexcept
  285. {
  286. return equal(other);
  287. }
  288. bool operator!=(
  289. grouped_local_bucket_iterator const& other) const noexcept
  290. {
  291. return !equal(other);
  292. }
  293. private:
  294. template <typename, typename, typename>
  295. friend class grouped_bucket_array;
  296. template <class> friend struct const_grouped_local_bucket_iterator;
  297. grouped_local_bucket_iterator(node_pointer p_) : p(p_) {}
  298. value_type& dereference() const noexcept { return p->value(); }
  299. bool equal(const grouped_local_bucket_iterator& x) const noexcept
  300. {
  301. return p == x.p;
  302. }
  303. void increment() noexcept { p = p->next; }
  304. node_pointer p;
  305. };
  306. template <class Node> struct const_grouped_local_bucket_iterator
  307. {
  308. typedef detail::node_pointer<Node> node_pointer;
  309. public:
  310. typedef detail::node_value_type<Node> const value_type;
  311. typedef value_type const element_type;
  312. typedef value_type const* pointer;
  313. typedef value_type const& reference;
  314. typedef std::ptrdiff_t difference_type;
  315. typedef std::forward_iterator_tag iterator_category;
  316. const_grouped_local_bucket_iterator() : p() {}
  317. const_grouped_local_bucket_iterator(
  318. grouped_local_bucket_iterator<Node> it)
  319. : p(it.p)
  320. {
  321. }
  322. reference operator*() const noexcept { return dereference(); }
  323. pointer operator->() const noexcept
  324. {
  325. return std::addressof(dereference());
  326. }
  327. const_grouped_local_bucket_iterator& operator++() noexcept
  328. {
  329. increment();
  330. return *this;
  331. }
  332. const_grouped_local_bucket_iterator operator++(int) noexcept
  333. {
  334. const_grouped_local_bucket_iterator old = *this;
  335. increment();
  336. return old;
  337. }
  338. bool operator==(
  339. const_grouped_local_bucket_iterator const& other) const noexcept
  340. {
  341. return equal(other);
  342. }
  343. bool operator!=(
  344. const_grouped_local_bucket_iterator const& other) const noexcept
  345. {
  346. return !equal(other);
  347. }
  348. private:
  349. template <typename, typename, typename>
  350. friend class grouped_bucket_array;
  351. const_grouped_local_bucket_iterator(node_pointer p_) : p(p_) {}
  352. value_type& dereference() const noexcept { return p->value(); }
  353. bool equal(const const_grouped_local_bucket_iterator& x) const noexcept
  354. {
  355. return p == x.p;
  356. }
  357. void increment() noexcept { p = p->next; }
  358. node_pointer p;
  359. };
  360. template <class T> struct span
  361. {
  362. T* begin() const noexcept { return data; }
  363. T* end() const noexcept { return data + size; }
  364. T* data;
  365. std::size_t size;
  366. span(T* data_, std::size_t size_) : data(data_), size(size_) {}
  367. };
// The bucket array for fast closed addressing: `size_ + 1` buckets (the
// last one a permanently-empty sentinel backing end()) plus
// `size_ / N + 1` bucket groups, the last of which is a self-linked
// sentinel group. Derives from empty_value to apply EBO to the node
// allocator. See the file-header comment for the overall design.
template <class Bucket, class Allocator, class SizePolicy>
class grouped_bucket_array
    : boost::empty_value<typename boost::allocator_rebind<Allocator,
          node<typename boost::allocator_value_type<Allocator>::type,
              typename boost::allocator_void_pointer<Allocator>::type> >::
          type>
{
  typedef typename boost::allocator_value_type<Allocator>::type
    allocator_value_type;
  typedef
    typename boost::allocator_void_pointer<Allocator>::type void_pointer;
  typedef typename boost::allocator_difference_type<Allocator>::type
    difference_type;

public:
  typedef typename boost::allocator_rebind<Allocator,
    node<allocator_value_type, void_pointer> >::type node_allocator_type;

  typedef node<allocator_value_type, void_pointer> node_type;
  typedef typename boost::allocator_pointer<node_allocator_type>::type
    node_pointer;
  typedef SizePolicy size_policy;

private:
  // Rebound allocators/pointers for buckets and groups; only the node
  // allocator is stored, the others are materialized on demand.
  typedef typename boost::allocator_rebind<Allocator, Bucket>::type
    bucket_allocator_type;
  typedef typename boost::allocator_pointer<bucket_allocator_type>::type
    bucket_pointer;
  typedef boost::pointer_traits<bucket_pointer> bucket_pointer_traits;

  typedef bucket_group<Bucket> group;
  typedef typename boost::allocator_rebind<Allocator, group>::type
    group_allocator_type;
  typedef typename boost::allocator_pointer<group_allocator_type>::type
    group_pointer;
  typedef typename boost::pointer_traits<group_pointer>
    group_pointer_traits;

public:
  typedef Bucket value_type;
  typedef Bucket bucket_type;
  typedef std::size_t size_type;
  typedef Allocator allocator_type;
  typedef grouped_bucket_iterator<Bucket> iterator;
  typedef grouped_local_bucket_iterator<node_type> local_iterator;
  typedef const_grouped_local_bucket_iterator<node_type>
    const_local_iterator;

private:
  std::size_t size_index_, size_; // size policy index and bucket count
  bucket_pointer buckets;         // size_ + 1 buckets (last is sentinel)
  group_pointer groups;           // size_ / N + 1 groups (last is sentinel)

public:
  // Smallest bucket count >= num_buckets admitted by the size policy
  // (0 stays 0: an empty array allocates nothing).
  static std::size_t bucket_count_for(std::size_t num_buckets)
  {
    if (num_buckets == 0) {
      return 0;
    }
    return size_policy::size(size_policy::size_index(num_buckets));
  }

  grouped_bucket_array()
      : empty_value<node_allocator_type>(
          empty_init_t(), node_allocator_type()),
        size_index_(0), size_(0), buckets(), groups()
  {
  }

  // Allocates and wires up the bucket and group arrays for at least n
  // buckets (rounded by the size policy); n == 0 leaves the array empty.
  grouped_bucket_array(size_type n, const Allocator& al)
      : empty_value<node_allocator_type>(
          empty_init_t(), node_allocator_type(al)),
        size_index_(0), size_(0), buckets(), groups()
  {
    if (n == 0) {
      return;
    }

    size_index_ = size_policy::size_index(n);
    size_ = size_policy::size(size_index_);

    bucket_allocator_type bucket_alloc = this->get_bucket_allocator();
    group_allocator_type group_alloc = this->get_group_allocator();

    size_type const num_buckets = buckets_len();
    size_type const num_groups = groups_len();

    buckets = boost::allocator_allocate(bucket_alloc, num_buckets);
    BOOST_TRY
    {
      // If the group allocation throws we must release the bucket
      // storage; the placement-news below use noexcept default ctors
      // and cannot throw.
      groups = boost::allocator_allocate(group_alloc, num_groups);

      bucket_type* pb = boost::to_address(buckets);
      for (size_type i = 0; i < num_buckets; ++i) {
        new (pb + i) bucket_type();
      }

      group* pg = boost::to_address(groups);
      for (size_type i = 0; i < num_groups; ++i) {
        new (pg + i) group();
      }
    }
    BOOST_CATCH(...)
    {
      boost::allocator_deallocate(bucket_alloc, buckets, num_buckets);
      BOOST_RETHROW
    }
    BOOST_CATCH_END

    // Configure the last group as the self-linked sentinel: it views
    // the tail of the bucket array and marks the dummy end bucket as
    // permanently "occupied" so iteration stops there.
    size_type const N = group::N;
    group_pointer pbg =
      groups + static_cast<difference_type>(num_groups - 1);

    pbg->buckets =
      buckets + static_cast<difference_type>(N * (size_ / N));
    pbg->bitmask = set_bit(size_ % N);
    pbg->next = pbg->prev = pbg;
  }

  ~grouped_bucket_array() { this->deallocate(); }

  grouped_bucket_array(grouped_bucket_array const&) = delete;
  grouped_bucket_array& operator=(grouped_bucket_array const&) = delete;

  // Move operations steal the arrays and leave the source empty.
  grouped_bucket_array(grouped_bucket_array&& other) noexcept
      : empty_value<node_allocator_type>(
          empty_init_t(), other.get_node_allocator()),
        size_index_(other.size_index_),
        size_(other.size_),
        buckets(other.buckets),
        groups(other.groups)
  {
    other.size_ = 0;
    other.size_index_ = 0;
    other.buckets = bucket_pointer();
    other.groups = group_pointer();
  }

  // Precondition: equal node allocators (asserted); the allocator is
  // not propagated here.
  grouped_bucket_array& operator=(grouped_bucket_array&& other) noexcept
  {
    BOOST_ASSERT(
      this->get_node_allocator() == other.get_node_allocator());

    if (this == std::addressof(other)) {
      return *this;
    }

    this->deallocate();
    size_index_ = other.size_index_;
    size_ = other.size_;

    buckets = other.buckets;
    groups = other.groups;

    other.size_index_ = 0;
    other.size_ = 0;
    other.buckets = bucket_pointer();
    other.groups = group_pointer();

    return *this;
  }

#if defined(BOOST_MSVC)
#pragma warning(push)
#pragma warning(disable : 4100) // unreferenced formal parameter (dtor calls)
#endif

  // Destroys and frees both arrays; idempotent (null pointers skipped).
  // Does NOT touch the nodes chained from the buckets: node lifetime is
  // managed by the owning container.
  void deallocate() noexcept
  {
    if (buckets) {
      size_type const num_buckets = buckets_len();
      bucket_type* pb = boost::to_address(buckets);
      (void)pb; // VS complains when dtor is trivial

      for (size_type i = 0; i < num_buckets; ++i) {
        (pb + i)->~bucket_type();
      }

      bucket_allocator_type bucket_alloc = this->get_bucket_allocator();
      boost::allocator_deallocate(bucket_alloc, buckets, num_buckets);

      buckets = bucket_pointer();
    }

    if (groups) {
      size_type const num_groups = groups_len();
      group* pg = boost::to_address(groups);
      (void)pg; // VS complains when dtor is trivial

      for (size_type i = 0; i < num_groups; ++i) {
        (pg + i)->~group();
      }

      group_allocator_type group_alloc = this->get_group_allocator();
      boost::allocator_deallocate(group_alloc, groups, num_groups);

      groups = group_pointer();
    }
  }

#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif

  void swap(grouped_bucket_array& other)
  {
    std::swap(size_index_, other.size_index_);
    std::swap(size_, other.size_);
    std::swap(buckets, other.buckets);
    std::swap(groups, other.groups);

    // Allocators are swapped only if POCS says so.
    swap_allocator_if_pocs(other);
  }

  node_allocator_type const& get_node_allocator() const
  {
    return empty_value<node_allocator_type>::get();
  }

  node_allocator_type& get_node_allocator()
  {
    return empty_value<node_allocator_type>::get();
  }

  // Rebound allocators built from the stored node allocator.
  bucket_allocator_type get_bucket_allocator() const
  {
    return bucket_allocator_type(this->get_node_allocator());
  }

  group_allocator_type get_group_allocator() const
  {
    return group_allocator_type(this->get_node_allocator());
  }

  Allocator get_allocator() const
  {
    return Allocator(this->get_node_allocator());
  }

  // Physical lengths: one extra sentinel bucket, one sentinel group.
  size_type buckets_len() const noexcept { return size_ + 1; }
  size_type groups_len() const noexcept { return size_ / group::N + 1; }

  void reset_allocator(Allocator const& allocator_)
  {
    this->get_node_allocator() = node_allocator_type(allocator_);
  }

  size_type bucket_count() const { return size_; }

  // First occupied bucket: start at the sentinel and increment once,
  // which follows the occupied-group list to the first set bit.
  iterator begin() const { return size_ == 0 ? end() : ++at(size_); }

  iterator end() const
  {
    // micro optimization: no need to return the bucket group
    // as end() is not incrementable
    iterator pbg;
    pbg.p =
      buckets + static_cast<difference_type>(this->buckets_len() - 1);
    return pbg;
  }

  // Local (per-bucket) iteration over bucket n's node chain.
  local_iterator begin(size_type n) const
  {
    if (size_ == 0) {
      return this->end(n);
    }

    return local_iterator(
      (buckets + static_cast<difference_type>(n))->next);
  }

  local_iterator end(size_type) const { return local_iterator(); }

  size_type capacity() const noexcept { return size_; }

  // Iterator positioned at bucket n (with its owning group resolved);
  // end() when the array is empty.
  iterator at(size_type n) const
  {
    if (size_ > 0) {
      std::size_t const N = group::N;

      iterator pbg(buckets + static_cast<difference_type>(n),
        groups + static_cast<difference_type>(n / N));

      return pbg;
    } else {
      return this->end();
    }
  }

  // Raw view of the size_ real buckets (excludes the sentinel).
  span<Bucket> raw()
  {
    BOOST_ASSERT(size_ == 0 || size_ < this->buckets_len());
    return span<Bucket>(boost::to_address(buckets), size_);
  }

  // Maps a hash value to a bucket index via the size policy.
  size_type position(std::size_t hash) const
  {
    return size_policy::position(hash, size_index_);
  }

  void clear()
  {
    this->deallocate();
    size_index_ = 0;
    size_ = 0;
  }

  // Called before inserting into itb's bucket: if the bucket was empty,
  // mark its bit in the group's bitmask, and if the whole group was
  // empty, splice the group into the occupied-group list (right after
  // the sentinel group).
  void append_bucket_group(iterator itb) noexcept
  {
    std::size_t const N = group::N;

    bool const is_empty_bucket = (!itb->next);
    if (is_empty_bucket) {
      bucket_pointer pb = itb.p;
      group_pointer pbg = itb.pbg;

      std::size_t n =
        static_cast<std::size_t>(boost::to_address(pb) - &buckets[0]);

      bool const is_empty_group = (!pbg->bitmask);
      if (is_empty_group) {
        size_type const num_groups = this->groups_len();
        group_pointer last_group =
          groups + static_cast<difference_type>(num_groups - 1);

        pbg->buckets =
          buckets + static_cast<difference_type>(N * (n / N));
        pbg->next = last_group->next;
        pbg->next->prev = pbg;
        pbg->prev = last_group;
        pbg->prev->next = pbg;
      }

      pbg->bitmask |= set_bit(n % N);
    }
  }

  // Pushes node p onto the front of itb's chain.
  void insert_node(iterator itb, node_pointer p) noexcept
  {
    this->append_bucket_group(itb);

    p->next = itb->next;
    itb->next = p;
  }

  // Inserts p after hint when given, otherwise at the chain's front.
  void insert_node_hint(
    iterator itb, node_pointer p, node_pointer hint) noexcept
  {
    this->append_bucket_group(itb);

    if (hint) {
      p->next = hint->next;
      hint->next = p;
    } else {
      p->next = itb->next;
      itb->next = p;
    }
  }

  // Unlinks node p from itb's chain (p must be in it); unmarks the
  // bucket if it becomes empty.
  void extract_node(iterator itb, node_pointer p) noexcept
  {
    node_pointer* pp = std::addressof(itb->next);
    while ((*pp) != p)
      pp = std::addressof((*pp)->next);
    *pp = p->next;
    if (!itb->next)
      unlink_bucket(itb);
  }

  // Unlinks the node *pp points at, given the link that precedes it.
  void extract_node_after(iterator itb, node_pointer* pp) noexcept
  {
    *pp = (*pp)->next;
    if (!itb->next)
      unlink_bucket(itb);
  }

  // Recomputes occupancy bits from scratch and removes now-empty groups
  // from the occupied-group list. Used after bulk node removal.
  void unlink_empty_buckets() noexcept
  {
    std::size_t const N = group::N;

    group_pointer pbg = groups,
                  last = groups + static_cast<difference_type>(
                             this->groups_len() - 1);

    for (; pbg != last; ++pbg) {
      if (!pbg->buckets) {
        continue;
      }

      for (std::size_t n = 0; n < N; ++n) {
        bucket_pointer bs = pbg->buckets;
        bucket_type& b = bs[static_cast<std::ptrdiff_t>(n)];
        if (!b.next)
          pbg->bitmask &= reset_bit(n);
      }

      if (!pbg->bitmask && pbg->next)
        unlink_group(pbg);
    }

    // do not check end bucket
    for (std::size_t n = 0; n < size_ % N; ++n) {
      if (!pbg->buckets[static_cast<std::ptrdiff_t>(n)].next)
        pbg->bitmask &= reset_bit(n);
    }
  }

  // Clears itb's occupancy bit; unlinks the group if it became empty.
  void unlink_bucket(iterator itb)
  {
    typename iterator::bucket_pointer p = itb.p;
    typename iterator::bucket_group_pointer pbg = itb.pbg;
    if (!(pbg->bitmask &=
          reset_bit(static_cast<std::size_t>(p - pbg->buckets))))
      unlink_group(pbg);
  }

private:
  // Removes pbg from the occupied-group list and nulls its links.
  void unlink_group(group_pointer pbg)
  {
    pbg->next->prev = pbg->prev;
    pbg->prev->next = pbg->next;
    pbg->prev = pbg->next = group_pointer();
  }

  // Tag-dispatch on propagate_on_container_swap.
  void swap_allocator_if_pocs(grouped_bucket_array& other)
  {
    using allocator_pocs =
      typename boost::allocator_propagate_on_container_swap<
        allocator_type>::type;
    swap_allocator_if_pocs(
      other, std::integral_constant<bool, allocator_pocs::value>());
  }

  void swap_allocator_if_pocs(
    grouped_bucket_array& other, std::true_type /* propagate */)
  {
    boost::core::invoke_swap(
      get_node_allocator(), other.get_node_allocator());
  }

  void swap_allocator_if_pocs(
    grouped_bucket_array&, std::false_type /* don't propagate */)
  {
  }
};
  732. } // namespace detail
  733. } // namespace unordered
  734. } // namespace boost
  735. #endif // BOOST_UNORDERED_DETAIL_FCA_HPP