rbtree_best_fit.hpp 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372
  1. //////////////////////////////////////////////////////////////////////////////
  2. //
  3. // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
  4. // Software License, Version 1.0. (See accompanying file
  5. // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. // See http://www.boost.org/libs/interprocess for documentation.
  8. //
  9. //////////////////////////////////////////////////////////////////////////////
  10. #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  11. #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  12. #ifndef BOOST_CONFIG_HPP
  13. # include <boost/config.hpp>
  14. #endif
  15. #
  16. #if defined(BOOST_HAS_PRAGMA_ONCE)
  17. # pragma once
  18. #endif
  19. #include <boost/interprocess/detail/config_begin.hpp>
  20. #include <boost/interprocess/detail/workaround.hpp>
  21. // interprocess
  22. #include <boost/interprocess/containers/allocation_type.hpp>
  23. #include <boost/interprocess/exceptions.hpp>
  24. #include <boost/interprocess/interprocess_fwd.hpp>
  25. #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
  26. #include <boost/interprocess/offset_ptr.hpp>
  27. #include <boost/interprocess/sync/scoped_lock.hpp>
  28. // interprocess/detail
  29. #include <boost/interprocess/detail/min_max.hpp>
  30. #include <boost/interprocess/detail/math_functions.hpp>
  31. #include <boost/interprocess/detail/type_traits.hpp>
  32. #include <boost/interprocess/detail/utilities.hpp>
  33. // container
  34. #include <boost/container/detail/multiallocation_chain.hpp>
  35. // container/detail
  36. #include <boost/container/detail/placement_new.hpp>
  37. // move/detail
  38. #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
  39. #include <boost/move/detail/force_ptr.hpp> //make_unsigned, alignment_of
  40. // intrusive
  41. #include <boost/intrusive/pointer_traits.hpp>
  42. #include <boost/intrusive/set.hpp>
  43. // other boost
  44. #include <boost/assert.hpp>
  45. // std
  46. #include <climits>
  47. #include <cstring>
  48. //#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  49. //to maintain ABI compatible with the original version
  50. //ABI had to be updated to fix compatibility issues when
  51. //sharing shared memory between 32 and 64 bit processes.
  52. //!\file
  53. //!Describes a best-fit algorithm based in an intrusive red-black tree used to allocate
  54. //!objects in shared memory. This class is intended as a base class for single segment
  55. //!and multi-segment implementations.
  56. namespace boost {
  57. namespace interprocess {
  58. //!This class implements an algorithm that stores the free nodes in a red-black tree
  59. //!to have logarithmic search/insert times.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable: the object lives embedded at the start of the managed
   //segment, so copying/assigning it makes no sense.
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   //Segment-relative pointer to a block control structure
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type         block_ctrl_ptr;
   //Segment-relative char pointer, used to obtain difference_type below
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type               char_ptr;
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //Intrusive set hook embedded in every free block. normal_link is enough
   //because blocks are always explicitly unlinked before reuse.
   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type TreeHook;

   //Size bookkeeping shared by allocated and free blocks. Sizes are stored
   //in Alignment units; the top two bits of m_size are stolen for the
   //allocation flags, hence size_mask below.
   struct SizeHolder
   {
      //Mask covering the size bits (all bits except the two flag bits)
      static const size_type size_mask = size_type(-1) >> 2;
      //!Previous block's memory size (including block_ctrl
      //!header) in Alignment units. This field (UsableByPreviousChunk bytes)
      //!is OVERWRITTEN by the previous block if allocated (m_prev_allocated)
      size_type m_prev_size;
      //!This block's memory size (including block_ctrl
      //!header) in Alignment units
      size_type m_size : sizeof(size_type)*CHAR_BIT - 2;
      //1 if the previous contiguous block is allocated
      size_type m_prev_allocated : 1;
      //1 if this block is allocated
      size_type m_allocated : 1;
   };

   //!Block control structure
   struct block_ctrl
      : public SizeHolder
      //This tree hook is overwritten when this block is used
      , public TreeHook
   {
      block_ctrl()
      {
         this->SizeHolder::m_size = 0;
         this->SizeHolder::m_allocated = 0;
         this->SizeHolder::m_prev_allocated = 0;
      }

      //Blocks are ordered in the free multiset by size (best-fit search)
      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.SizeHolder::m_size < b.SizeHolder::m_size;  }
      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.SizeHolder::m_size == b.SizeHolder::m_size;  }
   };

   //Heterogeneous comparator: lets the tree be searched with a raw size
   //without building a dummy block_ctrl
   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, size_type size) const
      {  return block.m_size < size;  }
   };

   //!Shared mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type mutex_type;
   //Multiset of free blocks ordered by size
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type Imultiset;

   typedef typename Imultiset::iterator       imultiset_iterator;
   typedef typename Imultiset::const_iterator imultiset_const_iterator;

   //!This struct includes needed data and derives from
   //!mutex_type to allow EBO when using null mutex_type
   struct header_t : public mutex_type
   {
      Imultiset m_imultiset;
      //!The extra size required by the segment
      size_type m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      size_type m_allocated;
      //!The size of the memory segment
      size_type m_size;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
   //!offset that the allocator should not use at all.
   rbtree_best_fit           (size_type size, size_type extra_hdr_bytes);

   //!Destructor.
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is not more memory.
   //!Returned memory is aligned to Alignment bytes.
   void* allocate             (size_type nbytes);

   //!Deallocates previously allocated bytes
   void   deallocate          (void* addr);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //Experimental. Don't use

   //!Multiple element allocation, same size
   //!Experimental. Don't use
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   //!Experimental. Don't use
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element allocation, different size
   //!Experimental. Don't use
   void deallocate_many(multiallocation_chain &chain);

   template<class T>
   T* allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                         size_type& prefer_in_recvd_out_size, T*& reuse);

   void* raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_object,
                                size_type& prefer_in_recvd_out_size,
                                void*& reuse_ptr, size_type sizeof_object = 1);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Returns the size of the memory segment
   size_type get_size()  const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory()  const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases managed memory in
   //!extra_size bytes more
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true if success
   bool check_sanity();

   //!Returns the size of the buffer previously allocated pointed by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is not more memory.
   //!Alignment must be power of 2
   void* allocate_aligned     (size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //Offset (in bytes) of the first block counted from the object's address:
   //rbtree_best_fit header + extra_hdr_bytes, rounded up to Alignment
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   block_ctrl *priv_first_block();

   block_ctrl *priv_end_block();

   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                                 size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);

   //!Real allocation algorithm with min allocation option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Real expand function implementation
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Get a pointer of the "end" block from the first block of the segment
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Get a pointer of the "first" block from the end block of the segment
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Get pointer of the previous block (previous block must be free)
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Get the size in the tail of the previous block
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Check if this block is free (not allocated)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   {  return priv_mark_as_allocated_block(ptr);  }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the users
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   //Alignment of returned buffers: max_align_t by default, or the
   //user-provided MemAlignment template parameter when non-zero
   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::dtl::alignment_of
                  < ::boost::container::dtl::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask = (Alignment - 1);
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes  = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits  = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes   = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits   = EndCtrlBlockBytes/Alignment;
   static const size_type UsableByPreviousChunk = sizeof(size_type);

   //Make sure the maximum alignment is power of two
   BOOST_INTERPROCESS_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   public:
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};
  296. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  297. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  298. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  299. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  300. ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
  301. {
  302. size_type uint_this = (std::size_t)this_ptr;
  303. size_type main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
  304. size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
  305. size_type block1_off = aligned_main_hdr_end - uint_this;
  306. algo_impl_t::assert_alignment(aligned_main_hdr_end);
  307. algo_impl_t::assert_alignment(uint_this + block1_off);
  308. return block1_off;
  309. }
  310. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  311. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  312. priv_add_segment(void *addr, size_type segment_size)
  313. {
  314. //Check alignment
  315. algo_impl_t::check_alignment(addr);
  316. //Check size
  317. BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
  318. //Initialize the first big block and the "end" node
  319. block_ctrl *first_big_block = ::new(addr, boost_container_new_t()) block_ctrl;
  320. first_big_block->m_size = (segment_size/Alignment - EndCtrlBlockUnits) & block_ctrl::size_mask;
  321. BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
  322. //The "end" node is just a node of size 0 with the "end" bit set
  323. SizeHolder *end_block =
  324. ::new(reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment, boost_container_new_t()) SizeHolder;
  325. //This will overwrite the prev part of the "end" node
  326. priv_mark_as_free_block (first_big_block);
  327. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  328. first_big_block->m_prev_size = end_block->m_size =
  329. size_type(reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment) & block_ctrl::size_mask;
  330. #else
  331. first_big_block->m_prev_size = end_block->m_size =
  332. size_type(reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment & block_ctrl::size_mask;
  333. #endif
  334. end_block->m_allocated = 1;
  335. first_big_block->m_prev_allocated = 1;
  336. BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
  337. BOOST_ASSERT(priv_prev_block((block_ctrl*)end_block) == first_big_block);
  338. BOOST_ASSERT(priv_first_block() == first_big_block);
  339. BOOST_ASSERT(priv_end_block() == end_block);
  340. //Some check to validate the algorithm, since it makes some assumptions
  341. //to optimize the space wasted in bookkeeping:
  342. //Check that the sizes of the header are placed before the rbtree
  343. BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
  344. < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
  345. //Insert it in the intrusive containers
  346. m_header.m_imultiset.insert(*first_big_block);
  347. }
  348. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  349. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  350. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  351. ::priv_first_block()
  352. {
  353. const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  354. return move_detail::force_ptr<block_ctrl*>(reinterpret_cast<char*>(this) + block1_off);
  355. }
  356. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  357. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  358. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  359. ::priv_end_block()
  360. {
  361. const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  362. const size_type original_first_block_size = (m_header.m_size - block1_off)/Alignment - EndCtrlBlockUnits;
  363. block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
  364. (reinterpret_cast<char*>(this) + block1_off + original_first_block_size*Alignment);
  365. return end_block;
  366. }
  367. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  368. inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  369. rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
  370. {
  371. //Initialize the header
  372. m_header.m_allocated = 0;
  373. m_header.m_size = segment_size;
  374. m_header.m_extra_hdr_bytes = extra_hdr_bytes;
  375. //Now write calculate the offset of the first big block that will
  376. //cover the whole segment
  377. BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
  378. size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
  379. priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
  380. }
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //Intentionally empty: the algorithm owns no external resources, all its
   //state lives inside the managed segment. The commented asserts would
   //detect user leaks (memory still allocated at destruction time):
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}
//Extends the managed segment by extra_size bytes: moves the end sentinel to
//the new border, turns the old sentinel's storage into a regular block and
//frees it (coalescing with a trailing free block if any).
//NOTE(review): no lock is taken here — presumably the caller guarantees
//exclusive access while growing; confirm against segment manager callers.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
                                             reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least BlockCtrlBytes blocks to create a new block
   if((m_header.m_size - old_border_offset) < BlockCtrlBytes){
      return;
   }

   //Now create a new block between the old end and the new end
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //the last and first block are special:
   //new_end_block->m_size & first_block->m_prev_size store the absolute value
   //between them
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = size_type(reinterpret_cast<char*>(first_block) -
                                     reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   #else
   new_end_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
                                     reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
                                 reinterpret_cast<char*>(new_block))/Alignment & block_ctrl::size_mask;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   //Temporarily count the recovered bytes as allocated so the following
   //deallocation keeps m_header.m_allocated balanced
   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
//Shrinks the managed segment so it ends right after the last allocated byte:
//the trailing free block (if any) is removed from the tree and its storage
//becomes the new end sentinel. If the whole segment is one free block, a
//zero-sized allocation is made first so a minimal block remains.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //block_ctrl *old_end_block = priv_end_block(first_block);
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   //Check if no memory is allocated between the first and last block
   if(priv_next_block(first_block) == old_end_block){
      //If so check if we can allocate memory
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If not, return, we can't shrink
      if(!unique_buffer)
         return;
      //If we can, mark the position just after the new allocation as the new end
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If memory is allocated, check if the last block is allocated
      if(priv_is_prev_allocated(old_end_block))
         return;
      //If not, mark last block after the free block
      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase block from the free tree, since we will erase it
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   //New border: the start of the trailing free block plus the sentinel size
   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
                                                reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write new end block attributes (end/first store the distance between
   //them in Alignment units; operand order depends on the ABI version)
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #else
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #endif

   new_end_block->m_allocated = 1;
   //Only used by the assert below; silence unused warnings in NDEBUG builds
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset & block_ctrl::size_mask;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   //Release the helper zero-sized allocation made above, if any
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}
  495. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  496. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  497. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
  498. { return m_header.m_size; }
  499. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  500. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  501. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
  502. {
  503. return m_header.m_size - m_header.m_allocated -
  504. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  505. }
  506. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  507. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  508. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  509. get_min_size (size_type extra_hdr_bytes)
  510. {
  511. return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
  512. algo_impl_t::ceil_units(extra_hdr_bytes) +
  513. BlockCtrlUnits + EndCtrlBlockUnits)*Alignment;
  514. }
//!Returns true if every allocation has been returned: no bytes are accounted
//!as allocated and the free-block tree contains exactly one node whose size
//!(in Alignment units) spans the whole payload area (segment size minus the
//!first block offset and the end control block bytes). Thread-safe.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   //Offset of the first (lowest-address) block from this header object
   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   //The tree must be non-empty, and begin() advanced once must reach end()
   //(i.e. exactly one free block), and that block must cover all usable space
   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
       (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
       && m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}
  530. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  531. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  532. check_sanity()
  533. {
  534. //-----------------------
  535. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  536. //-----------------------
  537. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  538. size_type free_memory = 0;
  539. //Iterate through all blocks obtaining their size
  540. for(; ib != ie; ++ib){
  541. free_memory += (size_type)ib->m_size*Alignment;
  542. if(!algo_impl_t::check_alignment(&*ib))
  543. return false;
  544. }
  545. //Check allocated bytes are less than size
  546. if(m_header.m_allocated > m_header.m_size){
  547. return false;
  548. }
  549. size_type block1_off =
  550. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  551. //Check free bytes are less than size
  552. if(free_memory > (m_header.m_size - block1_off)){
  553. return false;
  554. }
  555. return true;
  556. }
  557. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  558. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  559. allocate(size_type nbytes)
  560. {
  561. //-----------------------
  562. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  563. //-----------------------
  564. size_type ignore_recvd = nbytes;
  565. void *ignore_reuse = 0;
  566. return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
  567. }
//!Allocates nbytes bytes whose address satisfies "alignment", delegating to
//!the shared algorithm implementation while holding the header mutex.
//!Returns 0 on failure. Thread-safe.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(size_type nbytes, size_type alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}
  577. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  578. template<class T>
  579. inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  580. allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  581. size_type &prefer_in_recvd_out_size, T *&reuse)
  582. {
  583. void* raw_reuse = reuse;
  584. void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
  585. reuse = static_cast<T*>(raw_reuse);
  586. BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
  587. return static_cast<T*>(ret);
  588. }
  589. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  590. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  591. raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
  592. size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
  593. {
  594. size_type const preferred_objects = prefer_in_recvd_out_objects;
  595. if(!sizeof_object)
  596. return reuse_ptr = 0, static_cast<void*>(0);
  597. if(command & boost::interprocess::try_shrink_in_place){
  598. if(!reuse_ptr) return static_cast<void*>(0);
  599. const bool success = algo_impl_t::try_shrink
  600. ( this, reuse_ptr, limit_objects*sizeof_object
  601. , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
  602. prefer_in_recvd_out_objects /= sizeof_object;
  603. return success ? reuse_ptr : 0;
  604. }
  605. else{
  606. return priv_allocation_command
  607. (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
  608. }
  609. }
  610. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  611. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  612. priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  613. size_type &prefer_in_recvd_out_size,
  614. void *&reuse_ptr, size_type sizeof_object)
  615. {
  616. void* ret;
  617. size_type const preferred_size = prefer_in_recvd_out_size;
  618. size_type const max_count = m_header.m_size/sizeof_object;
  619. if(limit_size > max_count || preferred_size > max_count){
  620. return reuse_ptr = 0, static_cast<void*>(0);
  621. }
  622. size_type l_size = limit_size*sizeof_object;
  623. size_type p_size = preferred_size*sizeof_object;
  624. size_type r_size;
  625. {
  626. //-----------------------
  627. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  628. //-----------------------
  629. ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
  630. }
  631. prefer_in_recvd_out_size = r_size/sizeof_object;
  632. return ret;
  633. }
  634. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  635. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  636. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  637. size(const void *ptr) const
  638. {
  639. //We need no synchronization since this block's size is not going
  640. //to be modified by anyone else
  641. //Obtain the real size of the block
  642. return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  643. }
  644. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  645. inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
  646. {
  647. //-----------------------
  648. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  649. //-----------------------
  650. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  651. //Iterate through all blocks obtaining their size
  652. while(ib != ie){
  653. //Just clear user the memory part reserved for the user
  654. volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
  655. size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
  656. while(s--){
  657. *ptr++ = 0;
  658. }
  659. //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
  660. //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
  661. // , 0
  662. // , ib->m_size*Alignment - BlockCtrlBytes);
  663. ++ib;
  664. }
  665. }
//!Tries to expand the allocated buffer "reuse_ptr" forward (absorbing the
//!next free block via priv_expand) and/or backwards (absorbing part or all
//!of a free previous block). On success returns the (possibly moved
//!backwards) user pointer and leaves the received byte count in
//!prefer_in_recvd_out_size; returns 0 on failure. When expanding backwards
//!the distance moved must be a multiple of backwards_multiple; with
//!only_preferred_backwards the backwards target is preferred_size,
//!otherwise min_size.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      //Forward expansion first: if it satisfies the request we are done
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      //No forward expansion requested: the current size might already suffice
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }
   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }
   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);
      //Sanity check
      algo_impl_t::assert_alignment(reuse);
      block_ctrl *prev_block;
      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }
      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));
      //Some sanity checks
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);
      size_type needs_backwards_aligned;
      size_type lcm;
      //Compute how many bytes must be taken backwards so the displacement
      //stays a multiple of backwards_multiple (via its lcm with Alignment)
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }
      //Check if previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            //Split the previous free block: its tail becomes the start of
            //the expanded allocation, its head stays free (smaller)
            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);
            //Free old previous buffer
            new_block->m_size =
               (AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment) & block_ctrl::size_mask;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);
            prev_block->m_size = size_type(reinterpret_cast<char*>(new_block) -
                                 reinterpret_cast<char*>(prev_block))/Alignment & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);
            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  //The shrunk block may now sort before its left neighbor:
                  //reinsert it (begin() as hint) to restore ordering
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }
            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;
            //Check alignment
            algo_impl_t::assert_alignment(new_block);
            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole new block is multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
            //Just merge the whole previous block
            //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;
            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = size_type(prev_block->m_size + reuse->m_size) & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);
            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}
//!Deallocates every buffer contained in "chain" under a single lock
//!acquisition, delegating the iteration to the shared algorithm
//!implementation. Thread-safe.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   algo_impl_t::deallocate_many(this, chain);
}
//!Core allocation routine (caller must hold the mutex). Dispatches on
//!"command": shrink_in_place delegates to algo_impl_t::shrink; expansion
//!commands try priv_expand_both_sides (first aiming at preferred_size, and
//!as a last resort at min_size); allocate_new searches the free-block tree
//!best-fit via lower_bound. Returns the user pointer or 0; on failure or
//!fresh allocation reuse_ptr is cleared. prefer_in_recvd_out_size carries
//!the preferred size in and the received size out.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
               ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      //Shrinking requires an existing buffer
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }
   prefer_in_recvd_out_size = 0;
   //A limit larger than the preferred size is a caller error
   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);
   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);
   //Number of units to request (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);
   //Expand in place
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }
   if(command & boost::interprocess::allocate_new){
      //Best fit: smallest free block with at least preferred_units
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));
      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
      //No block reaches preferred_units: settle for the largest block
      //(the one just before end()) if it covers at least limit_units
      if(it != m_header.m_imultiset.begin()&&
              (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }
   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
  848. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  849. inline
  850. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  851. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
  852. {
  853. return const_cast<block_ctrl*>
  854. (move_detail::force_ptr<const block_ctrl*>
  855. (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
  856. }
  857. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  858. inline
  859. void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  860. priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  861. { return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes); }
  862. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  863. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  864. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  865. priv_get_total_units(size_type userbytes)
  866. {
  867. if(userbytes < UsableByPreviousChunk)
  868. userbytes = UsableByPreviousChunk;
  869. size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
  870. if(units < BlockCtrlUnits) units = BlockCtrlUnits;
  871. return units;
  872. }
//!Tries to grow the allocated buffer "ptr" in place by absorbing the next
//!block when it is free. Returns true when the buffer ends with at least
//!min_size usable bytes (possibly without any change). On return
//!prefer_in_recvd_out_size holds the usable size actually available
//!(even on failure, where it reports the best achievable merged size).
//!Splits the absorbed block when the remainder can hold a free block.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real size of the block
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;
   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));
   //Put this to a safe value
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;
   //Now translate it to Alignment units
   const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
   //Some parameter checks
   BOOST_ASSERT(min_user_units <= preferred_user_units);
   block_ctrl *next_block;
   //A used successor block means no in-place growth is possible
   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);
   //Is "block" + "next_block" big enough?
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;
   //Now get the expansion size
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;
   if(merged_user_units < min_user_units){
      //Even the full merge falls short: report what a merge would yield
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }
   //Now get the maximum size the user can allocate
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;
   //These are total units of the merged block (supposing the next block can be split)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;
   //Check if we can split the next one in two parts
   if((merged_units - intended_units) >= BlockCtrlUnits){
      //This block is bigger than needed, split it in
      //two blocks, the first one will be merged and
      //the second's size will be the remaining space
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;
      //Check if we we need to update the old next block in the free blocks tree
      //If the new size fulfills tree invariants, we just need to replace the node
      //(the block start has been displaced), otherwise erase() + insert().
      //
      //This fixup must be done in two parts, because the new next block might
      //overwrite the tree hook of the old next block. So we first erase the
      //old if needed and we'll insert the new one after creating the new next
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      m_header.m_imultiset.erase(old_next_block_it);
      //This is the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + intended_units*Alignment, boost_container_new_t()) block_ctrl;
      rem_block->m_size = rem_units & block_ctrl::size_mask;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      m_header.m_imultiset.insert(*rem_block);
      //Write the new length
      block->m_size = (intended_user_units + AllocatedCtrlUnits) & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
      //Write the new length
      block->m_size = merged_units & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}
  952. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  953. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  954. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
  955. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  956. {
  957. BOOST_ASSERT(!ptr->m_prev_allocated);
  958. return move_detail::force_ptr<block_ctrl*>
  959. (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
  960. }
  961. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  962. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  963. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
  964. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
  965. {
  966. //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
  967. //distance with the end block
  968. BOOST_ASSERT(first_segment_block->m_prev_allocated);
  969. block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
  970. (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
  971. (void)end_block;
  972. BOOST_ASSERT(end_block->m_allocated == 1);
  973. BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
  974. BOOST_ASSERT(end_block > first_segment_block);
  975. return end_block;
  976. }
  977. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  978. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  979. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
  980. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
  981. {
  982. //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
  983. //distance with the end block
  984. BOOST_ASSERT(end_segment_block->m_allocated);
  985. block_ctrl *first_block = move_detail::force_ptr<block_ctrl*>
  986. (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
  987. (void)first_block;
  988. BOOST_ASSERT(first_block->m_prev_allocated == 1);
  989. BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
  990. BOOST_ASSERT(end_segment_block > first_block);
  991. return first_block;
  992. }
  993. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  994. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  995. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
  996. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  997. {
  998. return move_detail::force_ptr<block_ctrl*>
  999. (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
  1000. }
  1001. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1002. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
  1003. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1004. {
  1005. bool allocated = block->m_allocated != 0;
  1006. #ifndef NDEBUG
  1007. if(block != priv_end_block()){
  1008. block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
  1009. (reinterpret_cast<char*>(block) + block->m_size*Alignment);
  1010. bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
  1011. (void)next_block_prev_allocated;
  1012. BOOST_ASSERT(allocated == next_block_prev_allocated);
  1013. }
  1014. #endif
  1015. return allocated;
  1016. }
  1017. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1018. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
  1019. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1020. {
  1021. if(block->m_prev_allocated){
  1022. return true;
  1023. }
  1024. else{
  1025. #ifndef NDEBUG
  1026. if(block != priv_first_block()){
  1027. block_ctrl *prev = priv_prev_block(block);
  1028. (void)prev;
  1029. BOOST_ASSERT(!prev->m_allocated);
  1030. BOOST_ASSERT(prev->m_size == block->m_prev_size);
  1031. }
  1032. #endif
  1033. return false;
  1034. }
  1035. }
  1036. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1037. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
  1038. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1039. {
  1040. block->m_allocated = 1;
  1041. move_detail::force_ptr<block_ctrl*>
  1042. (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
  1043. }
  1044. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1045. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
  1046. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1047. {
  1048. block->m_allocated = 0;
  1049. block_ctrl *next_block = priv_next_block(block);
  1050. next_block->m_prev_allocated = 0;
  1051. next_block->m_prev_size = block->m_size;
  1052. }
//!Final allocation step: "block" is a free block (still linked in the tree)
//!expected to hold at least "nunits" units. Splits it when the remainder is
//!large enough to form a free block, unlinks the allocated part from the
//!tree, updates the allocated-byte count, marks the block allocated, zeroes
//!the stale tree-hook bytes, and returns the user buffer. received_size is
//!set to the usable byte count handed to the caller.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);
   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "units" and
      //the second's size "block->m_size-units"
      size_type block_old_size = block->m_size;
      block->m_size = nunits & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      //This is the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + Alignment*nunits, boost_container_new_t()) block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = (block_old_size - nunits) & block_ctrl::size_mask;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      //Now we have to update the data in the tree.
      //Use the position of the erased one as a hint
      m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
   }
   else if (block->m_size >= nunits){
      //Too small to split: hand out the whole block
      m_header.m_imultiset.erase(it_old);
   }
   else{
      //Precondition violated: the candidate block is smaller than requested
      BOOST_ASSERT(0);
      return 0;
   }
   //We need block_ctrl for deallocation stuff, so
   //return memory user can overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   //Mark the block as allocated
   priv_mark_as_allocated_block(block);
   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Just clear the memory part reserved for the user
   std::size_t tree_hook_offset_in_block = std::size_t((char*)t - (char*)block);
   //volatile char *ptr =
   char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   //The successor's m_prev_size is meaningless for an allocated predecessor
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}
  1105. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  1106. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
  1107. {
  1108. if(!addr) return;
  1109. //-----------------------
  1110. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  1111. //-----------------------
  1112. return this->priv_deallocate(addr);
  1113. }
//!Core deallocation routine (caller must hold the mutex). Updates the
//!allocated-byte count, coalesces the freed block with free neighbors
//!(previous and/or next), fixes up the free-block tree (erasing merged
//!nodes), marks the resulting block free and inserts it into the tree.
//!A null pointer is ignored.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;
   block_ctrl *block = priv_get_block(addr);
   //The blocks must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));
   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);
   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);
   //Update used memory count
   m_header.m_allocated -= block_old_size;
   //The block to insert in the tree
   block_ctrl *block_to_insert = block;
   //Get the next block
   block_ctrl *const next_block  = priv_next_block(block);
   const bool merge_with_prev    = !priv_is_prev_allocated(block);
   const bool merge_with_next    = !priv_is_allocated_block(next_block);
   //Merge logic. First just update block sizes, then fix free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block: it absorbs the freed one, so it must be
         //temporarily removed from the tree (its key changes)
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size = size_type(block_to_insert->m_size + block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         m_header.m_imultiset.erase(Imultiset::s_iterator_to(*block_to_insert));
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size = size_type(block_to_insert->m_size + next_block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         m_header.m_imultiset.erase(next_it);
      }
   }
   //Publish the (possibly coalesced) block as free and reindex it
   priv_mark_as_free_block(block_to_insert);
   m_header.m_imultiset.insert(*block_to_insert);
}
  1154. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  1155. } //namespace interprocess {
  1156. } //namespace boost {
  1157. #include <boost/interprocess/detail/config_end.hpp>
  1158. #endif //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP