bernoulli_details.hpp

///////////////////////////////////////////////////////////////////////////////
// Copyright 2013 John Maddock
// Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_MATH_BERNOULLI_DETAIL_HPP
#define BOOST_MATH_BERNOULLI_DETAIL_HPP

#include <boost/config.hpp>
#include <boost/detail/lightweight_mutex.hpp>
#include <boost/math/tools/atomic.hpp>
#include <boost/utility/enable_if.hpp>
#include <boost/math/tools/toms748_solve.hpp>
#include <vector>

namespace boost{ namespace math{ namespace detail{
//
// Asymptotic expansion for B2n due to
// Luschny LogB3 formula (http://www.luschny.de/math/primes/bernincl.html)
//
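//
// Concretely (this is just a restatement of the expression in the code below, with
// n the full, even index):
//
//    log|B_n| ~ (n + 1/2) log(n) + (1/2 - n) log(pi) + (3/2 - n) log(2)
//               + n (2 - 7 n^2 (1 + 30 n^2 (12 n^2 - 1))) / (2520 n^6)
//
// The sign is +1 when n/2 is odd and -1 when it is even, which is what the final
// expression below encodes.
//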
template <class T, class Policy>
T b2n_asymptotic(int n)
{
   BOOST_MATH_STD_USING
   const T nx = static_cast<T>(n);
   const T nx2(nx * nx);

   const T approximate_log_of_bernoulli_bn =
        ((boost::math::constants::half<T>() + nx) * log(nx))
      + ((boost::math::constants::half<T>() - nx) * log(boost::math::constants::pi<T>()))
      + (((T(3) / 2) - nx) * boost::math::constants::ln_two<T>())
      + ((nx * (T(2) - (nx2 * 7) * (1 + ((nx2 * 30) * ((nx2 * 12) - 1))))) / (((nx2 * nx2) * nx2) * 2520));

   return ((n / 2) & 1 ? 1 : -1) * (approximate_log_of_bernoulli_bn > tools::log_max_value<T>()
      ? policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, nx, Policy())
      : static_cast<T>(exp(approximate_log_of_bernoulli_bn)));
}
template <class T, class Policy>
T t2n_asymptotic(int n)
{
   BOOST_MATH_STD_USING
   // Just get B2n and convert to a Tangent number:
   T t2n = fabs(b2n_asymptotic<T, Policy>(2 * n)) / (2 * n);

   T p2 = ldexp(T(1), n);
   if(tools::max_value<T>() / p2 < t2n)
      return policies::raise_overflow_error<T>("boost::math::tangent_t2n<%1%>(std::size_t)", 0, T(n), Policy());
   t2n *= p2;
   p2 -= 1;
   if(tools::max_value<T>() / p2 < t2n)
      return policies::raise_overflow_error<T>("boost::math::tangent_t2n<%1%>(std::size_t)", 0, Policy());
   t2n *= p2;
   return t2n;
}
//
// We need to know the approximate value of /n/ which will
// cause bernoulli_b2n<T>(n) to return infinity - this allows
// us to elude a great deal of runtime checking for values below
// n, and only perform the full overflow checks when we know that we're
// getting close to the point where our calculations will overflow.
// We use Luschny's LogB3 formula (http://www.luschny.de/math/primes/bernincl.html)
// to find the limit, and since we're dealing with the log of the Bernoulli numbers
// we need only perform the calculation at double precision and not with T
// (which may be a multiprecision type).  The limit returned is within 1 of the true
// limit for all the types tested.  Note that although the code below is basically
// the same as b2n_asymptotic above, it has been recast as a continuous real-valued
// function as this makes the root finding go smoother/faster.  It also omits the
// sign of the Bernoulli number.
//
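//
// As a rough worked example (an estimate only, not a guarantee): for IEEE double,
// tools::log_max_value<double>() is about 709.78, and the LogB3 curve crosses that
// value near n ~ 259, so the limit computed below is about 259 / 2 = 129 - i.e. the
// largest even-indexed Bernoulli number representable as a double is around B258.
//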
struct max_bernoulli_root_functor
{
   max_bernoulli_root_functor(long long t) : target(static_cast<double>(t)) {}
   double operator()(double n)
   {
      BOOST_MATH_STD_USING

      // Luschny LogB3(n) formula.
      const double nx2(n * n);

      const double approximate_log_of_bernoulli_bn
         =   ((boost::math::constants::half<double>() + n) * log(n))
           + ((boost::math::constants::half<double>() - n) * log(boost::math::constants::pi<double>()))
           + (((double(3) / 2) - n) * boost::math::constants::ln_two<double>())
           + ((n * (2 - (nx2 * 7) * (1 + ((nx2 * 30) * ((nx2 * 12) - 1))))) / (((nx2 * nx2) * nx2) * 2520));

      return approximate_log_of_bernoulli_bn - target;
   }
private:
   double target;
};

template <class T, class Policy>
inline std::size_t find_bernoulli_overflow_limit(const mpl::false_&)
{
   long long t = lltrunc(boost::math::tools::log_max_value<T>());
   max_bernoulli_root_functor fun(t);
   boost::math::tools::equal_floor tol;
   boost::uintmax_t max_iter = boost::math::policies::get_max_root_iterations<Policy>();
   return static_cast<std::size_t>(boost::math::tools::toms748_solve(fun, sqrt(double(t)), double(t), tol, max_iter).first) / 2;
}

template <class T, class Policy>
inline std::size_t find_bernoulli_overflow_limit(const mpl::true_&)
{
   return max_bernoulli_index<bernoulli_imp_variant<T>::value>::value;
}
template <class T, class Policy>
std::size_t b2n_overflow_limit()
{
   // This routine is called at program startup if it's called at all:
   // that guarantees safe initialization of the static variable.
   typedef mpl::bool_<(bernoulli_imp_variant<T>::value >= 1) && (bernoulli_imp_variant<T>::value <= 3)> tag_type;
   static const std::size_t lim = find_bernoulli_overflow_limit<T, Policy>(tag_type());
   return lim;
}
//
// The tangent numbers grow much more rapidly than the Bernoulli numbers do.
// To compute the Bernoulli numbers from the tangent numbers without spurious
// overflow in the intermediate calculation, we scale all the tangent numbers
// by a small scale factor:
//
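//
// For example (assuming IEEE double, where radix == 2 and min_exponent == -1021),
// the first overload below returns ldexp(1.0, -1016) == 2^-1016.  Multiplying and
// later dividing by an exact power of two like this introduces no rounding error.
//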
template <class T>
inline typename enable_if_c<std::numeric_limits<T>::is_specialized && (std::numeric_limits<T>::radix == 2), T>::type tangent_scale_factor()
{
   BOOST_MATH_STD_USING
   return ldexp(T(1), std::numeric_limits<T>::min_exponent + 5);
}
template <class T>
inline typename disable_if_c<std::numeric_limits<T>::is_specialized && (std::numeric_limits<T>::radix == 2), T>::type tangent_scale_factor()
{
   return tools::min_value<T>() * 16;
}
//
// Initializer: ensure all our constants are initialized prior to the first call of main:
//
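//
// (The mechanism: bernoulli_initializer<T, Policy> holds a static data member of the
// nested type "init" whose constructor primes the caches; get_bernoulli_numbers_cache()
// below calls force_instantiate(), which odr-uses that member so its dynamic
// initialization is not optimised away.)
//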
template <class T, class Policy>
struct bernoulli_initializer
{
   struct init
   {
      init()
      {
         //
         // We call twice, once to initialize our static table, and once to
         // initialize our dynamic table:
         //
         boost::math::bernoulli_b2n<T>(2, Policy());
#ifndef BOOST_NO_EXCEPTIONS
         try{
#endif
            boost::math::bernoulli_b2n<T>(max_bernoulli_b2n<T>::value + 1, Policy());
#ifndef BOOST_NO_EXCEPTIONS
         } catch(const std::overflow_error&){}
#endif
         boost::math::tangent_t2n<T>(2, Policy());
      }
      void force_instantiate()const{}
   };
   static const init initializer;
   static void force_instantiate()
   {
      initializer.force_instantiate();
   }
};

template <class T, class Policy>
const typename bernoulli_initializer<T, Policy>::init bernoulli_initializer<T, Policy>::initializer;
//
// We need something to act as a cache for our calculated Bernoulli numbers.  In order to
// ensure both fast access and thread safety, we need a stable table which may be extended
// in size, but which never reallocates: that way values already calculated may be accessed
// concurrently with another thread extending the table with new values.
//
// Very very simple vector class that will never allocate more than once, we could use
// boost::container::static_vector here, but that allocates on the stack, which may well
// cause issues for the amount of memory we want in the extreme case...
//
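//
// Illustrative usage (not part of the library interface): the full capacity is
// allocated once in the constructor, resize() placement-constructs elements within
// that capacity, and exceeding capacity() throws rather than reallocating, e.g.
//
//    fixed_vector<double> v;        // one allocation of v.capacity() elements
//    v.resize(10, 0.0);             // constructs 10 elements in place
//    v.resize(v.capacity() + 1);    // would throw std::runtime_error
//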
template <class T>
struct fixed_vector : private std::allocator<T>
{
   typedef unsigned size_type;
   typedef T* iterator;
   typedef const T* const_iterator;
   fixed_vector() : m_used(0)
   {
      std::size_t overflow_limit = 5 + b2n_overflow_limit<T, policies::policy<> >();
      m_capacity = static_cast<unsigned>((std::min)(overflow_limit, static_cast<std::size_t>(100000u)));
      m_data = this->allocate(m_capacity);
   }
   ~fixed_vector()
   {
#ifdef BOOST_NO_CXX11_ALLOCATOR
      for(unsigned i = 0; i < m_used; ++i)
         this->destroy(&m_data[i]);
      this->deallocate(m_data, m_capacity);
#else
      typedef std::allocator<T> allocator_type;
      typedef std::allocator_traits<allocator_type> allocator_traits;
      allocator_type& alloc = *this;
      for(unsigned i = 0; i < m_used; ++i)
         allocator_traits::destroy(alloc, &m_data[i]);
      allocator_traits::deallocate(alloc, m_data, m_capacity);
#endif
   }
   T& operator[](unsigned n) { BOOST_ASSERT(n < m_used); return m_data[n]; }
   const T& operator[](unsigned n)const { BOOST_ASSERT(n < m_used); return m_data[n]; }
   unsigned size()const { return m_used; }
   unsigned size() { return m_used; }
   void resize(unsigned n, const T& val)
   {
      if(n > m_capacity)
      {
         BOOST_THROW_EXCEPTION(std::runtime_error("Exhausted storage for Bernoulli numbers."));
      }
      for(unsigned i = m_used; i < n; ++i)
         new (m_data + i) T(val);
      m_used = n;
   }
   void resize(unsigned n) { resize(n, T()); }
   T* begin() { return m_data; }
   T* end() { return m_data + m_used; }
   T* begin()const { return m_data; }
   T* end()const { return m_data + m_used; }
   unsigned capacity()const { return m_capacity; }
   void clear() { m_used = 0; }
private:
   T* m_data;
   unsigned m_used, m_capacity;
};
template <class T, class Policy>
class bernoulli_numbers_cache
{
public:
   bernoulli_numbers_cache() : m_overflow_limit((std::numeric_limits<std::size_t>::max)())
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_MATH_NO_ATOMIC_INT)
      , m_counter(0)
#endif
      , m_current_precision(boost::math::tools::digits<T>())
   {}

   typedef fixed_vector<T> container_type;
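   //
   // tangent(m) fills tn[0..m) with the tangent numbers T_0, T_1, T_2, ... (0, 1, 2, 16, 272, ...),
   // each pre-multiplied by tangent_scale_factor<T>().  It uses the usual in-place
   // "tangent number triangle" recurrence; m_intermediates keeps the last row of the
   // triangle between calls so that the table can be extended incrementally later.
   //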
   void tangent(std::size_t m)
   {
      static const std::size_t min_overflow_index = b2n_overflow_limit<T, Policy>() - 1;
      tn.resize(static_cast<typename container_type::size_type>(m), T(0U));

      BOOST_MATH_INSTRUMENT_VARIABLE(min_overflow_index);

      std::size_t prev_size = m_intermediates.size();
      m_intermediates.resize(m, T(0U));

      if(prev_size == 0)
      {
         m_intermediates[1] = tangent_scale_factor<T>() /*T(1U)*/;
         tn[0U] = T(0U);
         tn[1U] = tangent_scale_factor<T>()/* T(1U)*/;
         BOOST_MATH_INSTRUMENT_VARIABLE(tn[0]);
         BOOST_MATH_INSTRUMENT_VARIABLE(tn[1]);
      }

      for(std::size_t i = std::max<size_t>(2, prev_size); i < m; i++)
      {
         bool overflow_check = false;
         if(i >= min_overflow_index && (boost::math::tools::max_value<T>() / (i-1) < m_intermediates[1]) )
         {
            std::fill(tn.begin() + i, tn.end(), boost::math::tools::max_value<T>());
            break;
         }
         m_intermediates[1] = m_intermediates[1] * (i-1);
         for(std::size_t j = 2; j <= i; j++)
         {
            overflow_check =
               (i >= min_overflow_index) && (
                  (boost::math::tools::max_value<T>() / (i - j) < m_intermediates[j])
                  || (boost::math::tools::max_value<T>() / (i - j + 2) < m_intermediates[j-1])
                  || (boost::math::tools::max_value<T>() - m_intermediates[j] * (i - j) < m_intermediates[j-1] * (i - j + 2))
                  || ((boost::math::isinf)(m_intermediates[j]))
               );
            if(overflow_check)
            {
               std::fill(tn.begin() + i, tn.end(), boost::math::tools::max_value<T>());
               break;
            }
            m_intermediates[j] = m_intermediates[j] * (i - j) + m_intermediates[j-1] * (i - j + 2);
         }
         if(overflow_check)
            break; // already filled the tn...
         tn[static_cast<typename container_type::size_type>(i)] = m_intermediates[i];
         BOOST_MATH_INSTRUMENT_VARIABLE(i);
         BOOST_MATH_INSTRUMENT_VARIABLE(tn[static_cast<typename container_type::size_type>(i)]);
      }
   }
   void tangent_numbers_series(const std::size_t m)
   {
      BOOST_MATH_STD_USING
      static const std::size_t min_overflow_index = b2n_overflow_limit<T, Policy>() - 1;

      typename container_type::size_type old_size = bn.size();

      tangent(m);
      bn.resize(static_cast<typename container_type::size_type>(m));

      if(!old_size)
      {
         bn[0] = 1;
         old_size = 1;
      }

      T power_two(ldexp(T(1), static_cast<int>(2 * old_size)));

      for(std::size_t i = old_size; i < m; i++)
      {
         T b(static_cast<T>(i * 2));
         //
         // Not only do we need to take care to avoid spurious over/under flow in
         // the calculation, but we also need to avoid overflow altogether in case
         // we're calculating with a type where "bad things" happen in that case:
         //
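         // (The identity being applied, with tn[i] holding tangent_scale_factor<T>() * T_i
         // and power_two tracking 2^(2i), is:
         //    B_{2i} = (-1)^(i-1) * 2i * T_i / (2^(2i) * (2^(2i) - 1)).)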
         b = b / (power_two * tangent_scale_factor<T>());
         b /= (power_two - 1);
         bool overflow_check = (i >= min_overflow_index) && (tools::max_value<T>() / tn[static_cast<typename container_type::size_type>(i)] < b);
         if(overflow_check)
         {
            m_overflow_limit = i;
            while(i < m)
            {
               b = std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() : tools::max_value<T>();
               bn[static_cast<typename container_type::size_type>(i)] = ((i % 2U) ? b : T(-b));
               ++i;
            }
            break;
         }
         else
         {
            b *= tn[static_cast<typename container_type::size_type>(i)];
         }

         power_two = ldexp(power_two, 2);

         const bool b_neg = i % 2 == 0;

         bn[static_cast<typename container_type::size_type>(i)] = ((!b_neg) ? b : T(-b));
      }
   }
   template <class OutputIterator>
   OutputIterator copy_bernoulli_numbers(OutputIterator out, std::size_t start, std::size_t n, const Policy& pol)
   {
      //
      // There are basically 3 thread safety options:
      //
      // 1) There are no threads (BOOST_HAS_THREADS is not defined).
      // 2) There are threads, but we do not have a true atomic integer type,
      //    in this case we just use a mutex to guard against race conditions.
      // 3) There are threads, and we have an atomic integer: in this case we can
      //    use the double-checked locking pattern to avoid thread synchronisation
      //    when accessing values already in the cache.
      //
      // First off handle the common case for overflow and/or asymptotic expansion:
      //
      if(start + n > bn.capacity())
      {
         if(start < bn.capacity())
         {
            out = copy_bernoulli_numbers(out, start, bn.capacity() - start, pol);
            n -= bn.capacity() - start;
            start = static_cast<std::size_t>(bn.capacity());
         }
         if(start < b2n_overflow_limit<T, Policy>() + 2u)
         {
            for(; n; ++start, --n)
            {
               *out = b2n_asymptotic<T, Policy>(static_cast<typename container_type::size_type>(start * 2U));
               ++out;
            }
         }
         for(; n; ++start, --n)
         {
            *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(start), pol);
            ++out;
         }
         return out;
      }
#if !defined(BOOST_HAS_THREADS)
      //
      // Single threaded code, very simple:
      //
      if(m_current_precision < boost::math::tools::digits<T>())
      {
         bn.clear();
         tn.clear();
         m_intermediates.clear();
         m_current_precision = boost::math::tools::digits<T>();
      }
      if(start + n >= bn.size())
      {
         std::size_t new_size = (std::min)((std::max)((std::max)(std::size_t(start + n), std::size_t(bn.size() + 20)), std::size_t(50)), std::size_t(bn.capacity()));
         tangent_numbers_series(new_size);
      }

      for(std::size_t i = (std::max)(std::size_t(max_bernoulli_b2n<T>::value + 1), start); i < start + n; ++i)
      {
         *out = (i >= m_overflow_limit) ? policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol) : bn[i];
         ++out;
      }
#elif defined(BOOST_MATH_NO_ATOMIC_INT)
      //
      // We need to grab a mutex every time we get here, for both readers and writers:
      //
      boost::detail::lightweight_mutex::scoped_lock l(m_mutex);
      if(m_current_precision < boost::math::tools::digits<T>())
      {
         bn.clear();
         tn.clear();
         m_intermediates.clear();
         m_current_precision = boost::math::tools::digits<T>();
      }
      if(start + n >= bn.size())
      {
         std::size_t new_size = (std::min)((std::max)((std::max)(std::size_t(start + n), std::size_t(bn.size() + 20)), std::size_t(50)), std::size_t(bn.capacity()));
         tangent_numbers_series(new_size);
      }

      for(std::size_t i = (std::max)(std::size_t(max_bernoulli_b2n<T>::value + 1), start); i < start + n; ++i)
      {
         *out = (i >= m_overflow_limit) ? policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol) : bn[i];
         ++out;
      }
#else
      //
      // Double-checked locking pattern lets us access already cached values
      // without locking:
      //
      // Get the counter and see if we need to calculate more constants:
      //
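      // (If the atomic counter - read with memory_order_consume - already covers
      // [start, start + n) and the cached precision matches, then the corresponding
      // entries of bn were published by the release store below and can be read
      // without taking the mutex; otherwise we lock and re-check.)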
      if((static_cast<std::size_t>(m_counter.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < start + n)
         || (static_cast<int>(m_current_precision.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < boost::math::tools::digits<T>()))
      {
         boost::detail::lightweight_mutex::scoped_lock l(m_mutex);

         if((static_cast<std::size_t>(m_counter.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < start + n)
            || (static_cast<int>(m_current_precision.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < boost::math::tools::digits<T>()))
         {
            if(static_cast<int>(m_current_precision.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < boost::math::tools::digits<T>())
            {
               bn.clear();
               tn.clear();
               m_intermediates.clear();
               m_counter.store(0, BOOST_MATH_ATOMIC_NS::memory_order_release);
               m_current_precision = boost::math::tools::digits<T>();
            }
            if(start + n >= bn.size())
            {
               std::size_t new_size = (std::min)((std::max)((std::max)(std::size_t(start + n), std::size_t(bn.size() + 20)), std::size_t(50)), std::size_t(bn.capacity()));
               tangent_numbers_series(new_size);
            }
            m_counter.store(static_cast<atomic_integer_type>(bn.size()), BOOST_MATH_ATOMIC_NS::memory_order_release);
         }
      }

      for(std::size_t i = (std::max)(static_cast<std::size_t>(max_bernoulli_b2n<T>::value + 1), start); i < start + n; ++i)
      {
         *out = (i >= m_overflow_limit) ? policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol) : bn[static_cast<typename container_type::size_type>(i)];
         ++out;
      }
#endif
      return out;
   }
   template <class OutputIterator>
   OutputIterator copy_tangent_numbers(OutputIterator out, std::size_t start, std::size_t n, const Policy& pol)
   {
      //
      // There are basically 3 thread safety options:
      //
      // 1) There are no threads (BOOST_HAS_THREADS is not defined).
      // 2) There are threads, but we do not have a true atomic integer type,
      //    in this case we just use a mutex to guard against race conditions.
      // 3) There are threads, and we have an atomic integer: in this case we can
      //    use the double-checked locking pattern to avoid thread synchronisation
      //    when accessing values already in the cache.
      //
      // First off handle the common case for overflow and/or asymptotic expansion:
      //
      if(start + n > bn.capacity())
      {
         if(start < bn.capacity())
         {
            out = copy_tangent_numbers(out, start, bn.capacity() - start, pol);
            n -= bn.capacity() - start;
            start = static_cast<std::size_t>(bn.capacity());
         }
         if(start < b2n_overflow_limit<T, Policy>() + 2u)
         {
            for(; n; ++start, --n)
            {
               *out = t2n_asymptotic<T, Policy>(static_cast<typename container_type::size_type>(start));
               ++out;
            }
         }
         for(; n; ++start, --n)
         {
            *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(start), pol);
            ++out;
         }
         return out;
      }
#if !defined(BOOST_HAS_THREADS)
      //
      // Single threaded code, very simple:
      //
      if(m_current_precision < boost::math::tools::digits<T>())
      {
         bn.clear();
         tn.clear();
         m_intermediates.clear();
         m_current_precision = boost::math::tools::digits<T>();
      }
      if(start + n >= bn.size())
      {
         std::size_t new_size = (std::min)((std::max)((std::max)(start + n, std::size_t(bn.size() + 20)), std::size_t(50)), std::size_t(bn.capacity()));
         tangent_numbers_series(new_size);
      }

      for(std::size_t i = start; i < start + n; ++i)
      {
         if(i >= m_overflow_limit)
            *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol);
         else
         {
            if(tools::max_value<T>() * tangent_scale_factor<T>() < tn[static_cast<typename container_type::size_type>(i)])
               *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol);
            else
               *out = tn[static_cast<typename container_type::size_type>(i)] / tangent_scale_factor<T>();
         }
         ++out;
      }
#elif defined(BOOST_MATH_NO_ATOMIC_INT)
      //
      // We need to grab a mutex every time we get here, for both readers and writers:
      //
      boost::detail::lightweight_mutex::scoped_lock l(m_mutex);
      if(m_current_precision < boost::math::tools::digits<T>())
      {
         bn.clear();
         tn.clear();
         m_intermediates.clear();
         m_current_precision = boost::math::tools::digits<T>();
      }
      if(start + n >= bn.size())
      {
         std::size_t new_size = (std::min)((std::max)((std::max)(start + n, std::size_t(bn.size() + 20)), std::size_t(50)), std::size_t(bn.capacity()));
         tangent_numbers_series(new_size);
      }

      for(std::size_t i = start; i < start + n; ++i)
      {
         if(i >= m_overflow_limit)
            *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol);
         else
         {
            if(tools::max_value<T>() * tangent_scale_factor<T>() < tn[static_cast<typename container_type::size_type>(i)])
               *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol);
            else
               *out = tn[static_cast<typename container_type::size_type>(i)] / tangent_scale_factor<T>();
         }
         ++out;
      }
#else
      //
      // Double-checked locking pattern lets us access already cached values
      // without locking:
      //
      // Get the counter and see if we need to calculate more constants:
      //
      if((static_cast<std::size_t>(m_counter.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < start + n)
         || (static_cast<int>(m_current_precision.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < boost::math::tools::digits<T>()))
      {
         boost::detail::lightweight_mutex::scoped_lock l(m_mutex);

         if((static_cast<std::size_t>(m_counter.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < start + n)
            || (static_cast<int>(m_current_precision.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < boost::math::tools::digits<T>()))
         {
            if(static_cast<int>(m_current_precision.load(BOOST_MATH_ATOMIC_NS::memory_order_consume)) < boost::math::tools::digits<T>())
            {
               bn.clear();
               tn.clear();
               m_intermediates.clear();
               m_counter.store(0, BOOST_MATH_ATOMIC_NS::memory_order_release);
               m_current_precision = boost::math::tools::digits<T>();
            }
            if(start + n >= bn.size())
            {
               std::size_t new_size = (std::min)((std::max)((std::max)(start + n, std::size_t(bn.size() + 20)), std::size_t(50)), std::size_t(bn.capacity()));
               tangent_numbers_series(new_size);
            }
            m_counter.store(static_cast<atomic_integer_type>(bn.size()), BOOST_MATH_ATOMIC_NS::memory_order_release);
         }
      }

      for(std::size_t i = start; i < start + n; ++i)
      {
         if(i >= m_overflow_limit)
            *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol);
         else
         {
            if(tools::max_value<T>() * tangent_scale_factor<T>() < tn[static_cast<typename container_type::size_type>(i)])
               *out = policies::raise_overflow_error<T>("boost::math::bernoulli_b2n<%1%>(std::size_t)", 0, T(i), pol);
            else
               *out = tn[static_cast<typename container_type::size_type>(i)] / tangent_scale_factor<T>();
         }
         ++out;
      }
#endif
      return out;
   }
private:
   //
   // The caches for Bernoulli and tangent numbers: once allocated,
   // these must NEVER EVER reallocate, as that breaks our thread
   // safety guarantees:
   //
   fixed_vector<T> bn, tn;
   std::vector<T> m_intermediates;
   // The value at which we know overflow has already occurred for the Bn:
   std::size_t m_overflow_limit;
#if !defined(BOOST_HAS_THREADS)
   int m_current_precision;
#elif defined(BOOST_MATH_NO_ATOMIC_INT)
   boost::detail::lightweight_mutex m_mutex;
   int m_current_precision;
#else
   boost::detail::lightweight_mutex m_mutex;
   atomic_counter_type m_counter, m_current_precision;
#endif
};
template <class T, class Policy>
inline bernoulli_numbers_cache<T, Policy>& get_bernoulli_numbers_cache()
{
   //
   // Force this function to be called at program startup so all the static variables
   // get initialized then (thread safety).
   //
   bernoulli_initializer<T, Policy>::force_instantiate();
   static bernoulli_numbers_cache<T, Policy> data;
   return data;
}
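//
// Note on intended use (a reading of the code above, not a documented guarantee): the
// public bernoulli_b2n / tangent_t2n front ends serve small indices from a compile-time
// table and call copy_bernoulli_numbers() / copy_tangent_numbers() on this cache for
// larger ones; values beyond the cache capacity fall back to the asymptotic formulas,
// and values beyond the overflow limit are reported via raise_overflow_error.
//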
}}} // namespace boost::math::detail

#endif // BOOST_MATH_BERNOULLI_DETAIL_HPP