| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679 |
- ///////////////////////////////////////////////////////////////////////////////
- // Copyright 2021 - 2025 Fahad Syed.
- // Copyright 2021 - 2025 Christopher Kormanyos.
- // Copyright 2021 - 2025 Janek Kozicki.
- // Copyright 2025 Matt Borland.
- // Distributed under the Boost Software License, Version 1.0.
- // (See accompanying file LICENSE_1_0.txt or copy at
- // http://www.boost.org/LICENSE_1_0.txt)
- //
- #ifndef BOOST_MP_CPP_DOUBLE_FP_2021_06_05_HPP
- #define BOOST_MP_CPP_DOUBLE_FP_2021_06_05_HPP
- #include <boost/multiprecision/cpp_bin_float.hpp>
- #include <boost/multiprecision/cpp_df_qf/cpp_df_qf_detail.hpp>
- #include <boost/multiprecision/detail/hash.hpp>
- #include <boost/multiprecision/traits/max_digits10.hpp>
- #include <boost/multiprecision/traits/std_integer_traits.hpp>
- #ifdef BOOST_MP_MATH_AVAILABLE
- //
- // Headers required for Boost.Math integration:
- //
- #include <boost/math/policies/policy.hpp>
- //
- // Some includes we need from Boost.Math, since we rely on that library to provide these functions:
- //
- #include <boost/math/special_functions/acosh.hpp>
- #include <boost/math/special_functions/asinh.hpp>
- #include <boost/math/special_functions/atanh.hpp>
- #include <boost/math/special_functions/cbrt.hpp>
- #include <boost/math/special_functions/expm1.hpp>
- #include <boost/math/special_functions/gamma.hpp>
- #endif
- #include <limits>
- #include <string>
- #include <type_traits>
- #if (defined(BOOST_CLANG) && defined(BOOST_CLANG_VERSION) && (BOOST_CLANG_VERSION <= 90000))
- #define BOOST_MP_DF_QF_NUM_LIMITS_CLASS_TYPE struct
- #else
- #define BOOST_MP_DF_QF_NUM_LIMITS_CLASS_TYPE class
- #endif
namespace boost { namespace multiprecision { namespace backends {

// Forward declaration of the double-float backend class itself.
template <typename FloatingPointType>
class cpp_double_fp_backend;

// Forward declarations of the eval_*() free functions which implement the
// Boost.Multiprecision backend requirements for cpp_double_fp_backend.

// Arithmetic.
template <typename FloatingPointType>
constexpr auto eval_add(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType>
constexpr auto eval_add(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void;
template <typename FloatingPointType>
constexpr auto eval_subtract(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType>
constexpr auto eval_subtract(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void;
template <typename FloatingPointType>
constexpr auto eval_multiply(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType>
constexpr auto eval_multiply(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void;
template <typename FloatingPointType>
constexpr auto eval_divide(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType>
constexpr auto eval_divide(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void;

// Comparison and classification.
template <typename FloatingPointType>
constexpr auto eval_eq(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> bool;
template <typename FloatingPointType>
constexpr auto eval_lt(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> bool;
template <typename FloatingPointType>
constexpr auto eval_gt(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> bool;
template <typename FloatingPointType>
constexpr auto eval_is_zero(const cpp_double_fp_backend<FloatingPointType>& x) -> bool;
template <typename FloatingPointType>
constexpr auto eval_signbit(const cpp_double_fp_backend<FloatingPointType>& x) -> int;

// Elementary manipulations.
template <typename FloatingPointType>
constexpr auto eval_fabs(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a) -> void;
template <typename FloatingPointType>
constexpr auto eval_frexp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, int* v) -> void;
template <typename FloatingPointType>
constexpr auto eval_ldexp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, int v) -> void;
template <typename FloatingPointType>
constexpr auto eval_floor(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType>
constexpr auto eval_ceil(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType>
constexpr auto eval_fpclassify(const cpp_double_fp_backend<FloatingPointType>& o) -> int;

// Elementary transcendental functions.
template <typename FloatingPointType>
constexpr auto eval_sqrt(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& o) -> void;
template <typename FloatingPointType>
constexpr auto eval_pow(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x, const cpp_double_fp_backend<FloatingPointType>& a) -> void;
template <typename FloatingPointType,
          typename IntegralType>
constexpr auto eval_pow(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x, IntegralType n) -> typename ::std::enable_if<boost::multiprecision::detail::is_integral<IntegralType>::value, void>::type;

// Three eval_exp() overloads, selected on the decimal digit count of the
// composite type: fewer than 16, 16 through 36, and more than 36 digits10.
template <typename FloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<FloatingPointType>::value && ((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) < 16))>::type const* = nullptr>
constexpr auto eval_exp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<FloatingPointType>::value && (((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) >= 16) && ((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) <= 36)))>::type const* = nullptr>
constexpr auto eval_exp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<FloatingPointType>::value && ((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) > 36))>::type const* = nullptr>
constexpr auto eval_exp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;
template <typename FloatingPointType>
constexpr auto eval_log(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void;

// Conversions to built-in integer and floating-point types.
template <typename FloatingPointType>
constexpr auto eval_convert_to(signed long long* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> void;
template <typename FloatingPointType>
constexpr auto eval_convert_to(unsigned long long* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> void;
#ifdef BOOST_HAS_INT128
// 128-bit conversions are split on whether one float_type limb carries more
// than single-precision (24-bit) significance.
template <typename FloatingPointType>
constexpr auto eval_convert_to(boost::int128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type;
template <typename FloatingPointType>
constexpr auto eval_convert_to(boost::int128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<!(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type;
template <typename FloatingPointType>
constexpr auto eval_convert_to(boost::uint128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type;
template <typename FloatingPointType>
constexpr auto eval_convert_to(boost::uint128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<!(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type;
#endif
template <typename FloatingPointType,
          typename OtherFloatingPointType>
constexpr auto eval_convert_to(OtherFloatingPointType* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename ::std::enable_if<cpp_df_qf_detail::is_floating_point<OtherFloatingPointType>::value>::type;

// Hashing and absolute value.
template <typename FloatingPointType>
constexpr auto hash_value(const cpp_double_fp_backend<FloatingPointType>& a) -> ::std::size_t;
template <typename FloatingPointType>
constexpr auto fabs(const cpp_double_fp_backend<FloatingPointType>& a) -> cpp_double_fp_backend<FloatingPointType>;

} } } // namespace boost::multiprecision::backends
namespace std {

// Forward declarations of various specializations of std::numeric_limits.
// BOOST_MP_DF_QF_NUM_LIMITS_CLASS_TYPE expands to class or struct, depending
// on compiler (see the clang workaround near the top of this header).
template <typename FloatingPointType,
          const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
BOOST_MP_DF_QF_NUM_LIMITS_CLASS_TYPE numeric_limits<boost::multiprecision::number<boost::multiprecision::backends::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >;

} // namespace std
namespace boost { namespace multiprecision {

// Classify the double-float backend as a floating-point kind within the
// Boost.Multiprecision number-category taxonomy.
template <typename FloatingPointType>
struct number_category<backends::cpp_double_fp_backend<FloatingPointType>> : public std::integral_constant<int, number_kind_floating_point> { };

namespace backends {
- // A cpp_double_fp_backend is represented by an unevaluated sum of two
- // floating-point units, a0 and a1, which satisfy |a1| <= (1 / 2) * ulp(a0).
- // The type of the floating-point constituents should adhere to IEEE754.
- // Although the constituent parts (a0 and a1) satisfy |a1| <= (1 / 2) * ulp(a0),
- // the composite type does not adhere to these strict error bounds. Its error
- // bounds are larger.
- // This class has been tested with floats having single-precision (4 byte),
- // double-precision (8 byte) and quad precision (16 byte, such as GCC's __float128).
template <typename FloatingPointType>
class cpp_double_fp_backend
{
 public:
   using float_type = FloatingPointType;

   // Accept only IEC-559 (IEEE-754) binary types with 24, 53 or 64 mantissa
   // digits, or any 113-digit quad type (the is_iec559 check is relaxed for
   // the 113-digit case, e.g. for GCC's __float128).
   static_assert
   (
      cpp_df_qf_detail::is_floating_point<float_type>::value
      && bool
         {
               ((cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits == 24) && cpp_df_qf_detail::ccmath::numeric_limits<float_type>::is_specialized && cpp_df_qf_detail::ccmath::numeric_limits<float_type>::is_iec559)
            || ((cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits == 53) && cpp_df_qf_detail::ccmath::numeric_limits<float_type>::is_specialized && cpp_df_qf_detail::ccmath::numeric_limits<float_type>::is_iec559)
            || ((cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits == 64) && cpp_df_qf_detail::ccmath::numeric_limits<float_type>::is_specialized && cpp_df_qf_detail::ccmath::numeric_limits<float_type>::is_iec559)
            || (cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits == 113)
         },
      "Error: float_type does not fulfill the backend requirements of cpp_double_fp_backend"
   );

   // The representation is a pair of limbs whose unevaluated sum is the value.
   using rep_type   = cpp_df_qf_detail::pair<float_type, float_type>;
   // Helper providing the exact double-float primitives (normalize, two_diff, ...).
   using arithmetic = cpp_df_qf_detail::exact_arithmetic<float_type>;

   // Type lists required by the Boost.Multiprecision backend concept.
   using signed_types   = std::tuple<signed char, signed short, signed int, signed long, signed long long, std::intmax_t>;
   using unsigned_types = std::tuple<unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long, std::uintmax_t>;
   using float_types    = std::tuple<float, double, long double>;
   using exponent_type  = int;

   // The composite type carries twice the binary digits of one limb.
   static constexpr int my_digits         = static_cast<int>(cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits * static_cast<int>(INT8_C(2)));
   static constexpr int my_digits10       = static_cast<int>(boost::multiprecision::detail::calc_digits10 <static_cast<unsigned>(my_digits)>::value);
   static constexpr int my_max_digits10   = static_cast<int>(boost::multiprecision::detail::calc_max_digits10<static_cast<unsigned>(my_digits)>::value);

   // The exponent range of the composite matches the constituent at the top,
   // but the minimum is raised by one limb-width so that the low limb of a
   // minimum-exponent value remains representable.
   static constexpr int my_max_exponent   = cpp_df_qf_detail::ccmath::numeric_limits<float_type>::max_exponent;
   static constexpr int my_max_exponent10 = cpp_df_qf_detail::ccmath::numeric_limits<float_type>::max_exponent10;

   static constexpr int my_min_exponent =
      static_cast<int>
      (
           cpp_df_qf_detail::ccmath::numeric_limits<float_type>::min_exponent
         + cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits
      );

   static constexpr int my_min_exponent10 =
      static_cast<int>
      (
         -static_cast<int>
          (
             boost::multiprecision::detail::calc_digits10<static_cast<unsigned>(-my_min_exponent)>::value
          )
      );
   // Default constructor: the underlying rep_type pair is default-constructed.
   constexpr cpp_double_fp_backend() noexcept { }

   // Copy constructor.
   constexpr cpp_double_fp_backend(const cpp_double_fp_backend& other) : data(other.data) { }

   // Move constructor.
   constexpr cpp_double_fp_backend(cpp_double_fp_backend&& other) noexcept : data(static_cast<rep_type&&>(other.data)) { }
   // Constructors from other floating-point types.

   // Case 1: the source mantissa fits entirely in the high limb, so the
   // low limb is exactly zero.
   template <typename OtherFloatType,
             typename ::std::enable_if<( cpp_df_qf_detail::is_floating_point<OtherFloatType>::value
                                        && (cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatType>::digits <= cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits))>::type const* = nullptr>
   constexpr cpp_double_fp_backend(const OtherFloatType& f)
      : data(f, static_cast<float_type>(0.0F)) { }

   // Case 2: the source is wider than one limb. Keep the rounded high part
   // and store the residual (f minus its float_type image) in the low limb.
   template <typename OtherFloatType,
             typename ::std::enable_if<( cpp_df_qf_detail::is_floating_point<OtherFloatType>::value
                                        && (cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatType>::digits > cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits))>::type const* = nullptr>
   constexpr cpp_double_fp_backend(const OtherFloatType& f)
      : data(static_cast<float_type>(f),
             static_cast<float_type>(f - static_cast<OtherFloatType>(static_cast<float_type>(f)))) { }

   // Constructor from another kind of cpp_double_fp_backend<> object:
   // re-sum the source's two limbs at this type's precision.
   template <typename OtherFloatType,
             typename ::std::enable_if<( cpp_df_qf_detail::is_floating_point<OtherFloatType>::value
                                        && (!std::is_same<FloatingPointType, OtherFloatType>::value))>::type const* = nullptr>
   constexpr cpp_double_fp_backend(const cpp_double_fp_backend<OtherFloatType>& a)
      : cpp_double_fp_backend(cpp_double_fp_backend(a.my_first()) += a.my_second()) { }
   // Constructors from integers narrow enough to convert losslessly into
   // a single limb (the value-bit count fits in float_type's mantissa).
   template <typename SignedIntegralType,
             typename ::std::enable_if<( boost::multiprecision::detail::is_integral<SignedIntegralType>::value
                                        && (!boost::multiprecision::detail::is_unsigned<SignedIntegralType>::value)
                                        && ((static_cast<int>(sizeof(SignedIntegralType) * 8) - 1) <= cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits))>::type const* = nullptr>
   constexpr cpp_double_fp_backend(const SignedIntegralType& n)
      : data(static_cast<float_type>(n), static_cast<float_type>(0.0F)) { }

   template <typename UnsignedIntegralType,
             typename ::std::enable_if<( boost::multiprecision::detail::is_integral<UnsignedIntegralType>::value
                                        && boost::multiprecision::detail::is_unsigned<UnsignedIntegralType>::value
                                        && (static_cast<int>(sizeof(UnsignedIntegralType) * 8u) <= cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits))>::type const* = nullptr>
   constexpr cpp_double_fp_backend(const UnsignedIntegralType& u)
      : data(static_cast<float_type>(u), static_cast<float_type>(0.0F)) { }
   // Constructors from integers which hold more information than *this can contain.
   template <typename UnsignedIntegralType,
             typename ::std::enable_if<( boost::multiprecision::detail::is_integral<UnsignedIntegralType>::value
                                        && boost::multiprecision::detail::is_unsigned<UnsignedIntegralType>::value
                                        && (static_cast<int>(sizeof(UnsignedIntegralType) * 8u) > cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits))>::type const* = nullptr>
   constexpr cpp_double_fp_backend(UnsignedIntegralType u)
      : data(static_cast<float_type>(u & cpp_df_qf_detail::float_mask<UnsignedIntegralType, float_type>()),
             static_cast<float_type>(0.0F))
   {
      using local_unsigned_integral_type = UnsignedIntegralType;

      // The member initializer above captured only the lowest mantissa-width
      // chunk of u. If u has set bits above that chunk, fold the remaining
      // chunks in, one mantissa-width at a time.
      if (u > cpp_df_qf_detail::float_mask<local_unsigned_integral_type, float_type>())
      {
         local_unsigned_integral_type
            local_flt_mask
            {
               cpp_df_qf_detail::float_mask<local_unsigned_integral_type, float_type>()
            };

         // Slide the mask upward through u. Each masked chunk keeps its bits
         // at their true positions, so its float_type image already carries
         // the correct magnitude when accumulated via add_unchecked_limb().
         // The loop stops when the mask shifts entirely out of the type.
         for (int index_mask_lsb = cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits;
                 (index_mask_lsb < static_cast<int>(sizeof(local_unsigned_integral_type) * 8u))
              && (local_flt_mask != local_unsigned_integral_type { UINT8_C(0) });
              index_mask_lsb += cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits)
         {
            local_flt_mask <<= static_cast<unsigned>(cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits);

            add_unchecked_limb(static_cast<float_type>(u & local_flt_mask));
         }
      }
   }
   // Wide signed-integer constructor: delegate to the wide unsigned
   // constructor on the magnitude, then negate if the source was negative.
   template <typename SignedIntegralType,
             typename ::std::enable_if<( boost::multiprecision::detail::is_integral<SignedIntegralType>::value
                                        && (!boost::multiprecision::detail::is_unsigned<SignedIntegralType>::value)
                                        && ((static_cast<int>(sizeof(SignedIntegralType) * 8) - 1) > cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits))>::type const* = nullptr>
   constexpr cpp_double_fp_backend(SignedIntegralType n)
   {
      const bool is_neg { (n < SignedIntegralType { INT8_C(0) }) };

      using local_unsigned_integral_type = typename boost::multiprecision::detail::make_unsigned<SignedIntegralType>::type;

      // Compute |n| in the unsigned type as (~n + 1) rather than -n, which
      // avoids signed overflow for the most negative representable value.
      const local_unsigned_integral_type
         u_val
         {
            (!is_neg)
               ? static_cast<local_unsigned_integral_type>(n)
               : static_cast<local_unsigned_integral_type>
                 (
                      static_cast<local_unsigned_integral_type>(~n)
                    + static_cast<local_unsigned_integral_type>(UINT8_C(1))
                 )
         };

      data = cpp_double_fp_backend(u_val).data;

      if (is_neg) { negate(); }
   }
   // Constructor from the two constituent limbs. NOTE(review): no
   // normalization is performed here — callers presumably supply an
   // already-normalized (a, b) pair; confirm at call sites.
   constexpr cpp_double_fp_backend(const float_type& a, const float_type& b) noexcept : data(a, b) { }

   // Constructor from the underlying representation pair.
   constexpr cpp_double_fp_backend(const cpp_df_qf_detail::pair<float_type, float_type>& p) noexcept : data(p) { }

   // Constructor from a decimal character string (delegates to rd_string
   // through the char* assignment operator).
   cpp_double_fp_backend(const char* p_str)
   {
      *this = p_str;
   }
- // Assignment operator.
- constexpr auto operator=(const cpp_double_fp_backend& other) -> cpp_double_fp_backend&
- {
- if (this != &other)
- {
- data = other.data;
- }
- return *this;
- }
- // Move assignment operator.
- constexpr auto operator=(cpp_double_fp_backend&& other) noexcept -> cpp_double_fp_backend&
- {
- data = static_cast<rep_type&&>(other.data);
- return *this;
- }
- // Assignment operator from another kind of cpp_double_fp_backend<> object.
- template <typename OtherFloatType,
- typename ::std::enable_if<( cpp_df_qf_detail::is_floating_point<OtherFloatType>::value
- && (!std::is_same<FloatingPointType, OtherFloatType>::value))>::type const* = nullptr>
- constexpr auto operator=(const cpp_double_fp_backend<OtherFloatType>& other) -> cpp_double_fp_backend&
- {
- return operator=(cpp_double_fp_backend(other));
- }
- template <typename OtherFloatType>
- constexpr auto operator=(const OtherFloatType f) -> typename ::std::enable_if<cpp_df_qf_detail::is_floating_point<OtherFloatType>::value, cpp_double_fp_backend&>::type
- {
- return operator=(cpp_double_fp_backend(f));
- }
- template <typename IntegralType>
- constexpr auto operator=(const IntegralType n) -> typename ::std::enable_if<boost::multiprecision::detail::is_integral<IntegralType>::value, cpp_double_fp_backend&>::type
- {
- return operator=(cpp_double_fp_backend(n));
- }
- auto operator=(const char* p_str) -> cpp_double_fp_backend&
- {
- rd_string(p_str);
- return *this;
- }
- constexpr auto hash() const -> ::std::size_t
- {
- #if defined(BOOST_MP_CPP_DOUBLE_FP_HAS_FLOAT128)
- using local_float_type = typename std::conditional<::std::is_same<float_type, ::boost::float128_type>::value,
- long double,
- float_type>::type;
- #else
- using local_float_type = float_type;
- #endif
- std::size_t result { UINT8_C(0) };
- int n_first { };
- int n_second { };
- boost::multiprecision::detail::hash_combine(result, static_cast<local_float_type>(cpp_df_qf_detail::ccmath::frexp(data.first, &n_first)));
- boost::multiprecision::detail::hash_combine(result, static_cast<local_float_type>(cpp_df_qf_detail::ccmath::frexp(data.second, &n_second)));
- boost::multiprecision::detail::hash_combine(result, n_first);
- boost::multiprecision::detail::hash_combine(result, n_second);
- return result;
- }
- // The public methods follow.
- constexpr auto isneg_unchecked() const noexcept -> bool { return (data.first < 0); }
- constexpr auto iszero_unchecked() const noexcept -> bool { return (data.first == float_type { 0.0F }); }
- constexpr auto is_one() const noexcept -> bool
- {
- return
- (
- (data.second == float_type { 0.0F })
- && (data.first == float_type { 1.0F })
- );
- }
- constexpr auto negate() -> void
- {
- const bool isinf_u { (cpp_df_qf_detail::ccmath::isinf)(data.first) };
- const bool isnan_u { (cpp_df_qf_detail::ccmath::isnan)(data.first) };
- if (isnan_u) { }
- else if (isinf_u)
- {
- data.first = -data.first;
- }
- else
- {
- if (!iszero_unchecked())
- {
- data = arithmetic::normalize(-data.first, -data.second);
- }
- }
- }
   // Getters/Setters
   constexpr auto my_first () const noexcept -> const float_type& { return data.first; }   // high (most significant) limb
   constexpr auto my_second() const noexcept -> const float_type& { return data.second; }  // low (least significant) limb

   constexpr auto  rep()       noexcept ->       rep_type& { return data; }  // mutable access to the underlying pair
   constexpr auto  rep() const noexcept -> const rep_type& { return data; }
   constexpr auto crep() const noexcept -> const rep_type& { return data; }  // always-const access
   // Unary add/sub/mul/div follow in the upcoming paragraphs.

   constexpr auto operator+=(const cpp_double_fp_backend& v) -> cpp_double_fp_backend&
   {
      const int fpc_u { eval_fpclassify(*this) };
      const int fpc_v { eval_fpclassify(v) };

      if ((fpc_u != FP_NORMAL) || (fpc_v != FP_NORMAL))
      {
         // Handle special cases like zero, inf and NaN.

         // NaN on the left absorbs everything: NaN + v is NaN.
         if (fpc_u == FP_NAN)
         {
            return *this;
         }

         const bool isinf_v { (fpc_v == FP_INFINITE) };

         // inf + (-inf) is NaN; inf plus anything else stays inf.
         if (fpc_u == FP_INFINITE)
         {
            if (isinf_v && (isneg_unchecked() != v.isneg_unchecked()))
            {
               *this = cpp_double_fp_backend::my_value_nan();
            }

            return *this;
         }

         // Subnormal operands are treated as (and flushed to) zero.
         const bool iszero_u { ((fpc_u == FP_ZERO) || (fpc_u == FP_SUBNORMAL)) };
         const bool isnan_v  { (fpc_v == FP_NAN) };

         if (iszero_u || (isnan_v || isinf_v))
         {
            if (iszero_u)
            {
               data.first  = float_type { 0.0F };
               data.second = float_type { 0.0F };
            }

            // 0 + v yields v (including NaN or inf on the right);
            // 0 + 0 leaves the freshly-zeroed *this unchanged.
            const bool iszero_v { ((fpc_v == FP_ZERO) || (fpc_v == FP_SUBNORMAL)) };

            return ((!iszero_v) ? operator=(v) : *this);
         }
      }

      // Both operands are finite, normal and nonzero: full double-limb add.
      add_unchecked(v);

      return *this;
   }
// Compound subtraction. Special operands (NaN/inf/zero) are dispatched first;
// finite normal operands use the two_diff/two_hilo_sum renormalization core.
constexpr auto operator-=(const cpp_double_fp_backend& v) -> cpp_double_fp_backend&
{
   const int fpc_u { eval_fpclassify(*this) };
   const int fpc_v { eval_fpclassify(v) };

   if ((fpc_u != FP_NORMAL) || (fpc_v != FP_NORMAL))
   {
      // Handle special cases like zero, inf and NaN.

      // NaN on the left absorbs everything.
      if (fpc_u == FP_NAN)
      {
         return *this;
      }

      const bool isinf_v { (fpc_v == FP_INFINITE) };

      if (fpc_u == FP_INFINITE)
      {
         // inf - inf (same signs) is NaN; otherwise the left infinity survives.
         if (isinf_v && (isneg_unchecked() == v.isneg_unchecked()))
         {
            *this = cpp_double_fp_backend::my_value_nan();
         }

         return *this;
      }

      const bool iszero_u { ((fpc_u == FP_ZERO) || (fpc_u == FP_SUBNORMAL)) };
      const bool isnan_v  { (fpc_v == FP_NAN) };

      if (iszero_u || (isnan_v || isinf_v))
      {
         // Left side is (effectively) zero: flush it to exact zero, then
         // take -v unless v is zero as well.
         if (iszero_u)
         {
            data.first  = float_type { 0.0F };
            data.second = float_type { 0.0F };
         }

         const bool iszero_v { ((fpc_v == FP_ZERO) || (fpc_v == FP_SUBNORMAL)) };

         return ((!iszero_v) ? operator=(-v) : *this);
      }
   }

   // Self-subtraction yields exact zero.
   if (this == &v)
   {
      data.first  = float_type { 0.0F };
      data.second = float_type { 0.0F };

      return *this;
   }

   // Core double-double subtraction: exact difference of the low limbs,
   // exact difference of the high limbs, then two renormalization passes
   // that fold in the error terms.
   const rep_type thi_tlo { arithmetic::two_diff(data.second, v.data.second) };

   data = arithmetic::two_diff(data.first, v.data.first);

   if (cpp_df_qf_detail::ccmath::isinf(data.first))
   {
      // Handle overflow.
      const bool b_neg { (data.first < float_type { 0.0F }) };

      *this = cpp_double_fp_backend::my_value_inf();

      if (b_neg)
      {
         negate();
      }

      return *this;
   }

   data = arithmetic::two_hilo_sum(data.first, data.second + thi_tlo.first);
   data = arithmetic::two_hilo_sum(data.first, thi_tlo.second + data.second);

   return *this;
}
// Compound multiplication. Special operands are dispatched first; finite
// normal operands are delegated to mul_unchecked().
constexpr auto operator*=(const cpp_double_fp_backend& v) -> cpp_double_fp_backend&
{
   // Evaluate the sign of the result.
   const int fpc_u { eval_fpclassify(*this) };
   const int fpc_v { eval_fpclassify(v) };

   if ((fpc_u != FP_NORMAL) || (fpc_v != FP_NORMAL))
   {
      // Handle special cases like zero, inf and NaN.
      const bool isinf_u  { (fpc_u == FP_INFINITE) };
      const bool isinf_v  { (fpc_v == FP_INFINITE) };
      const bool iszero_u { (fpc_u == FP_ZERO) };
      const bool iszero_v { (fpc_v == FP_ZERO) };

      // NaN operands and inf * 0 (in either order) produce NaN.
      if (((fpc_u == FP_NAN) || (fpc_v == FP_NAN)) || (isinf_u && iszero_v) || (isinf_v && iszero_u))
      {
         return operator=( cpp_double_fp_backend::my_value_nan());
      }

      // Any other product involving an infinity is a signed infinity.
      if (isinf_u || isinf_v)
      {
         const bool b_neg { (isneg_unchecked() != v.isneg_unchecked()) };

         *this = cpp_double_fp_backend::my_value_inf();

         if (b_neg)
         {
            negate();
         }

         return *this;
      }

      if (iszero_u || iszero_v)
      {
         return operator=(cpp_double_fp_backend(0));
      }
   }

   mul_unchecked(v);

   return *this;
}
// Compound division. Special operands (NaN, 0/0, x/0, inf/inf, x/inf, ...)
// are dispatched first; the finite-normal path implements a double-double
// division with Dekker splitting and FMA-based rounding corrections.
constexpr auto operator/=(const cpp_double_fp_backend& v) -> cpp_double_fp_backend&
{
   const int fpc_u { eval_fpclassify(*this) };
   const int fpc_v { eval_fpclassify(v) };

   if ((fpc_u != FP_NORMAL) || (fpc_v != FP_NORMAL))
   {
      // Handle special cases like zero, inf and NaN.
      const bool isnan_u { (fpc_u == FP_NAN) };
      const bool isnan_v { (fpc_v == FP_NAN) };

      if (isnan_u || isnan_v)
      {
         return operator=(cpp_double_fp_backend::my_value_nan());
      }

      const bool iszero_u { (fpc_u == FP_ZERO) };
      const bool iszero_v { (fpc_v == FP_ZERO) };

      if (iszero_u)
      {
         if (iszero_v)
         {
            // 0/0 is NaN.
            return operator=(cpp_double_fp_backend::my_value_nan());
         }
         else
         {
            // 0/x is zero.
            return operator=(cpp_double_fp_backend(0));
         }
      }

      // Handle more special cases like zero, inf and NaN.
      if (iszero_v)
      {
         // x/0 is a signed infinity.
         const bool b_neg = isneg_unchecked();

         *this = cpp_double_fp_backend::my_value_inf();

         if (b_neg)
         {
            negate();
         }

         return *this;
      }

      const bool isinf_v { (fpc_v == FP_INFINITE) };
      const bool isinf_u { (fpc_u == FP_INFINITE) };

      if (isinf_u)
      {
         if (isinf_v)
         {
            // inf/inf is NaN.
            return operator=(cpp_double_fp_backend::my_value_nan());
         }
         else
         {
            // inf/x is a signed infinity.
            const bool b_neg { isneg_unchecked() };

            return operator=((!b_neg) ? cpp_double_fp_backend::my_value_inf() : -cpp_double_fp_backend::my_value_inf());
         }
      }

      if (isinf_v)
      {
         // x/inf is zero.
         return operator=(cpp_double_fp_backend(0));
      }
   }

   // Self-division is exactly one.
   if (this == &v)
   {
      data.first  = float_type { 1.0F };
      data.second = float_type { 0.0F };

      return *this;
   }

   // The division algorithm has been taken from Victor Shoup,
   // package WinNTL-5_3_2. It might originally be related to the
   // K. Briggs work. The algorithm has been significantly simplified
   // while still attempting to retain proper rounding corrections.
   // Checks for overflow and underflow have been added.

   // C is the approximate quotient of the high limbs.
   const float_type C { data.first / v.data.first };

   // Dekker-split C into high part hc, guarding against overflow of the
   // split multiplication.
   float_type c { cpp_df_qf_detail::split_maker<float_type>::value * C };

   float_type hc { };

   if (cpp_df_qf_detail::ccmath::isinf(c))
   {
      // Handle overflow by scaling down (and then back up) with the split.
      hc =
         cpp_df_qf_detail::ccmath::ldexp
         (
            C - float_type { C - cpp_df_qf_detail::ccmath::ldexp(C, -cpp_df_qf_detail::split_maker<float_type>::n_shl) },
            cpp_df_qf_detail::split_maker<float_type>::n_shl
         );
   }
   else
   {
      hc = c - float_type { c - C };
   }

   // Dekker-split the divisor's high limb into hv in the same manner.
   float_type u { cpp_df_qf_detail::split_maker<float_type>::value * v.data.first };

   const float_type hv =
   (
      cpp_df_qf_detail::ccmath::isinf(u)
         ? cpp_df_qf_detail::ccmath::ldexp
           (
              // Handle overflow by scaling down (and then back up) with the split.
              v.data.first - float_type { v.data.first - cpp_df_qf_detail::ccmath::ldexp(v.data.first, -cpp_df_qf_detail::split_maker<float_type>::n_shl) },
              cpp_df_qf_detail::split_maker<float_type>::n_shl
           )
         : u - float_type { u - v.data.first }
   );

   // U is the rounded product C * v.hi; u accumulates its rounding error
   // from the four split partial products.
   const float_type U { C * v.data.first };

   u = cpp_df_qf_detail::ccmath::unsafe::fma(hc, hv, -U);

   {
      const float_type tv { v.data.first - hv };

      u = cpp_df_qf_detail::ccmath::unsafe::fma(hc, tv, u);

      const float_type tc { C - hc };

      u = cpp_df_qf_detail::ccmath::unsafe::fma(tc, hv, u);
      u = cpp_df_qf_detail::ccmath::unsafe::fma(tc, tv, u);
   }

   // Correction term for the quotient, then final (hi, lo) recombination.
   c = float_type { (data.first - U) - u } + data.second;
   c = (c - float_type { C * v.data.second }) / v.data.first;

   // Perform even more simplifications compared to Victor Shoup.
   data.first  = C + c;
   data.second = float_type { C - data.first } + c;

   return *this;
}
- // Unary minus operator.
- constexpr auto operator-() const -> cpp_double_fp_backend
- {
- cpp_double_fp_backend v { *this };
- v.negate();
- return v;
- }
// Public helper functions.

// Compute result = x^p for integer p (p == 0 is filtered by a higher layer).
// Powers 1..4 are expanded explicitly, other positive powers use binary
// (square-and-multiply) exponentiation, and negative powers recurse on 1/x.
// NOTE(review): for p == INT_MIN the negation -p below overflows (UB);
// presumably the calling layer keeps |p| in range — confirm.
static constexpr auto pown(cpp_double_fp_backend& result, const cpp_double_fp_backend& x, int p) -> void
{
   using local_float_type = cpp_double_fp_backend;

   if (p == 2)
   {
      result = x; result.mul_unchecked(x);
   }
   else if (p == 3)
   {
      result = x; result.mul_unchecked(x); result.mul_unchecked(x);
   }
   else if (p == 4)
   {
      local_float_type x2 { x };

      x2.mul_unchecked(x);

      result = x2;
      result.mul_unchecked(x2);
   }
   else if (p == 1)
   {
      result = x;
   }
   else if (p < 0)
   {
      // The case p == 0 is checked in a higher calling layer.
      pown(result, local_float_type(1U) / x, -p);
   }
   else
   {
      // Square-and-multiply over the bits of p.
      result = local_float_type(1U);

      local_float_type y(x);

      auto p_local = static_cast<std::uint32_t>(p);

      for (;;)
      {
         // Multiply into the result whenever the current low bit is set.
         if (static_cast<std::uint_fast8_t>(p_local & static_cast<std::uint32_t>(UINT8_C(1))) != static_cast<std::uint_fast8_t>(UINT8_C(0)))
         {
            result.mul_unchecked(y);
         }

         p_local >>= 1U;

         if (p_local == static_cast<std::uint32_t>(UINT8_C(0)))
         {
            break;
         }
         else
         {
            y.mul_unchecked(y);
         }
      }
   }
}
- constexpr auto swap(cpp_double_fp_backend& other) -> void
- {
- if (this != &other)
- {
- const rep_type tmp { data };
- data = other.data;
- other.data = tmp;
- }
- }
// Rvalue overload of swap: exchanges representations without a self-swap check.
constexpr auto swap(cpp_double_fp_backend&& other) noexcept -> void
{
   const rep_type tmp { static_cast<typename cpp_double_fp_backend::rep_type&&>(data) };
   data = other.data;
   other.data = tmp;
}
// Three-way comparison: 1 for *this > other, -1 for *this < other, 0 for equal.
// Limbs are compared lexicographically, high limb first.
// NOTE(review): a NaN in *this yields -1, but a NaN in other falls through
// all comparisons and reports 0 (equal) — confirm this asymmetry is intended.
constexpr auto compare(const cpp_double_fp_backend& other) const noexcept -> int
{
   // Return 1 for *this > other, -1 for *this < other, 0 for *this = other.
   if ((cpp_df_qf_detail::ccmath::isnan)(data.first))
   {
      return -1;
   }
   else
   {
      return (my_first() > other.my_first()) ? 1 : (my_first() < other.my_first())
                ? -1 : (my_second() > other.my_second())
                   ? 1 : (my_second() < other.my_second())
                      ? -1 : 0;
   }
}
- // TBD: Exactly what compilers/language-standards are needed to make this constexpr?
// TBD: It does not really become constexpr until we stop using an intermediate
- // cpp_bin_float anyway. But I will leave this comment for future library evolution.
// Format the value as a string with the requested digit count and flags.
// The two limbs are summed in the wider cpp_bin_float type before formatting.
auto str(std::streamsize number_of_digits, const std::ios::fmtflags format_flags) const -> std::string
{
   // Use cpp_bin_float when writing to string. This is similar
   // to the use of cpp_bin_float when reading from string.
   cpp_bin_float_read_write_backend_type f_bin { data.first };

   eval_add(f_bin, cpp_bin_float_read_write_backend_type(data.second));

   return f_bin.str(number_of_digits, format_flags);
}
// Base-2 order (binary exponent) of the value, obtained via eval_frexp.
constexpr auto order02() const -> int
{
   // TBD: Is there another option to get the base-2 log
   // that's more unequivocally closer to constexpr?
   // TBD: Either way, integrate this (or something like it)
   // into any potential implementation of eval_ilogb().
   int e2 { };

   cpp_double_fp_backend dummy { };

   eval_frexp(dummy, *this, &e2);

   return e2;
}
// Factory for the largest representable finite value of the composite type.
static constexpr auto my_value_max() noexcept -> cpp_double_fp_backend
{
   // Use the non-normalized sum of two maximum values, where the lower
   // value is "shifted" right in the sense of floating-point ldexp.
   return
      cpp_double_fp_backend
      (
         arithmetic::two_hilo_sum
         (
            float_type
            (
               (cpp_df_qf_detail::ccmath::numeric_limits<float_type>::max)()
                  * (
                       static_cast<float_type>(1.0F)
                     - static_cast<float_type>(1.5F) * cpp_df_qf_detail::ccmath::sqrt(cpp_df_qf_detail::ccmath::numeric_limits<float_type>::epsilon())
                    )
            ),
            float_type
            (
               cpp_df_qf_detail::ccmath::ldexp
               (
                  (cpp_df_qf_detail::ccmath::numeric_limits<float_type>::max)(),
                  -cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits
               )
            )
         )
      );
}

// Factory for the smallest fully-accurate positive value of the composite type.
static constexpr auto my_value_min() noexcept -> cpp_double_fp_backend
{
   // Use the non-normalized minimum value, where the lower value
   // is "shifted" left in the sense of floating-point ldexp.
   return
      cpp_double_fp_backend
      (
         float_type
         (
            cpp_df_qf_detail::ccmath::ldexp
            (
               (cpp_df_qf_detail::ccmath::numeric_limits<float_type>::min)(),
               cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits
            )
         )
      );
}

// Machine epsilon of the composite type: 2^(3 - my_digits).
static constexpr auto my_value_eps() noexcept -> cpp_double_fp_backend
{
   return
      cpp_double_fp_backend
      (
         float_type(cpp_df_qf_detail::ccmath::ldexp(float_type { 1 }, int { 3 - my_digits }))
      );
}

// Quiet NaN: NaN in the high limb, zero in the low limb.
static constexpr auto my_value_nan() noexcept -> cpp_double_fp_backend
{
   return cpp_double_fp_backend(static_cast<float_type>(NAN), static_cast<float_type>(0.0F));
}

// Positive infinity: inf in the high limb, zero in the low limb.
static constexpr auto my_value_inf() noexcept -> cpp_double_fp_backend
{
   return cpp_double_fp_backend(static_cast<float_type>(HUGE_VAL), static_cast<float_type>(0.0F)); // conversion from double infinity OK
}

// Natural logarithm of the my_value_max() high-limb bound.
static constexpr auto my_value_logmax() -> cpp_double_fp_backend
{
   return
      cpp_double_fp_backend
      (
         cpp_df_qf_detail::ccmath::log
         (
            float_type
            (
               (cpp_df_qf_detail::ccmath::numeric_limits<float_type>::max)()
                  * (
                       static_cast<float_type>(1.0F)
                     - static_cast<float_type>(1.5F) * cpp_df_qf_detail::ccmath::sqrt(cpp_df_qf_detail::ccmath::numeric_limits<float_type>::epsilon())
                    )
            )
         )
      );
}

// Natural logarithm of my_value_min().
static constexpr auto my_value_logmin() -> cpp_double_fp_backend
{
   return
      cpp_double_fp_backend
      (
         cpp_df_qf_detail::ccmath::log
         (
            float_type
            (
               cpp_df_qf_detail::ccmath::ldexp
               (
                  (cpp_df_qf_detail::ccmath::numeric_limits<float_type>::min)(),
                  cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits
               )
            )
         )
      );
}
// Add a single limb v_first to *this with no special-value checks:
// exact two_sum with the high limb, then two renormalization passes that
// fold in the previous low limb and the rounding error.
constexpr auto add_unchecked_limb(const float_type v_first) -> void
{
   const float_type thi { data.second };

   data = arithmetic::two_sum(data.first, v_first);
   data = arithmetic::two_hilo_sum(data.first, data.second + thi);
   data = arithmetic::two_hilo_sum(data.first, data.second);
}
private:
   // The two-limb (high, low) representation of the value.
   rep_type data;

   // Intermediate cpp_bin_float type used for string read/write round-trips.
   using cpp_bin_float_read_write_backend_type = boost::multiprecision::backends::cpp_bin_float<static_cast<unsigned>(my_digits), digit_base_2, void, int, cpp_df_qf_detail::ccmath::numeric_limits<float_type>::min_exponent, cpp_df_qf_detail::ccmath::numeric_limits<float_type>::max_exponent>;

   // Parse a string into *this (defined out-of-line below the class).
   constexpr auto rd_string(const char* p_str) -> bool;
// Core double-double addition with no special-value checks other than
// overflow of the high-limb sum: exact two_sum of each limb pair followed
// by two renormalization passes that fold in the error terms.
constexpr auto add_unchecked(const cpp_double_fp_backend& v) -> void
{
   const rep_type thi_tlo { arithmetic::two_sum(data.second, v.data.second) };

   data = arithmetic::two_sum(data.first, v.data.first);

   if (cpp_df_qf_detail::ccmath::isinf(data.first))
   {
      // Handle overflow.
      const bool b_neg { (data.first < float_type { 0.0F }) };

      *this = cpp_double_fp_backend::my_value_inf();

      if (b_neg)
      {
         negate();
      }

      return;
   }

   data = arithmetic::two_hilo_sum(data.first, data.second + thi_tlo.first);
   data = arithmetic::two_hilo_sum(data.first, thi_tlo.second + data.second);
}
// Core double-double multiplication with no special-value checks other than
// overflow of intermediate products: Dekker-split both high limbs, build the
// FMA-corrected exact product of the high limbs, then fold in the cross
// terms (hi*lo and lo*hi).
constexpr auto mul_unchecked(const cpp_double_fp_backend& v) -> void
{
   // The multiplication algorithm has been taken from Victor Shoup,
   // package WinNTL-5_3_2. It might originally be related to the
   // K. Briggs work. The algorithm has been significantly simplified
   // while still attempting to retain proper rounding corrections.
   // Checks for overflow and underflow have been added.

   // Dekker-split this->data.first into its high part hu.
   float_type C { cpp_df_qf_detail::split_maker<float_type>::value * data.first };

   float_type hu { };

   if (cpp_df_qf_detail::ccmath::isinf(C))
   {
      // Handle overflow by scaling down (and then back up) with the split.
      C  = data.first - cpp_df_qf_detail::ccmath::ldexp(data.first, -cpp_df_qf_detail::split_maker<float_type>::n_shl);
      hu = cpp_df_qf_detail::ccmath::ldexp(data.first - C, cpp_df_qf_detail::split_maker<float_type>::n_shl);
   }
   else
   {
      hu = C - float_type { C - data.first };
   }

   // C now becomes the rounded product of the two high limbs.
   C = data.first * v.data.first;

   if (cpp_df_qf_detail::ccmath::isinf(C))
   {
      // Handle overflow.
      const bool b_neg { (isneg_unchecked() != v.isneg_unchecked()) };

      *this = cpp_double_fp_backend::my_value_inf();

      if (b_neg)
      {
         negate();
      }

      return;
   }

   // Dekker-split v.data.first into its high part hv.
   float_type c { cpp_df_qf_detail::split_maker<float_type>::value * v.data.first };

   float_type hv { };

   if (cpp_df_qf_detail::ccmath::isinf(c))
   {
      // Handle overflow by scaling down (and then back up) with the split.
      c  = v.data.first - cpp_df_qf_detail::ccmath::ldexp(v.data.first, -cpp_df_qf_detail::split_maker<float_type>::n_shl);
      hv = cpp_df_qf_detail::ccmath::ldexp(v.data.first - c, cpp_df_qf_detail::split_maker<float_type>::n_shl);
   }
   else
   {
      hv = c - float_type { c - v.data.first };
   }

   {
      // Accumulate the rounding error of C plus the two cross products.
      const float_type tv { v.data.first - hv };

      const float_type
         t1
         {
            cpp_df_qf_detail::ccmath::unsafe::fma
            (
               hu,
               tv,
               cpp_df_qf_detail::ccmath::unsafe::fma(hu, hv, -C)
            )
         };

      const float_type tu { data.first - hu };

      c =   cpp_df_qf_detail::ccmath::unsafe::fma(tu, tv, cpp_df_qf_detail::ccmath::unsafe::fma(tu, hv, t1))
          + (data.first * v.data.second)
          + (data.second * v.data.first);
   }

   // Perform even more simplifications compared to Victor Shoup.
   data.first  = C + c;
   data.second = float_type { C - data.first } + c;
}
// The three eval_exp overloads are selected by the doubled decimal precision
// of the constituent float type (< 16, 16..36, > 36 digits10). Each needs
// access to private internals, hence the friendship.
template <typename OtherFloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<OtherFloatingPointType>::value && ((cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatingPointType>::digits10 * 2) < 16))>::type const*>
friend constexpr auto eval_exp(cpp_double_fp_backend<OtherFloatingPointType>& result, const cpp_double_fp_backend<OtherFloatingPointType>& x) -> void;

template <typename OtherFloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<OtherFloatingPointType>::value && (((cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatingPointType>::digits10 * 2) >= 16) && ((cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatingPointType>::digits10 * 2) <= 36)))>::type const*>
friend constexpr auto eval_exp(cpp_double_fp_backend<OtherFloatingPointType>& result, const cpp_double_fp_backend<OtherFloatingPointType>& x) -> void;

template <typename OtherFloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<OtherFloatingPointType>::value && ((cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatingPointType>::digits10 * 2) > 36))>::type const*>
friend constexpr auto eval_exp(cpp_double_fp_backend<OtherFloatingPointType>& result, const cpp_double_fp_backend<OtherFloatingPointType>& x) -> void;
- };
// Read a decimal (or special-value) string into *this.
// The string is first parsed into a wide intermediate cpp_bin_float;
// NaN, zero/underflow and infinity/overflow are resolved up front, then
// the finite value is extracted chunk-by-chunk into the two limbs.
// Always returns true.
template <typename FloatingPointType>
constexpr auto cpp_double_fp_backend<FloatingPointType>::rd_string(const char* p_str) -> bool
{
   // Use an intermediate cpp_bin_float backend type for reading string input.
   cpp_bin_float_read_write_backend_type f_bin { };

   f_bin = p_str;

   const int fpc { eval_fpclassify(f_bin) };

   const bool is_definitely_nan { (fpc == FP_NAN) };

   using local_double_fp_type = cpp_double_fp_backend<FloatingPointType>;

   if (is_definitely_nan)
   {
      static_cast<void>(operator=(local_double_fp_type::my_value_nan()));

      return true;
   }

   // Work with the absolute value; the sign is re-applied at the end.
   const bool b_neg { (eval_signbit(f_bin) == 1) };

   if (b_neg) { f_bin.negate(); }

   // Obtain the base-2 exponent of the parsed value.
   const int
      expval_from_f_bin
      {
         [&f_bin]()
         {
            int expval { };

            cpp_bin_float_read_write_backend_type dummy { };

            eval_frexp(dummy, f_bin, &expval);

            return expval;
         }()
      };

   // Values below the composite minimum exponent flush to zero.
   const auto is_zero_or_subnormal =
   (
         (fpc == FP_ZERO)
      || (expval_from_f_bin < static_cast<typename cpp_bin_float_read_write_backend_type::exponent_type>(local_double_fp_type::my_min_exponent))
   );

   if (is_zero_or_subnormal)
   {
      data.first  = float_type { 0.0F };
      data.second = float_type { 0.0F };

      return true;
   }

   // Detect overflow to infinity, including values that only exceed
   // my_value_max() once the second limb is taken into account.
   float_type flt_inf_check_first { };

   eval_convert_to(&flt_inf_check_first, f_bin);

   bool is_definitely_inf { ((fpc == FP_INFINITE) || cpp_df_qf_detail::ccmath::isinf(flt_inf_check_first)) };

   if ((!is_definitely_inf) && (flt_inf_check_first > my_value_max().my_first()))
   {
      cpp_bin_float_read_write_backend_type f_bin_inf_check(f_bin);

      eval_subtract(f_bin_inf_check, cpp_bin_float_read_write_backend_type(flt_inf_check_first));

      float_type flt_inf_check_second { };

      eval_convert_to(&flt_inf_check_second, f_bin_inf_check);

      is_definitely_inf = eval_gt(local_double_fp_type(flt_inf_check_first, flt_inf_check_second), my_value_max());
   };

   if (is_definitely_inf)
   {
      static_cast<void>(operator=(local_double_fp_type::my_value_inf()));

      if (b_neg)
      {
         negate();
      }

      return true;
   }

   // The input string is normal. We will now extract its value.
   data.first  = float_type { 0.0F };
   data.second = float_type { 0.0F };

   // Handle small input values. Scale (and re-scale them below) if possible.
   constexpr int pow2_scaling_for_small_input { cpp_df_qf_detail::ccmath::numeric_limits<float_type>::digits };

   const bool
      has_pow2_scaling_for_small_input
      {
         (expval_from_f_bin < static_cast<int>(local_double_fp_type::my_min_exponent + pow2_scaling_for_small_input))
      };

   if (has_pow2_scaling_for_small_input)
   {
      eval_ldexp(f_bin, f_bin, pow2_scaling_for_small_input);
   }

   // Extract successive chunks of the built-in float's digit count until
   // the composite precision has been covered.
   using local_builtin_float_type = typename std::conditional<(sizeof(float_type) <= sizeof(double)), double, float_type>::type;

   constexpr unsigned
      digit_limit
      {
         static_cast<unsigned>
         (
              static_cast<int>
              (
                   (local_double_fp_type::my_digits / cpp_df_qf_detail::ccmath::numeric_limits<local_builtin_float_type>::digits)
                 + (((local_double_fp_type::my_digits % cpp_df_qf_detail::ccmath::numeric_limits<local_builtin_float_type>::digits) != 0) ? 1 : 0)
              )
            * cpp_df_qf_detail::ccmath::numeric_limits<local_builtin_float_type>::digits
         )
      };

   for(auto i = static_cast<unsigned>(UINT8_C(0));
            i < digit_limit;
            i = static_cast<unsigned>(i + static_cast<unsigned>(cpp_df_qf_detail::ccmath::numeric_limits<local_builtin_float_type>::digits)))
   {
      // Peel off the leading built-in-float part and add it to *this.
      local_builtin_float_type flt_part { };

      eval_convert_to(&flt_part, f_bin);

      eval_subtract(f_bin, cpp_bin_float_read_write_backend_type(flt_part));

      eval_add(*this, local_double_fp_type { flt_part });
   }

   if (has_pow2_scaling_for_small_input)
   {
      eval_ldexp(*this, *this, -pow2_scaling_for_small_input);
   }

   if (b_neg) { negate(); }

   return true;
}
- } } } // namespace boost::multiprecision::backends
- #include <boost/multiprecision/cpp_df_qf/cpp_df_qf_detail_constants.hpp>
- namespace boost { namespace multiprecision { namespace backends {
// Out-of-line definitions of the static constexpr data members
// (required when these are ODR-used prior to C++17 inline variables).
template <typename FloatingPointType>
constexpr int cpp_double_fp_backend<FloatingPointType>::my_digits;
template <typename FloatingPointType>
constexpr int cpp_double_fp_backend<FloatingPointType>::my_digits10;
template <typename FloatingPointType>
constexpr int cpp_double_fp_backend<FloatingPointType>::my_max_digits10;
template <typename FloatingPointType>
constexpr int cpp_double_fp_backend<FloatingPointType>::my_max_exponent;
template <typename FloatingPointType>
constexpr int cpp_double_fp_backend<FloatingPointType>::my_min_exponent;
template <typename FloatingPointType>
constexpr int cpp_double_fp_backend<FloatingPointType>::my_max_exponent10;
template <typename FloatingPointType>
constexpr int cpp_double_fp_backend<FloatingPointType>::my_min_exponent10;
- template <typename FloatingPointType>
- constexpr cpp_double_fp_backend<FloatingPointType> operator+(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) { return cpp_double_fp_backend<FloatingPointType>(a) += b; }
- template <typename FloatingPointType>
- constexpr cpp_double_fp_backend<FloatingPointType> operator-(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) { return cpp_double_fp_backend<FloatingPointType>(a) -= b; }
- template <typename FloatingPointType>
- constexpr cpp_double_fp_backend<FloatingPointType> operator*(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) { return cpp_double_fp_backend<FloatingPointType>(a) *= b; }
- template <typename FloatingPointType>
- constexpr cpp_double_fp_backend<FloatingPointType> operator/(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) { return cpp_double_fp_backend<FloatingPointType>(a) /= b; }
- template <typename FloatingPointType>
- constexpr auto eval_add(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void { result += x; }
- template <typename FloatingPointType>
- constexpr auto eval_add(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void { result = cpp_double_fp_backend<FloatingPointType>(a) += b; }
- template <typename FloatingPointType>
- constexpr auto eval_subtract(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void { result -= x; }
- template <typename FloatingPointType>
- constexpr auto eval_subtract(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void { result = cpp_double_fp_backend<FloatingPointType>(a) -= b; }
- template <typename FloatingPointType>
- constexpr auto eval_multiply(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void { result *= x; }
- template <typename FloatingPointType>
- constexpr auto eval_multiply(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void { result = cpp_double_fp_backend<FloatingPointType>(a) *= b; }
- template <typename FloatingPointType>
- constexpr auto eval_divide(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void { result /= x; }
- template <typename FloatingPointType>
- constexpr auto eval_divide(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> void { result = cpp_double_fp_backend<FloatingPointType>(a) /= b; }
- template <typename FloatingPointType>
- constexpr auto eval_eq(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> bool { return (a.compare(b) == 0); }
- template <typename FloatingPointType>
- constexpr auto eval_lt(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> bool { return (a.compare(b) == -1); }
- template <typename FloatingPointType>
- constexpr auto eval_gt(const cpp_double_fp_backend<FloatingPointType>& a, const cpp_double_fp_backend<FloatingPointType>& b) -> bool { return (a.compare(b) == 1); }
- template <typename FloatingPointType>
- constexpr auto eval_is_zero(const cpp_double_fp_backend<FloatingPointType>& x) -> bool
- {
- return x.iszero_unchecked();
- }
- template <typename FloatingPointType>
- constexpr auto eval_signbit(const cpp_double_fp_backend<FloatingPointType>& x) -> int
- {
- return (x.isneg_unchecked() ? 1 : 0);
- }
- template <typename FloatingPointType>
- constexpr auto eval_fabs(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a) -> void
- {
- result = a;
- if (a.isneg_unchecked())
- {
- result.negate();
- }
- }
// Decompose a into a fraction and a power-of-two exponent (std::frexp-style).
// The exponent comes from the high limb; the low limb is scaled by the same
// power and the pair is renormalized.
template <typename FloatingPointType>
constexpr auto eval_frexp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, int* v) -> void
{
   using local_backend_type = cpp_double_fp_backend<FloatingPointType>;
   using local_float_type = typename local_backend_type::float_type;

   int expptr { };

   const local_float_type fhi { cpp_df_qf_detail::ccmath::frexp(a.rep().first, &expptr) };
   const local_float_type flo { cpp_df_qf_detail::ccmath::ldexp(a.rep().second, -expptr) };

   // A null exponent pointer is tolerated: the exponent is simply discarded.
   if (v != nullptr)
   {
      *v = expptr;
   }

   result.rep() = local_backend_type::arithmetic::normalize(fhi, flo);
}
- template <typename FloatingPointType>
- constexpr auto eval_ldexp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& a, int v) -> void
- {
- using local_backend_type = cpp_double_fp_backend<FloatingPointType>;
- using local_float_type = typename local_backend_type::float_type;
- const local_float_type fhi { cpp_df_qf_detail::ccmath::ldexp(a.crep().first, v) };
- const local_float_type flo { cpp_df_qf_detail::ccmath::ldexp(a.crep().second, v) };
- result.rep() = local_backend_type::arithmetic::normalize(fhi, flo);
- }
- template <typename FloatingPointType>
- constexpr auto eval_floor(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void
- {
- using local_backend_type = cpp_double_fp_backend<FloatingPointType>;
- using local_float_type = typename local_backend_type::float_type;
- const local_float_type fhi { cpp_df_qf_detail::ccmath::floor(x.my_first()) };
- if (fhi != x.my_first())
- {
- result.rep().first = fhi;
- result.rep().second = local_float_type { 0 };
- }
- else
- {
- const local_float_type flo = { cpp_df_qf_detail::ccmath::floor(x.my_second()) };
- result.rep() = local_backend_type::arithmetic::normalize(fhi, flo);
- }
- }
// Ceiling via the identity ceil(x) == -floor(-x).
template <typename FloatingPointType>
constexpr auto eval_ceil(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void
{
   // Compute -floor(-x);
   eval_floor(result, -x);
   result.negate();
}
- template <typename FloatingPointType>
- constexpr auto eval_fpclassify(const cpp_double_fp_backend<FloatingPointType>& o) -> int
- {
- if (cpp_df_qf_detail::ccmath::isnan(o.crep().first)) { return FP_NAN; }
- else if (cpp_df_qf_detail::ccmath::isinf(o.crep().first)) { return FP_INFINITE; }
- else if (eval_is_zero(o)) { return FP_ZERO; }
- else
- {
- using local_backend_type = cpp_double_fp_backend<FloatingPointType>;
- using local_float_type = typename local_backend_type::float_type;
- const local_float_type fabs_x { cpp_df_qf_detail::ccmath::fabs(o.crep().first) };
- if ((fabs_x > 0) && (fabs_x < (cpp_df_qf_detail::ccmath::numeric_limits<local_float_type>::min)()))
- {
- return FP_SUBNORMAL;
- }
- else
- {
- return FP_NORMAL;
- }
- }
- }
// Square root. Special cases (zero, negative, NaN, infinity) are resolved
// first; for normal positive input, the built-in sqrt of the high limb is
// refined via Dekker-split squaring plus one Newton-style correction.
template <typename FloatingPointType>
constexpr auto eval_sqrt(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& o) -> void
{
   using double_float_type = cpp_double_fp_backend<FloatingPointType>;
   using local_float_type = typename double_float_type::float_type;

   const int fpc { eval_fpclassify(o) };

   const bool isneg_o { o.isneg_unchecked() };

   if ((fpc != FP_NORMAL) || isneg_o)
   {
      if (fpc == FP_ZERO)
      {
         result = double_float_type(0);
         return;
      }
      else if ((fpc == FP_NAN) || isneg_o)
      {
         // sqrt of a negative value (or NaN) is NaN.
         result = double_float_type::my_value_nan();
         return;
      }
      else if (fpc == FP_INFINITE)
      {
         result = double_float_type::my_value_inf();
         return;
      }
   }

   // TBD: Do we need any overflow/underflow guards when multiplying
   // by the split or when multiplying (hx * tx) and/or (hx * hx)?

   // Initial approximation from the built-in sqrt of the high limb.
   const local_float_type c { cpp_df_qf_detail::ccmath::sqrt(o.crep().first) };

   // Dekker split of c into high part hx and tail tx.
   local_float_type p { cpp_df_qf_detail::split_maker<local_float_type>::value * c };
   const local_float_type hx { local_float_type { c - p } + p };
   const local_float_type tx { c - hx };

   // (u + uu) accurately represents c^2.
   local_float_type q = hx * tx;

   q = q + q;

   p = hx * hx;

   const local_float_type u  { p + q };
   const local_float_type uu { cpp_df_qf_detail::ccmath::unsafe::fma(tx, tx, local_float_type { p - u } + q) };

   // One correction step: cc = (o - c^2) / (2 c).
   const local_float_type
      cc
      {
         local_float_type { local_float_type { o.crep().first - u } - uu + o.crep().second } / local_float_type { c + c }
      };

   result.rep().first  = c + cc;
   result.rep().second = local_float_type { c - result.my_first() } + cc;
}
// Compute result = x^a. Exactly-integer finite exponents are dispatched to
// the integral eval_pow overload; the remaining cases follow the C pow()
// special-value rules, and the general case uses exp(a * log(x)).
template <typename FloatingPointType>
constexpr auto eval_pow(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x, const cpp_double_fp_backend<FloatingPointType>& a) -> void
{
   using double_float_type = cpp_double_fp_backend<FloatingPointType>;

   constexpr double_float_type zero { 0 };

   result = zero;

   // Check whether the exponent is an exactly-representable integer.
   signed long long val_nll { };

   eval_convert_to(&val_nll, a);

   const int na { static_cast<int>(val_nll) };

   const int fpc_a { eval_fpclassify(a) };

   if((fpc_a == FP_NORMAL) && (a.compare(double_float_type(na)) == 0))
   {
      eval_pow(result, x, na);
   }
   else
   {
      constexpr double_float_type one { 1 };

      const int fpc_x { eval_fpclassify(x) };

      if (fpc_a == FP_ZERO)
      {
         // pow(base, +/-0) returns 1 for any base, even when base is NaN.
         result = one;
      }
      else if (fpc_x == FP_ZERO)
      {
         if ((fpc_a == FP_NORMAL) || (fpc_a == FP_INFINITE))
         {
            // pow(+/-0, exp), where exp is negative and finite, returns +infinity.
            // pow(+/-0, exp), where exp is positive non-integer, returns +0.
            // pow(+/-0, -infinity) returns +infinity.
            // pow(+/-0, +infinity) returns +0.

            result = (eval_signbit(a) ? double_float_type::my_value_inf() : zero);
         }
         else if (fpc_a == FP_NAN)
         {
            result = double_float_type::my_value_nan();
         }
      }
      else if (fpc_x == FP_INFINITE)
      {
         if ((fpc_a == FP_NORMAL) || (fpc_a == FP_INFINITE))
         {
            // pow(+infinity, exp) returns +0 for any negative exp.
            // pow(-infinity, exp) returns +infinity for any positive exp.

            result = (eval_signbit(a) ? zero : double_float_type::my_value_inf());
         }
         else if (fpc_a == FP_NAN)
         {
            result = double_float_type::my_value_nan();
         }
      }
      else if (fpc_x != FP_NORMAL)
      {
         // Any remaining non-normal base propagates unchanged.
         result = x;
      }
      else
      {
         if (fpc_a == FP_INFINITE)
         {
            constexpr double_float_type one_minus { -1 };

            if (x.compare(one_minus) == 0)
            {
               // pow(-1, +/-infinity) returns 1.
               result = one;
            }
            else
            {
               // |x| < 1 and |x| > 1 select 0 or +infinity depending on the
               // sign of the infinite exponent; |x| == 1 returns 1.
               double_float_type xabs { };

               eval_fabs(xabs, x);

               const int compare_one_result { xabs.compare(one) };

               result =
               (
                  (compare_one_result < 0) ? (eval_signbit(a) ? double_float_type::my_value_inf() : zero)
                     : (compare_one_result > 0) ? (eval_signbit(a) ? zero : double_float_type::my_value_inf())
                        : one
               );
            }
         }
         else if (fpc_a == FP_NAN)
         {
            // pow(1, NaN) returns 1; any other base with a NaN exponent is NaN.
            result = (x.compare(one) == 0) ? one : double_float_type::my_value_nan();
         }
         else
         {
            // General case: x^a = exp(a * log(x)).
            double_float_type log_x { };

            eval_log(log_x, x);

            double_float_type a_log_x { };

            eval_multiply(a_log_x, a, log_x);

            eval_exp(result, a_log_x);
         }
      }
   }
}
// Computes result = x^p for an integral exponent p, handling the
// zero/infinity/NaN base classes explicitly before delegating to pown().
template <typename FloatingPointType,
          typename IntegralType>
constexpr auto eval_pow(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x, IntegralType p) -> typename ::std::enable_if<::boost::multiprecision::detail::is_integral<IntegralType>::value, void>::type
{
   const int fpc { eval_fpclassify(x) };

   using double_float_type = cpp_double_fp_backend<FloatingPointType>;
   using local_integral_type = IntegralType;

   // Parity of the exponent decides the sign for the -infinity base case below.
   const bool p_is_odd { (static_cast<local_integral_type>(p & 1) != static_cast<local_integral_type>(0)) };

   if (p == static_cast<local_integral_type>(0))
   {
      // pow(base, +/-0) returns 1 for any base, even when base is NaN.
      result = double_float_type { unsigned { UINT8_C(1) } };
   }
   else if (fpc != FP_NORMAL)
   {
      if (fpc == FP_ZERO)
      {
         // pow( +0, exp), where exp is a negative odd integer, returns +infinity.
         // pow( -0, exp), where exp is a negative odd integer, returns +infinity.
         // pow(+/-0, exp), where exp is a negative even integer, returns +infinity.
         // pow( +0, exp), where exp is a positive odd integer, returns +0.
         // pow( -0, exp), where exp is a positive odd integer, returns -0.
         // pow(+/-0, exp), where exp is a positive even integer, returns +0.
         // NOTE(review): the sign of a zero result (-0 cases above) is not
         // reproduced here — an unsigned +0 is always returned; confirm intent.
         result = ((p < static_cast<local_integral_type>(0)) ? double_float_type::my_value_inf() : double_float_type { unsigned { UINT8_C(0) } });
      }
      else if (fpc == FP_INFINITE)
      {
         if (eval_signbit(x))
         {
            if (p < static_cast<local_integral_type>(0))
            {
               // pow(-infinity, exp) returns -0 if exp is a negative odd integer.
               // pow(-infinity, exp) returns +0 if exp is a negative even integer.
               result = double_float_type { unsigned { UINT8_C(0) } };
            }
            else
            {
               // pow(-infinity, exp) returns -infinity if exp is a positive odd integer.
               // pow(-infinity, exp) returns +infinity if exp is a positive even integer.
               result = (p_is_odd ? -double_float_type::my_value_inf() : double_float_type::my_value_inf());
            }
         }
         else
         {
            // pow(+infinity, exp) returns +0 for any negative exp.
            // pow(+infinity, exp) returns +infinity for any positive exp.
            result = ((p < static_cast<local_integral_type>(0)) ? double_float_type { unsigned { UINT8_C(0) } } : double_float_type::my_value_inf());
         }
      }
      else
      {
         // Base is NaN: propagate NaN (exponent is non-zero here).
         result = double_float_type::my_value_nan();
      }
   }
   else
   {
      // Finite, normal base: use the backend's binary powering routine.
      double_float_type::pown(result, x, p);
   }
}
// exp() for the lowest precision tier (digits10 * 2 < 16, e.g. double-float).
// Uses MPFUN-style argument reduction followed by a (6,6) Pade approximant.
// The exact order of operations below is precision-critical for
// double-word arithmetic and must not be reordered.
template <typename FloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<FloatingPointType>::value && ((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) < 16))>::type const*>
constexpr auto eval_exp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void
{
   using double_float_type = cpp_double_fp_backend<FloatingPointType>;

   constexpr double_float_type one { 1 };

   const int fpc { eval_fpclassify(x) };

   const bool b_neg { x.isneg_unchecked() };

   if (fpc == FP_ZERO)
   {
      // exp(+/-0) == 1.
      result = one;
   }
   else if (fpc != FP_NORMAL)
   {
      if (fpc == FP_INFINITE)
      {
         // exp(-inf) == 0, exp(+inf) == +inf.
         result = (b_neg ? double_float_type { 0.0F } : double_float_type::my_value_inf());
      }
      else if (fpc == FP_NAN)
      {
         // Propagate NaN.
         result = x;
      }
   }
   else
   {
      using local_float_type = typename double_float_type::float_type;

      // Get a local copy of the argument and force it to be positive.
      const double_float_type xx { (!b_neg) ? x : -x };

      // Check the range of the input.
      if (eval_lt(x, double_float_type::my_value_logmin()))
      {
         // Below logmin: exp underflows to zero.
         result = double_float_type(0U);
      }
      else if (eval_gt(xx, double_float_type::my_value_logmax()))
      {
         // Above logmax: exp overflows to +infinity.
         result = double_float_type::my_value_inf();
      }
      else if (xx.is_one())
      {
         // |x| == 1: return e or 1/e directly from the stored constant.
         if(!b_neg)
         {
            result = cpp_df_qf_detail::constant_df_exp1<local_float_type>();
         }
         else
         {
            eval_divide(result, one, cpp_df_qf_detail::constant_df_exp1<local_float_type>());
         }
      }
      else
      {
         // Use an argument reduction algorithm for exp() in classic MPFUN-style.
         double_float_type nf { };

         // Prepare the scaled variables: r = (xx - nf*ln2) / 4, so that
         // exp(xx) = 2^nf * exp(r)^4 after the two squarings below.
         const bool b_scale { (xx.order02() > -1) };

         double_float_type r { };

         if (b_scale)
         {
            eval_floor(nf, xx / cpp_df_qf_detail::constant_df_ln_two<local_float_type>());
            eval_ldexp(r, xx - (nf * cpp_df_qf_detail::constant_df_ln_two<local_float_type>()), -2);
         }
         else
         {
            r = xx;
         }

         // PadeApproximant[Exp[x] - 1, {x, 0, {6, 6}}]
         // FullSimplify[%]
         //   (84 x (7920 + 240 x^2 + x^4))
         // / (665280 + x (-332640 + x (75600 + x (-10080 + x (840 + (-42 + x) x)))))
         constexpr double_float_type n84(84);
         constexpr double_float_type n240(240);
         constexpr double_float_type n7920(7920);

         constexpr double_float_type n665280(665280);
         constexpr double_float_type n332640(332640);
         constexpr double_float_type n75600(75600);
         constexpr double_float_type n10080(10080);
         constexpr double_float_type n840(840);
         constexpr double_float_type n42(42);

         const double_float_type r2 { r * r };

         // Use the small-argument Pade approximation having coefficients shown above.
         result = (n84 * r * (n7920 + (n240 + r2) * r2));

         const double_float_type bot = (n665280 + r * (-n332640 + r * (n75600 + r * (-n10080 + r * (n840 + (-n42 + r) * r)))));

         eval_divide(result, bot);

         // The approximant computed exp(r) - 1; add back the 1.
         result.add_unchecked_limb(local_float_type { 1.0F });

         // Rescale the result.
         if (b_scale)
         {
            // Undo the /4 reduction: square twice, then multiply by 2^nf.
            result *= result;
            result *= result;

            signed long long lln { };
            eval_convert_to(&lln, nf);

            const int n { static_cast<int>(lln) };

            if (n > 0)
            {
               eval_ldexp(result, result, n);
            }
         }

         if (b_neg)
         {
            // exp(-|x|) = 1 / exp(|x|).
            eval_divide(result, one, result);
         }
      }
   }
}
// exp() for the mid precision tier (16 <= digits10 * 2 <= 36, e.g. double-double).
// Same structure as the low-precision variant, but reduces by 2^-4 and uses
// an (8,8) Pade approximant. Operation order is precision-critical.
template <typename FloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<FloatingPointType>::value && (((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) >= 16) && ((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) <= 36)))>::type const*>
constexpr auto eval_exp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void
{
   using double_float_type = cpp_double_fp_backend<FloatingPointType>;

   constexpr double_float_type one { 1 };

   const int fpc { eval_fpclassify(x) };

   const bool b_neg { x.isneg_unchecked() };

   if (fpc == FP_ZERO)
   {
      // exp(+/-0) == 1.
      result = one;
   }
   else if (fpc != FP_NORMAL)
   {
      if (fpc == FP_INFINITE)
      {
         // exp(-inf) == 0, exp(+inf) == +inf.
         result = (b_neg ? double_float_type(0) : double_float_type::my_value_inf());
      }
      else if (fpc == FP_NAN)
      {
         // Propagate NaN.
         result = x;
      }
   }
   else
   {
      using local_float_type = typename double_float_type::float_type;

      // Get a local copy of the argument and force it to be positive.
      const double_float_type xx { (!b_neg) ? x : -x };

      // Check the range of the input.
      if (eval_lt(x, double_float_type::my_value_logmin()))
      {
         // Below logmin: exp underflows to zero.
         result = double_float_type(0U);
      }
      else if (eval_gt(xx, double_float_type::my_value_logmax()))
      {
         // Above logmax: exp overflows to +infinity.
         result = double_float_type::my_value_inf();
      }
      else if (xx.is_one())
      {
         // |x| == 1: return e or 1/e directly from the stored constant.
         if(!b_neg)
         {
            result = cpp_df_qf_detail::constant_df_exp1<local_float_type>();
         }
         else
         {
            eval_divide(result, one, cpp_df_qf_detail::constant_df_exp1<local_float_type>());
         }
      }
      else
      {
         // Use an argument reduction algorithm for exp() in classic MPFUN-style.
         double_float_type nf { };

         // Prepare the scaled variables: r = (xx - nf*ln2) / 16, so that
         // exp(xx) = 2^nf * exp(r)^16 after the four squarings below.
         const bool b_scale { (xx.order02() > -4) };

         double_float_type r { };

         if (b_scale)
         {
            eval_floor(nf, xx / cpp_df_qf_detail::constant_df_ln_two<local_float_type>());
            eval_ldexp(r, xx - (nf * cpp_df_qf_detail::constant_df_ln_two<local_float_type>()), -4);
         }
         else
         {
            r = xx;
         }

         // PadeApproximant[Exp[r], {r, 0, 8, 8}]
         // FullSimplify[%]
         constexpr double_float_type n144(144U);
         constexpr double_float_type n3603600(3603600UL);
         constexpr double_float_type n120120(120120UL);
         constexpr double_float_type n770(770U);

         constexpr double_float_type n518918400(518918400UL);
         constexpr double_float_type n259459200(259459200UL);
         constexpr double_float_type n60540480(60540480UL);
         constexpr double_float_type n8648640(8648640UL);
         constexpr double_float_type n831600(831600UL);
         constexpr double_float_type n55440(55440U);
         constexpr double_float_type n2520(2520U);
         constexpr double_float_type n72(72U);

         const double_float_type r2 { r * r };

         result = (n144 * r) * (n3603600 + r2 * (n120120 + r2 * (n770 + r2)));

         const double_float_type bot = (n518918400 + r * (-n259459200 + r * (n60540480 + r * (-n8648640 + r * (n831600 + r * (-n55440 + r * (n2520 + r * (-n72 + r))))))));

         eval_divide(result, bot);

         // The rational part above computed exp(r) - 1; add back the 1.
         result.add_unchecked_limb(local_float_type { 1.0F });

         // Rescale the result.
         if (b_scale)
         {
            // Undo the /16 reduction: square four times, then multiply by 2^nf.
            result *= result;
            result *= result;
            result *= result;
            result *= result;

            signed long long lln { };
            eval_convert_to(&lln, nf);

            const int n { static_cast<int>(lln) };

            if (n > 0)
            {
               eval_ldexp(result, result, n);
            }
         }

         if (b_neg)
         {
            // exp(-|x|) = 1 / exp(|x|).
            eval_divide(result, one, result);
         }
      }
   }
}
// exp() for the high precision tier (digits10 * 2 > 36, e.g. double-float128).
// Uses the same 2^-4 argument reduction, but the reduced exponential is
// computed with a Taylor series (hypergeometric_0f0), which at this digit
// count is more accurate than a Pade approximant.
template <typename FloatingPointType,
          typename ::std::enable_if<(cpp_df_qf_detail::is_floating_point<FloatingPointType>::value && ((cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits10 * 2) > 36))>::type const*>
constexpr auto eval_exp(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void
{
   using double_float_type = cpp_double_fp_backend<FloatingPointType>;

   constexpr double_float_type one { 1 };

   const int fpc { eval_fpclassify(x) };

   const bool b_neg { x.isneg_unchecked() };

   if (fpc == FP_ZERO)
   {
      // exp(+/-0) == 1.
      result = one;
   }
   else if (fpc != FP_NORMAL)
   {
      if (fpc == FP_INFINITE)
      {
         // exp(-inf) == 0, exp(+inf) == +inf.
         result = (b_neg ? double_float_type(0) : double_float_type::my_value_inf());
      }
      else if (fpc == FP_NAN)
      {
         // Propagate NaN.
         result = x;
      }
   }
   else
   {
      using local_float_type = typename double_float_type::float_type;

      // Get a local copy of the argument and force it to be positive.
      const double_float_type xx { (!b_neg) ? x : -x };

      // Check the range of the input.
      if (eval_lt(x, double_float_type::my_value_logmin()))
      {
         // Below logmin: exp underflows to zero.
         result = double_float_type(0U);
      }
      else if (eval_gt(xx, double_float_type::my_value_logmax()))
      {
         // Above logmax: exp overflows to +infinity.
         result = double_float_type::my_value_inf();
      }
      else if (xx.is_one())
      {
         // |x| == 1: return e or 1/e directly from the stored constant.
         if(!b_neg)
         {
            result = cpp_df_qf_detail::constant_df_exp1<local_float_type>();
         }
         else
         {
            eval_divide(result, one, cpp_df_qf_detail::constant_df_exp1<local_float_type>());
         }
      }
      else
      {
         // Use an argument reduction algorithm for exp() in classic MPFUN-style.
         double_float_type nf { };

         // Prepare the scaled variables: xh = (xx - nf*ln2) / 16, so that
         // exp(xx) = 2^nf * exp(xh)^16 after the four squarings below.
         const bool b_scale { (xx.order02() > -4) };

         double_float_type xh { };

         if (b_scale)
         {
            eval_floor(nf, xx / cpp_df_qf_detail::constant_df_ln_two<local_float_type>());
            eval_ldexp(xh, xx - (nf * cpp_df_qf_detail::constant_df_ln_two<local_float_type>()), -4);
         }
         else
         {
            xh = xx;
         }

         // Running term xh^n / n!, starting at n = 1; seed result with 1 + xh.
         double_float_type x_pow_n_div_n_fact(xh);

         result = x_pow_n_div_n_fact;
         result.add_unchecked_limb(local_float_type { 1.0F });

         double_float_type dummy { };

         // Use the Taylor series expansion of hypergeometric_0f0(; ; x).
         // For this high(er) digit count, a scaled argument with subsequent
         // Taylor series expansion is actually more precise than Pade approximation.
         for (unsigned n { UINT8_C(2) }; n < unsigned { UINT8_C(64) }; ++n)
         {
            eval_multiply(x_pow_n_div_n_fact, xh);
            eval_divide(x_pow_n_div_n_fact, double_float_type(n));

            // Stop once the term's binary exponent drops below the working
            // precision (checked only after a few guaranteed iterations).
            int n_tol { };

            eval_frexp(dummy, x_pow_n_div_n_fact, &n_tol);

            if ((n > 4U) && (n_tol < -(double_float_type::my_digits - 1)))
            {
               break;
            }

            eval_add(result, x_pow_n_div_n_fact);
         }

         // Rescale the result.
         if (b_scale)
         {
            // Undo the /16 reduction: square four times, then multiply by 2^nf.
            result *= result;
            result *= result;
            result *= result;
            result *= result;

            signed long long lln { };
            eval_convert_to(&lln, nf);

            const int n { static_cast<int>(lln) };

            if (n > 0)
            {
               eval_ldexp(result, result, n);
            }
         }

         if (b_neg)
         {
            // exp(-|x|) = 1 / exp(|x|).
            eval_divide(result, one, result);
         }
      }
   }
}
// Computes result = log(x). Arguments below 1 are handled via inversion;
// arguments above 1 use frexp-based range reduction, a limb-precision
// first guess, and one Newton-Raphson step at full double-float precision.
template <typename FloatingPointType>
constexpr auto eval_log(cpp_double_fp_backend<FloatingPointType>& result, const cpp_double_fp_backend<FloatingPointType>& x) -> void
{
   using double_float_type = cpp_double_fp_backend<FloatingPointType>;

   constexpr double_float_type one { 1 };

   const int result_compare_with_one { x.compare(one) };

   const int fpc { eval_fpclassify(x) };

   if (fpc == FP_ZERO)
   {
      // log(+/-0) == -infinity.
      result = -double_float_type::my_value_inf();
   }
   else if (x.isneg_unchecked() || (fpc == FP_NAN))
   {
      // log of a negative argument or NaN is NaN.
      result = double_float_type::my_value_nan();
   }
   else if (fpc == FP_INFINITE)
   {
      // log(+infinity) == +infinity.
      result = double_float_type::my_value_inf();
   }
   else if (result_compare_with_one == -1)
   {
      // Use argument inversion and negation of the result.
      double_float_type x_inv { };
      eval_divide(x_inv, one, x);
      eval_log(result, x_inv);
      result.negate();
   }
   else if (result_compare_with_one == 1)
   {
      // Optimize to only use eval_frexp if (and only if) the exponent is
      // actually large/small in the sense of above/below a defined cutoff.
      double_float_type x2 { };

      int n2 { };

      // Reduce: x = x2 * 2^n2 with x2 in [0.5, 1).
      eval_frexp(x2, x, &n2);

      // Get initial estimate using the self-written, detail math function log.
      using local_float_type = typename double_float_type::float_type;

      const local_float_type s { cpp_df_qf_detail::ccmath::log(x2.my_first()) };

      double_float_type E { };
      eval_exp(E, double_float_type(s));

      // Perform one single step of Newton-Raphson iteration.
      // result = s + (x2 - E) / E;
      eval_subtract(result, x2, E);
      eval_divide(result, E);
      result.add_unchecked_limb(s);

      // Add back the n2 * ln(2) contribution removed by frexp.
      double_float_type xn2 { n2 };

      eval_multiply(xn2, cpp_df_qf_detail::constant_df_ln_two<local_float_type>());

      eval_add(result, xn2);
   }
   else
   {
      // x == 1 exactly: log(1) == 0.
      result = double_float_type { 0 };
   }
}
- namespace detail {
- template<typename DestType, typename FloatingPointType>
- constexpr auto extract(cpp_double_fp_backend<FloatingPointType>& source) -> DestType
- {
- using double_float_type = cpp_double_fp_backend<FloatingPointType>;
- using local_float_type = typename double_float_type::float_type;
- using destination_type = DestType;
- destination_type result { };
- constexpr double_float_type zero { 0 };
- result = static_cast<destination_type>(0);
- unsigned fail_safe { UINT32_C(32) };
- constexpr bool
- destination_type_is_longer
- {
- (std::numeric_limits<signed long long>::digits > cpp_df_qf_detail::ccmath::numeric_limits<local_float_type>::digits)
- };
- using float_extract_type = typename std::conditional<destination_type_is_longer, local_float_type, float>::type;
- while((source.compare(zero) != 0) && (fail_safe > unsigned { UINT8_C(0) }))
- {
- const float_extract_type next_flt_val { static_cast<float_extract_type>(source.my_first()) };
- result += static_cast<destination_type>(next_flt_val);
- eval_subtract(source, double_float_type(next_flt_val));
- --fail_safe;
- }
- return result;
- }
- } // namespace detail
// Converts the backend value to signed long long, saturating at the
// limits of the destination type. Non-normal values are cast directly
// from the leading limb.
template <typename FloatingPointType>
constexpr auto eval_convert_to(signed long long* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> void
{
   const auto fpc = eval_fpclassify(backend);

   if (fpc != FP_NORMAL)
   {
      // Zero/inf/NaN: fall back to a direct cast of the leading limb.
      *result = static_cast<signed long long>(backend.crep().first);
   }
   else
   {
      using double_float_type = cpp_double_fp_backend<FloatingPointType>;
      using local_float_type = typename double_float_type::float_type;

      static_assert(std::is_same<local_float_type, FloatingPointType>::value, "Error something went wrong with the limb type");

      // Compare against LLONG_MAX/LLONG_MIN in whichever type can hold
      // them exactly (long long itself, or the double-float type).
      constexpr bool
         long_long_is_longer
         {
            (std::numeric_limits<signed long long>::digits > cpp_df_qf_detail::ccmath::numeric_limits<local_float_type>::digits)
         };

      using longer_type = typename ::std::conditional<long_long_is_longer, signed long long, double_float_type>::type;

      constexpr longer_type my_max_val { static_cast<longer_type>((std::numeric_limits<signed long long>::max)()) };
      constexpr longer_type my_min_val { static_cast<longer_type>((std::numeric_limits<signed long long>::min)()) };

      constexpr double_float_type my_max_val_dd { static_cast<double_float_type>(my_max_val) };
      constexpr double_float_type my_min_val_dd { static_cast<double_float_type>(my_min_val) };

      if (backend.compare(my_max_val_dd) >= 0)
      {
         // Saturate at the maximum.
         *result = (std::numeric_limits<signed long long>::max)();
      }
      else if (backend.compare(my_min_val_dd) <= 0)
      {
         // Saturate at the minimum.
         *result = (std::numeric_limits<signed long long>::min)();
      }
      else
      {
         // In range: extract limb-by-limb.
         double_float_type source { backend };

         *result = detail::extract<signed long long>(source);

         #if !defined(__x86_64__) && !defined(_M_X64)
         // It has been "empirically found" that non-X64 needs this workaround.
         // Even though the same conditions are met for x86_64 on GCC and MSVC,
         // this workaround will actually break the long long conversion tests
         // on those platforms.
         //
         // Our assumption is that on x64 there is x87 math (double -> long double)
         // being performed in the background. Seemingly these might "aid" the conversion
         // of double value to long long. Somehow I get the feeling this issue will arise
         // in future evolution of the cpp_double_fp_backend.
         //
         // This workaround has been tested on: ARM64 (linux and mac), s390x and PPC64LE.
         constexpr bool
            needs_workaround
            {
                  (sizeof(signed long long) == 8U)
               && (std::is_same<local_float_type, double>::value || (std::is_same<local_float_type, long double>::value))
            };

         BOOST_IF_CONSTEXPR (needs_workaround)
         {
            // This is the last value stored in a double as 9223372036854775808
            constexpr signed long long upper_bound = 9223372036854775296LL;

            if (!eval_signbit(backend) && *result >= upper_bound)
            {
               // LONG_MAX is stored with .second = -1, so we compensate for the offset
               // We also only need this at the upper end where the values aren't exactly
               // representable in double. Below a certain point we are fine
               *result += static_cast<signed long long>(1);
            }
         }
         #endif // non-x64
      }
   }
}
// Converts the backend value to unsigned long long, saturating at
// ULLONG_MAX. Non-normal values are cast directly from the leading limb.
// NOTE(review): negative normal values are not clamped to 0 here —
// they take the extract() path; confirm wrap-around is the intended result.
template <typename FloatingPointType>
constexpr auto eval_convert_to(unsigned long long* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> void
{
   const auto fpc = eval_fpclassify(backend);

   if (fpc != FP_NORMAL)
   {
      // Zero/inf/NaN: fall back to a direct cast of the leading limb.
      *result = static_cast<unsigned long long>(backend.crep().first);
   }
   else
   {
      using double_float_type = cpp_double_fp_backend<FloatingPointType>;
      using local_float_type = typename double_float_type::float_type;

      static_assert(std::is_same<local_float_type, FloatingPointType>::value, "Error something went wrong with the limb type");

      // Compare against ULLONG_MAX in whichever type can hold it exactly.
      constexpr bool
         ulong_long_is_longer
         {
            (std::numeric_limits<unsigned long long>::digits > cpp_df_qf_detail::ccmath::numeric_limits<local_float_type>::digits)
         };

      using longer_type = typename ::std::conditional<ulong_long_is_longer, unsigned long long, double_float_type>::type;

      constexpr longer_type my_max_val { static_cast<longer_type>((std::numeric_limits<unsigned long long>::max)()) };

      constexpr double_float_type my_max_val_dd { static_cast<double_float_type>(my_max_val) };

      if (backend.compare(my_max_val_dd) >= 0)
      {
         // Saturate at the maximum.
         *result = (std::numeric_limits<unsigned long long>::max)();
      }
      else
      {
         // In range: extract limb-by-limb.
         double_float_type source { backend };

         *result = detail::extract<unsigned long long>(source);
      }
   }
}
- #ifdef BOOST_HAS_INT128
// Converts the backend value to a signed 128-bit integer, saturating at the
// 128-bit limits. This overload is enabled for limb types wider than 24 bits.
template <typename FloatingPointType>
constexpr auto eval_convert_to(boost::int128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type
{
   const auto fpc = eval_fpclassify(backend);

   if (fpc != FP_NORMAL)
   {
      // Zero/inf/NaN: fall back to a direct cast of the leading limb.
      *result = static_cast<boost::int128_type>(backend.crep().first);
   }
   else
   {
      using double_float_type = cpp_double_fp_backend<FloatingPointType>;
      using local_float_type = typename double_float_type::float_type;

      static_assert(std::is_same<local_float_type, FloatingPointType>::value, "Error something went wrong with the limb type");

      // Compare against INT128 min/max in whichever type can hold them exactly.
      constexpr bool
         n128_is_longer
         {
            ((static_cast<int>(sizeof(boost::int128_type) * static_cast<std::size_t>(CHAR_BIT)) - 1) > cpp_df_qf_detail::ccmath::numeric_limits<local_float_type>::digits)
         };

      using longer_type = typename ::std::conditional<n128_is_longer, boost::int128_type, double_float_type>::type;

      // Build INT128_MAX/INT128_MIN without overflow:
      // ((2^126 - 1) << 1) + 1 == 2^127 - 1, and its negation minus one.
      constexpr boost::int128_type my_max_val_n128 = (((static_cast<boost::int128_type>(1) << (sizeof(boost::int128_type) * CHAR_BIT - 2)) - 1) << 1) + 1;
      constexpr boost::int128_type my_min_val_n128 = static_cast<boost::int128_type>(-my_max_val_n128 - 1);

      constexpr longer_type my_max_val(static_cast<longer_type>(my_max_val_n128));
      constexpr longer_type my_min_val(static_cast<longer_type>(my_min_val_n128));

      constexpr double_float_type my_max_val_dd(static_cast<double_float_type>(my_max_val));
      constexpr double_float_type my_min_val_dd(static_cast<double_float_type>(my_min_val));

      if (backend.compare(my_max_val_dd) >= 0)
      {
         // Saturate at the maximum.
         *result = my_max_val;
      }
      else if (backend.compare(my_min_val_dd) <= 0)
      {
         // Saturate at the minimum.
         *result = my_min_val;
      }
      else
      {
         // In range: extract limb-by-limb.
         double_float_type source { backend };

         *result = detail::extract<boost::int128_type>(source);
      }
   }
}
- template <typename FloatingPointType>
- constexpr auto eval_convert_to(boost::int128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<!(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type
- {
- const auto fpc = eval_fpclassify(backend);
- if (fpc != FP_NORMAL)
- {
- *result = static_cast<boost::int128_type>(backend.crep().first);
- }
- else
- {
- using double_float_type = cpp_double_fp_backend<FloatingPointType>;
- double_float_type source { backend };
- *result = detail::extract<boost::int128_type>(source);
- }
- }
- template <typename FloatingPointType>
- constexpr auto eval_convert_to(boost::uint128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type
- {
- const auto fpc = eval_fpclassify(backend);
- if (fpc != FP_NORMAL)
- {
- *result = static_cast<boost::int128_type>(backend.crep().first);
- }
- else
- {
- using double_float_type = cpp_double_fp_backend<FloatingPointType>;
- using local_float_type = typename double_float_type::float_type;
- static_assert(std::is_same<local_float_type, FloatingPointType>::value, "Error something went wrong with the limb type");
- constexpr bool
- u128_is_longer
- {
- (static_cast<int>(sizeof(boost::uint128_type) * static_cast<std::size_t>(CHAR_BIT)) > cpp_df_qf_detail::ccmath::numeric_limits<local_float_type>::digits)
- };
- using longer_type = typename ::std::conditional<u128_is_longer, boost::uint128_type, double_float_type>::type;
- constexpr boost::uint128_type my_max_val_u128 = static_cast<boost::uint128_type>(~static_cast<boost::uint128_type>(0));
- constexpr longer_type my_max_val(static_cast<longer_type>(my_max_val_u128));
- constexpr double_float_type my_max_val_dd(static_cast<double_float_type>(my_max_val));
- if (backend.compare(my_max_val_dd) >= 0)
- {
- *result = my_max_val;
- }
- else
- {
- double_float_type source { backend };
- *result = detail::extract<boost::uint128_type>(source);
- }
- }
- }
- template <typename FloatingPointType>
- constexpr auto eval_convert_to(boost::uint128_type* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename std::enable_if<!(cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits > 24), void>::type
- {
- const auto fpc = eval_fpclassify(backend);
- if (fpc != FP_NORMAL)
- {
- *result = static_cast<boost::uint128_type>(backend.crep().first);
- }
- else
- {
- using double_float_type = cpp_double_fp_backend<FloatingPointType>;
- double_float_type source { backend };
- *result = detail::extract<boost::uint128_type>(source);
- }
- }
- #endif
// Converts the backend value to another built-in floating-point type.
// A wider destination gets both limbs added directly; a narrower one is
// filled by repeated leading-chunk extraction.
template <typename FloatingPointType,
          typename OtherFloatingPointType>
constexpr auto eval_convert_to(OtherFloatingPointType* result, const cpp_double_fp_backend<FloatingPointType>& backend) -> typename ::std::enable_if<cpp_df_qf_detail::is_floating_point<OtherFloatingPointType>::value>::type
{
   const auto fpc = eval_fpclassify(backend);

   // TBD: Implement min/max check for the destination floating-point type result.

   if (fpc != FP_NORMAL)
   {
      // Zero/inf/NaN: the leading limb alone carries the class.
      *result = static_cast<OtherFloatingPointType>(backend.my_first());
   }
   else
   {
      BOOST_IF_CONSTEXPR(cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatingPointType>::digits > cpp_df_qf_detail::ccmath::numeric_limits<FloatingPointType>::digits)
      {
         // Destination is wider than one limb: the two limbs sum exactly.
         *result = static_cast<OtherFloatingPointType>(backend.my_first());
         *result += static_cast<OtherFloatingPointType>(backend.my_second());
      }
      else
      {
         // Destination is narrower: peel off destination-sized chunks until
         // the backend's full digit count has been consumed.
         cpp_double_fp_backend<FloatingPointType> source = backend;

         *result = 0;

         for(auto digit_count = static_cast<int>(0);
                  digit_count < cpp_double_fp_backend<FloatingPointType>::my_digits;
                  digit_count += cpp_df_qf_detail::ccmath::numeric_limits<OtherFloatingPointType>::digits)
         {
            const auto next = static_cast<OtherFloatingPointType>(source.my_first());

            *result += next;

            eval_subtract(source, cpp_double_fp_backend<FloatingPointType>(next));
         }
      }
   }
}
- template <typename FloatingPointType>
- constexpr auto hash_value(const cpp_double_fp_backend<FloatingPointType>& a) -> ::std::size_t
- {
- return a.hash();
- }
- } // namespace backends
using backends::cpp_double_fp_backend;

// Convenience aliases for the common limb types (expression templates off).
using cpp_double_float = number<cpp_double_fp_backend<float>, ::boost::multiprecision::et_off>;
using cpp_double_double = number<cpp_double_fp_backend<double>, ::boost::multiprecision::et_off>;
using cpp_double_long_double = number<cpp_double_fp_backend<long double>, ::boost::multiprecision::et_off>;
#ifdef BOOST_MP_CPP_DOUBLE_FP_HAS_FLOAT128
// Only available when the platform provides a 128-bit float type.
using cpp_double_float128 = number<cpp_double_fp_backend<::boost::float128_type>, ::boost::multiprecision::et_off>;
#endif
- } } // namespace boost::multiprecision
namespace std {

// Specialization of numeric_limits for boost::multiprecision::number<cpp_double_fp_backend<>>
// All numeric properties are forwarded from the backend's my_* members.
template <typename FloatingPointType,
          const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
BOOST_MP_DF_QF_NUM_LIMITS_CLASS_TYPE numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >
{
private:
   using local_float_type = FloatingPointType;
   using inner_self_type = boost::multiprecision::cpp_double_fp_backend<local_float_type>;

   using self_type = boost::multiprecision::number<inner_self_type, ExpressionTemplatesOption>;

public:
   static constexpr bool is_specialized = true;
   static constexpr bool is_signed      = true;
   static constexpr bool is_integer     = false;
   static constexpr bool is_exact       = false;
   static constexpr bool is_bounded     = true;
   static constexpr bool is_modulo      = false;
   // Double-word formats are not IEC 559 conformant (non-uniform precision).
   static constexpr bool is_iec559      = false;

   static constexpr bool has_infinity      = true;
   static constexpr bool has_quiet_NaN     = true;
   static constexpr bool has_signaling_NaN = false;

   // These members were deprecated in C++23, but only MSVC warns (as of June 25)
   #ifdef BOOST_MSVC
   #pragma warning(push)
   #pragma warning(disable:4996)
   #endif
   static constexpr float_denorm_style has_denorm = denorm_absent;
   static constexpr bool has_denorm_loss          = true;
   #ifdef BOOST_MSVC
   #pragma warning(pop)
   #endif // deprecated members

   static constexpr bool traps           = false;
   static constexpr bool tinyness_before = false;

   static constexpr float_round_style round_style = round_to_nearest;

   static constexpr int radix = 2;

   // Precision/exponent properties come straight from the backend.
   static constexpr int digits         = inner_self_type::my_digits;
   static constexpr int digits10       = inner_self_type::my_digits10;
   static constexpr int max_digits10   = inner_self_type::my_max_digits10;

   static constexpr int max_exponent   = inner_self_type::my_max_exponent;
   static constexpr int min_exponent   = inner_self_type::my_min_exponent;
   static constexpr int max_exponent10 = inner_self_type::my_max_exponent10;
   static constexpr int min_exponent10 = inner_self_type::my_min_exponent10;

   static constexpr auto(min) () noexcept -> self_type { return static_cast<self_type>(inner_self_type::my_value_min()); }
   static constexpr auto(max) () noexcept -> self_type { return static_cast<self_type>(inner_self_type::my_value_max()); }
   static constexpr auto lowest () noexcept -> self_type { return static_cast<self_type>(-(max)()); }
   static constexpr auto epsilon () noexcept -> self_type { return static_cast<self_type>(inner_self_type::my_value_eps()); }
   static constexpr auto round_error () noexcept -> self_type { return static_cast<self_type>(static_cast<local_float_type>(0.5)); }
   // No subnormals (denorm_absent): denorm_min is simply min().
   static constexpr auto denorm_min () noexcept -> self_type { return static_cast<self_type>((min)()); }
   static constexpr auto infinity () noexcept -> self_type { return static_cast<self_type>(inner_self_type::my_value_inf()); }
   static constexpr auto quiet_NaN () noexcept -> self_type { return static_cast<self_type>(inner_self_type::my_value_nan()); }
   // No signaling NaN is available (has_signaling_NaN is false): returns zero.
   static constexpr auto signaling_NaN() noexcept -> self_type { return static_cast<self_type>(static_cast<local_float_type>(0.0)); }
};

} // namespace std
// Out-of-line definitions for the static constexpr data members declared in
// the numeric_limits specialization above. These are required when the
// members are ODR-used under pre-C++17 linkage rules.
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::is_specialized;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::is_signed;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::is_integer;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::is_exact;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::is_bounded;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::is_modulo;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::is_iec559;
// The denorm members are deprecated in C++23; silence MSVC's warning only.
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable:4996)
#endif
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr std::float_denorm_style std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::has_denorm;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::has_denorm_loss;
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif // deprecated members
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::has_infinity;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::has_quiet_NaN;
template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::has_signaling_NaN;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::traps;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr bool std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::tinyness_before;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr std::float_round_style std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::round_style;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::radix;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::digits;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::digits10;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::max_digits10;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::max_exponent;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::min_exponent;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::max_exponent10;
- template <typename FloatingPointType, const boost::multiprecision::expression_template_option ExpressionTemplatesOption>
- constexpr int std::numeric_limits<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplatesOption> >::min_exponent10;
- #if defined(BOOST_MP_MATH_AVAILABLE)
- namespace boost { namespace math { namespace policies {
- template <class FloatingPointType, class Policy, boost::multiprecision::expression_template_option ExpressionTemplates>
- struct precision<boost::multiprecision::number<boost::multiprecision::cpp_double_fp_backend<FloatingPointType>, ExpressionTemplates>, Policy>
- {
- private:
- using my_multiprecision_backend_type = boost::multiprecision::cpp_double_fp_backend<FloatingPointType>;
- using digits2_type = digits2<my_multiprecision_backend_type::my_digits>;
- static constexpr auto use_full_precision() noexcept -> bool
- {
- return ((digits2_type::value <= precision_type::value) || (precision_type::value <= 0));
- }
- public:
- using precision_type = typename Policy::precision_type;
- using type =
- typename std::conditional<use_full_precision(),
- digits2_type, // This is the default case: Use full precision for FloatingPointType.
- precision_type>::type; // Here we find (and use) user-customized precision.
- };
- } } } // namespace boost::math::policies
- #endif
- #endif // BOOST_MP_CPP_DOUBLE_FP_2021_06_05_HPP
|