llvmMathExtras.h 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900
  1. //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains some functions that are useful for math stuff.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #pragma once
  13. #include <algorithm>
  14. #include <cassert>
  15. #include <climits>
  16. #include <cmath>
  17. #include <cstdint>
  18. #include <cstring>
  19. #include <limits>
  20. #include <type_traits>
  21. #ifdef __ANDROID_NDK__
  22. #include <android/api-level.h>
  23. #endif
  24. #ifndef __has_builtin
  25. #define __has_builtin(x) 0
  26. #endif
  27. #ifndef LLVM_GNUC_PREREQ
  28. #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
  29. #define LLVM_GNUC_PREREQ(maj, min, patch) \
  30. ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
  31. ((maj) << 20) + ((min) << 10) + (patch))
  32. #elif defined(__GNUC__) && defined(__GNUC_MINOR__)
  33. #define LLVM_GNUC_PREREQ(maj, min, patch) \
  34. ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
  35. #else
  36. #define LLVM_GNUC_PREREQ(maj, min, patch) 0
  37. #endif
  38. #endif
  39. #ifdef _MSC_VER
  40. // Declare these intrinsics manually rather including intrin.h. It's very
  41. // expensive, and MathExtras.h is popular.
  42. // #include <intrin.h>
  43. extern "C" {
  44. unsigned char _BitScanForward(unsigned long* _Index, unsigned long _Mask);
  45. unsigned char _BitScanForward64(unsigned long* _Index, unsigned __int64 _Mask);
  46. unsigned char _BitScanReverse(unsigned long* _Index, unsigned long _Mask);
  47. unsigned char _BitScanReverse64(unsigned long* _Index, unsigned __int64 _Mask);
  48. }
  49. #endif
  50. namespace c10 {
  51. namespace llvm {
/// The behavior an operation has on an input of 0.
/// Passed to the count*/find* helpers below to select how a zero input
/// (which has no set bit) is reported.
enum ZeroBehavior {
  /// The returned value is undefined.
  ZB_Undefined,
  /// The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// The returned value is numeric_limits<T>::digits
  ZB_Width
};
namespace detail {
// Generic fallback counter: O(log bits) bisection, used when no compiler
// intrinsic matches the operand size.
template <typename T, std::size_t SizeOfT>
struct TrailingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    if (Val & 0x1)
      return 0;
    // Bisection method.
    std::size_t ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      // If the low half is all zero, discard it and record its width.
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};

#if (defined(__GNUC__) && __GNUC__ >= 4) || defined(_MSC_VER)
// 32-bit specialization: uses __builtin_ctz (GCC/Clang) or _BitScanForward
// (MSVC) instead of the generic bisection loop.
template <typename T>
struct TrailingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    // The intrinsics are undefined on 0, so handle it here unless the
    // caller explicitly opted into undefined behavior.
    if (ZB != ZB_Undefined && Val == 0)
      return 32;
#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

// _BitScanForward64 only exists on 64-bit MSVC targets.
#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T>
struct TrailingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;
#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the least significant bit to the most
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
namespace detail {
// Generic fallback counter: O(log bits) bisection, used when no compiler
// intrinsic matches the operand size.
template <typename T, std::size_t SizeOfT>
struct LeadingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    // Bisection method.
    std::size_t ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      // If the high half is non-empty keep it; otherwise record its width
      // as leading zeros.
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};

#if (defined(__GNUC__) && __GNUC__ >= 4) || defined(_MSC_VER)
// 32-bit specialization: uses __builtin_clz (GCC/Clang) or _BitScanReverse
// (MSVC) instead of the generic bisection loop.
template <typename T>
struct LeadingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    // The intrinsics are undefined on 0, so handle it here unless the
    // caller explicitly opted into undefined behavior.
    if (ZB != ZB_Undefined && Val == 0)
      return 32;
#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse(&Index, Val);
    // _BitScanReverse yields the index of the highest set bit;
    // XOR with 31 converts that to a leading-zero count.
    return Index ^ 31;
#endif
  }
};

// _BitScanReverse64 only exists on 64-bit MSVC targets.
#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T>
struct LeadingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;
#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    // Convert highest-set-bit index to leading-zero count.
    return Index ^ 63;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
/// Get the index of the first set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T>
T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
  // For a zero input with ZB_Max, the sentinel numeric_limits<T>::max()
  // is returned (note the return type is T, not size_t).
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();
  // Zero was already handled, so the intrinsic path is safe.
  return countTrailingZeros(Val, ZB_Undefined);
}
  209. /// Create a bitmask with the N right-most bits set to 1, and all other
  210. /// bits set to 0. Only unsigned types are allowed.
  211. template <typename T>
  212. T maskTrailingOnes(unsigned N) {
  213. static_assert(std::is_unsigned<T>::value, "Invalid type!");
  214. const unsigned Bits = CHAR_BIT * sizeof(T);
  215. assert(N <= Bits && "Invalid bit index");
  216. return N == 0 ? 0 : (T(-1) >> (Bits - N));
  217. }
  218. /// Create a bitmask with the N left-most bits set to 1, and all other
  219. /// bits set to 0. Only unsigned types are allowed.
  220. template <typename T>
  221. T maskLeadingOnes(unsigned N) {
  222. return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
  223. }
  224. /// Create a bitmask with the N right-most bits set to 0, and all other
  225. /// bits set to 1. Only unsigned types are allowed.
  226. template <typename T>
  227. T maskTrailingZeros(unsigned N) {
  228. return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
  229. }
  230. /// Create a bitmask with the N left-most bits set to 0, and all other
  231. /// bits set to 1. Only unsigned types are allowed.
  232. template <typename T>
  233. T maskLeadingZeros(unsigned N) {
  234. return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
  235. }
/// Get the index of the last set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T>
T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();
  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  return countLeadingZeros(Val, ZB_Undefined) ^
      (std::numeric_limits<T>::digits - 1);
}
/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
// The nested R* macros expand so that entry i is the 8-bit reversal of i.
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
    R6(0),
    R6(2),
    R6(1),
    R6(3)
#undef R2
#undef R4
#undef R6
};
  267. /// Reverse the bits in \p Val.
  268. template <typename T>
  269. T reverseBits(T Val) {
  270. unsigned char in[sizeof(Val)];
  271. unsigned char out[sizeof(Val)];
  272. std::memcpy(in, &Val, sizeof(Val));
  273. for (unsigned i = 0; i < sizeof(Val); ++i)
  274. out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  275. std::memcpy(&Val, out, sizeof(Val));
  276. return Val;
  277. }
  278. // NOTE: The following support functions use the _32/_64 extensions instead of
  279. // type overloading so that signed and unsigned integers can be used without
  280. // ambiguity.
  281. /// Return the high 32 bits of a 64 bit value.
  282. constexpr inline uint32_t Hi_32(uint64_t Value) {
  283. return static_cast<uint32_t>(Value >> 32);
  284. }
  285. /// Return the low 32 bits of a 64 bit value.
  286. constexpr inline uint32_t Lo_32(uint64_t Value) {
  287. return static_cast<uint32_t>(Value);
  288. }
  289. /// Make a 64-bit integer from a high / low pair of 32-bit integers.
  290. constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  291. return ((uint64_t)High << 32) | (uint64_t)Low;
  292. }
  293. /// Checks if an integer fits into the given bit width.
  294. template <unsigned N>
  295. constexpr inline bool isInt(int64_t x) {
  296. return N >= 64 ||
  297. (-(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1)));
  298. }
  299. // Template specializations to get better code for common cases.
  300. template <>
  301. constexpr inline bool isInt<8>(int64_t x) {
  302. return static_cast<int8_t>(x) == x;
  303. }
  304. template <>
  305. constexpr inline bool isInt<16>(int64_t x) {
  306. return static_cast<int16_t>(x) == x;
  307. }
  308. template <>
  309. constexpr inline bool isInt<32>(int64_t x) {
  310. return static_cast<int32_t>(x) == x;
  311. }
  312. /// Checks if a signed integer is an N bit number shifted left by S.
  313. template <unsigned N, unsigned S>
  314. constexpr inline bool isShiftedInt(int64_t x) {
  315. static_assert(
  316. N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
  317. static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
  318. return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
  319. }
  320. /// Checks if an unsigned integer fits into the given bit width.
  321. ///
  322. /// This is written as two functions rather than as simply
  323. ///
  324. /// return N >= 64 || X < (UINT64_C(1) << N);
  325. ///
  326. /// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
  327. /// left too many places.
  328. template <unsigned N>
  329. constexpr inline typename std::enable_if<(N < 64), bool>::type isUInt(
  330. uint64_t X) {
  331. static_assert(N > 0, "isUInt<0> doesn't make sense");
  332. return X < (UINT64_C(1) << (N));
  333. }
  334. template <unsigned N>
  335. constexpr inline typename std::enable_if<N >= 64, bool>::type isUInt(
  336. uint64_t /*X*/) {
  337. return true;
  338. }
  339. // Template specializations to get better code for common cases.
  340. template <>
  341. constexpr inline bool isUInt<8>(uint64_t x) {
  342. return static_cast<uint8_t>(x) == x;
  343. }
  344. template <>
  345. constexpr inline bool isUInt<16>(uint64_t x) {
  346. return static_cast<uint16_t>(x) == x;
  347. }
  348. template <>
  349. constexpr inline bool isUInt<32>(uint64_t x) {
  350. return static_cast<uint32_t>(x) == x;
  351. }
  352. /// Checks if a unsigned integer is an N bit number shifted left by S.
  353. template <unsigned N, unsigned S>
  354. constexpr inline bool isShiftedUInt(uint64_t x) {
  355. static_assert(
  356. N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
  357. static_assert(
  358. N + S <= 64, "isShiftedUInt<N, S> with N + S > 64 is too wide.");
  359. // Per the two static_asserts above, S must be strictly less than 64. So
  360. // 1 << S is not undefined behavior.
  361. return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
  362. }
  363. /// Gets the maximum value for a N-bit unsigned integer.
  364. inline uint64_t maxUIntN(uint64_t N) {
  365. assert(N > 0 && N <= 64 && "integer width out of range");
  366. // uint64_t(1) << 64 is undefined behavior, so we can't do
  367. // (uint64_t(1) << N) - 1
  368. // without checking first that N != 64. But this works and doesn't have a
  369. // branch.
  370. return UINT64_MAX >> (64 - N);
  371. }
// Ignore the false warning "Arithmetic overflow" for MSVC
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4146)
#endif
/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");
  // Negates an unsigned value and converts to int64_t on return; this is
  // well-defined for every N in [1, 64], including N == 64 where the
  // result is INT64_MIN.
  return -(UINT64_C(1) << (N - 1));
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
  385. /// Gets the maximum value for a N-bit signed integer.
  386. inline int64_t maxIntN(int64_t N) {
  387. assert(N > 0 && N <= 64 && "integer width out of range");
  388. // This relies on two's complement wraparound when N == 64, so we convert to
  389. // int64_t only at the very end to avoid UB.
  390. return (UINT64_C(1) << (N - 1)) - 1;
  391. }
  392. /// Checks if an unsigned integer fits into the given (dynamic) bit width.
  393. inline bool isUIntN(unsigned N, uint64_t x) {
  394. return N >= 64 || x <= maxUIntN(N);
  395. }
  396. /// Checks if an signed integer fits into the given (dynamic) bit width.
  397. inline bool isIntN(unsigned N, int64_t x) {
  398. return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
  399. }
  400. /// Return true if the argument is a non-empty sequence of ones starting at the
  401. /// least significant bit with the remainder zero (32 bit version).
  402. /// Ex. isMask_32(0x0000FFFFU) == true.
  403. constexpr inline bool isMask_32(uint32_t Value) {
  404. return Value && ((Value + 1) & Value) == 0;
  405. }
  406. /// Return true if the argument is a non-empty sequence of ones starting at the
  407. /// least significant bit with the remainder zero (64 bit version).
  408. constexpr inline bool isMask_64(uint64_t Value) {
  409. return Value && ((Value + 1) & Value) == 0;
  410. }
  411. /// Return true if the argument contains a non-empty sequence of ones with the
  412. /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
  413. constexpr inline bool isShiftedMask_32(uint32_t Value) {
  414. return Value && isMask_32((Value - 1) | Value);
  415. }
  416. /// Return true if the argument contains a non-empty sequence of ones with the
  417. /// remainder zero (64 bit version.)
  418. constexpr inline bool isShiftedMask_64(uint64_t Value) {
  419. return Value && isMask_64((Value - 1) | Value);
  420. }
  421. /// Return true if the argument is a power of two > 0.
  422. /// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
  423. constexpr inline bool isPowerOf2_32(uint32_t Value) {
  424. return Value && !(Value & (Value - 1));
  425. }
  426. /// Return true if the argument is a power of two > 0 (64 bit edition.)
  427. constexpr inline bool isPowerOf2_64(uint64_t Value) {
  428. return Value && !(Value & (Value - 1));
  429. }
/// Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  // Leading ones of Value are exactly the leading zeros of ~Value.
  return countLeadingZeros<T>(~Value, ZB);
}
/// Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countTrailingOnes(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  // Trailing ones of Value are exactly the trailing zeros of ~Value.
  return countTrailingZeros<T>(~Value, ZB);
}
namespace detail {
// Popcount for operands of at most 32 bits: GCC/Clang intrinsic when
// available, otherwise the classic SWAR bit-counting hack.
template <typename T, std::size_t SizeOfT>
struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if defined(__GNUC__) && __GNUC__ >= 4
    return __builtin_popcount(Value);
#else
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    // '+' binds tighter than '&', so this is ((v + (v >> 4)) & 0xF0F0F0F)
    // — the standard nibble-sum step — before the byte-sum multiply.
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};
// 64-bit specialization: same structure with 64-bit constants.
template <typename T>
struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if defined(__GNUC__) && __GNUC__ >= 4
    return __builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail
/// Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
  // Old Android NDK API levels lack log2(); synthesize it from natural logs.
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}
/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
  // For Value == 0 the clz is 32, so the "-1" result appears as the
  // wrapped unsigned value of 31 - 32.
  return static_cast<unsigned>(31 - countLeadingZeros(Value));
}
/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return static_cast<unsigned>(63 - countLeadingZeros(Value));
}
/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  // ceil(log2(v)) == floor(log2(v - 1)) + 1, folded into the clz form.
  return static_cast<unsigned>(32 - countLeadingZeros(Value - 1));
}
/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return static_cast<unsigned>(64 - countLeadingZeros(Value - 1));
}
  531. /// Return the greatest common divisor of the values using Euclid's algorithm.
  532. inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  533. while (B) {
  534. uint64_t T = B;
  535. B = A % B;
  536. A = T;
  537. }
  538. return A;
  539. }
  540. /// This function takes a 64-bit integer and returns the bit equivalent double.
  541. inline double BitsToDouble(uint64_t Bits) {
  542. double D;
  543. static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  544. memcpy(&D, &Bits, sizeof(Bits));
  545. return D;
  546. }
  547. /// This function takes a 32-bit integer and returns the bit equivalent float.
  548. inline float BitsToFloat(uint32_t Bits) {
  549. // TODO: Use bit_cast once C++20 becomes available.
  550. float F;
  551. static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  552. memcpy(&F, &Bits, sizeof(Bits));
  553. return F;
  554. }
  555. /// This function takes a double and returns the bit equivalent 64-bit integer.
  556. /// Note that copying doubles around changes the bits of NaNs on some hosts,
  557. /// notably x86, so this routine cannot be used if these bits are needed.
  558. inline uint64_t DoubleToBits(double Double) {
  559. uint64_t Bits;
  560. static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  561. memcpy(&Bits, &Double, sizeof(Double));
  562. return Bits;
  563. }
  564. /// This function takes a float and returns the bit equivalent 32-bit integer.
  565. /// Note that copying floats around changes the bits of NaNs on some hosts,
  566. /// notably x86, so this routine cannot be used if these bits are needed.
  567. inline uint32_t FloatToBits(float Float) {
  568. uint32_t Bits;
  569. static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  570. memcpy(&Bits, &Float, sizeof(Float));
  571. return Bits;
  572. }
  573. /// A and B are either alignments or offsets. Return the minimum alignment that
  574. /// may be assumed after adding the two together.
  575. constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  576. // The largest power of 2 that divides both A and B.
  577. //
  578. // Replace "-Value" by "1+~Value" in the following commented code to avoid
  579. // MSVC warning C4146
  580. // return (A | B) & -(A | B);
  581. return (A | B) & (1 + ~(A | B));
  582. }
/// Aligns \c Addr to \c Alignment bytes, rounding up.
///
/// Alignment should be a power of two. This method rounds up, so
/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
inline uintptr_t alignAddr(const void* Addr, size_t Alignment) {
  assert(
      Alignment && isPowerOf2_64((uint64_t)Alignment) &&
      "Alignment is not a power of two!");
  // Guard against address-space wraparound in the rounding arithmetic.
  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
  // Standard round-up-then-mask for power-of-two alignments.
  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
}
/// Returns the necessary adjustment for aligning \c Ptr to \c Alignment
/// bytes, rounding up.
inline size_t alignmentAdjustment(const void* Ptr, size_t Alignment) {
  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}
  599. /// Returns the next power of two (in 64-bits) that is strictly greater than A.
  600. /// Returns zero on overflow.
  601. inline uint64_t NextPowerOf2(uint64_t A) {
  602. A |= (A >> 1);
  603. A |= (A >> 2);
  604. A |= (A >> 4);
  605. A |= (A >> 8);
  606. A |= (A >> 16);
  607. A |= (A >> 32);
  608. return A + 1;
  609. }
/// Returns the power of two which is less than or equal to the given value.
/// Essentially, it is a floor operation across the domain of powers of two.
inline uint64_t PowerOf2Floor(uint64_t A) {
  if (!A)
    return 0;
  // 63 - clz(A) is the index of the highest set bit; A != 0 here, so
  // ZB_Undefined is safe.
  return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
}
/// Returns the power of two which is greater than or equal to the given value.
/// Essentially, it is a ceil operation across the domain of powers of two.
inline uint64_t PowerOf2Ceil(uint64_t A) {
  if (!A)
    return 0;
  // NextPowerOf2 is strictly-greater-than, so pass A - 1 to make a value
  // that is already a power of two map to itself.
  return NextPowerOf2(A - 1);
}
  624. /// Returns the next integer (mod 2**64) that is greater than or equal to
  625. /// \p Value and is a multiple of \p Align. \p Align must be non-zero.
  626. ///
  627. /// If non-zero \p Skew is specified, the return value will be a minimal
  628. /// integer that is greater than or equal to \p Value and equal to
  629. /// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
  630. /// \p Align, its value is adjusted to '\p Skew mod \p Align'.
  631. ///
  632. /// Examples:
  633. /// \code
  634. /// alignTo(5, 8) = 8
  635. /// alignTo(17, 8) = 24
  636. /// alignTo(~0LL, 8) = 0
  637. /// alignTo(321, 255) = 510
  638. ///
  639. /// alignTo(5, 8, 7) = 7
  640. /// alignTo(17, 8, 1) = 17
  641. /// alignTo(~0LL, 8, 3) = 3
  642. /// alignTo(321, 255, 42) = 552
  643. /// \endcode
  644. inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  645. assert(Align != 0u && "Align can't be 0.");
  646. Skew %= Align;
  647. return (Value + Align - 1 - Skew) / Align * Align + Skew;
  648. }
  649. /// Returns the next integer (mod 2**64) that is greater than or equal to
  650. /// \p Value and is a multiple of \c Align. \c Align must be non-zero.
  651. template <uint64_t Align>
  652. constexpr inline uint64_t alignTo(uint64_t Value) {
  653. static_assert(Align != 0u, "Align must be non-zero");
  654. return (Value + Align - 1) / Align * Align;
  655. }
/// Returns the integer ceil(Numerator / Denominator).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  // alignTo rounds Numerator up to a multiple of Denominator first.
  return alignTo(Numerator, Denominator) / Denominator;
}
/// \c alignTo for contexts where a constant expression is required.
/// \sa alignTo
///
/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
template <uint64_t Align>
struct AlignTo {
  static_assert(Align != 0u, "Align must be non-zero");
  // Compile-time constant: Value rounded up to a multiple of Align.
  template <uint64_t Value>
  struct from_value {
    static const uint64_t value = (Value + Align - 1) / Align * Align;
  };
};
  672. /// Returns the largest uint64_t less than or equal to \p Value and is
  673. /// \p Skew mod \p Align. \p Align must be non-zero
  674. inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  675. assert(Align != 0u && "Align can't be 0.");
  676. Skew %= Align;
  677. return (Value - Skew) / Align * Align + Skew;
  678. }
/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align. \p Align must be
/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
  // Zero when Value is already aligned.
  return alignTo(Value, Align) - Value;
}
  685. /// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
  686. /// Requires 0 < B <= 32.
  687. template <unsigned B>
  688. constexpr inline int32_t SignExtend32(uint32_t X) {
  689. static_assert(B > 0, "Bit width can't be 0.");
  690. static_assert(B <= 32, "Bit width out of range.");
  691. return int32_t(X << (32 - B)) >> (32 - B);
  692. }
  693. /// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
  694. /// Requires 0 < B < 32.
  695. inline int32_t SignExtend32(uint32_t X, unsigned B) {
  696. assert(B > 0 && "Bit width can't be 0.");
  697. assert(B <= 32 && "Bit width out of range.");
  698. return int32_t(X << (32 - B)) >> (32 - B);
  699. }
  700. /// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
  701. /// Requires 0 < B < 64.
  702. template <unsigned B>
  703. constexpr inline int64_t SignExtend64(uint64_t x) {
  704. static_assert(B > 0, "Bit width can't be 0.");
  705. static_assert(B <= 64, "Bit width out of range.");
  706. return int64_t(x << (64 - B)) >> (64 - B);
  707. }
  708. /// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
  709. /// Requires 0 < B < 64.
  710. inline int64_t SignExtend64(uint64_t X, unsigned B) {
  711. assert(B > 0 && "Bit width can't be 0.");
  712. assert(B <= 64 && "Bit width out of range.");
  713. return int64_t(X << (64 - B)) >> (64 - B);
  714. }
  715. /// Subtract two unsigned integers, X and Y, of type T and return the absolute
  716. /// value of the result.
  717. template <typename T>
  718. typename std::enable_if<std::is_unsigned<T>::value, T>::type AbsoluteDifference(
  719. T X,
  720. T Y) {
  721. return std::max(X, Y) - std::min(X, Y);
  722. }
  723. /// Add two unsigned integers, X and Y, of type T. Clamp the result to the
  724. /// maximum representable value of T on overflow. ResultOverflowed indicates if
  725. /// the result is larger than the maximum representable value of type T.
  726. template <typename T>
  727. typename std::enable_if<std::is_unsigned<T>::value, T>::type SaturatingAdd(
  728. T X,
  729. T Y,
  730. bool* ResultOverflowed = nullptr) {
  731. bool Dummy;
  732. bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  733. // Hacker's Delight, p. 29
  734. T Z = X + Y;
  735. Overflowed = (Z < X || Z < Y);
  736. if (Overflowed)
  737. return std::numeric_limits<T>::max();
  738. else
  739. return Z;
  740. }
/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type SaturatingMultiply(
T X,
T Y,
bool* ResultOverflowed = nullptr) {
// When the caller does not care about the flag, write into a local dummy so
// the code below can use the reference unconditionally.
bool Dummy;
bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
// Hacker's Delight, p. 30 has a different algorithm, but we don't use that
// because it fails for uint16_t (where multiplication can have undefined
// behavior due to promotion to int), and requires a division in addition
// to the multiplication.
Overflowed = false;
// Classify the product by the sum of the operands' floor-log2 values:
// Log2(Z) would be either Log2Z or Log2Z + 1.
// Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
// will necessarily be less than Log2Max as desired.
int Log2Z = Log2_64(X) + Log2_64(Y);
const T Max = std::numeric_limits<T>::max();
int Log2Max = Log2_64(Max);
// Strictly below the top bit position: the product cannot wrap, so
// multiply directly.
if (Log2Z < Log2Max) {
return X * Y;
}
// Strictly above: the product certainly wraps; saturate.
if (Log2Z > Log2Max) {
Overflowed = true;
return Max;
}
// Borderline case (Log2Z == Log2Max):
// We're going to use the top bit, and maybe overflow one
// bit past it. Multiply all but the bottom bit then add
// that on at the end.
T Z = (X >> 1) * Y;
// If any bit above Max/2's top position is set, the doubling below would
// overflow.
if (Z & ~(Max >> 1)) {
Overflowed = true;
return Max;
}
Z <<= 1;
// Restore the contribution of X's dropped low bit; this final add may
// itself saturate, so delegate to SaturatingAdd (which also sets the
// caller's overflow flag).
if (X & 1)
return SaturatingAdd(Z, Y, ResultOverflowed);
return Z;
}
  782. /// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
  783. /// the product. Clamp the result to the maximum representable value of T on
  784. /// overflow. ResultOverflowed indicates if the result is larger than the
  785. /// maximum representable value of type T.
  786. template <typename T>
  787. typename std::enable_if<std::is_unsigned<T>::value, T>::type
  788. SaturatingMultiplyAdd(T X, T Y, T A, bool* ResultOverflowed = nullptr) {
  789. bool Dummy;
  790. bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  791. T Product = SaturatingMultiply(X, Y, &Overflowed);
  792. if (Overflowed)
  793. return Product;
  794. return SaturatingAdd(A, Product, &Overflowed);
  795. }
  796. /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
  797. extern const float huge_valf;
  798. } // namespace llvm
  799. } // namespace c10