
/erts/emulator/beam/big.c

https://github.com/cobusc/otp
C | 2978 lines | 2366 code | 323 blank | 289 comment | 502 complexity | aaa466b4e97d1f386e712befb71f077c MD5
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception, Apache-2.0


  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2018. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. #ifdef HAVE_CONFIG_H
  21. # include "config.h"
  22. #endif
  23. #include "sys.h"
  24. #include "erl_vm.h"
  25. #include "global.h"
  26. #include "big.h"
  27. #include "error.h"
  28. #include "bif.h"
  29. #define ZERO_DIGITS(v, sz) do { \
  30. dsize_t _t_sz = sz; \
  31. ErtsDigit* _t_v = v; \
  32. while(_t_sz--) *_t_v++ = 0; \
  33. } while(0)
  34. #define MOVE_DIGITS(dst, src, sz) do { \
  35. dsize_t _t_sz = sz; \
  36. ErtsDigit* _t_dst; \
  37. ErtsDigit* _t_src; \
  38. if (dst < src) { \
  39. _t_dst = dst; \
  40. _t_src = src; \
  41. while(_t_sz--) *_t_dst++ = *_t_src++; \
  42. } \
  43. else if (dst > src) { \
  44. _t_dst = (dst)+((sz)-1); \
  45. _t_src = (src)+((sz)-1); \
  46. while(_t_sz--) *_t_dst-- = *_t_src--; \
  47. } \
  48. } while(0)
  49. /* add a and b with carry in + out */
  50. #define DSUMc(a,b,c,s) do { \
  51. ErtsDigit ___cr = (c); \
  52. ErtsDigit ___xr = (a)+(___cr); \
  53. ErtsDigit ___yr = (b); \
  54. ___cr = (___xr < ___cr); \
  55. ___xr = ___yr + ___xr; \
  56. ___cr += (___xr < ___yr); \
  57. s = ___xr; \
  58. c = ___cr; \
  59. } while(0)
  60. /* add a and b with carry out */
  61. #define DSUM(a,b,c,s) do { \
  62. ErtsDigit ___xr = (a); \
  63. ErtsDigit ___yr = (b); \
  64. ___xr = ___yr + ___xr; \
  65. s = ___xr; \
  66. c = (___xr < ___yr); \
  67. } while(0)
  68. #define DSUBb(a,b,r,d) do { \
  69. ErtsDigit ___cr = (r); \
  70. ErtsDigit ___xr = (a); \
  71. ErtsDigit ___yr = (b)+___cr; \
  72. ___cr = (___yr < ___cr); \
  73. ___yr = ___xr - ___yr; \
  74. ___cr += (___yr > ___xr); \
  75. d = ___yr; \
  76. r = ___cr; \
  77. } while(0)
  78. #define DSUB(a,b,r,d) do { \
  79. ErtsDigit ___xr = (a); \
  80. ErtsDigit ___yr = (b); \
  81. ___yr = ___xr - ___yr; \
  82. r = (___yr > ___xr); \
  83. d = ___yr; \
  84. } while(0)
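/* Usage sketch, assuming ErtsDigit is an unsigned type of D_EXP bits:
 * DSUMc adds two digits plus a carry-in and detects the carry-out by
 * checking for unsigned wrap-around. Adding two all-ones digits with
 * carry-in 1 yields an all-ones low digit and a carry-out of 1. */
static ERTS_INLINE ErtsDigit dsumc_usage_sketch(void)
{
    ErtsDigit c = 1;
    ErtsDigit s;
    DSUMc(~(ErtsDigit)0, ~(ErtsDigit)0, c, s);
    /* here s == ~(ErtsDigit)0 and c == 1 */
    return s + c;
}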
  85. /* type a constant as an ErtsDigit - to get shifts correct */
  86. #define DCONST(n) ((ErtsDigit)(n))
  87. /*
  88. * BIG_HAVE_DOUBLE_DIGIT is defined if we have defined
  89. * the type ErtsDoubleDigit which MUST have
  90. * sizeof(ErtsDoubleDigit) >= 2*sizeof(ErtsDigit)
  91. */
  92. #ifdef BIG_HAVE_DOUBLE_DIGIT
  93. /* ErtsDoubleDigit => ErtsDigit */
  94. #define DLOW(x) ((ErtsDigit)(x))
  95. #define DHIGH(x) ((ErtsDigit)(((ErtsDoubleDigit)(x)) >> D_EXP))
  96. /* ErtsDigit => ErtsDoubleDigit */
  97. #define DLOW2HIGH(x) (((ErtsDoubleDigit)(x)) << D_EXP)
  98. #define DDIGIT(a1,a0) (DLOW2HIGH(a1) + (a0))
  99. #define DMULc(a,b,c,p) do { \
  100. ErtsDoubleDigit _t = ((ErtsDoubleDigit)(a))*(b) + (c); \
  101. p = DLOW(_t); \
  102. c = DHIGH(_t); \
  103. } while(0)
  104. #define DMUL(a,b,c1,c0) do { \
  105. ErtsDoubleDigit _t = ((ErtsDoubleDigit)(a))*(b); \
  106. c0 = DLOW(_t); \
  107. c1 = DHIGH(_t); \
  108. } while(0)
  109. #define DDIV(a1,a0,b,q) do { \
  110. ErtsDoubleDigit _t = DDIGIT((a1),(a0)); \
  111. q = _t / (b); \
  112. } while(0)
  113. #define DDIV2(a1,a0,b1,b0,q) do { \
  114. ErtsDoubleDigit _t = DDIGIT((a1),(a0)); \
  115. q = _t / DDIGIT((b1),(b0)); \
  116. } while(0)
  117. #define DREM(a1,a0,b,r) do { \
  118. ErtsDoubleDigit _t = DDIGIT((a1),(a0)); \
  119. r = _t % (b); \
  120. } while(0)
  121. #else
  122. /* If we do not have double digit then we have some more work to do */
  123. #define H_EXP (D_EXP >> 1)
  124. #define LO_MASK ((ErtsDigit)((DCONST(1) << H_EXP)-1))
  125. #define HI_MASK ((ErtsDigit)(LO_MASK << H_EXP))
  126. #define DGT(a,b) ((a)>(b))
  127. #define DEQ(a,b) ((a)==(b))
  128. #define D2GT(a1,a0,b1,b0) (DGT(a1,b1) || (((a1)==(b1)) && DGT(a0,b0)))
  129. #define D2EQ(a1,a0,b1,b0) (DEQ(a1,b1) && DEQ(a0,b0))
  130. #define D2LT(a1,a0,b1,b0) D2GT(b1,b0,a1,a0)
  131. #define D2GTE(a1,a0,b1,b0) (!D2LT(a1,a0,b1,b0))
  132. #define D2LTE(a1,a0,b1,b0) (!D2GT(a1,a0,b1,b0))
  133. /* Add (A+B), A=(a1B+a0) B=(b1B+b0) */
  134. #define D2ADD(a1,a0,b1,b0,c1,c0) do { \
  135. ErtsDigit __ci = 0; \
  136. DSUM(a0,b0,__ci,c0); \
  137. DSUMc(a1,b1,__ci,c1); \
  138. } while(0)
  139. /* Subtract (A-B), A=(a1B+a0), B=(b1B+b0) (A>=B) */
  140. #define D2SUB(a1,a0,b1,b0,c1,c0) do { \
  141. ErtsDigit __bi; \
  142. DSUB(a0,b0,__bi,c0); \
  143. DSUBb(a1,b1,__bi,c1); \
  144. } while(0)
  145. /* Left shift (multiply by 2) (A <<= 1 where A=a1*B+a0) */
  146. #define D2LSHIFT1(a1,a0) do { \
  147. a1 = ((a0) >> (D_EXP-1)) | ((a1)<<1); \
  148. a0 = (a0) << 1; \
  149. } while(0)
  150. /* Right shift (divide by 2) (A >>= 1 where A=a1*B+a0) */
  151. #define D2RSHIFT1(a1,a0) do { \
  152. a0 = (((a1) & 1) << (D_EXP-1)) | ((a0)>>1); \
  153. a1 = ((a1) >> 1); \
  154. } while(0)
  155. /* Calculate a*b + d1 and store double prec result in d1, d0 */
  156. #define DMULc(a,b,d1,d0) do { \
  157. ErtsHalfDigit __a0 = (a); \
  158. ErtsHalfDigit __a1 = ((a) >> H_EXP); \
  159. ErtsHalfDigit __b0 = (b); \
  160. ErtsHalfDigit __b1 = ((b) >> H_EXP); \
  161. ErtsDigit __a0b0 = (ErtsDigit)__a0*__b0; \
  162. ErtsDigit __a0b1 = (ErtsDigit)__a0*__b1; \
  163. ErtsDigit __a1b0 = (ErtsDigit)__a1*__b0; \
  164. ErtsDigit __a1b1 = (ErtsDigit)__a1*__b1; \
  165. ErtsDigit __p0,__p1,__p2,__c0; \
  166. DSUM(__a0b0,d1,__c0,__p0); \
  167. DSUM((__c0<<H_EXP),(__p0>>H_EXP),__p2,__p1); \
  168. DSUM(__p1,__a0b1,__c0,__p1); \
  169. __p2 += __c0; \
  170. DSUM(__p1,__a1b0,__c0,__p1); \
  171. __p2 += __c0; \
  172. DSUM(__p1,__a1b1<<H_EXP,__c0,__p1); \
  173. __p2 += __c0; \
  174. DSUM(__a1b1, (__p2<<H_EXP),__c0,__p2); \
  175. d1 = (__p2 & HI_MASK) | (__p1 >> H_EXP); \
  176. d0 = (__p1 << H_EXP) | (__p0 & LO_MASK); \
  177. } while(0)
  178. #define DMUL(a,b,d1,d0) do { \
  179. ErtsDigit _ds = 0; \
  180. DMULc(a,b,_ds,d0); \
  181. d1 = _ds; \
  182. } while(0)
  183. /* Calculate a*(Bb1 + b0) + d2 = a*b1B + a*b0 + d2 */
  184. #define D2MULc(a,b1,b0,d2,d1,d0) do { \
  185. DMULc(a, b0, d2, d0); \
  186. DMULc(a, b1, d2, d1); \
  187. } while(0)
  188. /* Calculate s in a = 2^s*a1 */
  189. /* NOTE: since D2PF is used by other macros, its variables are prefixed by __ */
  190. #if D_EXP == 64
  191. #define D2PF(a, s) do { \
  192. ErtsDigit __x = (a); \
  193. int __s = 0; \
  194. if (__x <= 0x00000000FFFFFFFF) { __s += 32; __x <<= 32; } \
  195. if (__x <= 0x0000FFFFFFFFFFFF) { __s += 16; __x <<= 16; } \
  196. if (__x <= 0x00FFFFFFFFFFFFFF) { __s += 8; __x <<= 8; } \
  197. if (__x <= 0x0FFFFFFFFFFFFFFF) { __s += 4; __x <<= 4; } \
  198. if (__x <= 0x3FFFFFFFFFFFFFFF) { __s += 2; __x <<= 2; } \
  199. if (__x <= 0x7FFFFFFFFFFFFFFF) { __s += 1; } \
  200. s = __s; \
  201. } while(0)
  202. #elif D_EXP == 32
  203. #define D2PF(a, s) do { \
  204. ErtsDigit __x = (a); \
  205. int __s = 0; \
  206. if (__x <= 0x0000FFFF) { __s += 16; __x <<= 16; } \
  207. if (__x <= 0x00FFFFFF) { __s += 8; __x <<= 8; } \
  208. if (__x <= 0x0FFFFFFF) { __s += 4; __x <<= 4; } \
  209. if (__x <= 0x3FFFFFFF) { __s += 2; __x <<= 2; } \
  210. if (__x <= 0x7FFFFFFF) { __s += 1; } \
  211. s = __s; \
  212. } while(0)
  213. #elif D_EXP == 16
  214. #define D2PF(a, s) do { \
  215. ErtsDigit __x = (a); \
  216. int __s = 0; \
  217. if (__x <= 0x00FF) { __s += 8; __x <<= 8; } \
  218. if (__x <= 0x0FFF) { __s += 4; __x <<= 4; } \
  219. if (__x <= 0x3FFF) { __s += 2; __x <<= 2; } \
  220. if (__x <= 0x7FFF) { __s += 1; } \
  221. s = __s; \
  222. } while(0)
  223. #elif D_EXP == 8
  224. #define D2PF(a, s) do { \
  225. ErtsDigit __x = (a); \
  226. int __s = 0; \
  227. if (__x <= 0x0F) { __s += 4; __x <<= 4; } \
  228. if (__x <= 0x3F) { __s += 2; __x <<= 2; } \
  229. if (__x <= 0x7F) { __s += 1; } \
  230. s = __s; \
  231. } while(0)
  232. #endif
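/* Sketch, assuming one of the D_EXP cases above matches the digit width:
 * D2PF computes the left shift s that brings the most significant set bit
 * of a nonzero digit into the top position, which DDIVREM below uses to
 * normalize the divisor. */
static ERTS_INLINE int d2pf_usage_sketch(void)
{
    ErtsDigit a = 1;
    int s;
    D2PF(a, s);
    /* here s == D_EXP - 1, since only bit 0 of a is set */
    return s;
}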
  233. /* Calculate q = (a1B + a0) / b, assume a1 < b */
  234. #define DDIVREM(a1,a0,b,q,r) do { \
  235. ErtsDigit _a1 = (a1); \
  236. ErtsDigit _a0 = (a0); \
  237. ErtsDigit _b = (b); \
  238. ErtsHalfDigit _un1, _un0; \
  239. ErtsHalfDigit _vn1, _vn0; \
  240. ErtsDigit _q1, _q0; \
  241. ErtsDigit _un32, _un21, _un10; \
  242. ErtsDigit _rh; \
  243. Sint _s; \
  244. D2PF(_b, _s); \
  245. _b = _b << _s; \
  246. _vn1 = _b >> H_EXP; \
  247. _vn0 = _b & LO_MASK; \
  248. /* If needed to avoid undefined behaviour */ \
  249. if (_s) _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
  250. else _un32 = _a1; \
  251. _un10 = _a0 << _s; \
  252. _un1 = _un10 >> H_EXP; \
  253. _un0 = _un10 & LO_MASK; \
  254. _q1 = _un32/_vn1; \
  255. _rh = _un32 - _q1*_vn1; \
  256. while ((_q1 >= (DCONST(1)<<H_EXP))||(_q1*_vn0 > (_rh<<H_EXP)+_un1)) { \
  257. _q1--; \
  258. _rh += _vn1; \
  259. if (_rh >= (DCONST(1)<<H_EXP)) break; \
  260. } \
  261. _un21 = (_un32<<H_EXP) + _un1 - _q1*_b; \
  262. _q0 = _un21/_vn1; \
  263. _rh = _un21 - _q0*_vn1; \
  264. while ((_q0 >= (DCONST(1)<<H_EXP))||(_q0*_vn0 > ((_rh<<H_EXP)+_un0))) { \
  265. _q0--; \
  266. _rh += _vn1; \
  267. if (_rh >= (DCONST(1)<<H_EXP)) break; \
  268. } \
  269. r = ((_un21<<H_EXP) + _un0 - _q0*_b) >> _s; \
  270. q = (_q1<<H_EXP) + _q0; \
  271. } while(0)
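/* Sketch: DDIVREM divides the two-digit value a1*2^D_EXP + a0 by the single
 * digit b (assuming a1 < b), producing quotient q and remainder r. For
 * example, 1*2^D_EXP + 0 divided by 2: */
static ERTS_INLINE ErtsDigit ddivrem_usage_sketch(void)
{
    ErtsDigit q, r;
    DDIVREM(1, 0, 2, q, r);
    /* here q == (ErtsDigit)1 << (D_EXP - 1) and r == 0 */
    return q + r;
}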
  272. /* divide any a=(a1*B + a0) with b */
  273. #define DDIVREM2(a1,a0,b,q1,q0,r) do { \
  274. ErtsDigit __a1 = (a1); \
  275. ErtsDigit __b = (b); \
  276. q1 = __a1 / __b; \
  277. DDIVREM(__a1 % __b, (a0), __b, q0, r); \
  278. } while(0)
  279. /* Calculate r = (a1B + a0) % b */
  280. #define DREM(a1,a0,b,r) do { \
  281. ErtsDigit __a1 = (a1); \
  282. ErtsDigit __b = (b); \
  283. ERTS_DECLARE_DUMMY(ErtsDigit __q0); \
  284. DDIVREM((__a1 % __b), (a0), __b, __q0, r); \
  285. } while(0)
  286. #define DDIV(a1,a0,b,q) do { \
  287. ERTS_DECLARE_DUMMY(ErtsDigit _tmp); \
  288. DDIVREM(a1,a0,b,q,_tmp); \
  289. } while(0)
  290. /* Calculate q, r where A = Bq+R; assume A1 >= B */
  291. #if (SIZEOF_VOID_P == 8)
  292. #define QUOT_LIM 0x7FFFFFFFFFFFFFFF
  293. #else
  294. #define QUOT_LIM 0x7FFFFFFF
  295. #endif
  296. #define D2DIVREM(a1,a0,b1,b0,q0,r1,r0) do { \
  297. ErtsDigit _a1 = (a1); \
  298. ErtsDigit _a0 = (a0); \
  299. ErtsDigit _b1 = (b1); \
  300. ErtsDigit _b0 = (b0); \
  301. ErtsDigit _q = 0; \
  302. int _as = 1; \
  303. while(D2GTE(_a1,_a0,_b1,_b0)) { \
  304. ErtsDigit _q1; \
  305. ErtsDigit _t2=0, _t1, _t0; \
  306. if ((_b1 == 1) && (_a1 > 1)) \
  307. _q1 = _a1 / 2; \
  308. else if ((_a1 > QUOT_LIM) && (_b1 < _a1)) \
  309. _q1 = _a1/(_b1+1); \
  310. else \
  311. _q1 = _a1/_b1; \
  312. if (_as<0) \
  313. _q -= _q1; \
  314. else \
  315. _q += _q1; \
  316. D2MULc(_q1, _b1, _b0, _t2, _t1, _t0); \
  317. ASSERT(_t2 == 0); \
  318. if (D2GT(_t1,_t0,_a1,_a0)) { \
  319. D2SUB(_t1,_t0,_a1,_a0,_a1,_a0); \
  320. _as = -_as; \
  321. } \
  322. else { \
  323. D2SUB(_a1,_a0,_t1,_t0,_a1,_a0); \
  324. } \
  325. } \
  326. if (_as < 0) { \
  327. _q--; \
  328. D2SUB(_b1,_b0,_a1,_a0,_a1,_a0); \
  329. } \
  330. q0 = _q; \
  331. r1 = _a1; \
  332. r0 = _a0; \
  333. } while(0)
  334. /* Calculate q, r where A = Bq+R; assume B > 0 */
  335. #define D2DIVREM_0(a1,a0,b1,b0,q1,q0,r1,r0) do { \
  336. ErtsDigit _a1 = (a1); \
  337. ErtsDigit _a0 = (a0); \
  338. ErtsDigit _b1 = (b1); \
  339. ErtsDigit _b0 = (b0); \
  340. if (D2EQ(_a1,_a0,0,0)) { \
  341. q1 = q0 = 0; \
  342. r1 = r0 = 0; \
  343. } \
  344. else { \
  345. ErtsDigit _res1 = 0; \
  346. ErtsDigit _res0 = 0; \
  347. ErtsDigit _d1 = 0; \
  348. ErtsDigit _d0 = 1; \
  349. ErtsDigit _e1 = (1 << (D_EXP-1)); \
  350. ErtsDigit _e0 = 0; \
  351. while(_e1 && !(_a1 & _e1)) \
  352. _e1 >>= 1; \
  353. if (_e1 == 0) { \
  354. _e0 = (1 << (D_EXP-1)); \
  355. while(_e0 && !(_a0 & _e0)) \
  356. _e0 >>= 1; \
  357. } \
  358. if (D2GT(_b1,_b0,0,0)) { \
  359. while(D2GT(_e1,_e0,_b1,_b0)) { \
  360. D2LSHIFT1(_b1,_b0); \
  361. D2LSHIFT1(_d1,_d0); \
  362. } \
  363. } \
  364. do { \
  365. if (!D2GT(_b1,_b0,_a1,_a0)) { \
  366. D2SUB(_a1,_a0, _b1, _b0, _a1, _a0); \
  367. D2ADD(_d1,_d0, _res1,_res0, _res1, _res0); \
  368. } \
  369. D2RSHIFT1(_b1,_b0); \
  370. D2RSHIFT1(_d1,_d0); \
  371. } while (!D2EQ(_d1,_d0,0,0)); \
  372. r1 = _a1; \
  373. r0 = _a0; \
  374. q1 = _res1; \
  375. q0 = _res0; \
  376. } \
  377. } while(0)
  378. #define DDIV2(a1,a0,b1,b0,q) do { \
  379. ERTS_DECLARE_DUMMY(ErtsDigit _tmp_r1); \
  380. ERTS_DECLARE_DUMMY(ErtsDigit _tmp_r0); \
  381. D2DIVREM(a1,a0,b1,b0,q,_tmp_r1,_tmp_r0); \
  382. } while(0)
  383. #endif
  384. /* Forward declaration of lookup tables (See below in this file) used in list to
  385. * integer conversions for different bases. Also used in bignum printing.
  386. */
  387. static const byte digits_per_sint_lookup[36-1];
  388. static const byte digits_per_small_lookup[36-1];
  389. static const Sint largest_power_of_base_lookup[36-1];
  390. static ERTS_INLINE byte get_digits_per_signed_int(Uint base) {
  391. return digits_per_sint_lookup[base-2];
  392. }
  393. static ERTS_INLINE byte get_digits_per_small(Uint base) {
  394. return digits_per_small_lookup[base-2];
  395. }
  396. static ERTS_INLINE Sint get_largest_power_of_base(Uint base) {
  397. return largest_power_of_base_lookup[base-2];
  398. }
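/* Sketch of what the lookup tables cache for each base 2..36: how many
 * digits of that base always fit in a signed word, and base raised to that
 * power. A hypothetical direct computation of such values (the real tables
 * are constant data further down in this file): */
static ERTS_INLINE void base_power_sketch(Uint base, byte* digits, Sint* power)
{
    Sint max = (Sint)(~(Uint)0 >> 1);    /* largest Sint value */
    Sint p = 1;
    byte d = 0;
    while (p <= max / (Sint)base) {      /* grow while base*p still fits */
        p *= (Sint)base;
        d++;
    }
    *digits = d;
    *power = p;
}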
  399. /*
  400. ** compare two number vectors
  401. */
  402. static int I_comp(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl)
  403. {
  404. if (xl < yl)
  405. return -1;
  406. else if (xl > yl)
  407. return 1;
  408. else {
  409. if (x == y)
  410. return 0;
  411. x += (xl-1);
  412. y += (yl-1);
  413. while((xl > 0) && (*x == *y)) {
  414. x--;
  415. y--;
  416. xl--;
  417. }
  418. if (xl == 0)
  419. return 0;
  420. return (*x < *y) ? -1 : 1;
  421. }
  422. }
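/* Sketch: digit vectors are stored least significant digit first, so
 * I_comp compares lengths first and then walks from the most significant
 * digit downwards. */
static ERTS_INLINE int i_comp_usage_sketch(void)
{
    ErtsDigit a[2] = {5, 1};     /* represents 1*2^D_EXP + 5 */
    ErtsDigit b[2] = {9, 1};     /* represents 1*2^D_EXP + 9 */
    return I_comp(a, 2, b, 2);   /* -1, since a < b */
}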
  423. /*
  424. ** Add digits in x and y and store them in r
  425. ** assumption: (xl >= yl)
  426. */
  427. static dsize_t I_add(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
  428. {
  429. dsize_t sz = xl;
  430. register ErtsDigit yr, xr;
  431. register ErtsDigit c = 0;
  432. ASSERT(xl >= yl);
  433. xl -= yl;
  434. do {
  435. xr = *x++ + c;
  436. yr = *y++;
  437. c = (xr < c);
  438. xr = yr + xr;
  439. c += (xr < yr);
  440. *r++ = xr;
  441. } while(--yl);
  442. while(xl--) {
  443. xr = *x++ + c;
  444. c = (xr < c);
  445. *r++ = xr;
  446. }
  447. if (c) {
  448. *r = 1;
  449. return sz+1;
  450. }
  451. return sz;
  452. }
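/* Usage sketch: the result area must have room for xl+1 digits, since the
 * final carry can extend the number, e.g. (2^D_EXP - 1) + 1: */
static ERTS_INLINE dsize_t i_add_usage_sketch(ErtsDigit* r) /* room for 2 */
{
    ErtsDigit x[1] = { ~(ErtsDigit)0 };
    ErtsDigit y[1] = { 1 };
    return I_add(x, 1, y, 1, r); /* r[0] == 0, r[1] == 1, returns 2 */
}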
  453. /*
  454. ** Add a digit c to the digits in x and store the result in r
  455. */
  456. static dsize_t D_add(ErtsDigit* x, dsize_t xl, ErtsDigit c, ErtsDigit* r)
  457. {
  458. dsize_t sz = xl;
  459. register ErtsDigit xr;
  460. while(xl--) {
  461. xr = *x++ + c;
  462. c = (xr < c);
  463. *r++ = xr;
  464. }
  465. if (c) {
  466. *r = 1;
  467. return sz+1;
  468. }
  469. return sz;
  470. }
  471. /*
  472. ** Subtract the digits in y from the digits in x and store the result in r
  473. ** Assert I_comp(x, xl, y, yl) >= 0
  474. **
  475. */
  476. static dsize_t I_sub(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
  477. {
  478. ErtsDigit* r0 = r;
  479. register ErtsDigit yr, xr;
  480. register ErtsDigit c = 0;
  481. ASSERT(I_comp(x, xl, y, yl) >= 0);
  482. xl -= yl;
  483. do {
  484. yr = *y++ + c;
  485. xr = *x++;
  486. c = (yr < c);
  487. yr = xr - yr;
  488. c += (yr > xr);
  489. *r++ = yr;
  490. } while(--yl);
  491. while(xl--) {
  492. xr = *x++;
  493. yr = xr - c;
  494. c = (yr > xr);
  495. *r++ = yr;
  496. }
  497. do {
  498. r--;
  499. } while(*r == 0 && r != r0);
  500. return (r - r0) + 1;
  501. }
  502. /*
  503. ** Subtract digit c from x and store the result in r
  504. */
  505. static dsize_t D_sub(ErtsDigit* x, dsize_t xl, ErtsDigit c, ErtsDigit* r)
  506. {
  507. ErtsDigit* r0 = r;
  508. register ErtsDigit yr, xr;
  509. ASSERT(I_comp(x, xl, x, 1) >= 0);
  510. while(xl--) {
  511. xr = *x++;
  512. yr = xr - c;
  513. c = (yr > xr);
  514. *r++ = yr;
  515. }
  516. do {
  517. r--;
  518. } while(*r == 0 && r != r0);
  519. return (r - r0) + 1;
  520. }
  521. /*
  522. ** subtract Z000...0 - y and store result in r, return new size
  523. */
  524. static dsize_t Z_sub(ErtsDigit* y, dsize_t yl, ErtsDigit* r)
  525. {
  526. ErtsDigit* r0 = r;
  527. register ErtsDigit yr;
  528. register ErtsDigit c = 0;
  529. while(yl--) {
  530. yr = *y++ + c;
  531. c = (yr < c);
  532. yr = 0 - yr;
  533. c += (yr > 0);
  534. *r++ = yr;
  535. }
  536. do {
  537. r--;
  538. } while(*r == 0 && r != r0);
  539. return (r - r0) + 1;
  540. }
  541. /*
  542. ** Multiply digits in x with digits in y and store in r
  543. ** Assumption: digits in r must be 0 (up to the size of x)
  544. */
  545. static dsize_t I_mul(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
  546. {
  547. ErtsDigit* r0 = r;
  548. ErtsDigit* rt = r;
  549. while(xl--) {
  550. ErtsDigit cp = 0;
  551. ErtsDigit c = 0;
  552. dsize_t n = yl;
  553. ErtsDigit* yt = y;
  554. ErtsDigit d;
  555. ErtsDigit p;
  556. d = *x;
  557. x++;
  558. rt = r;
  559. switch(d) {
  560. case 0:
  561. rt = rt + n;
  562. break;
  563. case 1:
  564. while(n--) {
  565. DSUMc(*yt, *rt, c, p);
  566. *rt++ = p;
  567. yt++;
  568. }
  569. break;
  570. case 2:
  571. while(n--) {
  572. p = *yt;
  573. DSUMc(p, p, cp, p);
  574. DSUMc(p, *rt, c, p);
  575. *rt++ = p;
  576. yt++;
  577. }
  578. break;
  579. default:
  580. while(n--) {
  581. DMULc(d,*yt, cp, p);
  582. DSUMc(p,*rt, c, p);
  583. *rt++ = p;
  584. yt++;
  585. }
  586. break;
  587. }
  588. *rt = c + cp;
  589. r++;
  590. }
  591. if (*rt == 0)
  592. return (rt - r0);
  593. else
  594. return (rt - r0) + 1;
  595. }
  596. /*
  597. ** Square the digits in x and store the result in r (x and r may point into a common area)
  598. ** Assumption: x is destroyed if the areas overlap, and the digits in r are zero
  599. ** up to the size xl+1
  600. */
  601. static dsize_t I_sqr(ErtsDigit* x, dsize_t xl, ErtsDigit* r)
  602. {
  603. ErtsDigit d_next = *x;
  604. ErtsDigit d;
  605. ErtsDigit* r0 = r;
  606. ErtsDigit* s = r;
  607. if ((r + xl) == x) /* "Inline" operation */
  608. *x = 0;
  609. x++;
  610. while(xl--) {
  611. ErtsDigit* y = x;
  612. ErtsDigit y_0 = 0, y_1 = 0, y_2 = 0, y_3 = 0;
  613. ErtsDigit b0, b1;
  614. ErtsDigit z0, z1, z2;
  615. ErtsDigit t;
  616. dsize_t y_l = xl;
  617. s = r;
  618. d = d_next;
  619. d_next = *x;
  620. x++;
  621. DMUL(d, d, b1, b0);
  622. DSUMc(*s, b0, y_3, t);
  623. *s++ = t;
  624. z1 = b1;
  625. while(y_l--) {
  626. DMUL(d, *y, b1, b0);
  627. y++;
  628. DSUMc(b0, b0, y_0, z0);
  629. DSUMc(z0, z1, y_2, z2);
  630. DSUMc(*s, z2, y_3, t);
  631. *s++ = t;
  632. DSUMc(b1, b1, y_1, z1);
  633. }
  634. z0 = y_0;
  635. DSUMc(z0, z1, y_2, z2);
  636. DSUMc(*s, z2, y_3, t);
  637. *s = t;
  638. if (xl != 0) {
  639. s++;
  640. t = (y_1+y_2+y_3);
  641. *s = t;
  642. r += 2;
  643. }
  644. else {
  645. ASSERT((y_1+y_2+y_3) == 0);
  646. }
  647. }
  648. if (*s == 0)
  649. return (s - r0);
  650. else
  651. return (s - r0) + 1;
  652. }
  653. /*
  654. ** Multiply digit d with the digits in x and store the result in r
  655. */
  656. static dsize_t D_mul(ErtsDigit* x, dsize_t xl, ErtsDigit d, ErtsDigit* r)
  657. {
  658. ErtsDigit c = 0;
  659. dsize_t rl = xl;
  660. ErtsDigit p;
  661. switch(d) {
  662. case 0:
  663. ZERO_DIGITS(r, 1);
  664. return 1;
  665. case 1:
  666. if (x != r)
  667. MOVE_DIGITS(r, x, xl);
  668. return xl;
  669. case 2:
  670. while(xl--) {
  671. p = *x;
  672. DSUMc(p, p, c, p);
  673. *r++ = p;
  674. x++;
  675. }
  676. break;
  677. default:
  678. while(xl--) {
  679. DMULc(d, *x, c, p);
  680. *r++ = p;
  681. x++;
  682. }
  683. break;
  684. }
  685. if (c == 0)
  686. return rl;
  687. *r = c;
  688. return rl+1;
  689. }
  690. /*
  691. ** Multiply and subtract
  692. ** calculate r(i) = x(i) - d*y(i)
  693. ** assumption: xl == yl || xl == yl+1
  694. **
  695. ** Return size of r
  696. ** 0 means borrow
  697. */
  698. static dsize_t D_mulsub(ErtsDigit* x, dsize_t xl, ErtsDigit d,
  699. ErtsDigit* y, dsize_t yl, ErtsDigit* r)
  700. {
  701. ErtsDigit c = 0;
  702. ErtsDigit b = 0;
  703. ErtsDigit c0;
  704. ErtsDigit* r0 = r;
  705. ErtsDigit s;
  706. ASSERT(xl == yl || xl == yl+1);
  707. xl -= yl;
  708. while(yl--) {
  709. DMULc(d, *y, c, c0);
  710. DSUBb(*x, c0, b, s);
  711. *r++ = s;
  712. x++;
  713. y++;
  714. }
  715. if (xl == 0) {
  716. if (c != 0 || b != 0)
  717. return 0;
  718. }
  719. else { /* xl == 1 */
  720. DSUBb(*x, c, b, s);
  721. *r++ = s;
  722. }
  723. if (b != 0) return 0;
  724. do {
  725. r--;
  726. } while(*r == 0 && r != r0);
  727. return (r - r0) + 1;
  728. }
  729. /*
  730. ** Divide digits in x with a digit,
  731. ** quotient is returned in q and remainder digit in r
  732. ** x and q may be equal
  733. */
  734. static dsize_t D_div(ErtsDigit* x, dsize_t xl, ErtsDigit d, ErtsDigit* q, ErtsDigit* r)
  735. {
  736. ErtsDigit* xp = x + (xl-1);
  737. ErtsDigit* qp = q + (xl-1);
  738. dsize_t qsz = xl;
  739. ErtsDigit a1;
  740. a1 = *xp;
  741. xp--;
  742. if (d > a1) {
  743. if (xl == 1) {
  744. *r = a1;
  745. *qp = 0;
  746. return 1;
  747. }
  748. qsz--;
  749. qp--;
  750. }
  751. do {
  752. ErtsDigit q0, a0, b0;
  753. ERTS_DECLARE_DUMMY(ErtsDigit b);
  754. ERTS_DECLARE_DUMMY(ErtsDigit b1);
  755. if (d > a1) {
  756. a0 = *xp;
  757. xp--;
  758. }
  759. else {
  760. a0 = a1; a1 = 0;
  761. }
  762. DDIV(a1, a0, d, q0);
  763. DMUL(d, q0, b1, b0);
  764. DSUB(a0,b0, b, a1);
  765. *qp = q0;
  766. qp--;
  767. } while (xp >= x);
  768. *r = a1;
  769. return qsz;
  770. }
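/* Usage sketch: D_div is schoolbook short division from the most
 * significant digit down; x and q may alias. Dividing the two-digit
 * number 1*2^D_EXP + 0 by 2: */
static ERTS_INLINE dsize_t d_div_usage_sketch(ErtsDigit* q) /* room for 2 */
{
    ErtsDigit x[2] = { 0, 1 };
    ErtsDigit r;
    dsize_t ql = D_div(x, 2, 2, q, &r);
    /* here ql == 1, q[0] == (ErtsDigit)1 << (D_EXP - 1) and r == 0 */
    return ql;
}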
  771. /*
  772. ** Divide the digits in x by the digits in y and return the quotient in q
  773. ** and remainder in r
  774. ** assume that integer(x) > integer(y)
  775. ** Return the remainder in r (length in *rlp)
  776. ** Return quotient size
  777. */
  778. static dsize_t I_div(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl,
  779. ErtsDigit* q, ErtsDigit* r, dsize_t* rlp)
  780. {
  781. ErtsDigit* rp;
  782. ErtsDigit* qp;
  783. ErtsDigit b1 = y[yl-1];
  784. ErtsDigit b2 = y[yl-2];
  785. ErtsDigit a1;
  786. ErtsDigit a2;
  787. int r_signed = 0;
  788. dsize_t ql;
  789. dsize_t rl;
  790. if (x != r)
  791. MOVE_DIGITS(r, x, xl);
  792. rp = r + (xl-yl);
  793. rl = xl;
  794. ZERO_DIGITS(q, xl-yl+1);
  795. qp = q + (xl-yl);
  796. ql = 0;
  797. /* Adjust length */
  798. a1 = rp[yl-1];
  799. a2 = rp[yl-2];
  800. if (b1 < a1 || (b1 == a1 && b2 <= a2))
  801. ql = 1;
  802. do {
  803. ErtsDigit q0;
  804. dsize_t nsz = yl;
  805. dsize_t nnsz;
  806. a1 = rp[yl-1];
  807. a2 = rp[yl-2];
  808. if (b1 < a1)
  809. DDIV2(a1,a2,b1,b2,q0);
  810. else if (b1 > a1) {
  811. DDIV(a1,a2,b1,q0);
  812. nsz++;
  813. rp--;
  814. qp--;
  815. ql++;
  816. }
  817. else { /* (b1 == a1) */
  818. if (b2 <= a2)
  819. q0 = 1;
  820. else {
  821. q0 = D_MASK;
  822. nsz++;
  823. rp--;
  824. qp--;
  825. ql++;
  826. }
  827. }
  828. if (r_signed)
  829. ql = D_sub(qp, ql, q0, qp);
  830. else
  831. ql = D_add(qp, ql, q0, qp);
  832. if ((nnsz = D_mulsub(rp, nsz, q0, y, yl, rp)) == 0) {
  833. nnsz = Z_sub(r, rl, r);
  834. if (nsz > (rl-nnsz))
  835. nnsz = nsz - (rl-nnsz);
  836. else
  837. nnsz = 1;
  838. r_signed = !r_signed;
  839. }
  840. if ((nnsz == 1) && (*rp == 0))
  841. nnsz = 0;
  842. rp = rp - (yl-nnsz);
  843. rl -= (nsz-nnsz);
  844. qp = qp - (yl-nnsz);
  845. ql += (yl-nnsz);
  846. } while (I_comp(r, rl, y, yl) >= 0);
  847. ql -= (q - qp);
  848. qp = q;
  849. if (rl == 0)
  850. rl = 1;
  851. while(rl > 1 && r[rl-1] == 0) /* Remove "trailing zeroes" */
  852. --rl;
  853. if (r_signed && (rl > 1 || *r != 0)) {
  854. rl = I_sub(y, yl, r, rl, r);
  855. ql = D_sub(qp, ql, 1, qp);
  856. }
  857. *rlp = rl;
  858. return ql;
  859. }
  860. /*
  861. ** Remainder of digits in x and a digit d
  862. */
  863. static ErtsDigit D_rem(ErtsDigit* x, dsize_t xl, ErtsDigit d)
  864. {
  865. ErtsDigit rem = 0;
  866. x += (xl-1);
  867. do {
  868. if (rem != 0)
  869. DREM(rem, *x, d, rem);
  870. else
  871. DREM(0, *x, d, rem);
  872. x--;
  873. xl--;
  874. } while(xl > 0);
  875. return rem;
  876. }
  877. /*
  878. ** Remainder of x and y
  879. **
  880. ** Assumptions: xl >= yl, yl > 1
  881. ** r must contain at least xl number of digits
  882. */
  883. static dsize_t I_rem(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
  884. {
  885. ErtsDigit* rp;
  886. ErtsDigit b1 = y[yl-1];
  887. ErtsDigit b2 = y[yl-2];
  888. ErtsDigit a1;
  889. ErtsDigit a2;
  890. int r_signed = 0;
  891. dsize_t rl;
  892. if (x != r)
  893. MOVE_DIGITS(r, x, xl);
  894. rp = r + (xl-yl);
  895. rl = xl;
  896. do {
  897. ErtsDigit q0;
  898. dsize_t nsz = yl;
  899. dsize_t nnsz;
  900. a1 = rp[yl-1];
  901. a2 = rp[yl-2];
  902. if (b1 < a1)
  903. DDIV2(a1,a2,b1,b2,q0);
  904. else if (b1 > a1) {
  905. DDIV(a1,a2,b1,q0);
  906. nsz++;
  907. rp--;
  908. }
  909. else { /* (b1 == a1) */
  910. if (b2 <= a2)
  911. q0 = 1;
  912. else {
  913. q0 = D_MASK;
  914. nsz++;
  915. rp--;
  916. }
  917. }
  918. if ((nnsz = D_mulsub(rp, nsz, q0, y, yl, rp)) == 0) {
  919. nnsz = Z_sub(r, rl, r);
  920. if (nsz > (rl-nnsz))
  921. nnsz = nsz - (rl-nnsz);
  922. else
  923. nnsz = 1;
  924. r_signed = !r_signed;
  925. }
  926. if (nnsz == 1 && *rp == 0)
  927. nnsz = 0;
  928. rp = rp - (yl-nnsz);
  929. rl -= (nsz-nnsz);
  930. } while (I_comp(r, rl, y, yl) >= 0);
  931. if (rl == 0)
  932. rl = 1;
  933. while(rl > 1 && r[rl-1] == 0) /* Remove "trailing zeroes" */
  934. --rl;
  935. if (r_signed && (rl > 1 || *r != 0))
  936. rl = I_sub(y, yl, r, rl, r);
  937. return rl;
  938. }
  939. /*
  940. ** Remove trailing digits from bitwise operations
  941. */
  942. static dsize_t I_btrail(ErtsDigit* r0, ErtsDigit* r, short sign)
  943. {
  944. /* convert negative numbers to one's complement */
  945. if (sign) {
  946. dsize_t rl;
  947. ErtsDigit d;
  948. /* 1. remove all leading D_MASK (all-ones) words */
  949. do {
  950. r--;
  951. } while(((d = *r) == D_MASK) && (r != r0));
  952. /* 2 complement high digit */
  953. if (d == D_MASK)
  954. *r = 0;
  955. else {
  956. ErtsDigit prev_mask = 0;
  957. ErtsDigit mask = (DCONST(1) << (D_EXP-1));
  958. while((d & mask) == mask) {
  959. prev_mask = mask;
  960. mask = (prev_mask >> 1) | (DCONST(1)<<(D_EXP-1));
  961. }
  962. *r = ~d & ~prev_mask;
  963. }
  964. rl = (r - r0) + 1;
  965. while(r != r0) {
  966. r--;
  967. *r = ~*r;
  968. }
  969. return D_add(r0, rl, 1, r0);
  970. }
  971. do {
  972. r--;
  973. } while(*r == 0 && r != r0);
  974. return (r - r0) + 1;
  975. }
  976. /*
  977. ** Bitwise and
  978. */
  979. static dsize_t I_band(ErtsDigit* x, dsize_t xl, short xsgn,
  980. ErtsDigit* y, dsize_t yl, short ysgn, ErtsDigit* r)
  981. {
  982. ErtsDigit* r0 = r;
  983. short sign = xsgn && ysgn;
  984. ASSERT(xl >= yl);
  985. xl -= yl;
  986. if (!xsgn) {
  987. if (!ysgn) {
  988. while(yl--)
  989. *r++ = *x++ & *y++;
  990. }
  991. else {
  992. ErtsDigit b;
  993. ErtsDigit c;
  994. DSUB(*y,1,b,c);
  995. *r++ = *x++ & ~c;
  996. y++;
  997. yl--;
  998. while(yl--) {
  999. DSUBb(*y,0,b,c);
  1000. *r++ = *x++ & ~c;
  1001. y++;
  1002. }
  1003. while (xl--) {
  1004. *r++ = *x++;
  1005. }
  1006. }
  1007. }
  1008. else {
  1009. if (!ysgn) {
  1010. ErtsDigit b;
  1011. ErtsDigit c;
  1012. DSUB(*x,1,b,c);
  1013. *r = ~c & *y;
  1014. x++; y++; r++;
  1015. yl--;
  1016. while(yl--) {
  1017. DSUBb(*x,0,b,c);
  1018. *r++ = ~c & *y++;
  1019. x++;
  1020. }
  1021. }
  1022. else {
  1023. ErtsDigit b1, b2;
  1024. ErtsDigit c1, c2;
  1025. DSUB(*x,1,b1,c1);
  1026. DSUB(*y,1,b2,c2);
  1027. *r++ = ~c1 & ~c2;
  1028. x++; y++;
  1029. yl--;
  1030. while(yl--) {
  1031. DSUBb(*x,0,b1,c1);
  1032. DSUBb(*y,0,b2,c2);
  1033. *r++ = ~c1 & ~c2;
  1034. x++; y++;
  1035. }
  1036. while(xl--)
  1037. *r++ = ~*x++;
  1038. }
  1039. }
  1040. return I_btrail(r0, r, sign);
  1041. }
  1042. /*
  1043. * Bitwise 'or'.
  1044. */
  1045. static dsize_t
  1046. I_bor(ErtsDigit* x, dsize_t xl, short xsgn, ErtsDigit* y,
  1047. dsize_t yl, short ysgn, ErtsDigit* r)
  1048. {
  1049. ErtsDigit* r0 = r;
  1050. short sign = xsgn || ysgn;
  1051. ASSERT(xl >= yl);
  1052. xl -= yl;
  1053. if (!xsgn) {
  1054. if (!ysgn) {
  1055. while(yl--)
  1056. *r++ = *x++ | *y++;
  1057. while(xl--)
  1058. *r++ = *x++;
  1059. }
  1060. else {
  1061. ErtsDigit b;
  1062. ErtsDigit c;
  1063. DSUB(*y,1,b,c);
  1064. *r++ = *x++ | ~c;
  1065. y++;
  1066. yl--;
  1067. while(yl--) {
  1068. DSUBb(*y,0,b,c);
  1069. *r++ = *x++ | ~c;
  1070. y++;
  1071. }
  1072. }
  1073. }
  1074. else {
  1075. if (!ysgn) {
  1076. ErtsDigit b;
  1077. ErtsDigit c;
  1078. DSUB(*x,1,b,c);
  1079. *r++ = ~c | *y++;
  1080. x++;
  1081. yl--;
  1082. while(yl--) {
  1083. DSUBb(*x,0,b,c);
  1084. *r++ = ~c | *y++;
  1085. x++;
  1086. }
  1087. while(xl--) {
  1088. DSUBb(*x,0,b,c);
  1089. *r++ = ~c;
  1090. x++;
  1091. }
  1092. }
  1093. else {
  1094. ErtsDigit b1, b2;
  1095. ErtsDigit c1, c2;
  1096. DSUB(*x,1,b1,c1);
  1097. DSUB(*y,1,b2,c2);
  1098. *r++ = ~c1 | ~c2;
  1099. x++; y++;
  1100. yl--;
  1101. while(yl--) {
  1102. DSUBb(*x,0,b1,c1);
  1103. DSUBb(*y,0,b2,c2);
  1104. *r++ = ~c1 | ~c2;
  1105. x++; y++;
  1106. }
  1107. }
  1108. }
  1109. return I_btrail(r0, r, sign);
  1110. }
  1111. /*
  1112. ** Bitwise xor
  1113. */
  1114. static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
  1115. ErtsDigit* y, dsize_t yl, short ysgn, ErtsDigit* r)
  1116. {
  1117. ErtsDigit* r0 = r;
  1118. short sign = xsgn != ysgn;
  1119. ASSERT(xl >= yl);
  1120. xl -= yl;
  1121. if (!xsgn) {
  1122. if (!ysgn) {
  1123. while(yl--)
  1124. *r++ = *x++ ^ *y++;
  1125. while(xl--)
  1126. *r++ = *x++;
  1127. }
  1128. else {
  1129. ErtsDigit b;
  1130. ErtsDigit c;
  1131. DSUB(*y,1,b,c);
  1132. *r++ = *x++ ^ ~c;
  1133. y++;
  1134. yl--;
  1135. while(yl--) {
  1136. DSUBb(*y,0,b,c);
  1137. *r++ = *x++ ^ ~c;
  1138. y++;
  1139. }
  1140. while(xl--)
  1141. *r++ = ~*x++;
  1142. }
  1143. }
  1144. else {
  1145. if (!ysgn) {
  1146. ErtsDigit b;
  1147. ErtsDigit c;
  1148. DSUB(*x,1,b,c);
  1149. *r++ = ~c ^ *y++;
  1150. x++;
  1151. yl--;
  1152. while(yl--) {
  1153. DSUBb(*x,0,b,c);
  1154. *r++ = ~c ^ *y++;
  1155. x++;
  1156. }
  1157. while(xl--) {
  1158. DSUBb(*x,0,b,c);
  1159. *r++ = ~c;
  1160. x++;
  1161. }
  1162. }
  1163. else {
  1164. ErtsDigit b1, b2;
  1165. ErtsDigit c1, c2;
  1166. DSUB(*x,1,b1,c1);
  1167. DSUB(*y,1,b2,c2);
  1168. *r++ = ~c1 ^ ~c2;
  1169. x++; y++;
  1170. yl--;
  1171. while(yl--) {
  1172. DSUBb(*x,0,b1,c1);
  1173. DSUBb(*y,0,b2,c2);
  1174. *r++ = ~c1 ^ ~c2;
  1175. x++; y++;
  1176. }
  1177. while(xl--) {
  1178. DSUBb(*x,0,b1,c1);
  1179. *r++ = c1;
  1180. x++;
  1181. }
  1182. }
  1183. }
  1184. return I_btrail(r0, r, sign);
  1185. }
  1186. /*
  1187. ** Bitwise not simulated as
  1188. ** bnot -X == (X - 1)
  1189. ** bnot +X == -(X + 1)
  1190. */
  1191. static dsize_t I_bnot(ErtsDigit* x, dsize_t xl, short xsgn, ErtsDigit* r)
  1192. {
  1193. if (xsgn)
  1194. return D_add(x, xl, 1, r);
  1195. else
  1196. return D_sub(x, xl, 1, r);
  1197. }
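/* Usage sketch: the caller (big_bnot further down) passes the sign of the
 * *result*, so for bnot(+5) == -6 the magnitude grows by one: */
static ERTS_INLINE dsize_t i_bnot_usage_sketch(ErtsDigit* r)
{
    ErtsDigit x[1] = { 5 };
    return I_bnot(x, 1, 1, r); /* r[0] == 6, returns 1 */
}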
  1198. /*
  1199. ** Arithmetic left shift or right
  1200. */
  1201. static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
  1202. short sign, ErtsDigit* r)
  1203. {
  1204. if (y == 0) {
  1205. MOVE_DIGITS(r, x, xl);
  1206. return xl;
  1207. }
  1208. else if (xl == 1 && *x == 0) {
  1209. *r = 0;
  1210. return 1;
  1211. }
  1212. else {
  1213. Uint ay = (y < 0) ? -y : y;
  1214. Uint bw = ay / D_EXP;
  1215. Uint sw = ay % D_EXP;
  1216. dsize_t rl;
  1217. ErtsDigit a1=0;
  1218. ErtsDigit a0=0;
  1219. if (y > 0) { /* shift left */
  1220. rl = xl + bw + 1;
  1221. while(bw--)
  1222. *r++ = 0;
  1223. if (sw) { /* NOTE! x >> 32 is not = 0! */
  1224. while(xl--) {
  1225. a0 = (*x << sw) | a1;
  1226. a1 = (*x >> (D_EXP - sw));
  1227. *r++ = a0;
  1228. x++;
  1229. }
  1230. }
  1231. else {
  1232. while(xl--) {
  1233. *r++ = *x++;
  1234. }
  1235. }
  1236. if (a1 == 0)
  1237. return rl-1;
  1238. *r = a1;
  1239. return rl;
  1240. }
  1241. else { /* shift right */
  1242. ErtsDigit* r0 = r;
  1243. int add_one = 0;
  1244. if (xl <= bw) {
  1245. if (sign)
  1246. *r = 1;
  1247. else
  1248. *r = 0;
  1249. return 1;
  1250. }
  1251. if (sign) {
  1252. Uint zl = bw;
  1253. ErtsDigit* z = x;
  1254. while(zl--) {
  1255. if (*z != 0) {
  1256. add_one = 1;
  1257. break;
  1258. }
  1259. z++;
  1260. }
  1261. }
  1262. rl = xl - bw;
  1263. x += (xl-1);
  1264. r += (rl-1);
  1265. xl -= bw;
  1266. if (sw) { /* NOTE! x >> 32 is not = 0! */
  1267. while(xl--) {
  1268. a1 = (*x >> sw) | a0;
  1269. a0 = (*x << (D_EXP-sw));
  1270. *r-- = a1;
  1271. x--;
  1272. }
  1273. }
  1274. else {
  1275. while(xl--) {
  1276. *r-- = *x--;
  1277. }
  1278. }
  1279. if (sign && (a0 != 0))
  1280. add_one = 1;
  1281. if (r[rl] == 0) {
  1282. if (rl == 1) {
  1283. if (sign)
  1284. r[1] = 1;
  1285. return 1;
  1286. }
  1287. rl--;
  1288. }
  1289. if (add_one)
  1290. return D_add(r0, rl, 1, r0);
  1291. return rl;
  1292. }
  1293. }
  1294. }
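/* Usage sketch: a right shift of a negative number must round toward
 * minus infinity, which is why any bits shifted out cause one to be added
 * to the magnitude, e.g. -5 bsr 1 == -3: */
static ERTS_INLINE dsize_t i_lshift_usage_sketch(ErtsDigit* r)
{
    ErtsDigit x[1] = { 5 };
    return I_lshift(x, 1, -1, 1, r); /* r[0] == 3, returns 1 */
}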
  1295. /*
  1296. ** Return log(x)/log(2)
  1297. */
  1298. static int I_lg(ErtsDigit* x, dsize_t xl)
  1299. {
  1300. dsize_t sz = xl - 1;
  1301. ErtsDigit d = x[sz];
  1302. sz *= D_EXP;
  1303. while(d != 0) {
  1304. d >>= 1;
  1305. sz++;
  1306. }
  1307. return sz - 1;
  1308. }
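/* Sketch: I_lg returns the index of the most significant set bit, i.e.
 * trunc(log2(x)) for a normalized, nonzero bignum. */
static ERTS_INLINE int i_lg_usage_sketch(void)
{
    ErtsDigit x[1] = { 8 };
    return I_lg(x, 1); /* 3, since 8 == 2^3 */
}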
  1309. /*
  1310. ** Create bigint on heap if necessary. Like the previously existing
  1311. ** make_small_or_big(), except for a HAlloc() instead of an
  1312. ** ArithAlloc().
  1313. ** NOTE: Only use erts_make_integer(), when order of heap fragments is
  1314. ** guaranteed to be correct.
  1315. */
  1316. Eterm
  1317. erts_make_integer(Uint x, Process *p)
  1318. {
  1319. Eterm* hp;
  1320. if (IS_USMALL(0,x))
  1321. return make_small(x);
  1322. else {
  1323. hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
  1324. return uint_to_big(x,hp);
  1325. }
  1326. }
  1327. /*
  1328. * As erts_make_integer, but from a whole UWord.
  1329. */
  1330. Eterm
  1331. erts_make_integer_from_uword(UWord x, Process *p)
  1332. {
  1333. Eterm* hp;
  1334. if (IS_USMALL(0,x))
  1335. return make_small(x);
  1336. else {
  1337. hp = HAlloc(p, BIG_UWORD_HEAP_SIZE(x));
  1338. return uword_to_big(x,hp);
  1339. }
  1340. }
  1341. /*
  1342. ** convert Uint to bigint
  1343. ** (must only be used if x is too big to be stored as a small)
  1344. */
  1345. Eterm uint_to_big(Uint x, Eterm *y)
  1346. {
  1347. *y = make_pos_bignum_header(1);
  1348. BIG_DIGIT(y, 0) = x;
  1349. return make_big(y);
  1350. }
  1351. /*
  1352. ** convert UWord to bigint
  1353. ** (must only be used if x is too big to be stored as a small)
  1354. ** Allocation is tricky: the heap space needed has to be calculated
  1355. ** with the macro BIG_UWORD_HEAP_SIZE(x)
  1356. */
  1357. Eterm uword_to_big(UWord x, Eterm *y)
  1358. {
  1359. *y = make_pos_bignum_header(1);
  1360. BIG_DIGIT(y, 0) = x;
  1361. return make_big(y);
  1362. }
  1363. /*
  1364. ** convert signed int to bigint
  1365. */
  1366. Eterm small_to_big(Sint x, Eterm *y)
  1367. {
  1368. Uint xu;
  1369. if (x >= 0) {
  1370. xu = x;
  1371. *y = make_pos_bignum_header(1);
  1372. } else {
  1373. xu = -(Uint)x;
  1374. *y = make_neg_bignum_header(1);
  1375. }
  1376. BIG_DIGIT(y, 0) = xu;
  1377. return make_big(y);
  1378. }
  1379. Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
  1380. {
  1381. Eterm *hp = *hpp;
  1382. #if defined(ARCH_32)
  1383. if (x >= (((Uint64) 1) << 32)) {
  1384. *hp = make_pos_bignum_header(2);
  1385. BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
  1386. BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
  1387. *hpp += 3;
  1388. }
  1389. else
  1390. #endif
  1391. {
  1392. *hp = make_pos_bignum_header(1);
  1393. BIG_DIGIT(hp, 0) = (Uint) x;
  1394. *hpp += 2;
  1395. }
  1396. return make_big(hp);
  1397. }
  1398. Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
  1399. {
  1400. Eterm *hp = *hpp;
  1401. Uint64 ux;
  1402. int neg;
  1403. if (x >= 0) {
  1404. neg = 0;
  1405. ux = x;
  1406. }
  1407. else {
  1408. neg = 1;
  1409. ux = -(Uint64)x;
  1410. }
  1411. #if defined(ARCH_32)
  1412. if (ux >= (((Uint64) 1) << 32)) {
  1413. if (neg)
  1414. *hp = make_neg_bignum_header(2);
  1415. else
  1416. *hp = make_pos_bignum_header(2);
  1417. BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff));
  1418. BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff));
  1419. *hpp += 3;
  1420. }
  1421. else
  1422. #endif
  1423. {
  1424. if (neg)
  1425. *hp = make_neg_bignum_header(1);
  1426. else
  1427. *hp = make_pos_bignum_header(1);
  1428. BIG_DIGIT(hp, 0) = (Uint) ux;
  1429. *hpp += 2;
  1430. }
  1431. return make_big(hp);
  1432. }
  1433. Eterm
  1434. erts_uint64_array_to_big(Uint **hpp, int neg, int len, Uint64 *array)
  1435. {
  1436. Uint *headerp;
  1437. int i, pot_digits, digits;
  1438. headerp = *hpp;
  1439. pot_digits = digits = 0;
  1440. for (i = 0; i < len; i++) {
  1441. #if defined(ARCH_32)
  1442. Uint low_val = array[i] & ((Uint) 0xffffffff);
  1443. Uint high_val = (array[i] >> 32) & ((Uint) 0xffffffff);
  1444. BIG_DIGIT(headerp, pot_digits) = low_val;
  1445. pot_digits++;
  1446. if (low_val)
  1447. digits = pot_digits;
  1448. BIG_DIGIT(headerp, pot_digits) = high_val;
  1449. pot_digits++;
  1450. if (high_val)
  1451. digits = pot_digits;
  1452. #else
  1453. Uint val = array[i];
  1454. BIG_DIGIT(headerp, pot_digits) = val;
  1455. pot_digits++;
  1456. if (val)
  1457. digits = pot_digits;
  1458. #endif
  1459. }
  1460. if (neg)
  1461. *headerp = make_neg_bignum_header(digits);
  1462. else
  1463. *headerp = make_pos_bignum_header(digits);
  1464. *hpp = headerp + 1 + digits;
  1465. return make_big(headerp);
  1466. }
  1467. /*
  1468. ** Convert a bignum to a double float
  1469. */
  1470. int
  1471. big_to_double(Wterm x, double* resp)
  1472. {
  1473. double d = 0.0;
  1474. Eterm* xp = big_val(x);
  1475. dsize_t xl = BIG_SIZE(xp);
  1476. ErtsDigit* s = BIG_V(xp) + xl;
  1477. short xsgn = BIG_SIGN(xp);
  1478. double dbase = ((double)(D_MASK)+1);
  1479. #ifndef NO_FPE_SIGNALS
  1480. volatile unsigned long *fpexnp = erts_get_current_fp_exception();
  1481. #endif
  1482. __ERTS_SAVE_FP_EXCEPTION(fpexnp);
  1483. __ERTS_FP_CHECK_INIT(fpexnp);
  1484. while (xl--) {
  1485. d = d * dbase + *--s;
  1486. __ERTS_FP_ERROR(fpexnp, d, __ERTS_RESTORE_FP_EXCEPTION(fpexnp); return -1);
  1487. }
  1488. *resp = xsgn ? -d : d;
  1489. __ERTS_FP_ERROR(fpexnp,*resp,;);
  1490. __ERTS_RESTORE_FP_EXCEPTION(fpexnp);
  1491. return 0;
  1492. }
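/* Sketch of the conversion loop above, without the floating point
 * exception plumbing: digits are folded in most significant first,
 * scaling by 2^D_EXP per digit. */
static ERTS_INLINE double big_to_double_sketch(const ErtsDigit* v, dsize_t l)
{
    double d = 0.0;
    double dbase = ((double)(D_MASK)) + 1.0; /* 2^D_EXP */
    const ErtsDigit* s = v + l;
    while (l--)
        d = d * dbase + *--s;
    return d;
}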
  1493. /*
  1494. * Logic has been copied from erl_bif_guard.c and slightly
  1495. * modified to use a static instead of dynamic heap
  1496. */
  1497. Eterm
  1498. double_to_big(double x, Eterm *heap, Uint hsz)
  1499. {
  1500. int is_negative;
  1501. int ds;
  1502. ErtsDigit* xp;
  1503. Eterm res;
  1504. int i;
  1505. size_t sz;
  1506. Eterm* hp;
  1507. double dbase;
  1508. if (x >= 0) {
  1509. is_negative = 0;
  1510. } else {
  1511. is_negative = 1;
  1512. x = -x;
  1513. }
  1514. /* Unscale & (calculate exponent) */
  1515. ds = 0;
  1516. dbase = ((double) (D_MASK) + 1);
  1517. while (x >= 1.0) {
  1518. x /= dbase; /* "shift" right */
  1519. ds++;
  1520. }
  1521. sz = BIG_NEED_SIZE(ds); /* number of words including arity */
  1522. hp = heap;
  1523. res = make_big(hp);
  1524. xp = (ErtsDigit*) (hp + 1);
  1525. ASSERT(ds < hsz);
  1526. for (i = ds - 1; i >= 0; i--) {
  1527. ErtsDigit d;
  1528. x *= dbase; /* "shift" left */
  1529. d = x; /* trunc */
  1530. xp[i] = d; /* store digit */
  1531. x -= d; /* remove integer part */
  1532. }
  1533. while ((ds & (BIG_DIGITS_PER_WORD - 1)) != 0) {
  1534. xp[ds++] = 0;
  1535. }
  1536. if (is_negative) {
  1537. *hp = make_neg_bignum_header(sz-1);
  1538. } else {
  1539. *hp = make_pos_bignum_header(sz-1);
  1540. }
  1541. return res;
  1542. }
  1543. /*
  1544. ** Estimate the number of decimal digits (include sign)
  1545. */
  1546. int big_decimal_estimate(Wterm x)
  1547. {
  1548. Eterm* xp = big_val(x);
  1549. int lg = I_lg(BIG_V(xp), BIG_SIZE(xp));
  1550. int lg10 = ((lg+1)*28/93)+1;
  1551. if (BIG_SIGN(xp)) lg10++; /* add sign */
  1552. return lg10+1; /* add null */
  1553. }
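/* Sketch: 28/93 (= 0.30107...) slightly overestimates log10(2)
 * (= 0.30102...), so the estimate never comes out short. For 2^64 - 1,
 * which has 20 decimal digits, I_lg gives 63 and: */
static ERTS_INLINE int decimal_estimate_sketch(void)
{
    int lg = 63;
    return ((lg + 1) * 28 / 93) + 1; /* 20 */
}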
  1554. /*
  1555. ** Convert a bignum into a string of decimal numbers
  1556. */
  1557. static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
  1558. {
  1559. Eterm* xp = big_val(x);
  1560. ErtsDigit* dx = BIG_V(xp);
  1561. dsize_t xl = BIG_SIZE(xp);
  1562. short sign = BIG_SIGN(xp);
  1563. ErtsDigit rem;
  1564. Uint n = 0;
  1565. const Uint digits_per_Sint = get_digits_per_signed_int(10);
  1566. const Sint largest_pow_of_base = get_largest_power_of_base(10);
  1567. if (xl == 1 && *dx < largest_pow_of_base) {
  1568. rem = *dx;
  1569. if (rem == 0) {
  1570. (*write_func)(arg, '0'); n++;
  1571. } else {
  1572. while(rem) {
  1573. (*write_func)(arg, (rem % 10) + '0'); n++;
  1574. rem /= 10;
  1575. }
  1576. }
  1577. } else {
  1578. ErtsDigit* tmp = (ErtsDigit*) erts_alloc(ERTS_ALC_T_TMP,
  1579. sizeof(ErtsDigit)*xl);
  1580. dsize_t tmpl = xl;
  1581. MOVE_DIGITS(tmp, dx, xl);
  1582. while(1) {
  1583. tmpl = D_div(tmp, tmpl, largest_pow_of_base, tmp, &rem);
  1584. if (tmpl == 1 && *tmp == 0) {
  1585. while(rem) {
  1586. (*write_func)(arg, (rem % 10)+'0'); n++;
  1587. rem /= 10;
  1588. }
  1589. break;
  1590. } else {
  1591. Uint i = digits_per_Sint;
  1592. while(i--) {
  1593. (*write_func)(arg, (rem % 10)+'0'); n++;
  1594. rem /= 10;
  1595. }
  1596. }
  1597. }
  1598. erts_free(ERTS_ALC_T_TMP, (void *) tmp);
  1599. }
  1600. if (sign) {
  1601. (*write_func)(arg, '-'); n++;
  1602. }
  1603. return n;
  1604. }
  1605. struct big_list__ {
  1606. Eterm *hp;
  1607. Eterm res;
  1608. };
  1609. static void
  1610. write_list(void *arg, char c)
  1611. {
  1612. struct big_list__ *blp = (struct big_list__ *) arg;
  1613. blp->res = CONS(blp->hp, make_small(c), blp->res);
  1614. blp->hp += 2;
  1615. }
  1616. Eterm erts_big_to_list(Eterm x, Eterm **hpp)
  1617. {
  1618. struct big_list__ bl;
  1619. bl.hp = *hpp;
  1620. bl.res = NIL;
  1621. write_big(x, write_list, (void *) &bl);
  1622. *hpp = bl.hp;
  1623. return bl.res;
  1624. }
  1625. static void
  1626. write_string(void *arg, char c)
  1627. {
  1628. *(--(*((char **) arg))) = c;
  1629. }
  1630. char *erts_big_to_string(Wterm x, char *buf, Uint buf_sz)
  1631. {
  1632. char *big_str = buf + buf_sz - 1;
  1633. *big_str = '\0';
  1634. write_big(x, write_string, (void *) &big_str);
  1635. ASSERT(buf <= big_str && big_str <= buf + buf_sz - 1);
  1636. return big_str;
  1637. }
  1638. /* Bignum to binary bytes
  1639. * e.g. 1 bsl 64 -> "18446744073709551616"
  1640. */
  1641. Uint erts_big_to_binary_bytes(Eterm x, char *buf, Uint buf_sz)
  1642. {
  1643. char *big_str = buf + buf_sz;
  1644. Uint n;
  1645. n = write_big(x, write_string, (void *) &big_str);
  1646. ASSERT(buf <= big_str && big_str <= buf + buf_sz);
  1647. return n;
  1648. }
  1649. /*
  1650. ** Normalize a bignum given a thing pointer, a length in digits and a sign;
  1651. ** patch a zero digit if the length is odd
  1652. */
  1653. static Eterm big_norm(Eterm *x, dsize_t xl, short sign)
  1654. {
  1655. Uint arity;
  1656. if (xl == 1) {
  1657. Uint y = BIG_DIGIT(x, 0);
  1658. if (D_EXP < SMALL_BITS || IS_USMALL(sign, y)) {
  1659. if (sign)
  1660. return make_small(-((Sint)y));
  1661. else
  1662. return make_small(y);
  1663. }
  1664. }
  1665. /* __alpha__: This was fixed */
  1666. if ((arity = BIG_NEED_SIZE(xl)-1) > BIG_ARITY_MAX)
  1667. return NIL; /* signal error (too big) */
  1668. if (sign) {
  1669. *x = make_neg_bignum_header(arity);
  1670. }
  1671. else {
  1672. *x = make_pos_bignum_header(arity);
  1673. }
  1674. return make_big(x);
  1675. }
  1676. /*
  1677. ** Compare bignums
  1678. */
  1679. int big_comp(Wterm x, Wterm y)
  1680. {
  1681. Eterm* xp = big_val(x);
  1682. Eterm* yp = big_val(y);
  1683. if (BIG_SIGN(xp) == BIG_SIGN(yp)) {
  1684. int c = I_comp(BIG_V(xp), BIG_SIZE(xp), BIG_V(yp), BIG_SIZE(yp));
  1685. if (BIG_SIGN(xp))
  1686. return -c;
  1687. else
  1688. return c;
  1689. }
  1690. else
  1691. return BIG_SIGN(xp) ? -1 : 1;
  1692. }
  1693. /*
  1694. ** Unsigned compare
  1695. */
  1696. int big_ucomp(Eterm x, Eterm y)
  1697. {
  1698. Eterm* xp = big_val(x);
  1699. Eterm* yp = big_val(y);
  1700. return I_comp(BIG_V(xp), BIG_SIZE(xp), BIG_V(yp), BIG_SIZE(yp));
  1701. }
  1702. /*
  1703. ** Return number of bytes in the bignum
  1704. */
  1705. dsize_t big_bytes(Eterm x)
  1706. {
  1707. Eterm* xp = big_val(x);
  1708. dsize_t sz = BIG_SIZE(xp);
  1709. ErtsDigit d = BIG_DIGIT(xp, sz-1);
  1710. sz = (sz-1) * sizeof(ErtsDigit);
  1711. while (d != 0) {
  1712. ++sz;
  1713. d >>= 8;
  1714. }
  1715. return sz;
  1716. }
  1717. /*
  1718. ** Load a bignum from bytes
  1719. ** xsz is the number of bytes in xp
  1720. ** *r is untouched if number fits in small
  1721. */
  1722. Eterm bytes_to_big(byte *xp, dsize_t xsz, int xsgn, Eterm *r)
  1723. {
  1724. ErtsDigit* rwp = BIG_V(r);
  1725. dsize_t rsz = 0;
  1726. ErtsDigit d;
  1727. int i;
  1728. while(xsz > sizeof(ErtsDigit)) {
  1729. d = 0;
  1730. for(i = sizeof(ErtsDigit); --i >= 0;)
  1731. d = (d << 8) | xp[i];
  1732. *rwp = d;
  1733. rwp++;
  1734. xsz -= sizeof(ErtsDigit);
  1735. xp += sizeof(ErtsDigit);
  1736. rsz++;
  1737. }
  1738. if (xsz > 0) {
  1739. d = 0;
  1740. for(i = xsz; --i >= 0;)
  1741. d = (d << 8) | xp[i];
  1742. if (++rsz == 1 && IS_USMALL(xsgn,d)) {
  1743. if (xsgn) d = -d;
  1744. return make_small(d);
  1745. }
  1746. *rwp = d;
  1747. rwp++;
  1748. }
  1749. if (rsz > BIG_ARITY_MAX)
  1750. return NIL;
  1751. if (xsgn) {
  1752. *r = make_neg_bignum_header(rsz);
  1753. }
  1754. else {
  1755. *r = make_pos_bignum_header(rsz);
  1756. }
  1757. return make_big(r);
  1758. }
  1759. /*
  1760. ** Store digits in the array of bytes pointed to by p
  1761. */
  1762. byte* big_to_bytes(Eterm x, byte *p)
  1763. {
  1764. ErtsDigit* xr = big_v(x);
  1765. dsize_t xl = big_size(x);
  1766. ErtsDigit d;
  1767. int i;
  1768. while(xl > 1) {
  1769. d = *xr;
  1770. xr++;
  1771. for(i = 0; i < sizeof(ErtsDigit); ++i) {
  1772. p[i] = d & 0xff;
  1773. d >>= 8;
  1774. }
  1775. p += sizeof(ErtsDigit);
  1776. xl--;
  1777. }
  1778. d = *xr;
  1779. do {
  1780. *p++ = d & 0xff;
  1781. d >>= 8;
  1782. } while (d != 0);
  1783. return p;
  1784. }
  1785. /*
  1786. * Converts a positive term (small or bignum) to an Uint.
  1787. *
  1788. * Fails returning 0 if the term is neither a small nor a bignum,
  1789. * if it's negative, or the big number does not fit in an Uint;
  1790. * in addition the error reason, BADARG or SYSTEM_LIMIT, will be
  1791. * stored in *up.
  1792. *
  1793. * Otherwise returns a non-zero value and the converted number
  1794. * in *up.
  1795. */
  1796. int
  1797. term_to_Uint(Eterm term, Uint *up)
  1798. {
  1799. if (is_small(term)) {
  1800. Sint i = signed_val(term);
  1801. if (i < 0) {
  1802. *up = BADARG;
  1803. return 0;
  1804. }
  1805. *up = (Uint) i;
  1806. return 1;
  1807. } else if (is_big(term)) {
  1808. ErtsDigit* xr = big_v(term);
  1809. dsize_t xl = big_size(term);
  1810. Uint uval = 0;
  1811. int n = 0;
  1812. if (big_sign(term)) {
  1813. *up = BADARG;
  1814. return 0;
  1815. } else if (xl*D_EXP > sizeof(Uint)*8) {
  1816. *up = SYSTEM_LIMIT;
  1817. return 0;
  1818. }
  1819. while (xl-- > 0) {
  1820. uval |= ((Uint)(*xr++)) << n;
  1821. n += D_EXP;
  1822. }
  1823. *up = uval;
  1824. return 1;
  1825. } else {
  1826. *up = BADARG;
  1827. return 0;
  1828. }
  1829. }
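/* Usage sketch: on failure the error reason is left in *up, so callers
 * typically branch on the return value first. */
static ERTS_INLINE Uint term_to_Uint_or_zero_sketch(Eterm term)
{
    Uint val;
    if (!term_to_Uint(term, &val))
        return 0; /* val holds BADARG or SYSTEM_LIMIT here */
    return val;
}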
  1830. /* same as term_to_Uint()
  1831. but also accepts larger bignums by masking (only the lowest word is kept)
  1832. */
  1833. int
  1834. term_to_Uint_mask(Eterm term, Uint *up)
  1835. {
  1836. if (is_small(term)) {
  1837. Sint i = signed_val(term);
  1838. if (i < 0) {
  1839. *up = BADARG;
  1840. return 0;
  1841. }
  1842. *up = (Uint) i;
  1843. return 1;
  1844. } else if (is_big(term) && !big_sign(term)) {
  1845. ErtsDigit* xr = big_v(term);
  1846. ERTS_CT_ASSERT(sizeof(ErtsDigit) == sizeof(Uint));
  1847. *up = (Uint)*xr; /* just pick first word */
  1848. return 1;
  1849. } else {
  1850. *up = BADARG;
  1851. return 0;
  1852. }
  1853. }
  1854. int
  1855. term_to_UWord(Eterm term, UWord *up)
  1856. {
  1857. #if SIZEOF_VOID_P == ERTS_SIZEOF_ETERM
  1858. return term_to_Uint(term,up);
  1859. #else
  1860. if (is_small(term)) {
  1861. Sint i = signed_val(term);
  1862. if (i < 0) {
  1863. *up = BADARG;
  1864. return 0;
  1865. }
  1866. *up = (UWord) i;
  1867. return 1;
  1868. } else if (is_big(term)) {
  1869. ErtsDigit* xr = big_v(term);
  1870. dsize_t xl = big_size(term);
  1871. UWord uval = 0;
  1872. int n = 0;
  1873. if (big_sign(term)) {
  1874. *up = BADARG;
  1875. return 0;
  1876. } else if (xl*D_EXP > sizeof(UWord)*8) {
  1877. *up = SYSTEM_LIMIT;
  1878. return 0;
  1879. }
  1880. while (xl-- > 0) {
  1881. uval |= ((UWord)(*xr++)) << n;
  1882. n += D_EXP;
  1883. }
  1884. *up = uval;
  1885. return 1;
  1886. } else {
  1887. *up = BADARG;
  1888. return 0;
  1889. }
  1890. #endif
  1891. }
  1892. int
  1893. term_to_Uint64(Eterm term, Uint64 *up)
  1894. {
  1895. #if SIZEOF_VOID_P == 8
  1896. return term_to_UWord(term,up);
  1897. #else
  1898. if (is_small(term)) {
  1899. Sint i = signed_val(term);
  1900. if (i < 0) {
  1901. *up = BADARG;
  1902. return 0;
  1903. }
  1904. *up = (Uint64) i;
  1905. return 1;
  1906. } else if (is_big(term)) {
  1907. ErtsDigit* xr = big_v(term);
  1908. dsize_t xl = big_size(term);
  1909. Uint64 uval = 0;
  1910. int n = 0;
  1911. if (big_sign(term)) {
  1912. *up = BADARG;
  1913. return 0;
  1914. } else if (xl*D_EXP > sizeof(Uint64)*8) {
  1915. *up = SYSTEM_LIMIT;
  1916. return 0;
  1917. }
  1918. while (xl-- > 0) {
  1919. uval |= ((Uint64)(*xr++)) << n;
  1920. n += D_EXP;
  1921. }
  1922. *up = uval;
  1923. return 1;
  1924. } else {
  1925. *up = BADARG;
  1926. return 0;
  1927. }
  1928. #endif
  1929. }
  1930. int term_to_Sint(Eterm term, Sint *sp)
  1931. {
  1932. if (is_small(term)) {
  1933. *sp = signed_val(term);
  1934. return 1;
  1935. } else if (is_big(term)) {
  1936. ErtsDigit* xr = big_v(term);
  1937. dsize_t xl = big_size(term);
  1938. int sign = big_sign(term);
  1939. Uint uval = 0;
  1940. int n = 0;
  1941. if (xl*D_EXP > sizeof(Uint)*8) {
  1942. return 0;
  1943. }
  1944. while (xl-- > 0) {
  1945. uval |= ((Uint)(*xr++)) << n;
  1946. n += D_EXP;
  1947. }
  1948. if (sign) {
  1949. uval = -uval;
  1950. if ((Sint)uval > 0)
  1951. return 0;
  1952. } else {
  1953. if ((Sint)uval < 0)
  1954. return 0;
  1955. }
  1956. *sp = uval;
  1957. return 1;
  1958. } else {
  1959. return 0;
  1960. }
  1961. }
  1962. #if HAVE_INT64
  1963. int term_to_Sint64(Eterm term, Sint64 *sp)
  1964. {
  1965. #if ERTS_SIZEOF_ETERM == 8
  1966. return term_to_Sint(term, sp);
  1967. #else
  1968. if (is_small(term)) {
  1969. *sp = signed_val(term);
  1970. return 1;
  1971. } else if (is_big(term)) {
  1972. ErtsDigit* xr = big_v(term);
  1973. dsize_t xl = big_size(term);
  1974. int sign = big_sign(term);
  1975. Uint64 uval = 0;
  1976. int n = 0;
  1977. if (xl*D_EXP > sizeof(Uint64)*8) {
  1978. return 0;
  1979. }
  1980. while (xl-- > 0) {
  1981. uval |= ((Uint64)(*xr++)) << n;
  1982. n += D_EXP;
  1983. }
  1984. if (sign) {
  1985. uval = -uval;
  1986. if ((Sint64)uval > 0)
  1987. return 0;
  1988. } else {
  1989. if ((Sint64)uval < 0)
  1990. return 0;
  1991. }
  1992. *sp = uval;
  1993. return 1;
  1994. } else {
  1995. return 0;
  1996. }
  1997. #endif
  1998. }
  1999. #endif /* HAVE_INT64 */
  2000. /*
  2001. ** Add and subtract
  2002. */
  2003. static Eterm B_plus_minus(ErtsDigit *x, dsize_t xl, short xsgn,
  2004. ErtsDigit *y, dsize_t yl, short ysgn, Eterm *r)
  2005. {
  2006. if (xsgn == ysgn) {
  2007. if (xl > yl)
  2008. return big_norm(r, I_add(x,xl,y,yl,BIG_V(r)), xsgn);
  2009. else
  2010. return big_norm(r, I_add(y,yl,x,xl,BIG_V(r)), xsgn);
  2011. }
  2012. else {
  2013. int comp = I_comp(x, xl, y, yl);
  2014. if (comp == 0)
  2015. return make_small(0);
  2016. else if (comp > 0)
  2017. return big_norm(r, I_sub(x,xl,y,yl,BIG_V(r)), xsgn);
  2018. else
  2019. return big_norm(r, I_sub(y,yl,x,xl,BIG_V(r)), ysgn);
  2020. }
  2021. }
  2022. /*
  2023. ** Add bignums
  2024. */
  2025. Eterm big_plus(Wterm x, Wterm y, Eterm *r)
  2026. {
  2027. Eterm* xp = big_val(x);
  2028. Eterm* yp = big_val(y);
  2029. return B_plus_minus(BIG_V(xp),BIG_SIZE(xp),(short) BIG_SIGN(xp),
  2030. BIG_V(yp),BIG_SIZE(yp),(short) BIG_SIGN(yp), r);
  2031. }
  2032. /*
  2033. ** Subtract bignums
  2034. */
  2035. Eterm big_minus(Eterm x, Eterm y, Eterm *r)
  2036. {
  2037. Eterm* xp = big_val(x);
  2038. Eterm* yp = big_val(y);
  2039. return B_plus_minus(BIG_V(xp),BIG_SIZE(xp),(short) BIG_SIGN(xp),
  2040. BIG_V(yp),BIG_SIZE(yp),(short) !BIG_SIGN(yp), r);
  2041. }
  2042. /*
  2043. ** Multiply smallnums
  2044. */
  2045. Eterm small_times(Sint x, Sint y, Eterm *r)
  2046. {
  2047. short sign = (x<0) != (y<0);
  2048. ErtsDigit xu = (x > 0) ? x : -x;
  2049. ErtsDigit yu = (y > 0) ? y : -y;
  2050. ErtsDigit d1=0;
  2051. ErtsDigit d0;
  2052. Uint arity;
  2053. DMULc(xu, yu, d1, d0);
  2054. if (!d1 && ((D_EXP < SMALL_BITS) || IS_USMALL(sign, d0))) {
  2055. if (sign)
  2056. return make_small(-((Sint)d0));
  2057. else
  2058. return make_small(d0);
  2059. }
  2060. BIG_DIGIT(r,0) = d0;
  2061. arity = d1 ? 2 : 1;
  2062. if (sign)
  2063. *r = make_neg_bignum_header(arity);
  2064. else
  2065. *r = make_pos_bignum_header(arity);
  2066. if (d1)
  2067. BIG_DIGIT(r,1) = d1;
  2068. return make_big(r);
  2069. }
  2070. /*
  2071. ** Multiply bignums
  2072. */
  2073. Eterm big_times(Eterm x, Eterm y, Eterm *r)
  2074. {
  2075. Eterm* xp = big_val(x);
  2076. Eterm* yp = big_val(y);
  2077. short sign = BIG_SIGN(xp) != BIG_SIGN(yp);
  2078. dsize_t xsz = BIG_SIZE(xp);
  2079. dsize_t ysz = BIG_SIZE(yp);
  2080. dsize_t rsz;
  2081. if (ysz == 1)
  2082. rsz = D_mul(BIG_V(xp), xsz, BIG_DIGIT(yp, 0), BIG_V(r));
  2083. else if (xsz == 1)
  2084. rsz = D_mul(BIG_V(yp), ysz, BIG_DIGIT(xp, 0), BIG_V(r));
  2085. else if (xp == yp) {
  2086. ZERO_DIGITS(BIG_V(r), xsz+1);
  2087. rsz = I_sqr(BIG_V(xp), xsz, BIG_V(r));
  2088. }
  2089. else if (xsz >= ysz) {
  2090. ZERO_DIGITS(BIG_V(r), xsz);
  2091. rsz = I_mul(BIG_V(xp), xsz, BIG_V(yp), ysz, BIG_V(r));
  2092. }
  2093. else {
  2094. ZERO_DIGITS(BIG_V(r), ysz);
  2095. rsz = I_mul(BIG_V(yp), ysz, BIG_V(xp), xsz, BIG_V(r));
  2096. }
  2097. return big_norm(r, rsz, sign);
  2098. }
  2099. /*
  2100. ** Divide bignums
  2101. */
  2102. Eterm big_div(Eterm x, Eterm y, Eterm *q)
  2103. {
  2104. Eterm* xp = big_val(x);
  2105. Eterm* yp = big_val(y);
  2106. short sign = BIG_SIGN(xp) != BIG_SIGN(yp);
  2107. dsize_t xsz = BIG_SIZE(xp);
  2108. dsize_t ysz = BIG_SIZE(yp);
  2109. dsize_t qsz;
  2110. if (ysz == 1) {
  2111. ErtsDigit rem;
  2112. qsz = D_div(BIG_V(xp), xsz, BIG_DIGIT(yp,0), BIG_V(q), &rem);
  2113. }
  2114. else {
  2115. Eterm* remp;
  2116. dsize_t rem_sz;
  2117. qsz = xsz - ysz + 1;
  2118. remp = q + BIG_NEED_SIZE(qsz);
  2119. qsz = I_div(BIG_V(xp), xsz, BIG_V(yp), ysz, BIG_V(q), BIG_V(remp),
  2120. &rem_sz);
  2121. }
  2122. return big_norm(q, qsz, sign);
  2123. }
  2124. /*
  2125. ** Remainder
  2126. */
  2127. Eterm big_rem(Eterm x, Eterm y, Eterm *r)
  2128. {
  2129. Eterm* xp = big_val(x);
  2130. Eterm* yp = big_val(y);
  2131. short sign = BIG_SIGN(xp);
  2132. dsize_t xsz = BIG_SIZE(xp);
  2133. dsize_t ysz = BIG_SIZE(yp);
  2134. if (ysz == 1) {
  2135. ErtsDigit rem;
  2136. rem = D_rem(BIG_V(xp), xsz, BIG_DIGIT(yp,0));
  2137. if (IS_USMALL(sign, rem)) {
  2138. if (sign)
  2139. return make_small(-(Sint)rem);
  2140. else
  2141. return make_small(rem);
  2142. }
  2143. else {
  2144. if (sign)
  2145. *r = make_neg_bignum_header(1);
  2146. else
  2147. *r = make_pos_bignum_header(1);
  2148. BIG_DIGIT(r, 0) = rem;
  2149. return make_big(r);
  2150. }
  2151. }
  2152. else {
  2153. dsize_t rsz = I_rem(BIG_V(xp), xsz, BIG_V(yp), ysz, BIG_V(r));
  2154. return big_norm(r, rsz, sign);
  2155. }
  2156. }
  2157. Eterm big_band(Eterm x, Eterm y, Eterm *r)
  2158. {
  2159. Eterm* xp = big_val(x);
  2160. Eterm* yp = big_val(y);
  2161. short xsgn = BIG_SIGN(xp);
  2162. short ysgn = BIG_SIGN(yp);
  2163. short sign = xsgn && ysgn;
  2164. dsize_t xsz = BIG_SIZE(xp);
  2165. dsize_t ysz = BIG_SIZE(yp);
  2166. if (xsz >= ysz)
  2167. return big_norm(r,I_band(BIG_V(xp),xsz,xsgn,
  2168. BIG_V(yp),ysz,ysgn,
  2169. BIG_V(r)),sign);
  2170. else
  2171. return big_norm(r,I_band(BIG_V(yp),ysz,ysgn,
  2172. BIG_V(xp),xsz,xsgn,
  2173. BIG_V(r)),sign);
  2174. }
  2175. Eterm big_bor(Eterm x, Eterm y, Eterm *r)
  2176. {
  2177. Eterm* xp = big_val(x);
  2178. Eterm* yp = big_val(y);
  2179. short xsgn = BIG_SIGN(xp);
  2180. short ysgn = BIG_SIGN(yp);
  2181. short sign = (xsgn || ysgn);
  2182. dsize_t xsz = BIG_SIZE(xp);
  2183. dsize_t ysz = BIG_SIZE(yp);
  2184. if (xsz >= ysz)
  2185. return big_norm(r,I_bor(BIG_V(xp),xsz,xsgn,
  2186. BIG_V(yp),ysz,ysgn,
  2187. BIG_V(r)),sign);
  2188. else
  2189. return big_norm(r,I_bor(BIG_V(yp),ysz,ysgn,
  2190. BIG_V(xp),xsz,xsgn,
  2191. BIG_V(r)),sign);
  2192. }
  2193. Eterm big_bxor(Eterm x, Eterm y, Eterm *r)
  2194. {
  2195. Eterm* xp = big_val(x);
  2196. Eterm* yp = big_val(y);
  2197. short xsgn = BIG_SIGN(xp);
  2198. short ysgn = BIG_SIGN(yp);
  2199. short sign = (xsgn != ysgn);
  2200. dsize_t xsz = BIG_SIZE(xp);
  2201. dsize_t ysz = BIG_SIZE(yp);
  2202. if (xsz >= ysz)
  2203. return big_norm(r,I_bxor(BIG_V(xp),xsz,xsgn,
  2204. BIG_V(yp),ysz,ysgn,
  2205. BIG_V(r)),sign);
  2206. else
  2207. return big_norm(r,I_bxor(BIG_V(yp),ysz,ysgn,
  2208. BIG_V(xp),xsz,xsgn,
  2209. BIG_V(r)),sign);
  2210. }
  2211. Eterm big_bnot(Eterm x, Eterm *r)
  2212. {
  2213. Eterm* xp = big_val(x);
  2214. short sign = !BIG_SIGN(xp);
  2215. dsize_t xsz = BIG_SIZE(xp);
  2216. return big_norm(r, I_bnot(BIG_V(xp), xsz, sign, BIG_V(r)), sign);
  2217. }
  2218. Eterm big_lshift(Eterm x, Sint y, Eterm *r)
  2219. {
  2220. Eterm* xp = big_val(x);
  2221. short sign = BIG_SIGN(xp);
  2222. dsize_t xsz = BIG_SIZE(xp);
  2223. return big_norm(r, I_lshift(BIG_V(xp), xsz, y, sign, BIG_V(r)), sign);
  2224. }
  2225. /* add unsigned small int y to x */
  2226. Eterm big_plus_small(Eterm x, Uint y, Eterm *r)
  2227. {
  2228. Eterm* xp = big_val(x);
  2229. if (BIG_SIGN(xp))
  2230. return big_norm(r, D_sub(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y,
  2231. BIG_V(r)), (short) BIG_SIGN(xp));
  2232. else
  2233. return big_norm(r, D_add(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y,
  2234. BIG_V(r)), (short) BIG_SIGN(xp));
  2235. }
  2236. Eterm big_times_small(Eterm x, Uint y, Eterm *r)
  2237. {
  2238. Eterm* xp = big_val(x);
  2239. return big_norm(r, D_mul(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y,
  2240. BIG_V(r)), (short) BIG_SIGN(xp));
  2241. }
  2242. /*
  2243. ** Expects the big to fit.
  2244. */
  2245. Uint32 big_to_uint32(Eterm b)
  2246. {
  2247. Uint u;
  2248. if (!term_to_Uint(b, &u)) {
  2249. ASSERT(0);
  2250. return 0;

The file is truncated here; the remaining lines (through line 2978) are not shown.