
/erts/emulator/beam/utils.c

https://github.com/bsmr-erlang/otp
/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 1996-2018. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#define ERTS_DO_INCL_GLB_INLINE_FUNC_DEF

#include "sys.h"
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
#include "big.h"
#include "bif.h"
#include "erl_binary.h"
#include "erl_bits.h"
#include "erl_map.h"
#include "packet_parser.h"
#include "erl_gc.h"
#define ERTS_WANT_DB_INTERNAL__
#include "erl_db.h"
#include "erl_threads.h"
#include "register.h"
#include "dist.h"
#include "erl_printf.h"
#include "erl_threads.h"
#include "erl_lock_count.h"
#include "erl_time.h"
#include "erl_thr_progress.h"
#include "erl_thr_queue.h"
#include "erl_sched_spec_pre_alloc.h"
#include "beam_bp.h"
#include "erl_ptab.h"
#include "erl_check_io.h"
#include "erl_bif_unique.h"
#include "erl_io_queue.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
#ifdef HIPE
# include "hipe_mode_switch.h"
#endif
#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
#include "erl_nfunc_sched.h"
#include "erl_proc_sig_queue.h"

#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
#undef M_MMAP_THRESHOLD
#undef M_MMAP_MAX

#if defined(__GLIBC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#endif

#if !defined(HAVE_MALLOPT)
#undef  HAVE_MALLOPT
#define HAVE_MALLOPT 0
#endif
Eterm*
erts_heap_alloc(Process* p, Uint need, Uint xtra)
{
    ErlHeapFragment* bp;
    Uint n;
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
    Uint i;
#endif

#ifdef FORCE_HEAP_FRAGS
    if (p->space_verified && p->space_verified_from != NULL
        && HEAP_TOP(p) >= p->space_verified_from
        && HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified
        && HEAP_LIMIT(p) - HEAP_TOP(p) >= need) {
        Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from);
        ASSERT(consumed <= p->space_verified);
        p->space_verified -= consumed;
        p->space_verified_from += consumed;
        HEAP_TOP(p) = p->space_verified_from;
        return HEAP_TOP(p) - need;
    }
    p->space_verified = 0;
    p->space_verified_from = NULL;
#endif /* FORCE_HEAP_FRAGS */

    n = need + xtra;
    bp = MBUF(p);
    if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) {
        Eterm* ret = bp->mem + bp->used_size;
        bp->used_size += need;
        p->mbuf_sz += need;
        return ret;
    }
#ifdef DEBUG
    n++;
#endif
    bp = (ErlHeapFragment*)
        ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG, ERTS_HEAP_FRAG_SIZE(n));
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
    for (i = 0; i < n; i++) {
        bp->mem[i] = ERTS_HOLE_MARKER;
    }
#endif
#ifdef DEBUG
    n--;
#endif
    bp->next = MBUF(p);
    MBUF(p) = bp;
    bp->alloc_size = n;
    bp->used_size = need;
    MBUF_SIZE(p) += need;
    bp->off_heap.first = NULL;
    bp->off_heap.overhead = 0;
    return bp->mem;
}
#ifdef CHECK_FOR_HOLES
Eterm*
erts_set_hole_marker(Eterm* ptr, Uint sz)
{
    Eterm* p = ptr;
    Uint i;

    for (i = 0; i < sz; i++) {
        *p++ = ERTS_HOLE_MARKER;
    }
    return ptr;
}
#endif
/*
 * Helper function for the ESTACK macros defined in global.h.
 */
void
erl_grow_estack(ErtsEStack* s, Uint need)
{
    Uint old_size = (s->end - s->start);
    Uint new_size;
    Uint sp_offs = s->sp - s->start;

    if (need < old_size)
        new_size = 2*old_size;
    else
        new_size = ((need / old_size) + 2) * old_size;

    if (s->start != s->edefault) {
        s->start = erts_realloc(s->alloc_type, s->start,
                                new_size*sizeof(Eterm));
    } else {
        Eterm* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(Eterm));
        sys_memcpy(new_ptr, s->start, old_size*sizeof(Eterm));
        s->start = new_ptr;
    }
    s->end = s->start + new_size;
    s->sp = s->start + sp_offs;
}
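
/*
 * Illustration (not part of the original file): the ESTACK macros that call
 * erl_grow_estack() start out on a small default array and only reach this
 * function when a push overflows it. A minimal sketch of the usual
 * push/pop pattern, modelled on the ESTACK uses later in this file:
 */
#if 0
static void
estack_usage_sketch(Eterm t1, Eterm t2)
{
    DECLARE_ESTACK(s);           /* starts in a small default array */
    ESTACK_PUSH(s, t1);          /* an overflowing push grows the stack */
    ESTACK_PUSH(s, t2);
    while (!ESTACK_ISEMPTY(s)) {
        Eterm t = ESTACK_POP(s);
        (void) t;                /* ... process t ... */
    }
    DESTROY_ESTACK(s);           /* frees heap storage if the stack grew */
}
#endif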
/*
 * Helper function for the WSTACK macros defined in global.h.
 */
void
erl_grow_wstack(ErtsWStack* s, Uint need)
{
    Uint old_size = (s->wend - s->wstart);
    Uint new_size;
    Uint sp_offs = s->wsp - s->wstart;

    if (need < old_size)
        new_size = 2 * old_size;
    else
        new_size = ((need / old_size) + 2) * old_size;

    if (s->wstart != s->wdefault) {
        s->wstart = erts_realloc(s->alloc_type, s->wstart,
                                 new_size*sizeof(UWord));
    } else {
        UWord* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(UWord));
        sys_memcpy(new_ptr, s->wstart, old_size*sizeof(UWord));
        s->wstart = new_ptr;
    }
    s->wend = s->wstart + new_size;
    s->wsp = s->wstart + sp_offs;
}
/*
 * Helper function for the PSTACK macros defined in global.h.
 */
void
erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
{
    Uint old_size = s->size;
    Uint new_size;

    if (need_bytes < old_size)
        new_size = 2 * old_size;
    else
        new_size = ((need_bytes / old_size) + 2) * old_size;

    if (s->pstart != default_pstack) {
        s->pstart = erts_realloc(s->alloc_type, s->pstart, new_size);
    } else {
        byte* new_ptr = erts_alloc(s->alloc_type, new_size);
        sys_memcpy(new_ptr, s->pstart, old_size);
        s->pstart = new_ptr;
    }
    s->size = new_size;
}
/*
 * Helper function for the EQUEUE macros defined in global.h.
 */
void
erl_grow_equeue(ErtsEQueue* q, Eterm* default_equeue)
{
    Uint old_size = (q->end - q->start);
    Uint new_size = old_size * 2;
    Uint first_part = (q->end - q->front);
    Uint second_part = (q->back - q->start);
    Eterm* new_ptr = erts_alloc(q->alloc_type, new_size*sizeof(Eterm));
    ASSERT(q->back == q->front);  // of course the queue is full now!
    if (first_part > 0)
        sys_memcpy(new_ptr, q->front, first_part*sizeof(Eterm));
    if (second_part > 0)
        sys_memcpy(new_ptr+first_part, q->start, second_part*sizeof(Eterm));
    if (q->start != default_equeue)
        erts_free(q->alloc_type, q->start);
    q->start = new_ptr;
    q->end = q->start + new_size;
    q->front = q->start;
    q->back = q->start + old_size;
}
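
/*
 * Illustration (not part of the original file): erl_grow_equeue() is only
 * reached when the circular buffer has wrapped and back has caught up with
 * front. With old_size = 4 and front == back == start+1 the buffer holds
 *
 *     start: [D]  [A]* [B]  [C]        (* marks front/back)
 *
 * so first_part  = end - front  = {A, B, C}
 *    second_part = back - start = {D}.
 * The two memcpy calls linearize it into the doubled buffer:
 *
 *     new:   [A]* [B]  [C]  [D]^ [ ]  [ ]  [ ]  [ ]   (* front, ^ back)
 *
 * which is why front restarts at new_ptr and back lands at
 * new_ptr + old_size.
 */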
/* CTYPE macros */

#define LATIN1

#define IS_DIGIT(c)  ((c) >= '0' && (c) <= '9')
#ifdef LATIN1
#define IS_LOWER(c)  (((c) >= 'a' && (c) <= 'z') \
                      || ((c) >= 128+95 && (c) <= 255 && (c) != 247))
#define IS_UPPER(c)  (((c) >= 'A' && (c) <= 'Z') \
                      || ((c) >= 128+64 && (c) <= 128+94 && (c) != 247-32))
#else
#define IS_LOWER(c)  ((c) >= 'a' && (c) <= 'z')
#define IS_UPPER(c)  ((c) >= 'A' && (c) <= 'Z')
#endif

#define IS_ALNUM(c)  (IS_DIGIT(c) || IS_LOWER(c) || IS_UPPER(c))

/* We don't include 160 (non-breaking space). */
#define IS_SPACE(c)  (c == ' ' || c == '\n' || c == '\t' || c == '\r')

#ifdef LATIN1
#define IS_CNTRL(c)  ((c) < ' ' || (c) == 127 \
                      || ((c) >= 128 && (c) < 128+32))
#else
/* Treat all non-ASCII as control characters */
#define IS_CNTRL(c)  ((c) < ' ' || (c) >= 127)
#endif

#define IS_PRINT(c)  (!IS_CNTRL(c))
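
/*
 * Illustration (not part of the original file): under LATIN1 the macros
 * classify the high half of the Latin-1 range as letters, except for the
 * two arithmetic signs embedded in it:
 *
 *     IS_LOWER(0xE9)  (e-acute, 233)            -> true
 *     IS_LOWER(0xF7)  (division sign, 247)      -> false
 *     IS_UPPER(0xD7)  (multiplication, 247-32)  -> false
 *     IS_CNTRL(0x9F)  (C1 control, 159)         -> true, so not IS_PRINT
 */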
/*
 * Calculate length of a list.
 * Returns -1 if not a proper list (i.e. not terminated with NIL)
 */
Sint
erts_list_length(Eterm list)
{
    Sint i = 0;

    while (is_list(list)) {
        i++;
        list = CDR(list_val(list));
    }
    if (is_not_nil(list)) {
        return -1;
    }
    return i;
}
static const struct {
    Sint64 mask;
    int bits;
} fib_data[] = {{ERTS_I64_LITERAL(0x2), 1},
                {ERTS_I64_LITERAL(0xc), 2},
                {ERTS_I64_LITERAL(0xf0), 4},
                {ERTS_I64_LITERAL(0xff00), 8},
                {ERTS_I64_LITERAL(0xffff0000), 16},
                {ERTS_I64_LITERAL(0xffffffff00000000), 32}};

static ERTS_INLINE int
fit_in_bits(Sint64 value, int start)
{
    int bits = 0;
    int i;

    for (i = start; i >= 0; i--) {
        if (value & fib_data[i].mask) {
            value >>= fib_data[i].bits;
            bits |= fib_data[i].bits;
        }
    }

    bits++;
    return bits;
}

int erts_fit_in_bits_int64(Sint64 value)
{
    return fit_in_bits(value, 5);
}

int erts_fit_in_bits_int32(Sint32 value)
{
    return fit_in_bits((Sint64) (Uint32) value, 4);
}

int erts_fit_in_bits_uint(Uint value)
{
#if ERTS_SIZEOF_ETERM == 4
    return fit_in_bits((Sint64) (Uint32) value, 4);
#elif ERTS_SIZEOF_ETERM == 8
    return fit_in_bits(value, 5);
#else
# error "No way, Jose"
#endif
}
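
/*
 * Illustration (not part of the original file): fit_in_bits() is a binary
 * search for the most significant set bit, so it returns
 * floor(log2(value)) + 1 for value > 0. Worked example for
 * erts_fit_in_bits_int64(5):
 *
 *     value = 5 (0b101); the masks 0xffffffff00000000, 0xffff0000,
 *     0xff00 and 0xf0 all miss; mask 0xc hits, so value >>= 2 (now 1)
 *     and bits |= 2; mask 0x2 then misses; bits++ yields 3.
 *
 * Three bits is exactly what is needed to store 5.
 */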
int
erts_print(fmtfn_t to, void *arg, char *format, ...)
{
    int res;
    va_list arg_list;
    va_start(arg_list, format);

    {
        switch ((UWord)to) {
        case (UWord)ERTS_PRINT_STDOUT:
            res = erts_vprintf(format, arg_list);
            break;
        case (UWord)ERTS_PRINT_STDERR:
            res = erts_vfprintf(stderr, format, arg_list);
            break;
        case (UWord)ERTS_PRINT_FILE:
            res = erts_vfprintf((FILE *) arg, format, arg_list);
            break;
        case (UWord)ERTS_PRINT_SBUF:
            res = erts_vsprintf((char *) arg, format, arg_list);
            break;
        case (UWord)ERTS_PRINT_SNBUF:
            res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf,
                                 ((erts_print_sn_buf *) arg)->size,
                                 format,
                                 arg_list);
            break;
        case (UWord)ERTS_PRINT_DSBUF:
            res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list);
            break;
        case (UWord)ERTS_PRINT_FD:
            res = erts_vfdprintf((int)(SWord) arg, format, arg_list);
            break;
        default:
            res = erts_vcbprintf(to, arg, format, arg_list);
            break;
        }
    }

    va_end(arg_list);
    return res;
}
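
/*
 * Illustration (not part of the original file): 'to' doubles as either a
 * small sentinel value (ERTS_PRINT_STDOUT, ERTS_PRINT_FD, ...) selecting a
 * built-in sink whose state travels in 'arg', or a real callback pointer
 * handled by the default branch. A sketch of the sentinel call style
 * implied by the switch above:
 */
#if 0
static void
erts_print_usage_sketch(int fd, char *sbuf, erts_dsprintf_buf_t *dsbufp)
{
    erts_print(ERTS_PRINT_STDERR, NULL, "%d schedulers\n", 4);
    erts_print(ERTS_PRINT_FD, (void *)(SWord) fd, "to fd %d\n", fd);
    erts_print(ERTS_PRINT_SBUF, (void *) sbuf, "into a plain %s\n", "buffer");
    erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, "into a dynamic %s\n", "buffer");
}
#endif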
int
erts_putc(fmtfn_t to, void *arg, char c)
{
    return erts_print(to, arg, "%c", c);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * Some Erlang term building utility functions (to be used when performance *
 * isn't critical).                                                          *
 *                                                                           *
 * Add more functions like these here (and function prototypes in global.h) *
 * when needed.                                                              *
 *                                                                           *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
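
/*
 * Illustration (not part of the original file): the erts_bld_* functions
 * below implement a two-pass protocol. Pass 1 hands in only 'szp' to
 * accumulate the number of heap words needed; the caller then allocates
 * and repeats the calls with 'hpp' to actually build the term. A sketch,
 * assuming a caller with an ordinary process heap to HAlloc from:
 */
#if 0
static Eterm
bld_usage_sketch(Process *p, Uint value)
{
    Uint sz = 0;
    Uint *hp;
    Eterm res;

    /* pass 1: size only; term arguments are ignored when hpp == NULL */
    (void) erts_bld_tuple(NULL, &sz, 2,
                          am_ok, erts_bld_uint(NULL, &sz, value));
    hp = HAlloc(p, sz);                  /* allocate exactly sz words */
    /* pass 2: build; the inner build advances hp before the tuple uses it */
    res = erts_bld_tuple(&hp, NULL, 2,
                         am_ok, erts_bld_uint(&hp, NULL, value));
    return res;
}
#endif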
Eterm
erts_bld_atom(Uint **hpp, Uint *szp, char *str)
{
    if (hpp)
        return erts_atom_put((byte *) str, sys_strlen(str), ERTS_ATOM_ENC_LATIN1, 1);
    else
        return THE_NON_VALUE;
}

Eterm
erts_bld_uint(Uint **hpp, Uint *szp, Uint ui)
{
    Eterm res = THE_NON_VALUE;
    if (IS_USMALL(0, ui)) {
        if (hpp)
            res = make_small(ui);
    }
    else {
        if (szp)
            *szp += BIG_UINT_HEAP_SIZE;
        if (hpp) {
            res = uint_to_big(ui, *hpp);
            *hpp += BIG_UINT_HEAP_SIZE;
        }
    }
    return res;
}
/*
 * erts_bld_uword is more or less similar to erts_bld_uint, but a full
 * pointer-sized word can safely be passed.
 */
Eterm
erts_bld_uword(Uint **hpp, Uint *szp, UWord uw)
{
    Eterm res = THE_NON_VALUE;
    if (IS_USMALL(0, uw)) {
        if (hpp)
            res = make_small((Uint) uw);
    }
    else {
        if (szp)
            *szp += BIG_UWORD_HEAP_SIZE(uw);
        if (hpp) {
            res = uword_to_big(uw, *hpp);
            *hpp += BIG_UWORD_HEAP_SIZE(uw);
        }
    }
    return res;
}

Eterm
erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
{
    Eterm res = THE_NON_VALUE;
    if (IS_USMALL(0, ui64)) {
        if (hpp)
            res = make_small((Uint) ui64);
    }
    else {
        if (szp)
            *szp += ERTS_UINT64_HEAP_SIZE(ui64);
        if (hpp)
            res = erts_uint64_to_big(ui64, hpp);
    }
    return res;
}

Eterm
erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
{
    Eterm res = THE_NON_VALUE;
    if (IS_SSMALL(si64)) {
        if (hpp)
            res = make_small((Sint) si64);
    }
    else {
        if (szp)
            *szp += ERTS_SINT64_HEAP_SIZE(si64);
        if (hpp)
            res = erts_sint64_to_big(si64, hpp);
    }
    return res;
}

Eterm
erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr)
{
    Eterm res = THE_NON_VALUE;
    if (szp)
        *szp += 2;
    if (hpp) {
        res = CONS(*hpp, car, cdr);
        *hpp += 2;
    }
    return res;
}

Eterm
erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...)
{
    Eterm res = THE_NON_VALUE;

    ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));

    if (szp)
        *szp += arity + 1;
    if (hpp) {
        res = make_tuple(*hpp);
        *((*hpp)++) = make_arityval(arity);

        if (arity > 0) {
            Uint i;
            va_list argp;

            va_start(argp, arity);
            for (i = 0; i < arity; i++) {
                *((*hpp)++) = va_arg(argp, Eterm);
            }
            va_end(argp);
        }
    }
    return res;
}

Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[])
{
    Eterm res = THE_NON_VALUE;
    /*
     * Note callers expect that 'terms' is *not* accessed if hpp == NULL.
     */

    ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));

    if (szp)
        *szp += arity + 1;
    if (hpp) {
        res = make_tuple(*hpp);
        *((*hpp)++) = make_arityval(arity);

        if (arity > 0) {
            Uint i;
            for (i = 0; i < arity; i++)
                *((*hpp)++) = terms[i];
        }
    }
    return res;
}

Eterm
erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len)
{
    Eterm res = THE_NON_VALUE;
    Sint i = len;
    if (szp)
        *szp += len*2;
    if (hpp) {
        res = NIL;
        while (--i >= 0) {
            res = CONS(*hpp, make_small((byte) str[i]), res);
            *hpp += 2;
        }
    }
    return res;
}

Eterm
erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[])
{
    Eterm list = THE_NON_VALUE;
    if (szp)
        *szp += 2*length;
    if (hpp) {
        Sint i = length;
        list = NIL;
        while (--i >= 0) {
            list = CONS(*hpp, terms[i], list);
            *hpp += 2;
        }
    }
    return list;
}

Eterm
erts_bld_2tup_list(Uint **hpp, Uint *szp,
                   Sint length, Eterm terms1[], Uint terms2[])
{
    Eterm res = THE_NON_VALUE;
    if (szp)
        *szp += 5*length;
    if (hpp) {
        Sint i = length;
        res = NIL;

        while (--i >= 0) {
            res = CONS(*hpp+3, TUPLE2(*hpp, terms1[i], terms2[i]), res);
            *hpp += 5;
        }
    }
    return res;
}

Eterm
erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
                              Sint length, Eterm atoms[], UWord uints[])
{
    Sint i;
    Eterm res = THE_NON_VALUE;
    if (szp) {
        *szp += 5*length;
        i = length;
        while (--i >= 0) {
            if (!IS_USMALL(0, uints[i]))
                *szp += BIG_UINT_HEAP_SIZE;
        }
    }
    if (hpp) {
        i = length;
        res = NIL;

        while (--i >= 0) {
            Eterm ui;

            if (IS_USMALL(0, uints[i]))
                ui = make_small(uints[i]);
            else {
                ui = uint_to_big(uints[i], *hpp);
                *hpp += BIG_UINT_HEAP_SIZE;
            }

            res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res);
            *hpp += 5;
        }
    }
    return res;
}

Eterm
erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
                              Eterm atoms[], Uint uints1[], Uint uints2[])
{
    Sint i;
    Eterm res = THE_NON_VALUE;
    if (szp) {
        *szp += 6*length;
        i = length;
        while (--i >= 0) {
            if (!IS_USMALL(0, uints1[i]))
                *szp += BIG_UINT_HEAP_SIZE;
            if (!IS_USMALL(0, uints2[i]))
                *szp += BIG_UINT_HEAP_SIZE;
        }
    }
    if (hpp) {
        i = length;
        res = NIL;

        while (--i >= 0) {
            Eterm ui1;
            Eterm ui2;

            if (IS_USMALL(0, uints1[i]))
                ui1 = make_small(uints1[i]);
            else {
                ui1 = uint_to_big(uints1[i], *hpp);
                *hpp += BIG_UINT_HEAP_SIZE;
            }

            if (IS_USMALL(0, uints2[i]))
                ui2 = make_small(uints2[i]);
            else {
                ui2 = uint_to_big(uints2[i], *hpp);
                *hpp += BIG_UINT_HEAP_SIZE;
            }

            res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res);
            *hpp += 6;
        }
    }
    return res;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 *                                                                           *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* make a hash index from an erlang term */
/*
** There are two hash functions.
**
** make_hash: A hash function that will give the same values for the same
** terms regardless of the internal representation. Small integers are
** hashed using the same algorithm as bignums and bignums are hashed
** independently of the CPU endianness.
** Make_hash also hashes pids, ports and references like 32 bit numbers
** (but with different constants).
** make_hash() is called from the bif erlang:phash/2
**
** The idea behind the hash algorithm is to produce values suitable for
** linear dynamic hashing. We cannot choose the range at all while hashing
** (it's not even supplied to the hashing functions). The good old algorithm
** [H = H*C+X mod M, where H is the hash value, C is a "random" constant (or M),
** M is the range, preferably a prime, and X is each byte value] is therefore
** modified to:
** H = H*C+X mod 2^32, where C is a large prime. This gives acceptable
** "spreading" of the hashes, so that later modulo calculations also will give
** acceptable "spreading" in the range.
** We really need to hash on bytes, otherwise the
** upper bytes of a word will be less significant than the lower ones. That's
** not acceptable at all. For internal use one could maybe optimize by using
** another hash function that is less strict but faster. That is, however, not
** implemented.
**
** Short semi-formal description of make_hash:
**
** In make_hash, the number N is treated like this:
** Abs(N) is hashed bytewise with the least significant byte, B(0), first.
** The number of bytes (J) to calculate hash on in N is
** (the number of _32_ bit words needed to store the unsigned
** value of abs(N)) * 4.
** X = FUNNY_NUMBER2
** If N < 0, Y = FUNNY_NUMBER4 else Y = FUNNY_NUMBER3.
** The hash value is Y*h(J) mod 2^32 where h(J) is calculated like
** h(0) = <initial hash>
** h(i) = h(i-1)*X + B(i-1)
** The above should hold regardless of internal representation.
** Pids are hashed like small numbers but with different constants, as are
** ports.
** References are hashed like ports but only on the least significant byte.
** Binaries are hashed on all bytes (not on the 15 first as in
** make_broken_hash()).
** Bytes in lists (possibly text strings) use a simpler multiplication inlined
** in the handling of lists, that is an optimization.
** Everything else is like in the old hash (make_broken_hash()).
**
** make_hash2() is faster than make_hash, in particular for bignums
** and binaries, and produces better hash values.
*/
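
/*
 * Illustration (not part of the original file): for N = 5 the semi-formal
 * description above unfolds as follows. abs(5) fits in one 32-bit word, so
 * J = 4 and the bytes are B(0..3) = 5, 0, 0, 0. With X = FUNNY_NUMBER2 and
 * Y = FUNNY_NUMBER3 (since N >= 0), starting from h(0) = <initial hash>:
 *
 *     h(1) = h(0)*X + 5
 *     h(2) = h(1)*X + 0          (and likewise h(3), h(4))
 *     hash = Y * h(4) mod 2^32
 *
 * which is exactly what the SMALL_DEF case below computes with
 * UINT32_HASH_STEP followed by the FUNNY_NUMBER3/FUNNY_NUMBER4 multiply.
 */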
/* some prime numbers just above 2 ^ 28 */

#define FUNNY_NUMBER1  268440163
#define FUNNY_NUMBER2  268439161
#define FUNNY_NUMBER3  268435459
#define FUNNY_NUMBER4  268436141
#define FUNNY_NUMBER5  268438633
#define FUNNY_NUMBER6  268437017
#define FUNNY_NUMBER7  268438039
#define FUNNY_NUMBER8  268437511
#define FUNNY_NUMBER9  268439627
#define FUNNY_NUMBER10 268440479
#define FUNNY_NUMBER11 268440577
#define FUNNY_NUMBER12 268440581
#define FUNNY_NUMBER13 268440593
#define FUNNY_NUMBER14 268440611

static Uint32
hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
{
    byte* ptr;
    Uint bitoffs;
    Uint bitsize;

    ERTS_GET_BINARY_BYTES(bin, ptr, bitoffs, bitsize);
    if (bitoffs == 0) {
        while (sz--) {
            hash = hash*FUNNY_NUMBER1 + *ptr++;
        }
        if (bitsize > 0) {
            byte b = *ptr;

            b >>= 8 - bitsize;
            hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
        }
    } else {
        Uint previous = *ptr++;
        Uint b;
        Uint lshift = bitoffs;
        Uint rshift = 8 - lshift;

        while (sz--) {
            b = (previous << lshift) & 0xFF;
            previous = *ptr++;
            b |= previous >> rshift;
            hash = hash*FUNNY_NUMBER1 + b;
        }

        if (bitsize > 0) {
            b = (previous << lshift) & 0xFF;
            previous = *ptr++;
            b |= previous >> rshift;

            b >>= 8 - bitsize;
            hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
        }
    }
    return hash;
}

Uint32 make_hash(Eterm term_arg)
{
    DECLARE_WSTACK(stack);
    Eterm term = term_arg;
    Eterm hash = 0;
    unsigned op;

#define MAKE_HASH_TUPLE_OP      (FIRST_VACANT_TAG_DEF)
#define MAKE_HASH_TERM_ARRAY_OP (FIRST_VACANT_TAG_DEF+1)
#define MAKE_HASH_CDR_PRE_OP    (FIRST_VACANT_TAG_DEF+2)
#define MAKE_HASH_CDR_POST_OP   (FIRST_VACANT_TAG_DEF+3)
    /*
    ** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
    ** integer.
    ** If the endianness were known, we could be smarter here,
    ** but that gives no significant speedup (on a SPARC at least).
    */
#define UINT32_HASH_STEP(Expr, Prime1)                          \
    do {                                                        \
        Uint32 x = (Uint32) (Expr);                             \
        hash =                                                  \
            (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) +      \
            ((x >> 8) & 0xFF)) * (Prime1) +                     \
            ((x >> 16) & 0xFF)) * (Prime1) +                    \
            (x >> 24));                                         \
    } while(0)

#define UINT32_HASH_RET(Expr, Prime1, Prime2)   \
    UINT32_HASH_STEP(Expr, Prime1);             \
    hash = hash * (Prime2);                     \
    break

    /*
     * Significant additions needed for real 64 bit port with larger fixnums.
     */

    /*
     * Note, for the simple 64bit port, not utilizing the
     * larger word size this function will work without modification.
     */
tail_recur:
    op = tag_val_def(term);
    for (;;) {
        switch (op) {
        case NIL_DEF:
            hash = hash*FUNNY_NUMBER3 + 1;
            break;
        case ATOM_DEF:
            hash = hash*FUNNY_NUMBER1 +
                (atom_tab(atom_val(term))->slot.bucket.hvalue);
            break;
        case SMALL_DEF:
            {
                Sint y1 = signed_val(term);
                Uint y2 = y1 < 0 ? -(Uint)y1 : y1;

                UINT32_HASH_STEP(y2, FUNNY_NUMBER2);
#if defined(ARCH_64)
                if (y2 >> 32)
                    UINT32_HASH_STEP(y2 >> 32, FUNNY_NUMBER2);
#endif
                hash *= (y1 < 0 ? FUNNY_NUMBER4 : FUNNY_NUMBER3);
                break;
            }
        case BINARY_DEF:
            {
                Uint sz = binary_size(term);

                hash = hash_binary_bytes(term, sz, hash);
                hash = hash*FUNNY_NUMBER4 + sz;
                break;
            }
        case EXPORT_DEF:
            {
                Export* ep = *((Export **) (export_val(term) + 1));

                hash = hash * FUNNY_NUMBER11 + ep->info.mfa.arity;
                hash = hash*FUNNY_NUMBER1 +
                    (atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue);
                hash = hash*FUNNY_NUMBER1 +
                    (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue);
                break;
            }

        case FUN_DEF:
            {
                ErlFunThing* funp = (ErlFunThing *) fun_val(term);
                Uint num_free = funp->num_free;

                hash = hash * FUNNY_NUMBER10 + num_free;
                hash = hash*FUNNY_NUMBER1 +
                    (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
                hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
                hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
                if (num_free > 0) {
                    if (num_free > 1) {
                        WSTACK_PUSH3(stack, (UWord) &funp->env[1],
                                     (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
                    }
                    term = funp->env[0];
                    goto tail_recur;
                }
                break;
            }
        case PID_DEF:
            UINT32_HASH_RET(internal_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
        case EXTERNAL_PID_DEF:
            UINT32_HASH_RET(external_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
        case PORT_DEF:
            UINT32_HASH_RET(internal_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
        case EXTERNAL_PORT_DEF:
            UINT32_HASH_RET(external_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
        case REF_DEF:
            UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
        case EXTERNAL_REF_DEF:
            UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
        case FLOAT_DEF:
            {
                FloatDef ff;
                GET_DOUBLE(term, ff);
                if (ff.fd == 0.0f) {
                    /* ensure positive 0.0 */
                    ff.fd = erts_get_positive_zero_float();
                }
                hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
                break;
            }
        case MAKE_HASH_CDR_PRE_OP:
            term = (Eterm) WSTACK_POP(stack);
            if (is_not_list(term)) {
                WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
                goto tail_recur;
            }
            /* fall through */
        case LIST_DEF:
            {
                Eterm* list = list_val(term);
                while (is_byte(*list)) {
                    /*
                    ** Optimization for strings.
                    ** Note that this hash is different from a 'small' hash,
                    ** because multiplications on a SPARC are so slow.
                    */
                    hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);

                    if (is_not_list(CDR(list))) {
                        WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
                        term = CDR(list);
                        goto tail_recur;
                    }
                    list = list_val(CDR(list));
                }
                WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
                term = CAR(list);
                goto tail_recur;
            }
        case MAKE_HASH_CDR_POST_OP:
            hash *= FUNNY_NUMBER8;
            break;

        case BIG_DEF:
            /* Note that this is the exact same thing as the hashing of smalls. */
            {
                Eterm* ptr = big_val(term);
                Uint n = BIG_SIZE(ptr);
                Uint k = n-1;
                ErtsDigit d;
                int is_neg = BIG_SIGN(ptr);
                Uint i;
                int j;

                for (i = 0; i < k; i++) {
                    d = BIG_DIGIT(ptr, i);
                    for (j = 0; j < sizeof(ErtsDigit); ++j) {
                        hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
                        d >>= 8;
                    }
                }
                d = BIG_DIGIT(ptr, k);

                k = sizeof(ErtsDigit);
#if defined(ARCH_64)
                if (!(d >> 32))
                    k /= 2;
#endif
                for (j = 0; j < (int)k; ++j) {
                    hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
                    d >>= 8;
                }
                hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
                break;
            }
        case MAP_DEF:
            hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
            break;
        case TUPLE_DEF:
            {
                Eterm* ptr = tuple_val(term);
                Uint arity = arityval(*ptr);

                WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity);
                op = MAKE_HASH_TUPLE_OP;
            } /* fall through */
        case MAKE_HASH_TUPLE_OP:
        case MAKE_HASH_TERM_ARRAY_OP:
            {
                Uint i = (Uint) WSTACK_POP(stack);
                Eterm* ptr = (Eterm*) WSTACK_POP(stack);
                if (i != 0) {
                    term = *ptr;
                    WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
                    goto tail_recur;
                }
                if (op == MAKE_HASH_TUPLE_OP) {
                    Uint32 arity = (Uint32) WSTACK_POP(stack);
                    hash = hash*FUNNY_NUMBER9 + arity;
                }
                break;
            }

        default:
            erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
            return 0;
        }
        if (WSTACK_ISEMPTY(stack)) break;
        op = WSTACK_POP(stack);
    }

    DESTROY_WSTACK(stack);
    return hash;

#undef MAKE_HASH_TUPLE_OP
#undef MAKE_HASH_TERM_ARRAY_OP
#undef MAKE_HASH_CDR_PRE_OP
#undef MAKE_HASH_CDR_POST_OP
#undef UINT32_HASH_STEP
#undef UINT32_HASH_RET
}
/* Hash function suggested by Bob Jenkins. */

#define MIX(a,b,c)                 \
do {                               \
    a -= b; a -= c; a ^= (c>>13);  \
    b -= c; b -= a; b ^= (a<<8);   \
    c -= a; c -= b; c ^= (b>>13);  \
    a -= b; a -= c; a ^= (c>>12);  \
    b -= c; b -= a; b ^= (a<<16);  \
    c -= a; c -= b; c ^= (b>>5);   \
    a -= b; a -= c; a ^= (c>>3);   \
    b -= c; b -= a; b ^= (a<<10);  \
    c -= a; c -= b; c ^= (b>>15);  \
} while(0)

#define HCONST 0x9e3779b9UL /* the golden ratio; an arbitrary value */

static Uint32
block_hash(byte *k, Uint length, Uint32 initval)
{
    Uint32 a,b,c;
    Uint len;

    /* Set up the internal state */
    len = length;
    a = b = HCONST;
    c = initval;  /* the previous hash value */

    while (len >= 12)
    {
        a += (k[0] +((Uint32)k[1]<<8) +((Uint32)k[2]<<16) +((Uint32)k[3]<<24));
        b += (k[4] +((Uint32)k[5]<<8) +((Uint32)k[6]<<16) +((Uint32)k[7]<<24));
        c += (k[8] +((Uint32)k[9]<<8) +((Uint32)k[10]<<16)+((Uint32)k[11]<<24));
        MIX(a,b,c);
        k += 12; len -= 12;
    }

    c += length;
    switch(len)  /* all the case statements fall through */
    {
    case 11: c+=((Uint32)k[10]<<24);
    case 10: c+=((Uint32)k[9]<<16);
    case 9 : c+=((Uint32)k[8]<<8);
        /* the first byte of c is reserved for the length */
    case 8 : b+=((Uint32)k[7]<<24);
    case 7 : b+=((Uint32)k[6]<<16);
    case 6 : b+=((Uint32)k[5]<<8);
    case 5 : b+=k[4];
    case 4 : a+=((Uint32)k[3]<<24);
    case 3 : a+=((Uint32)k[2]<<16);
    case 2 : a+=((Uint32)k[1]<<8);
    case 1 : a+=k[0];
        /* case 0: nothing left to add */
    }
    MIX(a,b,c);
    return c;
}
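
/*
 * Illustration (not part of the original file): block_hash() is Bob
 * Jenkins' lookup2-style hash. It consumes 12 bytes per MIX round, then
 * folds the 0-11 byte tail plus the total length into a final round.
 * Passing the previous result as 'initval' chains several buffers, the
 * same way make_hash2() below seeds it with HCONST_13 + hash:
 */
#if 0
static Uint32
block_hash_usage_sketch(byte *hdr, Uint hdr_len, byte *body, Uint body_len)
{
    Uint32 h = block_hash(hdr, hdr_len, 42);  /* any seed value works */
    return block_hash(body, body_len, h);     /* chain the previous hash */
}
#endif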
Uint32
make_hash2(Eterm term)
{
    Uint32 hash;
    Uint32 hash_xor_pairs;
    DeclareTmpHeapNoproc(tmp_big,2);

    ERTS_UNDEF(hash_xor_pairs, 0);

/* (HCONST * {2, ..., 22}) mod 2^32 */
#define HCONST_2  0x3c6ef372UL
#define HCONST_3  0xdaa66d2bUL
#define HCONST_4  0x78dde6e4UL
#define HCONST_5  0x1715609dUL
#define HCONST_6  0xb54cda56UL
#define HCONST_7  0x5384540fUL
#define HCONST_8  0xf1bbcdc8UL
#define HCONST_9  0x8ff34781UL
#define HCONST_10 0x2e2ac13aUL
#define HCONST_11 0xcc623af3UL
#define HCONST_12 0x6a99b4acUL
#define HCONST_13 0x08d12e65UL
#define HCONST_14 0xa708a81eUL
#define HCONST_15 0x454021d7UL
#define HCONST_16 0xe3779b90UL
#define HCONST_17 0x81af1549UL
#define HCONST_18 0x1fe68f02UL
#define HCONST_19 0xbe1e08bbUL
#define HCONST_20 0x5c558274UL
#define HCONST_21 0xfa8cfc2dUL
#define HCONST_22 0x98c475e6UL
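
/*
 * Illustration (not part of the original file): each HCONST_k above really
 * is (k * HCONST) mod 2^32, e.g. for k = 2:
 *
 *     2 * 0x9e3779b9 = 0x13c6ef372, truncated to 32 bits -> 0x3c6ef372
 *
 * which matches HCONST_2. Distinct multiples give each term type and
 * hashing context its own mixing constant.
 */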
#define HASH_MAP_TAIL (_make_header(1,_TAG_HEADER_REF))
#define HASH_MAP_PAIR (_make_header(2,_TAG_HEADER_REF))
#define HASH_CDR      (_make_header(3,_TAG_HEADER_REF))

#define UINT32_HASH_2(Expr1, Expr2, AConst)  \
    do {                                     \
        Uint32 a,b;                          \
        a = AConst + (Uint32) (Expr1);       \
        b = AConst + (Uint32) (Expr2);       \
        MIX(a,b,hash);                       \
    } while(0)

#define UINT32_HASH(Expr, AConst) UINT32_HASH_2(Expr, 0, AConst)

#define SINT32_HASH(Expr, AConst)                                     \
    do {                                                              \
        Sint32 y = (Sint32) (Expr);                                   \
        if (y < 0) {                                                  \
            UINT32_HASH(-y, AConst);                                  \
            /* Negative numbers are unnecessarily mixed twice. */     \
        }                                                             \
        UINT32_HASH(y, AConst);                                       \
    } while(0)

#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)

#ifdef ARCH_64
#  define POINTER_HASH(Ptr, AConst) UINT32_HASH_2((Uint32)(UWord)(Ptr), (((UWord)(Ptr)) >> 32), AConst)
#else
#  define POINTER_HASH(Ptr, AConst) UINT32_HASH(Ptr, AConst)
#endif

    /* Optimization. Simple cases before declaration of estack. */
    if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
        switch (term & _TAG_IMMED1_MASK) {
        case _TAG_IMMED1_IMMED2:
            switch (term & _TAG_IMMED2_MASK) {
            case _TAG_IMMED2_ATOM:
                /* Fast, but the poor hash value should be mixed. */
                return atom_tab(atom_val(term))->slot.bucket.hvalue;
            }
            break;
        case _TAG_IMMED1_SMALL:
            {
                Sint x = signed_val(term);

                if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
                    term = small_to_big(x, tmp_big);
                    break;
                }
                hash = 0;
                SINT32_HASH(x, HCONST);
                return hash;
            }
        }
    };

    {
        Eterm tmp;
        DECLARE_ESTACK(s);

        UseTmpHeapNoproc(2);
        hash = 0;
        for (;;) {
            switch (primary_tag(term)) {
            case TAG_PRIMARY_LIST:
            {
                int c = 0;
                Uint32 sh = 0;
                Eterm* ptr = list_val(term);
                while (is_byte(*ptr)) {
                    /* Optimization for strings. */
                    sh = (sh << 8) + unsigned_val(*ptr);
                    if (c == 3) {
                        UINT32_HASH(sh, HCONST_4);
                        c = sh = 0;
                    } else {
                        c++;
                    }
                    term = CDR(ptr);
                    if (is_not_list(term))
                        break;
                    ptr = list_val(term);
                }
                if (c > 0)
                    UINT32_HASH(sh, HCONST_4);
                if (is_list(term)) {
                    tmp = CDR(ptr);
                    ESTACK_PUSH(s, tmp);
                    term = CAR(ptr);
                }
            }
            break;
            case TAG_PRIMARY_BOXED:
            {
                Eterm hdr = *boxed_val(term);
                ASSERT(is_header(hdr));
                switch (hdr & _TAG_HEADER_MASK) {
                case ARITYVAL_SUBTAG:
                {
                    int i;
                    int arity = header_arity(hdr);
                    Eterm* elem = tuple_val(term);
                    UINT32_HASH(arity, HCONST_9);
                    if (arity == 0) /* Empty tuple */
                        goto hash2_common;
                    for (i = arity; ; i--) {
                        term = elem[i];
                        if (i == 1)
                            break;
                        ESTACK_PUSH(s, term);
                    }
                }
                break;
                case MAP_SUBTAG:
                {
                    Eterm* ptr = boxed_val(term) + 1;
                    Uint size;
                    int i;
                    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
                    case HAMT_SUBTAG_HEAD_FLATMAP:
                    {
                        flatmap_t *mp = (flatmap_t *)flatmap_val(term);
                        Eterm *ks = flatmap_get_keys(mp);
                        Eterm *vs = flatmap_get_values(mp);
                        size      = flatmap_get_size(mp);
                        UINT32_HASH(size, HCONST_16);
                        if (size == 0)
                            goto hash2_common;
                        /* We want a portable hash function that is *independent*
                         * of the order in which keys and values are encountered.
                         * We therefore calculate context-independent hashes for
                         * all key-value pairs and then xor them together.
                         */
                        ESTACK_PUSH(s, hash_xor_pairs);
                        ESTACK_PUSH(s, hash);
                        ESTACK_PUSH(s, HASH_MAP_TAIL);
                        hash = 0;
                        hash_xor_pairs = 0;
                        for (i = size - 1; i >= 0; i--) {
                            ESTACK_PUSH(s, HASH_MAP_PAIR);
                            ESTACK_PUSH(s, vs[i]);
                            ESTACK_PUSH(s, ks[i]);
                        }
                        goto hash2_common;
                    }

                    case HAMT_SUBTAG_HEAD_ARRAY:
                    case HAMT_SUBTAG_HEAD_BITMAP:
                        size = *ptr++;
                        UINT32_HASH(size, HCONST_16);
                        if (size == 0)
                            goto hash2_common;
                        ESTACK_PUSH(s, hash_xor_pairs);
                        ESTACK_PUSH(s, hash);
                        ESTACK_PUSH(s, HASH_MAP_TAIL);
                        hash = 0;
                        hash_xor_pairs = 0;
                    }
                    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
                    case HAMT_SUBTAG_HEAD_ARRAY:
                        i = 16;
                        break;
                    case HAMT_SUBTAG_HEAD_BITMAP:
                    case HAMT_SUBTAG_NODE_BITMAP:
                        i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
                        break;
                    default:
                        erts_exit(ERTS_ERROR_EXIT, "bad header");
                    }
                    while (i) {
                        if (is_list(*ptr)) {
                            Eterm* cons = list_val(*ptr);
                            ESTACK_PUSH(s, HASH_MAP_PAIR);
                            ESTACK_PUSH(s, CDR(cons));
                            ESTACK_PUSH(s, CAR(cons));
                        }
                        else {
                            ASSERT(is_boxed(*ptr));
                            ESTACK_PUSH(s, *ptr);
                        }
                        i--; ptr++;
                    }
                    goto hash2_common;
                }
                break;
                case EXPORT_SUBTAG:
                {
                    Export* ep = *((Export **) (export_val(term) + 1));
                    UINT32_HASH_2
                        (ep->info.mfa.arity,
                         atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue,
                         HCONST);
                    UINT32_HASH
                        (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue,
                         HCONST_14);
                    goto hash2_common;
                }

                case FUN_SUBTAG:
                {
                    ErlFunThing* funp = (ErlFunThing *) fun_val(term);
                    Uint num_free = funp->num_free;
                    UINT32_HASH_2
                        (num_free,
                         atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
                         HCONST);
                    UINT32_HASH_2
                        (funp->fe->old_index, funp->fe->old_uniq, HCONST);
                    if (num_free == 0) {
                        goto hash2_common;
                    } else {
                        Eterm* bptr = funp->env + num_free - 1;
                        while (num_free-- > 1) {
                            term = *bptr--;
                            ESTACK_PUSH(s, term);
                        }
                        term = *bptr;
                    }
                }
                break;
                case REFC_BINARY_SUBTAG:
                case HEAP_BINARY_SUBTAG:
                case SUB_BINARY_SUBTAG:
                {
                    byte* bptr;
                    unsigned sz = binary_size(term);
                    Uint32 con = HCONST_13 + hash;
                    Uint bitoffs;
                    Uint bitsize;

                    ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
                    if (sz == 0 && bitsize == 0) {
                        hash = con;
                    } else {
                        if (bitoffs == 0) {
                            hash = block_hash(bptr, sz, con);
                            if (bitsize > 0) {
                                UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
                                              HCONST_15);
                            }
                        } else {
                            byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
                                                            sz + (bitsize != 0));
                            erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
                            hash = block_hash(buf, sz, con);
                            if (bitsize > 0) {
                                UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
                                              HCONST_15);
                            }
                            erts_free(ERTS_ALC_T_TMP, (void *) buf);
                        }
                    }
                    goto hash2_common;
                }
                break;
                case POS_BIG_SUBTAG:
                case NEG_BIG_SUBTAG:
                {
                    Eterm* ptr = big_val(term);
                    Uint i = 0;
                    Uint n = BIG_SIZE(ptr);
                    Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
#if D_EXP == 16
                    do {
                        Uint32 x, y;
                        x = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
                        y = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
                        UINT32_HASH_2(x, y, con);
                    } while (i < n);
#elif D_EXP == 32
                    do {
                        Uint32 x, y;
                        x = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        y = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        UINT32_HASH_2(x, y, con);
                    } while (i < n);
#elif D_EXP == 64
                    do {
                        Uint t;
                        Uint32 x, y;
                        ASSERT(i < n);
                        t = BIG_DIGIT(ptr, i++);
                        x = t & 0xffffffff;
                        y = t >> 32;
                        UINT32_HASH_2(x, y, con);
                    } while (i < n);
#else
#error "unsupported D_EXP size"
#endif
                    goto hash2_common;
                }
                break;
                case REF_SUBTAG:
                    /* All parts of the ref should be hashed. */
                    UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
                    goto hash2_common;
                    break;
                case EXTERNAL_REF_SUBTAG:
                    /* All parts of the ref should be hashed. */
                    UINT32_HASH(external_ref_numbers(term)[0], HCONST_7);
                    goto hash2_common;
                    break;
                case EXTERNAL_PID_SUBTAG:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(external_pid_number(term), HCONST_5);
                    goto hash2_common;
                case EXTERNAL_PORT_SUBTAG:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(external_port_number(term), HCONST_6);
                    goto hash2_common;
                case FLOAT_SUBTAG:
                {
                    FloatDef ff;
                    GET_DOUBLE(term, ff);
                    if (ff.fd == 0.0f) {
                        /* ensure positive 0.0 */
                        ff.fd = erts_get_positive_zero_float();
                    }
#if defined(WORDS_BIGENDIAN) || defined(DOUBLE_MIDDLE_ENDIAN)
                    UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
#else
                    UINT32_HASH_2(ff.fw[1], ff.fw[0], HCONST_12);
#endif
                    goto hash2_common;
                }
                break;

                default:
                    erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
                }
            }
            break;
            case TAG_PRIMARY_IMMED1:
                switch (term & _TAG_IMMED1_MASK) {
                case _TAG_IMMED1_PID:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(internal_pid_number(term), HCONST_5);
                    goto hash2_common;
                case _TAG_IMMED1_PORT:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(internal_port_number(term), HCONST_6);
                    goto hash2_common;
                case _TAG_IMMED1_IMMED2:
                    switch (term & _TAG_IMMED2_MASK) {
                    case _TAG_IMMED2_ATOM:
                        if (hash == 0)
                            /* Fast, but the poor hash value should be mixed. */
                            hash = atom_tab(atom_val(term))->slot.bucket.hvalue;
                        else
                            UINT32_HASH(atom_tab(atom_val(term))->slot.bucket.hvalue,
                                        HCONST_3);
                        goto hash2_common;
                    case _TAG_IMMED2_NIL:
                        if (hash == 0)
                            hash = 3468870702UL;
                        else
                            UINT32_HASH(NIL_DEF, HCONST_2);
                        goto hash2_common;
                    default:
                        erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
                    }
                case _TAG_IMMED1_SMALL:
                    {
                        Sint x = signed_val(term);

                        if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
                            term = small_to_big(x, tmp_big);
                            break;
                        }
                        SINT32_HASH(x, HCONST);
                        goto hash2_common;
                    }
                }
                break;
            default:
                erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);

            hash2_common:

                /* Uint32 hash always has the hash value of the previous term,
                 * compounded or otherwise.
                 */

                if (ESTACK_ISEMPTY(s)) {
                    DESTROY_ESTACK(s);
                    UnUseTmpHeapNoproc(2);
                    return hash;
                }

                term = ESTACK_POP(s);

                switch (term) {
                    case HASH_MAP_TAIL: {
                        hash = (Uint32) ESTACK_POP(s);
                        UINT32_HASH(hash_xor_pairs, HCONST_19);
                        hash_xor_pairs = (Uint32) ESTACK_POP(s);
                        goto hash2_common;
                    }
                    case HASH_MAP_PAIR:
                        hash_xor_pairs ^= hash;
                        hash = 0;
                        goto hash2_common;
                    default:
                        break;
                }
            }
        }
    }
}
/* Term hash function for internal use.
 *
 * Limitation #1: Is not "portable" in any way between different VM instances.
 *
 * Limitation #2: The hash value is only valid as long as the term exists
 * somewhere in the VM. Why? Because external pids, ports and refs are hashed
 * by mixing the node *pointer* value. If a node disappears and later reappears
 * with a new ErlNode struct, externals from that node will hash differently
 * than before.
 *
 * One IMPORTANT property must hold (for hamt).
 * EVERY BIT of the term that is significant for equality (see EQ)
 * MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a
 * chance of hashing differently when salted: hash([Salt|A]) vs hash([Salt|B]).
 *
 * This is why we cannot use cached hash values for atoms, for example.
 */
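
/*
 * Illustration (not part of the original file): the salt requirement above
 * is what allows re-probing with a fresh salt. If two distinct terms A and
 * B collided for *every* salt, a hash-based structure could never separate
 * them at a deeper level. A sketch of the salted re-probe pattern that the
 * property supports (the 'level' scheme here is hypothetical):
 */
#if 0
static void
salted_probe_sketch(Eterm key)
{
    Uint32 h0 = make_internal_hash(key, 0);     /* first probe */
    Uint32 h1 = make_internal_hash(key, 1);     /* re-probe, new salt */
    /* h0 and h1 are (almost always) unrelated, so terms that collide
     * under one salt get a fresh chance to differ under the next. */
    (void) h0; (void) h1;
}
#endif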
#define CONST_HASH(AConst)                               \
do { /* Lightweight mixing of constant (type info) */    \
    hash ^= AConst;                                      \
    hash = (hash << 17) ^ (hash >> (32-17));             \
} while (0)

Uint32
make_internal_hash(Eterm term, Uint32 salt)
{
    Uint32 hash;

    /* Optimization. Simple cases before declaration of estack. */
    if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
        hash = salt;
#if ERTS_SIZEOF_ETERM == 8
        UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
#elif ERTS_SIZEOF_ETERM == 4
        UINT32_HASH(term, HCONST);
#else
# error "No you don't"
#endif
        return hash;
    }
    {
        Eterm tmp;
        DECLARE_ESTACK(s);

        hash = salt;
        for (;;) {
            switch (primary_tag(term)) {
            case TAG_PRIMARY_LIST:
            {
                int c = 0;
                Uint32 sh = 0;
                Eterm* ptr = list_val(term);
                while (is_byte(*ptr)) {
                    /* Optimization for strings. */
                    sh = (sh << 8) + unsigned_val(*ptr);
                    if (c == 3) {
                        UINT32_HASH(sh, HCONST_4);
                        c = sh = 0;
                    } else {
                        c++;
                    }
                    term = CDR(ptr);
                    if (is_not_list(term))
                        break;
                    ptr = list_val(term);
                }
                if (c > 0)
                    UINT32_HASH_2(sh, (Uint32)c, HCONST_22);

                if (is_list(term)) {
                    tmp = CDR(ptr);
                    CONST_HASH(HCONST_17);  /* Hash CAR in cons cell */
                    ESTACK_PUSH(s, tmp);
                    if (is_not_list(tmp)) {
                        ESTACK_PUSH(s, HASH_CDR);
                    }
                    term = CAR(ptr);
                }
            }
            break;
            case TAG_PRIMARY_BOXED:
            {
                Eterm hdr = *boxed_val(term);
                ASSERT(is_header(hdr));
                switch (hdr & _TAG_HEADER_MASK) {
                case ARITYVAL_SUBTAG:
                {
                    int i;
                    int arity = header_arity(hdr);
                    Eterm* elem = tuple_val(term);
                    UINT32_HASH(arity, HCONST_9);
                    if (arity == 0) /* Empty tuple */
                        goto pop_next;
                    for (i = arity; ; i--) {
                        term = elem[i];
                        if (i == 1)
                            break;
                        ESTACK_PUSH(s, term);
                    }
                }
                break;

                case MAP_SUBTAG:
                {
                    Eterm* ptr = boxed_val(term) + 1;
                    Uint size;
                    int i;

                    /*
                     * We rely on key-value iteration order being constant
                     * for identical maps (in this VM instance).
                     */
                    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
                    case HAMT_SUBTAG_HEAD_FLATMAP:
                    {
                        flatmap_t *mp = (flatmap_t *)flatmap_val(term);
                        Eterm *ks = flatmap_get_keys(mp);
                        Eterm *vs = flatmap_get_values(mp);
                        size      = flatmap_get_size(mp);
                        UINT32_HASH(size, HCONST_16);
                        if (size == 0)
                            goto pop_next;
                        for (i = size - 1; i >= 0; i--) {
                            ESTACK_PUSH(s, vs[i]);
                            ESTACK_PUSH(s, ks[i]);
                        }
                        goto pop_next;
                    }
                    case HAMT_SUBTAG_HEAD_ARRAY:
                    case HAMT_SUBTAG_HEAD_BITMAP:
                        size = *ptr++;
                        UINT32_HASH(size, HCONST_16);
                        if (size == 0)
                            goto pop_next;
                    }
                    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
                    case HAMT_SUBTAG_HEAD_ARRAY:
                        i = 16;
                        break;
                    case HAMT_SUBTAG_HEAD_BITMAP:
                    case HAMT_SUBTAG_NODE_BITMAP:
                        i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
                        break;
                    default:
                        erts_exit(ERTS_ERROR_EXIT, "bad header");
                    }
                    while (i) {
                        if (is_list(*ptr)) {
                            Eterm* cons = list_val(*ptr);
                            ESTACK_PUSH(s, CDR(cons));
                            ESTACK_PUSH(s, CAR(cons));
                        }
                        else {
                            ASSERT(is_boxed(*ptr));
                            ESTACK_PUSH(s, *ptr);
                        }
                        i--; ptr++;
                    }
                    goto pop_next;
                }
                break;
                case EXPORT_SUBTAG:
                {
                    Export* ep = *((Export **) (export_val(term) + 1));
                    /* Assumes Export entries never move */
                    POINTER_HASH(ep, HCONST_14);
                    goto pop_next;
                }

                case FUN_SUBTAG:
                {
                    ErlFunThing* funp = (ErlFunThing *) fun_val(term);
                    Uint num_free = funp->num_free;
                    UINT32_HASH_2(num_free, funp->fe->module, HCONST_20);
                    UINT32_HASH_2(funp->fe->old_index, funp->fe->old_uniq, HCONST_21);
                    if (num_free == 0) {
                        goto pop_next;
                    } else {
                        Eterm* bptr = funp->env + num_free - 1;
                        while (num_free-- > 1) {
                            term = *bptr--;
                            ESTACK_PUSH(s, term);
                        }
                        term = *bptr;
                    }
                }
                break;
                case REFC_BINARY_SUBTAG:
                case HEAP_BINARY_SUBTAG:
                case SUB_BINARY_SUBTAG:
                {
                    byte* bptr;
                    Uint sz = binary_size(term);
                    Uint32 con = HCONST_13 + hash;
                    Uint bitoffs;
                    Uint bitsize;

                    ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
                    if (sz == 0 && bitsize == 0) {
                        hash = con;
                    } else {
                        if (bitoffs == 0) {
                            hash = block_hash(bptr, sz, con);
                            if (bitsize > 0) {
                                UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
                                              HCONST_15);
                            }
                        } else {
                            byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
                                                            sz + (bitsize != 0));
                            erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
                            hash = block_hash(buf, sz, con);
                            if (bitsize > 0) {
                                UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
                                              HCONST_15);
                            }
                            erts_free(ERTS_ALC_T_TMP, (void *) buf);
                        }
                    }
                    goto pop_next;
                }
                break;
                case POS_BIG_SUBTAG:
                case NEG_BIG_SUBTAG:
                {
                    Eterm* ptr = big_val(term);
                    Uint i = 0;
                    Uint n = BIG_SIZE(ptr);
                    Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
#if D_EXP == 16
                    do {
                        Uint32 x, y;
                        x = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
                        y = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
                        UINT32_HASH_2(x, y, con);
                    } while (i < n);
#elif D_EXP == 32
                    do {
                        Uint32 x, y;
                        x = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        y = i < n ? BIG_DIGIT(ptr, i++) : 0;
                        UINT32_HASH_2(x, y, con);
                    } while (i < n);
#elif D_EXP == 64
                    do {
                        Uint t;
                        Uint32 x, y;
                        ASSERT(i < n);
                        t = BIG_DIGIT(ptr, i++);
                        x = t & 0xffffffff;
                        y = t >> 32;
                        UINT32_HASH_2(x, y, con);
                    } while (i < n);
#else
#error "unsupported D_EXP size"
#endif
                    goto pop_next;
                }
                break;
                case REF_SUBTAG:
                    UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
                    ASSERT(internal_ref_no_numbers(term) == 3);
                    UINT32_HASH_2(internal_ref_numbers(term)[1],
                                  internal_ref_numbers(term)[2], HCONST_8);
                    goto pop_next;

                case EXTERNAL_REF_SUBTAG:
                {
                    ExternalThing* thing = external_thing_ptr(term);

                    ASSERT(external_thing_ref_no_numbers(thing) == 3);
                    /* See limitation #2 */
#ifdef ARCH_64
                    POINTER_HASH(thing->node, HCONST_7);
                    UINT32_HASH(external_thing_ref_numbers(thing)[0], HCONST_7);
#else
                    UINT32_HASH_2(thing->node,
                                  external_thing_ref_numbers(thing)[0], HCONST_7);
#endif
                    UINT32_HASH_2(external_thing_ref_numbers(thing)[1],
                                  external_thing_ref_numbers(thing)[2], HCONST_8);
                    goto pop_next;
                }
                case EXTERNAL_PID_SUBTAG: {
                    ExternalThing* thing = external_thing_ptr(term);
                    /* See limitation #2 */
#ifdef ARCH_64
                    POINTER_HASH(thing->node, HCONST_5);
                    UINT32_HASH(thing->data.ui[0], HCONST_5);
#else
                    UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_5);
#endif
                    goto pop_next;
                }
                case EXTERNAL_PORT_SUBTAG: {
                    ExternalThing* thing = external_thing_ptr(term);
                    /* See limitation #2 */
#ifdef ARCH_64
                    POINTER_HASH(thing->node, HCONST_6);
                    UINT32_HASH(thing->data.ui[0], HCONST_6);
#else
                    UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_6);
#endif
                    goto pop_next;
                }
                case FLOAT_SUBTAG:
                {
                    FloatDef ff;
                    GET_DOUBLE(term, ff);
                    if (ff.fd == 0.0f) {
                        /* ensure positive 0.0 */
                        ff.fd = erts_get_positive_zero_float();
                    }
                    UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
                    goto pop_next;
                }
                default:
                    erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
                }
            }
            break;
            case TAG_PRIMARY_IMMED1:
#if ERTS_SIZEOF_ETERM == 8
                UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
#else
                UINT32_HASH(term, HCONST);
#endif
                goto pop_next;

            default:
                erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);

            pop_next:
                if (ESTACK_ISEMPTY(s)) {
                    DESTROY_ESTACK(s);
                    return hash;
                }

                term = ESTACK_POP(s);

                switch (term) {
                    case HASH_CDR:
                        CONST_HASH(HCONST_18);  /* Hash CDR in cons cell */
                        goto pop_next;
                    default:
                        break;
                }
            }
        }
    }

#undef CONST_HASH
#undef HASH_MAP_TAIL
#undef HASH_MAP_PAIR
#undef HASH_CDR
#undef UINT32_HASH_2
#undef UINT32_HASH
#undef SINT32_HASH
}

#undef HCONST
#undef MIX
/* error_logger !
   {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }}
*/
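
/*
 * Illustration (not part of the original file): in Erlang syntax the
 * message assembled by the helpers below looks roughly like
 *
 *     {log, error, Format, Args,
 *      #{error_logger => #{emulator => true, tag => error},
 *        gl => GroupLeader, pid => Sender, time => MicroSecs}}
 *
 * where the gl and pid keys are dropped when the group leader or sending
 * process is unavailable, matching the MAP2/MAP3/MAP4 branches in
 * do_send_logger_message().
 */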
static Eterm
do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
                           Eterm **hp, ErlOffHeap **ohp,
                           ErlHeapFragment **bp, Uint sz)
{
    Uint gl_sz;
    gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
    sz = sz + gl_sz + 6 /*outer 5-tuple*/
        + MAP2_SZ /* error_logger map */;

    *pid = erts_get_current_pid();

    if (is_nil(gleader) && is_non_value(*pid)) {
        sz += MAP2_SZ /* metadata map no gl, no pid */;
    } else if (is_nil(gleader) || is_non_value(*pid))
        sz += MAP3_SZ /* metadata map no gl or no pid */;
    else
        sz += MAP4_SZ /* metadata map w gl w pid */;

    *ts = ERTS_MONOTONIC_TO_USEC(erts_os_system_time());
    erts_bld_sint64(NULL, &sz, *ts);

    *bp = new_message_buffer(sz);
    *ohp = &(*bp)->off_heap;
    *hp = (*bp)->mem;

    return copy_struct(gleader,gl_sz,hp,*ohp);
}

static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args,
                                   ErtsMonotonicTime ts, Eterm pid,
                                   Eterm *hp, ErlHeapFragment *bp)
{
    Eterm message, md, el_tag = tag;
    Eterm time = erts_bld_sint64(&hp, NULL, ts);

    /* This mapping is needed for the backwards compatible error_logger */
    switch (tag) {
    case am_info: el_tag = am_info_msg; break;
    case am_warning: el_tag = am_warning_msg; break;
    default:
        ASSERT(tag == am_error);
        break;
    }

    md = MAP2(hp, am_emulator, am_true, ERTS_MAKE_AM("tag"), el_tag);
    hp += MAP2_SZ;

    if (is_nil(gl) && is_non_value(pid)) {
        /* no gl and no pid, probably from a port */
        md = MAP2(hp,
                  am_error_logger, md,
                  am_time, time);
        hp += MAP2_SZ;
        pid = NIL;
    } else if (is_nil(gl)) {
        /* no gl */
        md = MAP3(hp,
                  am_error_logger, md,
                  am_pid, pid,
                  am_time, time);
        hp += MAP3_SZ;
  1820. } else if (is_non_value(pid)) {
1821. /* no pid */
  1822. md = MAP3(hp,
  1823. am_error_logger, md,
  1824. ERTS_MAKE_AM("gl"), gl,
  1825. am_time, time);
  1826. hp += MAP3_SZ;
  1827. pid = NIL;
  1828. } else {
  1829. md = MAP4(hp,
  1830. am_error_logger, md,
  1831. ERTS_MAKE_AM("gl"), gl,
  1832. am_pid, pid,
  1833. am_time, time);
  1834. hp += MAP4_SZ;
  1835. }
  1836. message = TUPLE5(hp, am_log, tag, format, args, md);
  1837. erts_queue_error_logger_message(pid, message, bp);
  1838. }
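/*
 * Illustrative shape of the queued term (Erlang notation, a sketch based
 * on the construction above, not a normative spec): for a sender with
 * both group leader and pid the message is
 *
 *     {log, Tag, Format, Args,
 *      #{ error_logger => #{ emulator => true, tag => ElTag },
 *         gl => Gl, pid => Pid, time => Time }}
 *
 * where ElTag is the backwards compatible error_logger tag mapped in the
 * switch above. When pid or gl is missing, the corresponding key is left
 * out of the metadata map and NIL is passed on to
 * erts_queue_error_logger_message().
 */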
  1839. static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len)
  1840. {
  1841. Uint sz;
  1842. Eterm list, args, format, pid;
  1843. ErtsMonotonicTime ts;
  1844. Eterm *hp = NULL;
  1845. ErlOffHeap *ohp = NULL;
  1846. ErlHeapFragment *bp = NULL;
  1847. sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
  1848. + 8 /* "~s~n" */;
1849. /* gleader size is accounted for and allocated next */
  1850. gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
  1851. list = buf_to_intlist(&hp, buf, len, NIL);
  1852. args = CONS(hp,list,NIL);
  1853. hp += 2;
  1854. format = buf_to_intlist(&hp, "~s~n", 4, NIL);
  1855. do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
  1856. return 0;
  1857. }
  1858. static int do_send_term_to_logger(Eterm tag, Eterm gl,
  1859. char *buf, size_t len, Eterm args)
  1860. {
  1861. Uint sz;
  1862. Uint args_sz;
  1863. Eterm format, pid;
  1864. ErtsMonotonicTime ts;
  1865. Eterm *hp = NULL;
  1866. ErlOffHeap *ohp = NULL;
  1867. ErlHeapFragment *bp = NULL;
  1868. ASSERT(len > 0);
  1869. args_sz = size_object(args);
  1870. sz = len * 2 /* format */ + args_sz;
1871. /* gleader size is accounted for and allocated next */
  1872. gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
  1873. format = buf_to_intlist(&hp, buf, len, NIL);
  1874. args = copy_struct(args, args_sz, &hp, ohp);
  1875. do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
  1876. return 0;
  1877. }
  1878. static ERTS_INLINE int
  1879. send_info_to_logger(Eterm gleader, char *buf, size_t len)
  1880. {
  1881. return do_send_to_logger(am_info, gleader, buf, len);
  1882. }
  1883. static ERTS_INLINE int
  1884. send_warning_to_logger(Eterm gleader, char *buf, size_t len)
  1885. {
  1886. return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len);
  1887. }
  1888. static ERTS_INLINE int
  1889. send_error_to_logger(Eterm gleader, char *buf, size_t len)
  1890. {
  1891. return do_send_to_logger(am_error, gleader, buf, len);
  1892. }
  1893. static ERTS_INLINE int
  1894. send_error_term_to_logger(Eterm gleader, char *buf, size_t len, Eterm args)
  1895. {
  1896. return do_send_term_to_logger(am_error, gleader, buf, len, args);
  1897. }
  1898. #define LOGGER_DSBUF_INC_SZ 256
  1899. static erts_dsprintf_buf_t *
  1900. grow_logger_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1901. {
  1902. size_t size;
  1903. size_t free_size = dsbufp->size - dsbufp->str_len;
  1904. ASSERT(dsbufp && dsbufp->str);
  1905. if (need <= free_size)
  1906. return dsbufp;
  1907. size = need - free_size + LOGGER_DSBUF_INC_SZ;
  1908. size = (((size + LOGGER_DSBUF_INC_SZ - 1) / LOGGER_DSBUF_INC_SZ)
  1909. * LOGGER_DSBUF_INC_SZ);
  1910. size += dsbufp->size;
  1911. ASSERT(dsbufp->str_len + need <= size);
  1912. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_LOGGER_DSBUF,
  1913. (void *) dsbufp->str,
  1914. size);
  1915. dsbufp->size = size;
  1916. return dsbufp;
  1917. }
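/*
 * Worked example of the growth arithmetic above (numbers hypothetical):
 * with size = 256, str_len = 100 and need = 300, free_size is 156, so
 * size starts at 300 - 156 + 256 = 400, is rounded up to the next
 * multiple of LOGGER_DSBUF_INC_SZ giving 512, and the old size is then
 * added, yielding a 768 byte buffer; str_len + need == 400 <= 768 as the
 * assertion demands.
 */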
  1918. erts_dsprintf_buf_t *
  1919. erts_create_logger_dsbuf(void)
  1920. {
  1921. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_logger_dsbuf);
  1922. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
  1923. sizeof(erts_dsprintf_buf_t));
  1924. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  1925. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
  1926. LOGGER_DSBUF_INC_SZ);
  1927. dsbufp->str[0] = '\0';
  1928. dsbufp->size = LOGGER_DSBUF_INC_SZ;
  1929. return dsbufp;
  1930. }
  1931. static ERTS_INLINE void
  1932. destroy_logger_dsbuf(erts_dsprintf_buf_t *dsbufp)
  1933. {
  1934. ASSERT(dsbufp && dsbufp->str);
  1935. erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp->str);
  1936. erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp);
  1937. }
  1938. int
  1939. erts_send_info_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
  1940. {
  1941. int res;
  1942. res = send_info_to_logger(gleader, dsbufp->str, dsbufp->str_len);
  1943. destroy_logger_dsbuf(dsbufp);
  1944. return res;
  1945. }
  1946. int
  1947. erts_send_warning_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
  1948. {
  1949. int res;
  1950. res = send_warning_to_logger(gleader, dsbufp->str, dsbufp->str_len);
  1951. destroy_logger_dsbuf(dsbufp);
  1952. return res;
  1953. }
  1954. int
  1955. erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
  1956. {
  1957. int res;
  1958. res = send_error_to_logger(gleader, dsbufp->str, dsbufp->str_len);
  1959. destroy_logger_dsbuf(dsbufp);
  1960. return res;
  1961. }
  1962. int
  1963. erts_send_error_term_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp, Eterm args)
  1964. {
  1965. int res;
  1966. res = send_error_term_to_logger(gleader, dsbufp->str, dsbufp->str_len, args);
  1967. destroy_logger_dsbuf(dsbufp);
  1968. return res;
  1969. }
  1970. int
  1971. erts_send_info_to_logger_str(Eterm gleader, char *str)
  1972. {
  1973. return send_info_to_logger(gleader, str, sys_strlen(str));
  1974. }
  1975. int
  1976. erts_send_warning_to_logger_str(Eterm gleader, char *str)
  1977. {
  1978. return send_warning_to_logger(gleader, str, sys_strlen(str));
  1979. }
  1980. int
  1981. erts_send_error_to_logger_str(Eterm gleader, char *str)
  1982. {
  1983. return send_error_to_logger(gleader, str, sys_strlen(str));
  1984. }
  1985. int
  1986. erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
  1987. {
  1988. return erts_send_info_to_logger(NIL, dsbuf);
  1989. }
  1990. int
  1991. erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
  1992. {
  1993. return erts_send_warning_to_logger(NIL, dsbuf);
  1994. }
  1995. int
  1996. erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
  1997. {
  1998. return erts_send_error_to_logger(NIL, dsbuf);
  1999. }
  2000. int
  2001. erts_send_info_to_logger_str_nogl(char *str)
  2002. {
  2003. return erts_send_info_to_logger_str(NIL, str);
  2004. }
  2005. int
  2006. erts_send_warning_to_logger_str_nogl(char *str)
  2007. {
  2008. return erts_send_warning_to_logger_str(NIL, str);
  2009. }
  2010. int
  2011. erts_send_error_to_logger_str_nogl(char *str)
  2012. {
  2013. return erts_send_error_to_logger_str(NIL, str);
  2014. }
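/*
 * Minimal usage sketch for the logger dsbuf API above (assuming the
 * erts_dsprintf() formatter defined elsewhere in this file):
 *
 *     erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
 *     erts_dsprintf(dsbufp, "tried to do X but got %d\n", code);
 *     erts_send_error_to_logger_nogl(dsbufp);
 *
 * The send consumes and destroys the dsbuf, so it must not be touched
 * afterwards; the *_nogl variants pass NIL as group leader.
 */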
  2015. #define TMP_DSBUF_INC_SZ 256
  2016. static erts_dsprintf_buf_t *
  2017. grow_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  2018. {
  2019. size_t size;
  2020. size_t free_size = dsbufp->size - dsbufp->str_len;
  2021. ASSERT(dsbufp);
  2022. if (need <= free_size)
  2023. return dsbufp;
  2024. size = need - free_size + TMP_DSBUF_INC_SZ;
  2025. size = ((size + TMP_DSBUF_INC_SZ - 1)/TMP_DSBUF_INC_SZ)*TMP_DSBUF_INC_SZ;
  2026. size += dsbufp->size;
  2027. ASSERT(dsbufp->str_len + need <= size);
  2028. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_TMP_DSBUF,
  2029. (void *) dsbufp->str,
  2030. size);
  2031. dsbufp->size = size;
  2032. return dsbufp;
  2033. }
  2034. erts_dsprintf_buf_t *
  2035. erts_create_tmp_dsbuf(Uint size)
  2036. {
  2037. Uint init_size = size ? size : TMP_DSBUF_INC_SZ;
  2038. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_tmp_dsbuf);
  2039. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_TMP_DSBUF,
  2040. sizeof(erts_dsprintf_buf_t));
  2041. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  2042. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_TMP_DSBUF, init_size);
  2043. dsbufp->str[0] = '\0';
  2044. dsbufp->size = init_size;
  2045. return dsbufp;
  2046. }
  2047. void
  2048. erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp)
  2049. {
  2050. if (dsbufp->str)
  2051. erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp->str);
  2052. erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp);
  2053. }
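/*
 * Sketch of the tmp dsbuf life cycle (erts_dsprintf() assumed as above):
 * unlike the logger dsbuf, a tmp dsbuf is not consumed by any send and
 * the caller owns the destroy:
 *
 *     erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
 *     erts_dsprintf(dsbufp, "%T", term);
 *     ... use dsbufp->str and dsbufp->str_len ...
 *     erts_destroy_tmp_dsbuf(dsbufp);
 *
 * Passing 0 to erts_create_tmp_dsbuf() selects the default initial size
 * TMP_DSBUF_INC_SZ.
 */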
2054. /* eq and cmp are written as separate functions as eq is a little faster */
  2055. /*
  2056. * Test for equality of two terms.
  2057. * Returns 0 if not equal, or a non-zero value otherwise.
  2058. */
  2059. int eq(Eterm a, Eterm b)
  2060. {
  2061. DECLARE_WSTACK(stack);
  2062. Sint sz;
  2063. Eterm* aa;
  2064. Eterm* bb;
  2065. tailrecur:
  2066. if (is_same(a, b)) goto pop_next;
  2067. tailrecur_ne:
  2068. switch (primary_tag(a)) {
  2069. case TAG_PRIMARY_LIST:
  2070. if (is_list(b)) {
  2071. Eterm* aval = list_val(a);
  2072. Eterm* bval = list_val(b);
  2073. while (1) {
  2074. Eterm atmp = CAR(aval);
  2075. Eterm btmp = CAR(bval);
  2076. if (!is_same(atmp,btmp)) {
  2077. WSTACK_PUSH2(stack,(UWord) CDR(bval),(UWord) CDR(aval));
  2078. a = atmp;
  2079. b = btmp;
  2080. goto tailrecur_ne;
  2081. }
  2082. atmp = CDR(aval);
  2083. btmp = CDR(bval);
  2084. if (is_same(atmp,btmp)) {
  2085. goto pop_next;
  2086. }
  2087. if (is_not_list(atmp) || is_not_list(btmp)) {
  2088. a = atmp;
  2089. b = btmp;
  2090. goto tailrecur_ne;
  2091. }
  2092. aval = list_val(atmp);
  2093. bval = list_val(btmp);
  2094. }
  2095. }
  2096. break; /* not equal */
  2097. case TAG_PRIMARY_BOXED:
  2098. {
  2099. Eterm hdr = *boxed_val(a);
  2100. switch (hdr & _TAG_HEADER_MASK) {
  2101. case ARITYVAL_SUBTAG:
  2102. {
  2103. aa = tuple_val(a);
  2104. if (!is_boxed(b) || *boxed_val(b) != *aa)
  2105. goto not_equal;
  2106. bb = tuple_val(b);
  2107. if ((sz = arityval(*aa)) == 0) goto pop_next;
  2108. ++aa;
  2109. ++bb;
  2110. goto term_array;
  2111. }
  2112. case REFC_BINARY_SUBTAG:
  2113. case HEAP_BINARY_SUBTAG:
  2114. case SUB_BINARY_SUBTAG:
  2115. {
  2116. byte* a_ptr;
  2117. byte* b_ptr;
  2118. size_t a_size;
  2119. size_t b_size;
  2120. Uint a_bitsize;
  2121. Uint b_bitsize;
  2122. Uint a_bitoffs;
  2123. Uint b_bitoffs;
  2124. if (!is_binary(b)) {
  2125. goto not_equal;
  2126. }
  2127. a_size = binary_size(a);
  2128. b_size = binary_size(b);
  2129. if (a_size != b_size) {
  2130. goto not_equal;
  2131. }
  2132. ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
  2133. ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
  2134. if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
  2135. if (sys_memcmp(a_ptr, b_ptr, a_size) == 0) goto pop_next;
  2136. } else if (a_bitsize == b_bitsize) {
  2137. if (erts_cmp_bits(a_ptr, a_bitoffs, b_ptr, b_bitoffs,
  2138. (a_size << 3) + a_bitsize) == 0) goto pop_next;
  2139. }
  2140. break; /* not equal */
  2141. }
  2142. case EXPORT_SUBTAG:
  2143. {
  2144. if (is_export(b)) {
  2145. Export* a_exp = *((Export **) (export_val(a) + 1));
  2146. Export* b_exp = *((Export **) (export_val(b) + 1));
  2147. if (a_exp == b_exp) goto pop_next;
  2148. }
  2149. break; /* not equal */
  2150. }
  2151. case FUN_SUBTAG:
  2152. {
  2153. ErlFunThing* f1;
  2154. ErlFunThing* f2;
  2155. if (!is_fun(b))
  2156. goto not_equal;
  2157. f1 = (ErlFunThing *) fun_val(a);
  2158. f2 = (ErlFunThing *) fun_val(b);
  2159. if (f1->fe->module != f2->fe->module ||
  2160. f1->fe->old_index != f2->fe->old_index ||
  2161. f1->fe->old_uniq != f2->fe->old_uniq ||
  2162. f1->num_free != f2->num_free) {
  2163. goto not_equal;
  2164. }
  2165. if ((sz = f1->num_free) == 0) goto pop_next;
  2166. aa = f1->env;
  2167. bb = f2->env;
  2168. goto term_array;
  2169. }
  2170. case EXTERNAL_PID_SUBTAG:
  2171. case EXTERNAL_PORT_SUBTAG: {
  2172. ExternalThing *ap;
  2173. ExternalThing *bp;
  2174. if(!is_external(b))
  2175. goto not_equal;
  2176. ap = external_thing_ptr(a);
  2177. bp = external_thing_ptr(b);
  2178. if(ap->header == bp->header && ap->node == bp->node) {
  2179. ASSERT(1 == external_data_words(a));
  2180. ASSERT(1 == external_data_words(b));
  2181. if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next;
  2182. }
  2183. break; /* not equal */
  2184. }
  2185. case EXTERNAL_REF_SUBTAG: {
  2186. /*
  2187. * Observe!
  2188. * When comparing refs we need to compare ref numbers
  2189. * (32-bit words) *not* ref data words.
  2190. */
  2191. Uint32 *anum;
  2192. Uint32 *bnum;
  2193. Uint common_len;
  2194. Uint alen;
  2195. Uint blen;
  2196. Uint i;
  2197. ExternalThing* athing;
  2198. ExternalThing* bthing;
  2199. if(!is_external_ref(b))
  2200. goto not_equal;
  2201. athing = external_thing_ptr(a);
  2202. bthing = external_thing_ptr(b);
  2203. if(athing->node != bthing->node)
  2204. goto not_equal;
  2205. anum = external_thing_ref_numbers(athing);
  2206. bnum = external_thing_ref_numbers(bthing);
  2207. alen = external_thing_ref_no_numbers(athing);
  2208. blen = external_thing_ref_no_numbers(bthing);
  2209. goto ref_common;
  2210. case REF_SUBTAG:
  2211. if (!is_internal_ref(b))
  2212. goto not_equal;
  2213. alen = internal_ref_no_numbers(a);
  2214. anum = internal_ref_numbers(a);
  2215. blen = internal_ref_no_numbers(b);
  2216. bnum = internal_ref_numbers(b);
  2217. ref_common:
  2218. ASSERT(alen > 0 && blen > 0);
  2219. if (anum[0] != bnum[0])
  2220. goto not_equal;
  2221. if (alen == 3 && blen == 3) {
  2222. /* Most refs are of length 3 */
  2223. if (anum[1] == bnum[1] && anum[2] == bnum[2]) {
  2224. goto pop_next;
  2225. } else {
  2226. goto not_equal;
  2227. }
  2228. }
  2229. common_len = alen;
  2230. if (blen < alen)
  2231. common_len = blen;
  2232. for (i = 1; i < common_len; i++)
  2233. if (anum[i] != bnum[i])
  2234. goto not_equal;
  2235. if(alen != blen) {
  2236. if (alen > blen) {
  2237. for (i = common_len; i < alen; i++)
  2238. if (anum[i] != 0)
  2239. goto not_equal;
  2240. }
  2241. else {
  2242. for (i = common_len; i < blen; i++)
  2243. if (bnum[i] != 0)
  2244. goto not_equal;
  2245. }
  2246. }
  2247. goto pop_next;
  2248. }
  2249. case POS_BIG_SUBTAG:
  2250. case NEG_BIG_SUBTAG:
  2251. {
  2252. int i;
  2253. if (!is_big(b))
  2254. goto not_equal;
  2255. aa = big_val(a);
  2256. bb = big_val(b);
  2257. if (*aa != *bb)
  2258. goto not_equal;
  2259. i = BIG_ARITY(aa);
  2260. while(i--) {
  2261. if (*++aa != *++bb)
  2262. goto not_equal;
  2263. }
  2264. goto pop_next;
  2265. }
  2266. case FLOAT_SUBTAG:
  2267. {
  2268. FloatDef af;
  2269. FloatDef bf;
  2270. if (is_float(b)) {
  2271. GET_DOUBLE(a, af);
  2272. GET_DOUBLE(b, bf);
  2273. if (af.fd == bf.fd) goto pop_next;
  2274. }
  2275. break; /* not equal */
  2276. }
  2277. case MAP_SUBTAG:
  2278. if (is_flatmap(a)) {
  2279. aa = flatmap_val(a);
  2280. if (!is_boxed(b) || *boxed_val(b) != *aa)
  2281. goto not_equal;
  2282. bb = flatmap_val(b);
  2283. sz = flatmap_get_size((flatmap_t*)aa);
  2284. if (sz != flatmap_get_size((flatmap_t*)bb)) goto not_equal;
  2285. if (sz == 0) goto pop_next;
  2286. aa += 2;
  2287. bb += 2;
  2288. sz += 1; /* increment for tuple-keys */
  2289. goto term_array;
  2290. } else {
  2291. if (!is_boxed(b) || *boxed_val(b) != hdr)
  2292. goto not_equal;
  2293. aa = hashmap_val(a) + 1;
  2294. bb = hashmap_val(b) + 1;
  2295. switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
  2296. case HAMT_SUBTAG_HEAD_ARRAY:
  2297. aa++; bb++;
  2298. sz = 16;
  2299. break;
  2300. case HAMT_SUBTAG_HEAD_BITMAP:
  2301. aa++; bb++;
  2302. case HAMT_SUBTAG_NODE_BITMAP:
  2303. sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
  2304. ASSERT(sz > 0 && sz < 17);
  2305. break;
  2306. default:
  2307. erts_exit(ERTS_ERROR_EXIT, "Unknown hashmap subsubtag\n");
  2308. }
  2309. goto term_array;
  2310. }
  2311. default:
  2312. ASSERT(!"Unknown boxed subtab in EQ");
  2313. }
  2314. break;
  2315. }
  2316. }
  2317. goto not_equal;
  2318. term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
  2319. ASSERT(sz != 0);
  2320. {
  2321. Eterm* ap = aa;
  2322. Eterm* bp = bb;
  2323. Sint i = sz;
  2324. for (;;) {
  2325. if (!is_same(*ap,*bp)) break;
  2326. if (--i == 0) goto pop_next;
  2327. ++ap;
  2328. ++bp;
  2329. }
  2330. a = *ap;
  2331. b = *bp;
  2332. if (is_both_immed(a,b)) {
  2333. goto not_equal;
  2334. }
  2335. if (i > 1) { /* push the rest */
  2336. WSTACK_PUSH3(stack, i-1, (UWord)(bp+1),
  2337. ((UWord)(ap+1)) | TAG_PRIMARY_HEADER);
  2338. /* We (ab)use TAG_PRIMARY_HEADER to recognize a term_array */
  2339. }
  2340. goto tailrecur_ne;
  2341. }
  2342. pop_next:
  2343. if (!WSTACK_ISEMPTY(stack)) {
  2344. UWord something = WSTACK_POP(stack);
  2345. if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
  2346. aa = (Eterm*) something;
  2347. bb = (Eterm*) WSTACK_POP(stack);
  2348. sz = WSTACK_POP(stack);
  2349. goto term_array;
  2350. }
  2351. a = something;
  2352. b = WSTACK_POP(stack);
  2353. goto tailrecur;
  2354. }
  2355. DESTROY_WSTACK(stack);
  2356. return 1;
  2357. not_equal:
  2358. DESTROY_WSTACK(stack);
  2359. return 0;
  2360. }
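/*
 * Usage note (sketch, not from the original source): eq() implements
 * exact (=:=) equality, so the integer 1 and the float 1.0 are not equal
 * here; the FLOAT_SUBTAG case above demands that both operands are
 * floats. For arithmetic (==) equality, or for ordering, use erts_cmp()
 * below with exact == 0.
 */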
  2361. /*
  2362. * Compare objects.
2363. * Returns 0 if equal, a negative value if a < b, or a positive value if a > b.
2364. *
2365. * According to the Erlang Standard, types are ordered as follows:
  2366. * numbers < (characters) < atoms < refs < funs < ports < pids <
  2367. * tuples < maps < [] < conses < binaries.
  2368. *
  2369. * Note that characters are currently not implemented.
  2370. *
  2371. */
  2372. /* cmp(Eterm a, Eterm b)
  2373. * For compatibility with HiPE - arith-based compare.
  2374. */
  2375. Sint cmp(Eterm a, Eterm b)
  2376. {
  2377. return erts_cmp(a, b, 0, 0);
  2378. }
  2379. Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
2380. /* erts_cmp(Eterm a, Eterm b, int exact, int eq_only)
2381. * exact = 1 -> term-based compare
2382. * exact = 0 -> arith-based compare
2383. */
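/*
 * Example of the flag's effect (grounded in the fall-through logic
 * below, variable name hypothetical): with f_one holding a boxed 1.0,
 * erts_cmp(make_small(1), f_one, 0, 0) compares arithmetically and
 * returns 0, matching ==, while erts_cmp(make_small(1), f_one, 1, 0)
 * takes the exact_fall_through path and returns a non-zero tag
 * difference, matching =:=.
 */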
  2384. Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only)
  2385. {
  2386. #define PSTACK_TYPE struct erts_cmp_hashmap_state
  2387. struct erts_cmp_hashmap_state {
  2388. Sint wstack_rollback;
  2389. int was_exact;
  2390. Eterm *ap;
  2391. Eterm *bp;
  2392. Eterm min_key;
  2393. Sint cmp_res; /* result so far -1,0,+1 */
  2394. };
  2395. PSTACK_DECLARE(hmap_stack, 1);
  2396. WSTACK_DECLARE(stack);
  2397. WSTACK_DECLARE(b_stack); /* only used by hashmaps */
  2398. Eterm* aa;
  2399. Eterm* bb;
  2400. int i;
  2401. Sint j;
  2402. int a_tag;
  2403. int b_tag;
  2404. ErlNode *anode;
  2405. ErlNode *bnode;
  2406. Uint adata;
  2407. Uint bdata;
  2408. Uint alen;
  2409. Uint blen;
  2410. Uint32 *anum;
  2411. Uint32 *bnum;
  2412. /* The WSTACK contains naked Eterms and Operations marked with header-tags */
  2413. #define OP_BITS 4
  2414. #define OP_MASK 0xF
  2415. #define TERM_ARRAY_OP 0
  2416. #define SWITCH_EXACT_OFF_OP 1
  2417. #define HASHMAP_PHASE1_ARE_KEYS_EQUAL 2
  2418. #define HASHMAP_PHASE1_IS_MIN_KEY 3
  2419. #define HASHMAP_PHASE1_CMP_VALUES 4
  2420. #define HASHMAP_PHASE2_ARE_KEYS_EQUAL 5
  2421. #define HASHMAP_PHASE2_IS_MIN_KEY_A 6
  2422. #define HASHMAP_PHASE2_IS_MIN_KEY_B 7
  2423. #define OP_WORD(OP) (((OP) << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER)
  2424. #define TERM_ARRAY_OP_WORD(SZ) OP_WORD(((SZ) << OP_BITS) | TERM_ARRAY_OP)
  2425. #define GET_OP(WORD) (ASSERT(is_header(WORD)), ((WORD) >> _TAG_PRIMARY_SIZE) & OP_MASK)
  2426. #define GET_OP_ARG(WORD) (ASSERT(is_header(WORD)), ((WORD) >> (OP_BITS + _TAG_PRIMARY_SIZE)))
  2427. #define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; }
  2428. #define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal
  2429. #undef CMP_NODES
  2430. #define CMP_NODES(AN, BN) \
  2431. do { \
  2432. if((AN) != (BN)) { \
  2433. if((AN)->sysname != (BN)->sysname) \
  2434. RETURN_NEQ(erts_cmp_atoms((AN)->sysname, (BN)->sysname)); \
  2435. ASSERT((AN)->creation != (BN)->creation); \
  2436. if ((AN)->creation != 0 && (BN)->creation != 0) \
  2437. RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \
  2438. } \
  2439. } while (0)
  2440. bodyrecur:
  2441. j = 0;
  2442. tailrecur:
  2443. if (is_same(a,b)) { /* Equal values or pointers. */
  2444. goto pop_next;
  2445. }
  2446. tailrecur_ne:
  2447. /* deal with majority (?) cases by brute-force */
  2448. if (is_atom(a)) {
  2449. if (is_atom(b)) {
  2450. ON_CMP_GOTO(erts_cmp_atoms(a, b));
  2451. }
  2452. } else if (is_both_small(a, b)) {
  2453. ON_CMP_GOTO(signed_val(a) - signed_val(b));
  2454. }
  2455. /*
  2456. * Take care of cases where the types are the same.
  2457. */
  2458. a_tag = 42; /* Suppress warning */
  2459. switch (primary_tag(a)) {
  2460. case TAG_PRIMARY_IMMED1:
  2461. switch ((a & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
  2462. case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE):
  2463. if (is_internal_port(b)) {
  2464. bnode = erts_this_node;
  2465. bdata = internal_port_data(b);
  2466. } else if (is_external_port(b)) {
  2467. bnode = external_port_node(b);
  2468. bdata = external_port_data(b);
  2469. } else {
  2470. a_tag = PORT_DEF;
  2471. goto mixed_types;
  2472. }
  2473. anode = erts_this_node;
  2474. adata = internal_port_data(a);
  2475. port_common:
  2476. CMP_NODES(anode, bnode);
  2477. ON_CMP_GOTO((Sint)(adata - bdata));
  2478. case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE):
  2479. if (is_internal_pid(b)) {
  2480. bnode = erts_this_node;
  2481. bdata = internal_pid_data(b);
  2482. } else if (is_external_pid(b)) {
  2483. bnode = external_pid_node(b);
  2484. bdata = external_pid_data(b);
  2485. } else {
  2486. a_tag = PID_DEF;
  2487. goto mixed_types;
  2488. }
  2489. anode = erts_this_node;
  2490. adata = internal_pid_data(a);
  2491. pid_common:
  2492. if (adata != bdata) {
  2493. RETURN_NEQ(adata < bdata ? -1 : 1);
  2494. }
  2495. CMP_NODES(anode, bnode);
  2496. goto pop_next;
  2497. case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
  2498. a_tag = SMALL_DEF;
  2499. goto mixed_types;
  2500. case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
  2501. switch ((a & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
  2502. case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE):
  2503. a_tag = ATOM_DEF;
  2504. goto mixed_types;
  2505. case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE):
  2506. a_tag = NIL_DEF;
  2507. goto mixed_types;
  2508. }
  2509. }
  2510. }
  2511. case TAG_PRIMARY_LIST:
  2512. if (is_not_list(b)) {
  2513. a_tag = LIST_DEF;
  2514. goto mixed_types;
  2515. }
  2516. aa = list_val(a);
  2517. bb = list_val(b);
  2518. while (1) {
  2519. Eterm atmp = CAR(aa);
  2520. Eterm btmp = CAR(bb);
  2521. if (!is_same(atmp,btmp)) {
  2522. WSTACK_PUSH2(stack,(UWord) CDR(bb),(UWord) CDR(aa));
  2523. a = atmp;
  2524. b = btmp;
  2525. goto tailrecur_ne;
  2526. }
  2527. atmp = CDR(aa);
  2528. btmp = CDR(bb);
  2529. if (is_same(atmp,btmp)) {
  2530. goto pop_next;
  2531. }
  2532. if (is_not_list(atmp) || is_not_list(btmp)) {
  2533. a = atmp;
  2534. b = btmp;
  2535. goto tailrecur_ne;
  2536. }
  2537. aa = list_val(atmp);
  2538. bb = list_val(btmp);
  2539. }
  2540. case TAG_PRIMARY_BOXED:
  2541. {
  2542. Eterm ahdr = *boxed_val(a);
  2543. switch ((ahdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
  2544. case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
  2545. if (!is_tuple(b)) {
  2546. a_tag = TUPLE_DEF;
  2547. goto mixed_types;
  2548. }
  2549. aa = tuple_val(a);
  2550. bb = tuple_val(b);
  2551. /* compare the arities */
  2552. i = arityval(ahdr); /* get the arity*/
  2553. if (i != arityval(*bb)) {
  2554. RETURN_NEQ((int)(i - arityval(*bb)));
  2555. }
  2556. if (i == 0) {
  2557. goto pop_next;
  2558. }
  2559. ++aa;
  2560. ++bb;
  2561. goto term_array;
  2562. case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) :
  2563. {
  2564. struct erts_cmp_hashmap_state* sp;
  2565. if (is_flatmap_header(ahdr)) {
  2566. if (!is_flatmap(b)) {
  2567. if (is_hashmap(b)) {
  2568. aa = (Eterm *)flatmap_val(a);
  2569. i = flatmap_get_size((flatmap_t*)aa) - hashmap_size(b);
  2570. ASSERT(i != 0);
  2571. RETURN_NEQ(i);
  2572. }
  2573. a_tag = MAP_DEF;
  2574. goto mixed_types;
  2575. }
  2576. aa = (Eterm *)flatmap_val(a);
  2577. bb = (Eterm *)flatmap_val(b);
  2578. i = flatmap_get_size((flatmap_t*)aa);
  2579. if (i != flatmap_get_size((flatmap_t*)bb)) {
  2580. RETURN_NEQ((int)(i - flatmap_get_size((flatmap_t*)bb)));
  2581. }
  2582. if (i == 0) {
  2583. goto pop_next;
  2584. }
  2585. aa += 2;
  2586. bb += 2;
  2587. if (exact) {
  2588. i += 1; /* increment for tuple-keys */
  2589. goto term_array;
  2590. }
  2591. else {
  2592. /* Value array */
  2593. WSTACK_PUSH3(stack,(UWord)(bb+1),(UWord)(aa+1),TERM_ARRAY_OP_WORD(i));
  2594. /* Switch back from 'exact' key compare */
  2595. WSTACK_PUSH(stack,OP_WORD(SWITCH_EXACT_OFF_OP));
  2596. /* Now do 'exact' compare of key tuples */
  2597. a = *aa;
  2598. b = *bb;
  2599. exact = 1;
  2600. goto bodyrecur;
  2601. }
  2602. }
  2603. if (!is_hashmap(b)) {
  2604. if (is_flatmap(b)) {
  2605. bb = (Eterm *)flatmap_val(b);
  2606. i = hashmap_size(a) - flatmap_get_size((flatmap_t*)bb);
  2607. ASSERT(i != 0);
  2608. RETURN_NEQ(i);
  2609. }
  2610. a_tag = MAP_DEF;
  2611. goto mixed_types;
  2612. }
  2613. i = hashmap_size(a) - hashmap_size(b);
  2614. if (i) {
  2615. RETURN_NEQ(i);
  2616. }
  2617. if (hashmap_size(a) == 0) {
  2618. goto pop_next;
  2619. }
  2620. /* Hashmap compare strategy:
  2621. Phase 1. While keys are identical
2622. Do synchronous stepping through the leaves of both trees in hash
  2623. order. Maintain value compare result of minimal key.
  2624. Phase 2. If key diff was found in phase 1
  2625. Ignore values from now on.
2626. Continue iterating the trees by always advancing the one
2627. lagging behind hash-wise. Identical keys are skipped.
2628. A minimal key can only be a candidate tie-breaker if we
  2629. have passed that hash value in the other tree (which means
  2630. the key did not exist in the other tree).
  2631. */
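/*
 * Concrete illustration (hypothetical maps): comparing #{a => 1, b => 2}
 * with #{a => 1, b => 3} never leaves phase 1; all keys match and the
 * result is the value compare at the term-wise minimal key whose values
 * differ (here b). Comparing #{a => 1, b => 2} with #{a => 1, c => 2}
 * hits the key diff at b vs c and enters phase 2, where values are
 * ignored and the minimal key missing from the other tree breaks the
 * tie.
 */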
  2632. sp = PSTACK_PUSH(hmap_stack);
  2633. hashmap_iterator_init(&stack, a, 0);
  2634. hashmap_iterator_init(&b_stack, b, 0);
  2635. sp->ap = hashmap_iterator_next(&stack);
  2636. sp->bp = hashmap_iterator_next(&b_stack);
  2637. sp->cmp_res = 0;
  2638. ASSERT(sp->ap && sp->bp);
  2639. a = CAR(sp->ap);
  2640. b = CAR(sp->bp);
  2641. sp->was_exact = exact;
  2642. exact = 1;
  2643. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
  2644. sp->wstack_rollback = WSTACK_COUNT(stack);
  2645. goto bodyrecur;
  2646. }
  2647. case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
  2648. if (!is_float(b)) {
  2649. a_tag = FLOAT_DEF;
  2650. goto mixed_types;
  2651. } else {
  2652. FloatDef af;
  2653. FloatDef bf;
  2654. GET_DOUBLE(a, af);
  2655. GET_DOUBLE(b, bf);
  2656. ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd));
  2657. }
  2658. case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
  2659. case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
  2660. if (!is_big(b)) {
  2661. a_tag = BIG_DEF;
  2662. goto mixed_types;
  2663. }
  2664. ON_CMP_GOTO(big_comp(a, b));
  2665. case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE):
  2666. if (!is_export(b)) {
  2667. a_tag = EXPORT_DEF;
  2668. goto mixed_types;
  2669. } else {
  2670. Export* a_exp = *((Export **) (export_val(a) + 1));
  2671. Export* b_exp = *((Export **) (export_val(b) + 1));
  2672. if ((j = erts_cmp_atoms(a_exp->info.mfa.module,
  2673. b_exp->info.mfa.module)) != 0) {
  2674. RETURN_NEQ(j);
  2675. }
  2676. if ((j = erts_cmp_atoms(a_exp->info.mfa.function,
  2677. b_exp->info.mfa.function)) != 0) {
  2678. RETURN_NEQ(j);
  2679. }
  2680. ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity);
  2681. }
  2682. break;
  2683. case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
  2684. if (!is_fun(b)) {
  2685. a_tag = FUN_DEF;
  2686. goto mixed_types;
  2687. } else {
  2688. ErlFunThing* f1 = (ErlFunThing *) fun_val(a);
  2689. ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
  2690. Sint diff;
  2691. diff = erts_cmp_atoms((f1->fe)->module, (f2->fe)->module);
  2692. if (diff != 0) {
  2693. RETURN_NEQ(diff);
  2694. }
  2695. diff = f1->fe->old_index - f2->fe->old_index;
  2696. if (diff != 0) {
  2697. RETURN_NEQ(diff);
  2698. }
  2699. diff = f1->fe->old_uniq - f2->fe->old_uniq;
  2700. if (diff != 0) {
  2701. RETURN_NEQ(diff);
  2702. }
  2703. diff = f1->num_free - f2->num_free;
  2704. if (diff != 0) {
  2705. RETURN_NEQ(diff);
  2706. }
  2707. i = f1->num_free;
  2708. if (i == 0) goto pop_next;
  2709. aa = f1->env;
  2710. bb = f2->env;
  2711. goto term_array;
  2712. }
  2713. case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE):
  2714. if (is_internal_pid(b)) {
  2715. bnode = erts_this_node;
  2716. bdata = internal_pid_data(b);
  2717. } else if (is_external_pid(b)) {
  2718. bnode = external_pid_node(b);
  2719. bdata = external_pid_data(b);
  2720. } else {
  2721. a_tag = EXTERNAL_PID_DEF;
  2722. goto mixed_types;
  2723. }
  2724. anode = external_pid_node(a);
  2725. adata = external_pid_data(a);
  2726. goto pid_common;
  2727. case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE):
  2728. if (is_internal_port(b)) {
  2729. bnode = erts_this_node;
  2730. bdata = internal_port_data(b);
  2731. } else if (is_external_port(b)) {
  2732. bnode = external_port_node(b);
  2733. bdata = external_port_data(b);
  2734. } else {
  2735. a_tag = EXTERNAL_PORT_DEF;
  2736. goto mixed_types;
  2737. }
  2738. anode = external_port_node(a);
  2739. adata = external_port_data(a);
  2740. goto port_common;
  2741. case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
  2742. /*
  2743. * Note! When comparing refs we need to compare ref numbers
  2744. * (32-bit words), *not* ref data words.
  2745. */
  2746. if (is_internal_ref(b)) {
  2747. bnode = erts_this_node;
  2748. blen = internal_ref_no_numbers(b);
  2749. bnum = internal_ref_numbers(b);
  2750. } else if(is_external_ref(b)) {
  2751. ExternalThing* bthing = external_thing_ptr(b);
  2752. bnode = bthing->node;
  2753. bnum = external_thing_ref_numbers(bthing);
  2754. blen = external_thing_ref_no_numbers(bthing);
  2755. } else {
  2756. a_tag = REF_DEF;
  2757. goto mixed_types;
  2758. }
  2759. anode = erts_this_node;
  2760. alen = internal_ref_no_numbers(a);
  2761. anum = internal_ref_numbers(a);
  2762. ref_common:
  2763. CMP_NODES(anode, bnode);
  2764. ASSERT(alen > 0 && blen > 0);
  2765. if (alen != blen) {
  2766. if (alen > blen) {
  2767. do {
  2768. if (anum[alen - 1] != 0)
  2769. RETURN_NEQ(1);
  2770. alen--;
  2771. } while (alen > blen);
  2772. }
  2773. else {
  2774. do {
  2775. if (bnum[blen - 1] != 0)
  2776. RETURN_NEQ(-1);
  2777. blen--;
  2778. } while (alen < blen);
  2779. }
  2780. }
  2781. ASSERT(alen == blen);
  2782. for (i = (Sint) alen - 1; i >= 0; i--)
  2783. if (anum[i] != bnum[i])
  2784. RETURN_NEQ(anum[i] < bnum[i] ? -1 : 1);
  2785. goto pop_next;
  2786. case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
  2787. if (is_internal_ref(b)) {
  2788. bnode = erts_this_node;
  2789. blen = internal_ref_no_numbers(b);
  2790. bnum = internal_ref_numbers(b);
  2791. } else if (is_external_ref(b)) {
  2792. ExternalThing* bthing = external_thing_ptr(b);
  2793. bnode = bthing->node;
  2794. bnum = external_thing_ref_numbers(bthing);
  2795. blen = external_thing_ref_no_numbers(bthing);
  2796. } else {
  2797. a_tag = EXTERNAL_REF_DEF;
  2798. goto mixed_types;
  2799. }
  2800. {
  2801. ExternalThing* athing = external_thing_ptr(a);
  2802. anode = athing->node;
  2803. anum = external_thing_ref_numbers(athing);
  2804. alen = external_thing_ref_no_numbers(athing);
  2805. }
  2806. goto ref_common;
  2807. default:
  2808. /* Must be a binary */
  2809. ASSERT(is_binary(a));
  2810. if (!is_binary(b)) {
  2811. a_tag = BINARY_DEF;
  2812. goto mixed_types;
  2813. } else {
  2814. Uint a_size = binary_size(a);
  2815. Uint b_size = binary_size(b);
  2816. Uint a_bitsize;
  2817. Uint b_bitsize;
  2818. Uint a_bitoffs;
  2819. Uint b_bitoffs;
  2820. Uint min_size;
  2821. int cmp;
  2822. byte* a_ptr;
  2823. byte* b_ptr;
  2824. if (eq_only && a_size != b_size) {
  2825. RETURN_NEQ(a_size - b_size);
  2826. }
  2827. ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
  2828. ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
  2829. if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
  2830. min_size = (a_size < b_size) ? a_size : b_size;
  2831. if ((cmp = sys_memcmp(a_ptr, b_ptr, min_size)) != 0) {
  2832. RETURN_NEQ(cmp);
  2833. }
  2834. }
  2835. else {
  2836. a_size = (a_size << 3) + a_bitsize;
  2837. b_size = (b_size << 3) + b_bitsize;
  2838. min_size = (a_size < b_size) ? a_size : b_size;
  2839. if ((cmp = erts_cmp_bits(a_ptr,a_bitoffs,
  2840. b_ptr,b_bitoffs,min_size)) != 0) {
  2841. RETURN_NEQ(cmp);
  2842. }
  2843. }
  2844. ON_CMP_GOTO((Sint)(a_size - b_size));
  2845. }
  2846. }
  2847. }
  2848. }
  2849. /*
  2850. * Take care of the case that the tags are different.
  2851. */
  2852. mixed_types:
  2853. {
  2854. FloatDef f1, f2;
  2855. Eterm big;
  2856. Eterm aw = a;
  2857. Eterm bw = b;
  2858. #define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
  2859. #define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
  2860. #define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */
  2861. Eterm big_buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)];
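/*
 * The limits above delimit the range in which a double still represents
 * every integer exactly (|x| <= 2^53 - 2). For example, a small 3
 * compared against 3.0 stays on the lossless branch and compares equal
 * when exact == 0, while magnitudes outside the range are decided via
 * the bignum conversion below to avoid rounding artifacts.
 */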
  2862. b_tag = tag_val_def(bw);
  2863. switch(_NUMBER_CODE(a_tag, b_tag)) {
  2864. case SMALL_BIG:
  2865. j = big_sign(bw) ? 1 : -1;
  2866. break;
  2867. case BIG_SMALL:
  2868. j = big_sign(aw) ? -1 : 1;
  2869. break;
  2870. case SMALL_FLOAT:
  2871. if (exact) goto exact_fall_through;
  2872. GET_DOUBLE(bw, f2);
  2873. if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
  2874. /* Float is within the no loss limit */
  2875. f1.fd = signed_val(aw);
  2876. j = erts_float_comp(f1.fd, f2.fd);
  2877. }
  2878. #if ERTS_SIZEOF_ETERM == 8
  2879. else if (f2.fd > (double) (MAX_SMALL + 1)) {
  2880. /* Float is a positive bignum, i.e. bigger */
  2881. j = -1;
  2882. } else if (f2.fd < (double) (MIN_SMALL - 1)) {
  2883. /* Float is a negative bignum, i.e. smaller */
  2884. j = 1;
  2885. } else {
  2886. /* Float is a Sint but less precise */
  2887. j = signed_val(aw) - (Sint) f2.fd;
  2888. }
  2889. #else
  2890. else {
  2891. /* If float is positive it is bigger than small */
  2892. j = (f2.fd > 0.0) ? -1 : 1;
  2893. }
  2894. #endif /* ERTS_SIZEOF_ETERM == 8 */
  2895. break;
  2896. case FLOAT_BIG:
  2897. if (exact) goto exact_fall_through;
  2898. {
  2899. Wterm tmp = aw;
  2900. aw = bw;
  2901. bw = tmp;
  2902. }/* fall through */
  2903. case BIG_FLOAT:
  2904. if (exact) goto exact_fall_through;
  2905. GET_DOUBLE(bw, f2);
  2906. if ((f2.fd < (double) (MAX_SMALL + 1))
  2907. && (f2.fd > (double) (MIN_SMALL - 1))) {
  2908. /* Float is a Sint */
  2909. j = big_sign(aw) ? -1 : 1;
  2910. } else if (big_arity(aw) > BIG_ARITY_FLOAT_MAX
  2911. || pow(2.0,(big_arity(aw)-1)*D_EXP) > fabs(f2.fd)) {
  2912. /* If bignum size shows that it is bigger than the abs float */
  2913. j = big_sign(aw) ? -1 : 1;
  2914. } else if (big_arity(aw) < BIG_ARITY_FLOAT_MAX
  2915. && (pow(2.0,(big_arity(aw))*D_EXP)-1.0) < fabs(f2.fd)) {
  2916. /* If bignum size shows that it is smaller than the abs float */
  2917. j = f2.fd < 0 ? 1 : -1;
  2918. } else if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
  2919. /* Float is within the no loss limit */
  2920. if (big_to_double(aw, &f1.fd) < 0) {
  2921. j = big_sign(aw) ? -1 : 1;
  2922. } else {
  2923. j = erts_float_comp(f1.fd, f2.fd);
  2924. }
  2925. } else {
  2926. big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
  2927. j = big_comp(aw, big);
  2928. }
  2929. if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) {
  2930. j = -j;
  2931. }
  2932. break;
  2933. case FLOAT_SMALL:
  2934. if (exact) goto exact_fall_through;
  2935. GET_DOUBLE(aw, f1);
  2936. if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
  2937. /* Float is within the no loss limit */
  2938. f2.fd = signed_val(bw);
  2939. j = erts_float_comp(f1.fd, f2.fd);
  2940. }
  2941. #if ERTS_SIZEOF_ETERM == 8
  2942. else if (f1.fd > (double) (MAX_SMALL + 1)) {
  2943. /* Float is a positive bignum, i.e. bigger */
  2944. j = 1;
  2945. } else if (f1.fd < (double) (MIN_SMALL - 1)) {
  2946. /* Float is a negative bignum, i.e. smaller */
  2947. j = -1;
  2948. } else {
2949. /* Float is a Sint but less precise */
  2950. j = (Sint) f1.fd - signed_val(bw);
  2951. }
  2952. #else
  2953. else {
  2954. /* If float is positive it is bigger than small */
  2955. j = (f1.fd > 0.0) ? 1 : -1;
  2956. }
  2957. #endif /* ERTS_SIZEOF_ETERM == 8 */
  2958. break;
  2959. exact_fall_through:
  2960. default:
  2961. j = b_tag - a_tag;
  2962. }
  2963. }
  2964. if (j == 0) {
  2965. goto pop_next;
  2966. } else {
  2967. goto not_equal;
  2968. }
  2969. term_array: /* arrays in 'aa' and 'bb', length in 'i' */
  2970. ASSERT(i>0);
  2971. while (--i) {
  2972. a = *aa++;
  2973. b = *bb++;
  2974. if (!is_same(a, b)) {
  2975. if (is_atom(a) && is_atom(b)) {
  2976. if ((j = erts_cmp_atoms(a, b)) != 0) {
  2977. goto not_equal;
  2978. }
  2979. } else if (is_both_small(a, b)) {
  2980. if ((j = signed_val(a)-signed_val(b)) != 0) {
  2981. goto not_equal;
  2982. }
  2983. } else {
  2984. WSTACK_PUSH3(stack, (UWord)bb, (UWord)aa, TERM_ARRAY_OP_WORD(i));
  2985. goto tailrecur_ne;
  2986. }
  2987. }
  2988. }
  2989. a = *aa;
  2990. b = *bb;
  2991. goto tailrecur;
  2992. pop_next:
  2993. if (!WSTACK_ISEMPTY(stack)) {
  2994. UWord something = WSTACK_POP(stack);
  2995. struct erts_cmp_hashmap_state* sp;
  2996. if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* an operation */
  2997. switch (GET_OP(something)) {
  2998. case TERM_ARRAY_OP:
  2999. i = GET_OP_ARG(something);
  3000. aa = (Eterm*)WSTACK_POP(stack);
  3001. bb = (Eterm*) WSTACK_POP(stack);
  3002. goto term_array;
  3003. case SWITCH_EXACT_OFF_OP:
  3004. /* Done with exact compare of map keys, switch back */
  3005. ASSERT(exact);
  3006. exact = 0;
  3007. goto pop_next;
  3008. case HASHMAP_PHASE1_ARE_KEYS_EQUAL: {
  3009. sp = PSTACK_TOP(hmap_stack);
  3010. if (j) {
  3011. /* Key diff found, enter phase 2 */
  3012. if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
  3013. sp->min_key = CAR(sp->ap);
  3014. sp->cmp_res = -1;
  3015. sp->ap = hashmap_iterator_next(&stack);
  3016. }
  3017. else {
  3018. sp->min_key = CAR(sp->bp);
  3019. sp->cmp_res = 1;
  3020. sp->bp = hashmap_iterator_next(&b_stack);
  3021. }
  3022. exact = 1; /* only exact key compares in phase 2 */
  3023. goto case_HASHMAP_PHASE2_LOOP;
  3024. }
  3025. /* No key diff found so far, compare values if min key */
  3026. if (sp->cmp_res) {
  3027. a = CAR(sp->ap);
  3028. b = sp->min_key;
  3029. exact = 1;
  3030. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_IS_MIN_KEY));
  3031. sp->wstack_rollback = WSTACK_COUNT(stack);
  3032. goto bodyrecur;
  3033. }
  3034. /* no min key-value found yet */
  3035. a = CDR(sp->ap);
  3036. b = CDR(sp->bp);
  3037. exact = sp->was_exact;
  3038. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
  3039. sp->wstack_rollback = WSTACK_COUNT(stack);
  3040. goto bodyrecur;
  3041. }
  3042. case HASHMAP_PHASE1_IS_MIN_KEY:
  3043. sp = PSTACK_TOP(hmap_stack);
  3044. if (j < 0) {
  3045. a = CDR(sp->ap);
  3046. b = CDR(sp->bp);
  3047. exact = sp->was_exact;
  3048. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
  3049. sp->wstack_rollback = WSTACK_COUNT(stack);
  3050. goto bodyrecur;
  3051. }
  3052. goto case_HASHMAP_PHASE1_LOOP;
  3053. case HASHMAP_PHASE1_CMP_VALUES:
  3054. sp = PSTACK_TOP(hmap_stack);
  3055. if (j) {
  3056. sp->cmp_res = j;
  3057. sp->min_key = CAR(sp->ap);
  3058. }
  3059. case_HASHMAP_PHASE1_LOOP:
  3060. sp->ap = hashmap_iterator_next(&stack);
  3061. sp->bp = hashmap_iterator_next(&b_stack);
  3062. if (!sp->ap) {
  3063. /* end of maps with identical keys */
  3064. ASSERT(!sp->bp);
  3065. j = sp->cmp_res;
  3066. exact = sp->was_exact;
  3067. (void) PSTACK_POP(hmap_stack);
  3068. ON_CMP_GOTO(j);
  3069. }
  3070. a = CAR(sp->ap);
  3071. b = CAR(sp->bp);
  3072. exact = 1;
  3073. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
  3074. sp->wstack_rollback = WSTACK_COUNT(stack);
  3075. goto bodyrecur;
  3076. case_HASHMAP_PHASE2_LOOP:
  3077. if (sp->ap && sp->bp) {
  3078. a = CAR(sp->ap);
  3079. b = CAR(sp->bp);
  3080. ASSERT(exact);
  3081. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_ARE_KEYS_EQUAL));
  3082. sp->wstack_rollback = WSTACK_COUNT(stack);
  3083. goto bodyrecur;
  3084. }
  3085. goto case_HASHMAP_PHASE2_NEXT_STEP;
  3086. case HASHMAP_PHASE2_ARE_KEYS_EQUAL:
  3087. sp = PSTACK_TOP(hmap_stack);
  3088. if (j == 0) {
  3089. /* keys are equal, skip them */
  3090. sp->ap = hashmap_iterator_next(&stack);
  3091. sp->bp = hashmap_iterator_next(&b_stack);
  3092. goto case_HASHMAP_PHASE2_LOOP;
  3093. }
  3094. /* fall through */
  3095. case_HASHMAP_PHASE2_NEXT_STEP:
  3096. if (sp->ap || sp->bp) {
  3097. if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
  3098. ASSERT(sp->ap);
  3099. a = CAR(sp->ap);
  3100. b = sp->min_key;
  3101. ASSERT(exact);
  3102. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_A));
  3103. }
  3104. else { /* hash_cmp > 0 */
  3105. ASSERT(sp->bp);
  3106. a = CAR(sp->bp);
  3107. b = sp->min_key;
  3108. ASSERT(exact);
  3109. WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_B));
  3110. }
  3111. sp->wstack_rollback = WSTACK_COUNT(stack);
  3112. goto bodyrecur;
  3113. }
  3114. /* End of both maps */
  3115. j = sp->cmp_res;
  3116. exact = sp->was_exact;
  3117. (void) PSTACK_POP(hmap_stack);
  3118. ON_CMP_GOTO(j);
  3119. case HASHMAP_PHASE2_IS_MIN_KEY_A:
  3120. sp = PSTACK_TOP(hmap_stack);
  3121. if (j < 0) {
  3122. sp->min_key = CAR(sp->ap);
  3123. sp->cmp_res = -1;
  3124. }
  3125. sp->ap = hashmap_iterator_next(&stack);
  3126. goto case_HASHMAP_PHASE2_LOOP;
  3127. case HASHMAP_PHASE2_IS_MIN_KEY_B:
  3128. sp = PSTACK_TOP(hmap_stack);
  3129. if (j < 0) {
  3130. sp->min_key = CAR(sp->bp);
  3131. sp->cmp_res = 1;
  3132. }
  3133. sp->bp = hashmap_iterator_next(&b_stack);
  3134. goto case_HASHMAP_PHASE2_LOOP;
  3135. default:
  3136. ASSERT(!"Invalid cmp op");
  3137. } /* switch */
  3138. }
  3139. a = (Eterm) something;
  3140. b = (Eterm) WSTACK_POP(stack);
  3141. goto tailrecur;
  3142. }
  3143. ASSERT(PSTACK_IS_EMPTY(hmap_stack));
  3144. PSTACK_DESTROY(hmap_stack);
  3145. WSTACK_DESTROY(stack);
  3146. WSTACK_DESTROY(b_stack);
  3147. return 0;
  3148. not_equal:
  3149. if (!PSTACK_IS_EMPTY(hmap_stack) && !eq_only) {
  3150. WSTACK_ROLLBACK(stack, PSTACK_TOP(hmap_stack)->wstack_rollback);
  3151. goto pop_next;
  3152. }
  3153. PSTACK_DESTROY(hmap_stack);
  3154. WSTACK_DESTROY(stack);
  3155. WSTACK_DESTROY(b_stack);
  3156. return j;
  3157. #undef CMP_NODES
  3158. }
  3159. Eterm
  3160. store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
  3161. {
  3162. struct erl_off_heap_header *ohhp;
  3163. Uint i;
  3164. Uint size;
  3165. Eterm *from_hp;
  3166. Eterm *to_hp = *hpp;
  3167. ASSERT(is_external(ns) || is_internal_ref(ns));
  3168. from_hp = boxed_val(ns);
  3169. size = thing_arityval(*from_hp) + 1;
  3170. *hpp += size;
  3171. for(i = 0; i < size; i++)
  3172. to_hp[i] = from_hp[i];
  3173. if (is_external_header(*from_hp)) {
  3174. ExternalThing *etp = (ExternalThing *) from_hp;
  3175. ASSERT(is_external(ns));
  3176. erts_ref_node_entry(etp->node, 2, make_boxed(to_hp));
  3177. }
  3178. else if (is_ordinary_ref_thing(from_hp))
  3179. return make_internal_ref(to_hp);
  3180. else {
  3181. ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
  3182. ErtsMagicBinary *mb = mreft->mb;
  3183. ASSERT(is_magic_ref_thing(from_hp));
  3184. erts_refc_inc(&mb->intern.refc, 2);
  3185. OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
  3186. }
  3187. ohhp = (struct erl_off_heap_header*) to_hp;
  3188. ohhp->next = oh->first;
  3189. oh->first = ohhp;
  3190. return make_boxed(to_hp);
  3191. }
  3192. Eterm
  3193. store_external_or_ref_in_proc_(Process *proc, Eterm ns)
  3194. {
  3195. Uint sz;
  3196. Uint *hp;
  3197. ASSERT(is_external(ns) || is_internal_ref(ns));
  3198. sz = NC_HEAP_SIZE(ns);
  3199. ASSERT(sz > 0);
  3200. hp = HAlloc(proc, sz);
  3201. return store_external_or_ref_(&hp, &MSO(proc), ns);
  3202. }
  3203. void bin_write(fmtfn_t to, void *to_arg, byte* buf, size_t sz)
  3204. {
  3205. size_t i;
  3206. for (i=0;i<sz;i++) {
  3207. if (IS_DIGIT(buf[i]))
  3208. erts_print(to, to_arg, "%d,", buf[i]);
  3209. else if (IS_PRINT(buf[i])) {
  3210. erts_print(to, to_arg, "%c,", buf[i]);
  3211. }
  3212. else
  3213. erts_print(to, to_arg, "%d,", buf[i]);
  3214. }
  3215. erts_putc(to, to_arg, '\n');
  3216. }
3217. /* Fill buf with the contents of the bytelist 'list';
3218. * return the number of chars in the list,
  3219. * or -1 for type error
  3220. * or -2 for not enough buffer space (buffer contains truncated result)
  3221. */
  3222. Sint
  3223. intlist_to_buf(Eterm list, char *buf, Sint len)
  3224. {
  3225. Eterm* listptr;
  3226. Sint sz = 0;
  3227. if (is_nil(list))
  3228. return 0;
  3229. if (is_not_list(list))
  3230. return -1;
  3231. listptr = list_val(list);
  3232. while (sz < len) {
  3233. if (!is_byte(*listptr))
  3234. return -1;
  3235. buf[sz++] = unsigned_val(*listptr);
  3236. if (is_nil(*(listptr + 1)))
  3237. return(sz);
  3238. if (is_not_list(*(listptr + 1)))
  3239. return -1;
  3240. listptr = list_val(*(listptr + 1));
  3241. }
  3242. return -2; /* not enough space */
  3243. }
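/*
 * Hedged usage sketch: callers typically treat -2 as "grow the buffer
 * and retry":
 *
 *     char buf[256];
 *     Sint n = intlist_to_buf(list, buf, sizeof(buf));
 *     if (n >= 0) { ... n valid bytes in buf, not NUL terminated ... }
 *
 * Improper lists and non-byte elements yield -1 as documented above.
 */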
  3244. /** @brief Fill buf with the UTF8 contents of the unicode list
  3245. * @param len Max number of characters to write.
  3246. * @param written NULL or bytes written.
  3247. * @return 0 ok,
  3248. * -1 type error,
  3249. * -2 list too long, only \c len characters written
  3250. */
  3251. int
  3252. erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written)
  3253. {
  3254. Eterm* listptr;
  3255. Sint sz = 0;
  3256. Sint val;
  3257. int res;
  3258. while (1) {
  3259. if (is_nil(list)) {
  3260. res = 0;
  3261. break;
  3262. }
  3263. if (is_not_list(list)) {
  3264. res = -1;
  3265. break;
  3266. }
  3267. listptr = list_val(list);
  3268. if (len-- <= 0) {
  3269. res = -2;
  3270. break;
  3271. }
  3272. if (is_not_small(CAR(listptr))) {
  3273. res = -1;
  3274. break;
  3275. }
  3276. val = signed_val(CAR(listptr));
  3277. if (0 <= val && val < 0x80) {
  3278. buf[sz] = val;
  3279. sz++;
  3280. } else if (val < 0x800) {
  3281. buf[sz+0] = 0xC0 | (val >> 6);
  3282. buf[sz+1] = 0x80 | (val & 0x3F);
  3283. sz += 2;
  3284. } else if (val < 0x10000UL) {
  3285. if (0xD800 <= val && val <= 0xDFFF) {
  3286. res = -1;
  3287. break;
  3288. }
  3289. buf[sz+0] = 0xE0 | (val >> 12);
  3290. buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
  3291. buf[sz+2] = 0x80 | (val & 0x3F);
  3292. sz += 3;
  3293. } else if (val < 0x110000) {
  3294. buf[sz+0] = 0xF0 | (val >> 18);
  3295. buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
  3296. buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
  3297. buf[sz+3] = 0x80 | (val & 0x3F);
  3298. sz += 4;
  3299. } else {
  3300. res = -1;
  3301. break;
  3302. }
  3303. list = CDR(listptr);
  3304. }
  3305. if (written)
  3306. *written = sz;
  3307. return res;
  3308. }
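/*
 * Worked examples for the encoder branches above: U+0041 ('A') is the
 * single byte 0x41; U+00E9 becomes 0xC3 0xA9; U+20AC becomes
 * 0xE2 0x82 0xAC; U+1F600 becomes 0xF0 0x9F 0x98 0x80. Surrogates
 * U+D800..U+DFFF and code points >= 0x110000 are rejected with -1.
 * Note that 'len' limits the number of characters consumed while
 * '*written' reports the number of bytes produced.
 */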
  3309. Sint
  3310. erts_unicode_list_to_buf_len(Eterm list)
  3311. {
  3312. Eterm* listptr;
  3313. Sint sz = 0;
  3314. if (is_nil(list)) {
  3315. return 0;
  3316. }
  3317. if (is_not_list(list)) {
  3318. return -1;
  3319. }
  3320. listptr = list_val(list);
  3321. while (1) {
  3322. Sint val;
  3323. if (is_not_small(CAR(listptr))) {
  3324. return -1;
  3325. }
  3326. val = signed_val(CAR(listptr));
  3327. if (0 <= val && val < 0x80) {
  3328. sz++;
  3329. } else if (val < 0x800) {
  3330. sz += 2;
  3331. } else if (val < 0x10000UL) {
  3332. if (0xD800 <= val && val <= 0xDFFF) {
  3333. return -1;
  3334. }
  3335. sz += 3;
  3336. } else if (val < 0x110000) {
  3337. sz += 4;
  3338. } else {
  3339. return -1;
  3340. }
  3341. list = CDR(listptr);
  3342. if (is_nil(list)) {
  3343. return sz;
  3344. }
  3345. if (is_not_list(list)) {
  3346. return -1;
  3347. }
  3348. listptr = list_val(list);
  3349. }
  3350. }
  3351. /* Prints an integer in the given base, returning the number of digits printed.
  3352. *
  3353. * (*buf) is a pointer to the buffer, and is set to the start of the string
  3354. * when returning. */
  3355. int Sint_to_buf(Sint n, int base, char **buf, size_t buf_size)
  3356. {
  3357. char *p = &(*buf)[buf_size - 1];
  3358. int sign = 0, size = 0;
  3359. ASSERT(base >= 2 && base <= 36);
  3360. if (n == 0) {
  3361. *p-- = '0';
  3362. size++;
  3363. } else if (n < 0) {
  3364. sign = 1;
  3365. n = -n;
  3366. }
  3367. while (n != 0) {
  3368. int digit = n % base;
  3369. if (digit < 10) {
  3370. *p-- = '0' + digit;
  3371. } else {
  3372. *p-- = 'A' + (digit - 10);
  3373. }
  3374. size++;
  3375. n /= base;
  3376. }
  3377. if (sign) {
  3378. *p-- = '-';
  3379. size++;
  3380. }
  3381. *buf = p + 1;
  3382. return size;
  3383. }
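/*
 * Hedged usage sketch: digits are emitted from the end of the caller's
 * buffer, the result is not NUL terminated, and *buf is moved to the
 * first character:
 *
 *     char storage[32];
 *     char *p = storage;
 *     int n = Sint_to_buf(-255, 16, &p, sizeof(storage));
 *
 * Afterwards n == 3 and p points at the characters '-', 'F', 'F' at the
 * end of storage.
 */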
3384. /* Build a list of integers in some safe memory area.
3385. ** Memory must be pre-allocated prior to the call, 2*len words in size.
3386. ** hpp is a pointer to the "heap" pointer; on return
3387. ** this pointer is updated to point past the list.
3388. */
  3389. Eterm
  3390. buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
  3391. {
  3392. Eterm* hp = *hpp;
  3393. size_t i = len;
  3394. while(i != 0) {
  3395. --i;
  3396. tail = CONS(hp, make_small((Uint)(byte)buf[i]), tail);
  3397. hp += 2;
  3398. }
  3399. *hpp = hp;
  3400. return tail;
  3401. }
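/*
 * Hedged usage sketch: the list is built back to front so the cells end
 * up in buffer order, consuming exactly 2*len heap words:
 *
 *     Eterm *hp = HAlloc(p, 2 * 2);
 *     Eterm list = buf_to_intlist(&hp, "ab", 2, NIL);
 *
 * list is now [$a,$b] and hp points just past the four words used.
 */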
  3402. /*
  3403. ** Write io list in to a buffer.
  3404. **
  3405. ** An iolist is defined as:
  3406. **
  3407. ** iohead ::= Binary
3408. ** | Byte (i.e. integer in range [0..255])
  3409. ** | iolist
  3410. ** ;
  3411. **
  3412. ** iotail ::= []
  3413. ** | Binary (added by tony)
  3414. ** | iolist
  3415. ** ;
  3416. **
  3417. ** iolist ::= []
  3418. ** | Binary
  3419. ** | [ iohead | iotail]
  3420. ** ;
  3421. **
  3422. ** Return remaining bytes in buffer on success
  3423. ** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow
3424. ** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that the result would not be a whole number of bytes)
  3425. **
  3426. ** Note!
3427. ** Do not detect input errors in this function that are not detected by erts_iolist_size!
  3428. **
  3429. ** A caller should be able to rely on a successful return from erts_iolist_to_buf
  3430. ** if erts_iolist_size is previously successfully called and erts_iolist_to_buf
  3431. ** is called with a buffer at least as large as the value given by erts_iolist_size.
  3432. **
  3433. */
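/*
 * Example term under the grammar above (Erlang notation):
 * [104, <<"el">>, [108, [111]]] is a valid iolist denoting "hello".
 * A hedged sketch of the intended size/copy pairing, relying on the
 * contract stated above:
 *
 *     ErlDrvSizeT sz;
 *     if (erts_iolist_size(obj, &sz) == 0) {
 *         char *buf = erts_alloc(ERTS_ALC_T_TMP, sz);
 *         ErlDrvSizeT left = erts_iolist_to_buf(obj, buf, sz);
 *         ASSERT(left == 0);
 *     }
 *
 * An exact fit leaves no bytes unused, which is why the assertion holds
 * when both calls succeed.
 */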
  3434. typedef enum {
  3435. ERTS_IL2B_BCOPY_OK,
  3436. ERTS_IL2B_BCOPY_YIELD,
  3437. ERTS_IL2B_BCOPY_OVERFLOW,
  3438. ERTS_IL2B_BCOPY_TYPE_ERROR
  3439. } ErtsIL2BBCopyRes;
  3440. static ErtsIL2BBCopyRes
  3441. iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
  3442. static ERTS_INLINE ErlDrvSizeT
  3443. iolist_to_buf(const int yield_support,
  3444. ErtsIOList2BufState *state,
  3445. Eterm obj,
  3446. char* buf,
  3447. ErlDrvSizeT alloced_len)
  3448. {
  3449. #undef IOLIST_TO_BUF_BCOPY
  3450. #define IOLIST_TO_BUF_BCOPY(CONSP) \
  3451. do { \
  3452. size_t size = binary_size(obj); \
  3453. if (size > 0) { \
  3454. Uint bitsize; \
  3455. byte* bptr; \
  3456. Uint bitoffs; \
  3457. Uint num_bits; \
  3458. if (yield_support) { \
  3459. size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
  3460. if (yield_count > 0) \
  3461. max_size *= yield_count+1; \
  3462. if (size > max_size) { \
  3463. state->objp = CONSP; \
  3464. goto L_bcopy_yield; \
  3465. } \
  3466. if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
  3467. int cost = (int) size; \
  3468. cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
  3469. yield_count -= cost; \
  3470. } \
  3471. } \
  3472. if (len < size) \
  3473. goto L_overflow; \
  3474. ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
  3475. if (bitsize != 0) \
  3476. goto L_type_error; \
  3477. num_bits = 8*size; \
  3478. copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); \
  3479. buf += size; \
  3480. len -= size; \
  3481. } \
  3482. } while (0)
  3483. ErlDrvSizeT res, len;
  3484. Eterm* objp = NULL;
  3485. int init_yield_count;
  3486. int yield_count;
  3487. DECLARE_ESTACK(s);
  3488. len = (ErlDrvSizeT) alloced_len;
    if (!yield_support) {
        yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
        goto L_again;
    }
    else {

        if (state->iolist.reds_left <= 0)
            return ERTS_IOLIST_TO_BUF_YIELD;

        ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
        init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
                            * state->iolist.reds_left);
        yield_count = init_yield_count;

        if (!state->iolist.estack.start)
            goto L_again;
        else {
            int chk_stack;
            /* Restart; restore state... */
            ESTACK_RESTORE(s, &state->iolist.estack);

            if (!state->bcopy.bptr)
                chk_stack = 0;
            else {
                chk_stack = 1;
                switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
                case ERTS_IL2B_BCOPY_OK:
                    break;
                case ERTS_IL2B_BCOPY_YIELD:
                    BUMP_ALL_REDS(state->iolist.c_p);
                    state->iolist.reds_left = 0;
                    ESTACK_SAVE(s, &state->iolist.estack);
                    return ERTS_IOLIST_TO_BUF_YIELD;
                case ERTS_IL2B_BCOPY_OVERFLOW:
                    goto L_overflow;
                case ERTS_IL2B_BCOPY_TYPE_ERROR:
                    goto L_type_error;
                }
            }

            obj = state->iolist.obj;
            buf = state->buf;
            len = state->len;
            objp = state->objp;
            state->objp = NULL;
            if (objp)
                goto L_tail;
            if (!chk_stack)
                goto L_again;
            /* check stack */
        }
    }

    while (!ESTACK_ISEMPTY(s)) {
        obj = ESTACK_POP(s);
    L_again:
        if (is_list(obj)) {
            while (1) { /* Tail loop */
                while (1) { /* Head loop */
                    if (yield_support && --yield_count <= 0)
                        goto L_yield;
                    objp = list_val(obj);
                    obj = CAR(objp);
                    if (is_byte(obj)) {
                        if (len == 0) {
                            goto L_overflow;
                        }
                        *buf++ = unsigned_val(obj);
                        len--;
                    } else if (is_binary(obj)) {
                        IOLIST_TO_BUF_BCOPY(objp);
                    } else if (is_list(obj)) {
                        ESTACK_PUSH(s, CDR(objp));
                        continue; /* Head loop */
                    } else if (is_not_nil(obj)) {
                        goto L_type_error;
                    }
                    break;
                }

            L_tail:

                obj = CDR(objp);

                if (is_list(obj)) {
                    continue; /* Tail loop */
                } else if (is_binary(obj)) {
                    IOLIST_TO_BUF_BCOPY(NULL);
                } else if (is_not_nil(obj)) {
                    goto L_type_error;
                }
                break;
            }
        } else if (is_binary(obj)) {
            IOLIST_TO_BUF_BCOPY(NULL);
        } else if (is_not_nil(obj)) {
            goto L_type_error;
        } else if (yield_support && --yield_count <= 0)
            goto L_yield;
    }

    res = len;

 L_return:

    DESTROY_ESTACK(s);

    if (yield_support) {
        int reds;
        CLEAR_SAVED_ESTACK(&state->iolist.estack);
        reds = ((init_yield_count - yield_count - 1)
                / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
        BUMP_REDS(state->iolist.c_p, reds);
        state->iolist.reds_left -= reds;
        if (state->iolist.reds_left < 0)
            state->iolist.reds_left = 0;
    }

    return res;

 L_type_error:
    res = ERTS_IOLIST_TO_BUF_TYPE_ERROR;
    goto L_return;

 L_overflow:
    res = ERTS_IOLIST_TO_BUF_OVERFLOW;
    goto L_return;

 L_bcopy_yield:

    state->buf = buf;
    state->len = len;

    switch (iolist_to_buf_bcopy(state, obj, &yield_count)) {
    case ERTS_IL2B_BCOPY_OK:
        ERTS_INTERNAL_ERROR("Missing yield");
    case ERTS_IL2B_BCOPY_YIELD:
        BUMP_ALL_REDS(state->iolist.c_p);
        state->iolist.reds_left = 0;
        ESTACK_SAVE(s, &state->iolist.estack);
        return ERTS_IOLIST_TO_BUF_YIELD;
    case ERTS_IL2B_BCOPY_OVERFLOW:
        goto L_overflow;
    case ERTS_IL2B_BCOPY_TYPE_ERROR:
        goto L_type_error;
    }

 L_yield:

    BUMP_ALL_REDS(state->iolist.c_p);
    state->iolist.reds_left = 0;
    state->iolist.obj = obj;
    state->buf = buf;
    state->len = len;
    ESTACK_SAVE(s, &state->iolist.estack);
    return ERTS_IOLIST_TO_BUF_YIELD;

#undef IOLIST_TO_BUF_BCOPY
}
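
/*
 * Reduction accounting above, by example (the concrete value of
 * ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED below is made up for the
 * illustration):
 *
 *     reds_left = 100, COUNT_PER_RED = 8
 *         => init_yield_count = 8 * 100 = 800 work units before yielding.
 *
 *     If the traversal stops with yield_count = 550 it has consumed
 *     800 - 550 = 250 units and is charged
 *         ((250 - 1) / 8) + 1 = 32 reductions, i.e. ceil(250 / 8).
 */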

static ErtsIL2BBCopyRes
iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
{
    ErtsIL2BBCopyRes res;
    char *buf = state->buf;
    ErlDrvSizeT len = state->len;
    byte* bptr;
    size_t size;
    size_t max_size;
    Uint bitoffs;
    Uint num_bits;
    int yield_count = *yield_countp;

    if (state->bcopy.bptr) {
        bptr = state->bcopy.bptr;
        size = state->bcopy.size;
        bitoffs = state->bcopy.bitoffs;
        state->bcopy.bptr = NULL;
    }
    else {
        Uint bitsize;

        ASSERT(is_binary(obj));

        size = binary_size(obj);
        if (size <= 0)
            return ERTS_IL2B_BCOPY_OK;

        if (len < size)
            return ERTS_IL2B_BCOPY_OVERFLOW;

        ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
        if (bitsize != 0)
            return ERTS_IL2B_BCOPY_TYPE_ERROR;
    }

    ASSERT(size > 0);
    max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
    if (yield_count > 0)
        max_size *= (size_t) (yield_count+1);

    if (size <= max_size) {
        if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
            int cost = (int) size;
            cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
            yield_count -= cost;
        }
        res = ERTS_IL2B_BCOPY_OK;
    }
    else {
        ASSERT(0 < max_size && max_size < size);
        yield_count = 0;
        state->bcopy.bptr = bptr + max_size;
        state->bcopy.bitoffs = bitoffs;
        state->bcopy.size = size - max_size;
        size = max_size;
        res = ERTS_IL2B_BCOPY_YIELD;
    }

    num_bits = 8*size;
    copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
    state->buf += size;
    state->len -= size;
    *yield_countp = yield_count;

    return res;
}
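
/*
 * Chunking above, by example (again with a made-up value, here
 * ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT = 64): with yield_count = 99
 * at most 64 * (99 + 1) = 6400 bytes are copied in one call. A 10000
 * byte binary is therefore split: 6400 bytes are copied now, yield_count
 * drops to 0, and bptr/size for the remaining 3600 bytes are parked in
 * state->bcopy for the next resumption.
 */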

ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state)
{
    return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len);
}

ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
{
    return iolist_to_buf(0, NULL, obj, buf, alloced_len);
}
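
/*
 * Resumable use of erts_iolist_to_buf_yielding(), sketched with only the
 * state fields this file itself touches; real callers may set up more,
 * and 'term', 'buf' and 'len' are placeholders:
 *
 *     ErtsIOList2BufState state;
 *     sys_memzero(&state, sizeof(state));    clears estack.start,
 *                                            bcopy.bptr and objp
 *     state.iolist.c_p = c_p;
 *     state.iolist.obj = term;
 *     state.iolist.reds_left = ERTS_BIF_REDS_LEFT(c_p);
 *     state.buf = buf;
 *     state.len = len;
 *
 *     if (erts_iolist_to_buf_yielding(&state) == ERTS_IOLIST_TO_BUF_YIELD)
 *         ... trap, then call erts_iolist_to_buf_yielding(&state) again
 *             with a replenished state.iolist.reds_left ...
 */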

/*
 * Return 0 if successful, and non-zero if unsuccessful.
 *
 * It is vital that this function rejects exactly the same input term
 * errors as erts_iolist_to_buf(): any term error detected by
 * erts_iolist_to_buf() must be detected here as well!
 */
static ERTS_INLINE int
iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep)
{
    int res, init_yield_count, yield_count;
    Eterm* objp;
    Uint size = (Uint) *sizep;
    DECLARE_ESTACK(s);

    if (!yield_support)
        yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
    else {
        if (state->reds_left <= 0)
            return ERTS_IOLIST_YIELD;
        ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
        init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
        init_yield_count *= state->reds_left;
        yield_count = init_yield_count;

        if (state->estack.start) {
            /* Restart; restore state... */
            ESTACK_RESTORE(s, &state->estack);
            size = (Uint) state->size;
            obj = state->obj;
        }
    }

    goto L_again;

#define SAFE_ADD(Var, Val)                      \
    do {                                        \
        Uint valvar = (Val);                    \
        Var += valvar;                          \
        if (Var < valvar) {                     \
            goto L_overflow_error;              \
        }                                       \
    } while (0)

    while (!ESTACK_ISEMPTY(s)) {
        obj = ESTACK_POP(s);
    L_again:
        if (is_list(obj)) {
            while (1) { /* Tail loop */
                while (1) { /* Head loop */
                    if (yield_support && --yield_count <= 0)
                        goto L_yield;
                    objp = list_val(obj);
                    /* Head */
                    obj = CAR(objp);
                    if (is_byte(obj)) {
                        size++;
                        if (size == 0) {
                            goto L_overflow_error;
                        }
                    } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
                        SAFE_ADD(size, binary_size(obj));
                    } else if (is_list(obj)) {
                        ESTACK_PUSH(s, CDR(objp));
                        continue; /* Head loop */
                    } else if (is_not_nil(obj)) {
                        goto L_type_error;
                    }
                    break;
                }
                /* Tail */
                obj = CDR(objp);
                if (is_list(obj))
                    continue; /* Tail loop */
                else if (is_binary(obj) && binary_bitsize(obj) == 0) {
                    SAFE_ADD(size, binary_size(obj));
                } else if (is_not_nil(obj)) {
                    goto L_type_error;
                }
                break;
            }
        } else {
            if (yield_support && --yield_count <= 0)
                goto L_yield;
            if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
                SAFE_ADD(size, binary_size(obj));
            } else if (is_not_nil(obj)) {
                goto L_type_error;
            }
        }
    }
#undef SAFE_ADD

    *sizep = (ErlDrvSizeT) size;

    res = ERTS_IOLIST_OK;

 L_return:

    DESTROY_ESTACK(s);

    if (yield_support) {
        int yc, reds;
        CLEAR_SAVED_ESTACK(&state->estack);
        yc = init_yield_count - yield_count;
        reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1;
        BUMP_REDS(state->c_p, reds);
        state->reds_left -= reds;
        state->size = (ErlDrvSizeT) size;
        state->have_size = 1;
    }

    return res;

 L_overflow_error:
    res = ERTS_IOLIST_OVERFLOW;
    size = 0;
    goto L_return;

 L_type_error:
    res = ERTS_IOLIST_TYPE;
    size = 0;
    goto L_return;

 L_yield:
    BUMP_ALL_REDS(state->c_p);
    state->reds_left = 0;
    state->size = size;
    state->obj = obj;
    ESTACK_SAVE(s, &state->estack);
    return ERTS_IOLIST_YIELD;
}

int erts_iolist_size_yielding(ErtsIOListState *state)
{
    ErlDrvSizeT size = state->size;
    return iolist_size(1, state, state->obj, &size);
}

int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
{
    *sizep = 0;
    return iolist_size(0, NULL, obj, sizep);
}
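
/*
 * Typical one-shot usage (sketch; the allocator type is just an
 * example): size the term first, then fill a buffer of exactly that
 * size. On success erts_iolist_to_buf() returns the number of unused
 * bytes left in the buffer, which is 0 when sized exactly.
 *
 *     ErlDrvSizeT size, left;
 *     char *buf;
 *     if (erts_iolist_size(term, &size) != ERTS_IOLIST_OK)
 *         ... badarg: type error or size overflow ...
 *     buf = erts_alloc(ERTS_ALC_T_TMP, size);
 *     left = erts_iolist_to_buf(term, buf, size);
 *     ASSERT(left == 0);    cannot fail if 'term' was not changed
 *     ... use buf ...
 *     erts_free(ERTS_ALC_T_TMP, buf);
 */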

/* Return 0 if 'list' is not a non-empty flat list of bytes;
   otherwise return the (nonzero) length of the list. */
Sint
is_string(Eterm list)
{
    Sint len = 0;

    while (is_list(list)) {
        Eterm* consp = list_val(list);
        Eterm hd = CAR(consp);

        if (!is_byte(hd))
            return 0;
        len++;
        list = CDR(consp);
    }
    if (is_nil(list))
        return len;
    return 0;
}
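
/*
 * Examples, in Erlang notation: "abc" (= [97,98,99]) gives 3; [] gives
 * 0; [500] gives 0 (500 is not a byte); [$a|$b] gives 0 (improper
 * tail); [97,98,<<"x">>] gives 0 (not a flat list of bytes).
 */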

static int trim_threshold;
static int top_pad;
static int mmap_threshold;
static int mmap_max;

Uint tot_bin_allocated;

void erts_init_utils(void)
{

}

void erts_init_utils_mem(void)
{
    trim_threshold = -1;
    top_pad = -1;
    mmap_threshold = -1;
    mmap_max = -1;
}

int
sys_alloc_opt(int opt, int value)
{
#if HAVE_MALLOPT
    int m_opt;
    int *curr_val;

    switch (opt) {
    case SYS_ALLOC_OPT_TRIM_THRESHOLD:
#ifdef M_TRIM_THRESHOLD
        m_opt = M_TRIM_THRESHOLD;
        curr_val = &trim_threshold;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_TOP_PAD:
#ifdef M_TOP_PAD
        m_opt = M_TOP_PAD;
        curr_val = &top_pad;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_MMAP_THRESHOLD:
#ifdef M_MMAP_THRESHOLD
        m_opt = M_MMAP_THRESHOLD;
        curr_val = &mmap_threshold;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_MMAP_MAX:
#ifdef M_MMAP_MAX
        m_opt = M_MMAP_MAX;
        curr_val = &mmap_max;
        break;
#else
        return 0;
#endif
    default:
        return 0;
    }

    if (mallopt(m_opt, value)) {
        *curr_val = value;
        return 1;
    }
#endif /* #if HAVE_MALLOPT */

    return 0;
}
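
/*
 * Example (sketch): ask the underlying malloc to serve allocations of
 * 1 MB and larger via mmap(). The cached value is updated, and 1
 * returned, only when mallopt() itself accepts the value.
 *
 *     if (sys_alloc_opt(SYS_ALLOC_OPT_MMAP_THRESHOLD, 1 << 20))
 *         ... accepted; sys_alloc_stat() will now report it ...
 */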

void
sys_alloc_stat(SysAllocStat *sasp)
{
    sasp->trim_threshold = trim_threshold;
    sasp->top_pad = top_pad;
    sasp->mmap_threshold = mmap_threshold;
    sasp->mmap_max = mmap_max;
}

char *
erts_read_env(char *key)
{
    size_t value_len = 256;
    char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
    int res;

    while (1) {
        res = erts_sys_explicit_8bit_getenv(key, value, &value_len);

        if (res >= 0) {
            break;
        }

        value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
    }

    if (res != 1) {
        erts_free(ERTS_ALC_T_TMP, value);
        return NULL;
    }

    return value;
}

void
erts_free_read_env(void *value)
{
    if (value)
        erts_free(ERTS_ALC_T_TMP, value);
}
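
/*
 * Usage sketch ('ERL_CRASH_DUMP' is just an example variable): the
 * result is ERTS_ALC_T_TMP-allocated and must be handed back to
 * erts_free_read_env(), which also accepts NULL.
 *
 *     char *path = erts_read_env("ERL_CRASH_DUMP");
 *     if (path) {
 *         ... use path ...
 *     }
 *     erts_free_read_env(path);
 */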

typedef struct {
    size_t sz;
    char *ptr;
} ErtsEmuArg;

typedef struct {
    int argc;
    ErtsEmuArg *arg;
    size_t no_bytes;
} ErtsEmuArgs;

ErtsEmuArgs saved_emu_args = {0};

void
erts_save_emu_args(int argc, char **argv)
{
#ifdef DEBUG
    char *end_ptr;
#endif
    char *ptr;
    int i;
    size_t arg_sz[100];
    size_t size;

    ASSERT(!saved_emu_args.argc);

    size = sizeof(ErtsEmuArg)*argc;
    for (i = 0; i < argc; i++) {
        size_t sz = sys_strlen(argv[i]);
        if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
            arg_sz[i] = sz;
        size += sz+1;
    }
    ptr = (char *) malloc(size);
    if (!ptr) {
        ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
    }
#ifdef DEBUG
    end_ptr = ptr + size;
#endif
    saved_emu_args.arg = (ErtsEmuArg *) ptr;
    ptr += sizeof(ErtsEmuArg)*argc;
    saved_emu_args.argc = argc;
    saved_emu_args.no_bytes = 0;
    for (i = 0; i < argc; i++) {
        size_t sz;
        if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
            sz = arg_sz[i];
        else
            sz = sys_strlen(argv[i]);
        saved_emu_args.arg[i].ptr = ptr;
        saved_emu_args.arg[i].sz = sz;
        saved_emu_args.no_bytes += sz;
        ptr += sz+1;
        sys_strcpy(saved_emu_args.arg[i].ptr, argv[i]);
    }
    ASSERT(ptr == end_ptr);
}
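
/*
 * Layout of the single malloc() block built above, for argc == 2:
 *
 *     [ErtsEmuArg 0][ErtsEmuArg 1]["argv0\0"]["argv1\0"]
 *
 * Each arg[i].ptr points into the trailing string area. The arg_sz[]
 * array merely caches the first 100 string lengths so that the second
 * loop can skip recomputing sys_strlen() for them.
 */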

Eterm
erts_get_emu_args(Process *c_p)
{
#ifdef DEBUG
    Eterm *end_hp;
#endif
    int i;
    Uint hsz;
    Eterm *hp, res;

    hsz = saved_emu_args.no_bytes*2;
    hsz += saved_emu_args.argc*2;

    hp = HAlloc(c_p, hsz);
#ifdef DEBUG
    end_hp = hp + hsz;
#endif
    res = NIL;

    for (i = saved_emu_args.argc-1; i >= 0; i--) {
        Eterm arg = buf_to_intlist(&hp,
                                   saved_emu_args.arg[i].ptr,
                                   saved_emu_args.arg[i].sz,
                                   NIL);
        res = CONS(hp, arg, res);
        hp += 2;
    }

    ASSERT(hp == end_hp);

    return res;
}
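
/*
 * Heap cost, by example: buf_to_intlist() builds one cons cell (2 words)
 * per saved byte, and the outer list adds one cons cell per argument.
 * For argv = {"-S", "beam"}: no_bytes = 6 and argc = 2, so
 * hsz = 6*2 + 2*2 = 16 words.
 */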

Eterm
erts_get_ethread_info(Process *c_p)
{
    Uint sz, *szp;
    Eterm res, *hp, **hpp, *end_hp = NULL;

    sz = 0;
    szp = &sz;
    hpp = NULL;

    while (1) {
        Eterm tup, list, name;
#if defined(ETHR_NATIVE_ATOMIC32_IMPL) \
    || defined(ETHR_NATIVE_ATOMIC64_IMPL) \
    || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
        char buf[1024];
        int i;
        char **str;
#endif

        res = NIL;

#ifdef ETHR_X86_MEMBAR_H__

        tup = erts_bld_tuple(hpp, szp, 2,
                             erts_bld_string(hpp, szp, "sse2"),
#ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
                             erts_bld_string(hpp, szp,
                                             (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
                                              ? "yes" : "no"))
#else
                             erts_bld_string(hpp, szp, "yes")
#endif
            );
        res = erts_bld_cons(hpp, szp, tup, res);

        tup = erts_bld_tuple(hpp, szp, 2,
                             erts_bld_string(hpp, szp,
                                             "x86"
#ifdef ARCH_64
                                             "_64"
#endif
                                             " OOO"),
                             erts_bld_string(hpp, szp,
#ifdef ETHR_X86_OUT_OF_ORDER
                                             "yes"
#else
                                             "no"
#endif
                                 ));
        res = erts_bld_cons(hpp, szp, tup, res);
#endif

#ifdef ETHR_SPARC_V9_MEMBAR_H__

        tup = erts_bld_tuple(hpp, szp, 2,
                             erts_bld_string(hpp, szp, "Sparc V9"),
                             erts_bld_string(hpp, szp,
#if defined(ETHR_SPARC_TSO)
                                             "TSO"
#elif defined(ETHR_SPARC_PSO)
                                             "PSO"
#elif defined(ETHR_SPARC_RMO)
                                             "RMO"
#else
                                             "undefined"
#endif
                                 ));
        res = erts_bld_cons(hpp, szp, tup, res);

#endif

#ifdef ETHR_PPC_MEMBAR_H__

        tup = erts_bld_tuple(hpp, szp, 2,
                             erts_bld_string(hpp, szp, "lwsync"),
                             erts_bld_string(hpp, szp,
#if defined(ETHR_PPC_HAVE_LWSYNC)
                                             "yes"
#elif defined(ETHR_PPC_HAVE_NO_LWSYNC)
                                             "no"
#elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__)
                                             ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no"
#else
                                             "undefined"
#endif
                                 ));
        res = erts_bld_cons(hpp, szp, tup, res);

#endif

        tup = erts_bld_tuple(hpp, szp, 2,
                             erts_bld_string(hpp, szp, "Native rw-spinlocks"),
#ifdef ETHR_NATIVE_RWSPINLOCK_IMPL
                             erts_bld_string(hpp, szp, ETHR_NATIVE_RWSPINLOCK_IMPL)
#else
                             erts_bld_string(hpp, szp, "no")
#endif
            );
        res = erts_bld_cons(hpp, szp, tup, res);

        tup = erts_bld_tuple(hpp, szp, 2,
                             erts_bld_string(hpp, szp, "Native spinlocks"),
#ifdef ETHR_NATIVE_SPINLOCK_IMPL
                             erts_bld_string(hpp, szp, ETHR_NATIVE_SPINLOCK_IMPL)
#else
                             erts_bld_string(hpp, szp, "no")
#endif
            );
        res = erts_bld_cons(hpp, szp, tup, res);

        list = NIL;
#ifdef ETHR_NATIVE_DW_ATOMIC_IMPL
        if (ethr_have_native_dw_atomic()) {
            name = erts_bld_string(hpp, szp, ETHR_NATIVE_DW_ATOMIC_IMPL);
            str = ethr_native_dw_atomic_ops();
            for (i = 0; str[i]; i++) {
                erts_snprintf(buf, sizeof(buf), "ethr_native_dw_atomic_%s()", str[i]);
                list = erts_bld_cons(hpp, szp,
                                     erts_bld_string(hpp, szp, buf),
                                     list);
            }
            str = ethr_native_su_dw_atomic_ops();
            for (i = 0; str[i]; i++) {
                erts_snprintf(buf, sizeof(buf), "ethr_native_su_dw_atomic_%s()", str[i]);
                list = erts_bld_cons(hpp, szp,
                                     erts_bld_string(hpp, szp, buf),
                                     list);
            }
        }
        else
#endif
            name = erts_bld_string(hpp, szp, "no");

        tup = erts_bld_tuple(hpp, szp, 3,
                             erts_bld_string(hpp, szp, "Double word native atomics"),
                             name,
                             list);
        res = erts_bld_cons(hpp, szp, tup, res);

        list = NIL;
#ifdef ETHR_NATIVE_ATOMIC64_IMPL
        name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC64_IMPL);
        str = ethr_native_atomic64_ops();
        for (i = 0; str[i]; i++) {
            erts_snprintf(buf, sizeof(buf), "ethr_native_atomic64_%s()", str[i]);
            list = erts_bld_cons(hpp, szp,
                                 erts_bld_string(hpp, szp, buf),
                                 list);
        }
#else
        name = erts_bld_string(hpp, szp, "no");
#endif
        tup = erts_bld_tuple(hpp, szp, 3,
                             erts_bld_string(hpp, szp, "64-bit native atomics"),
                             name,
                             list);
        res = erts_bld_cons(hpp, szp, tup, res);

        list = NIL;
#ifdef ETHR_NATIVE_ATOMIC32_IMPL
        name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC32_IMPL);
        str = ethr_native_atomic32_ops();
        for (i = 0; str[i]; i++) {
            erts_snprintf(buf, sizeof(buf), "ethr_native_atomic32_%s()", str[i]);
            list = erts_bld_cons(hpp, szp,
                                 erts_bld_string(hpp, szp, buf),
                                 list);
        }
#else
        name = erts_bld_string(hpp, szp, "no");
#endif
        tup = erts_bld_tuple(hpp, szp, 3,
                             erts_bld_string(hpp, szp, "32-bit native atomics"),
                             name,
                             list);
        res = erts_bld_cons(hpp, szp, tup, res);

        if (hpp) {
            HRelease(c_p, end_hp, *hpp);
            return res;
        }

        hp = HAlloc(c_p, sz);
        end_hp = hp + sz;
        hpp = &hp;
        szp = NULL;
    }
}
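
/*
 * The while (1) above is the usual erts_bld_*() two-pass idiom: the
 * first pass runs with hpp == NULL and szp == &sz, so every erts_bld_*()
 * call only accumulates the needed heap size; the second pass runs with
 * szp == NULL and hpp pointing into the HAlloc'ed block of sz words, so
 * the very same calls now construct the term. HRelease() then returns
 * any unused words to the process heap (none, when both passes agree).
 */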

/*
 * To be used to silence unused result warnings, but do not abuse it.
 */
void erts_silence_warn_unused_result(long unused)
{

}

/*
 * Interval counts
 */
void
erts_interval_init(erts_interval_t *icp)
{
    erts_atomic64_init_nob(&icp->counter.atomic, 0);
}

static ERTS_INLINE Uint64
step_interval_nob(erts_interval_t *icp)
{
    return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}

static ERTS_INLINE Uint64
step_interval_relb(erts_interval_t *icp)
{
    return (Uint64) erts_atomic64_inc_read_relb(&icp->counter.atomic);
}

static ERTS_INLINE Uint64
ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
    Uint64 curr_ic;
    curr_ic = (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
    if (curr_ic > ic)
        return curr_ic;
    return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}

static ERTS_INLINE Uint64
ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
    Uint64 curr_ic;
    curr_ic = (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
    if (curr_ic > ic)
        return curr_ic;
    return (Uint64) erts_atomic64_inc_read_acqb(&icp->counter.atomic);
}

Uint64
erts_step_interval_nob(erts_interval_t *icp)
{
    return step_interval_nob(icp);
}

Uint64
erts_step_interval_relb(erts_interval_t *icp)
{
    return step_interval_relb(icp);
}

Uint64
erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
    return ensure_later_interval_nob(icp, ic);
}

Uint64
erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
    return ensure_later_interval_acqb(icp, ic);
}
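
/*
 * The suffixes follow the erts atomics naming: _nob = no memory
 * barrier, _relb = release barrier, _acqb = acquire barrier. A usage
 * sketch ('ic' being some erts_interval_t):
 *
 *     Uint64 seen = erts_step_interval_nob(&ic);
 *     ...
 *     Uint64 later = erts_ensure_later_interval_nob(&ic, seen);
 *     ASSERT(later > seen);    guaranteed: the counter is either read
 *                              past 'seen' or stepped beyond it
 */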

/*
 * A millisecond timestamp without time correction where there's no hrtime
 * - for tracing on "long" things...
 */
Uint64 erts_timestamp_millis(void)
{
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
    return ERTS_MONOTONIC_TO_MSEC(erts_os_monotonic_time());
#else
    Uint64 res;
    SysTimeval tv;
    sys_gettimeofday(&tv);
    res = (Uint64) tv.tv_sec*1000000;
    res += (Uint64) tv.tv_usec;
    return (res / 1000);
#endif
}

void *
erts_calc_stacklimit(char *prev_c, UWord stacksize)
{
    /*
     * We *don't* want this function inlined, i.e., it is
     * risky to call this function from another function
     * in utils.c
     */

    UWord pagesize = erts_sys_get_page_size();
    char c;
    char *start;
    if (&c > prev_c) {
        start = (char *) ((((UWord) prev_c) / pagesize) * pagesize);
        return (void *) (start + stacksize);
    }
    else {
        start = (char *) (((((UWord) prev_c) - 1) / pagesize + 1) * pagesize);
        return (void *) (start - stacksize);
    }
}

/*
 * erts_check_below_limit() and
 * erts_check_above_limit() are put
 * in utils.c in order to prevent
 * inlining.
 */
int
erts_check_below_limit(char *ptr, char *limit)
{
    return ptr < limit;
}

int
erts_check_above_limit(char *ptr, char *limit)
{
    return ptr > limit;
}
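
/*
 * Usage sketch for the functions above; 8 MB is an arbitrary example
 * size, and the check shown assumes a downwards-growing stack (an
 * upwards-growing one would use erts_check_below_limit()):
 *
 *     char c;
 *     void *limit = erts_calc_stacklimit(&c, 8 * 1024 * 1024);
 *     ...
 *     char here;
 *     if (!erts_check_above_limit(&here, (char *) limit))
 *         ... too close to the end of the stack; bail out ...
 */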

void *
erts_ptr_id(void *ptr)
{
    return ptr;
}
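
/*
 * erts_ptr_id() is an identity function deliberately kept out of line:
 * passing a pointer through it hides the pointer's origin from the
 * optimizer, defeating unwanted compiler assumptions and warnings in
 * much the same way as the limit checkers above avoid inlining.
 */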