PageRenderTime 38ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 1ms

/erts/emulator/beam/utils.c

https://github.com/Bwooce/otp
C | 4112 lines | 3365 code | 447 blank | 300 comment | 646 complexity | e2f376dbbe07ec1f0ef6a6eb059cc676 MD5 | raw file
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-2-Clause
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2011. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #define ERTS_DO_INCL_GLB_INLINE_FUNC_DEF
  23. #include "sys.h"
  24. #include "erl_vm.h"
  25. #include "global.h"
  26. #include "erl_process.h"
  27. #include "big.h"
  28. #include "bif.h"
  29. #include "erl_binary.h"
  30. #include "erl_bits.h"
  31. #include "packet_parser.h"
  32. #include "erl_gc.h"
  33. #define ERTS_WANT_DB_INTERNAL__
  34. #include "erl_db.h"
  35. #include "erl_threads.h"
  36. #include "register.h"
  37. #include "dist.h"
  38. #include "erl_printf.h"
  39. #include "erl_threads.h"
  40. #include "erl_smp.h"
  41. #include "erl_time.h"
  42. #undef M_TRIM_THRESHOLD
  43. #undef M_TOP_PAD
  44. #undef M_MMAP_THRESHOLD
  45. #undef M_MMAP_MAX
  46. #if defined(__GLIBC__) && defined(HAVE_MALLOC_H)
  47. #include <malloc.h>
  48. #endif
  49. #if !defined(HAVE_MALLOPT)
  50. #undef HAVE_MALLOPT
  51. #define HAVE_MALLOPT 0
  52. #endif
/* profile_scheduler mini message queue */

/* One scheduler profiling event: the scheduler it concerns, the
 * scheduler count reported with the event, an Ms/s/us timestamp and
 * the scheduler state (an atom). */
typedef struct {
    Uint scheduler_id;
    Uint no_schedulers;
    Uint Ms;
    Uint s;
    Uint us;
    Eterm state;
} profile_sched_msg;

/* Small fixed-capacity queue of profiling events; n is the number of
 * valid entries in msg[] (at most 2). */
typedef struct {
    profile_sched_msg msg[2];
    Uint n;
} profile_sched_msg_q;
  66. #ifdef ERTS_SMP
  67. static void
  68. dispatch_profile_msg_q(profile_sched_msg_q *psmq)
  69. {
  70. int i = 0;
  71. profile_sched_msg *msg = NULL;
  72. ASSERT(psmq != NULL);
  73. for (i = 0; i < psmq->n; i++) {
  74. msg = &(psmq->msg[i]);
  75. profile_scheduler_q(make_small(msg->scheduler_id), msg->state, am_undefined, msg->Ms, msg->s, msg->us);
  76. }
  77. }
  78. #endif
/*
 * Allocate 'need' words of heap space for process 'p'.  If the space
 * cannot be taken from the newest existing heap fragment, a new
 * fragment of 'need'+'xtra' words is created; 'xtra' is slack meant
 * to reduce the number of future fragments.  Returns a pointer to the
 * first of the 'need' reserved words.
 */
Eterm*
erts_heap_alloc(Process* p, Uint need, Uint xtra)
{
    ErlHeapFragment* bp;
    Eterm* htop;
    Uint n;
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
    Uint i;
#endif

#ifdef FORCE_HEAP_FRAGS
    /* Test instrumentation: serve the request straight from a
     * previously verified heap area when it fits entirely inside it. */
    if (p->space_verified && p->space_verified_from!=NULL
	&& HEAP_TOP(p) >= p->space_verified_from
	&& HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified
	&& HEAP_LIMIT(p) - HEAP_TOP(p) >= need) {
	Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from);
	ASSERT(consumed <= p->space_verified);
	p->space_verified -= consumed;
	p->space_verified_from += consumed;
	HEAP_TOP(p) = p->space_verified_from;
	return HEAP_TOP(p) - need;
    }
    p->space_verified = 0;
    p->space_verified_from = NULL;
#endif /* FORCE_HEAP_FRAGS */

    n = need + xtra;

    /* Reuse the newest fragment if it has room for 'need' words
     * (the 'xtra' slack only matters when a new fragment is made). */
    bp = MBUF(p);
    if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) {
	Eterm* ret = bp->mem + bp->used_size;
	bp->used_size += need;
	return ret;
    }
#ifdef DEBUG
    n++;	/* one extra word in debug builds; filled below */
#endif
    bp = (ErlHeapFragment*)
	ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG, ERTS_HEAP_FRAG_SIZE(n));
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
    /* Pre-fill the fragment so unwritten words are detectable. */
    for (i = 0; i < n; i++) {
	bp->mem[i] = ERTS_HOLE_MARKER;
    }
#endif
#ifdef DEBUG
    n--;
#endif
    /*
     * When we have created a heap fragment, we are no longer allowed
     * to store anything more on the heap.
     */
    htop = HEAP_TOP(p);
    if (htop < HEAP_LIMIT(p)) {
	/* Turn the remaining unused heap area into one dummy bignum
	 * term and bump HEAP_TOP to the limit. */
	*htop = make_pos_bignum_header(HEAP_LIMIT(p)-htop-1);
	HEAP_TOP(p) = HEAP_LIMIT(p);
    }

    /* Link the new fragment first in the process' MBUF list and
     * reserve the requested words in it. */
    bp->next = MBUF(p);
    MBUF(p) = bp;
    bp->alloc_size = n;
    bp->used_size = need;
    MBUF_SIZE(p) += n;
    bp->off_heap.first = NULL;
    bp->off_heap.overhead = 0;
    return bp->mem;
}
  141. #ifdef CHECK_FOR_HOLES
  142. Eterm*
  143. erts_set_hole_marker(Eterm* ptr, Uint sz)
  144. {
  145. Eterm* p = ptr;
  146. int i;
  147. for (i = 0; i < sz; i++) {
  148. *p++ = ERTS_HOLE_MARKER;
  149. }
  150. return ptr;
  151. }
  152. #endif
  153. /*
  154. * Helper function for the ESTACK macros defined in global.h.
  155. */
  156. void
  157. erl_grow_stack(Eterm** start, Eterm** sp, Eterm** end)
  158. {
  159. Uint old_size = (*end - *start);
  160. Uint new_size = old_size * 2;
  161. Uint sp_offs = *sp - *start;
  162. if (new_size > 2 * DEF_ESTACK_SIZE) {
  163. *start = erts_realloc(ERTS_ALC_T_ESTACK, (void *) *start, new_size*sizeof(Eterm));
  164. } else {
  165. Eterm* new_ptr = erts_alloc(ERTS_ALC_T_ESTACK, new_size*sizeof(Eterm));
  166. sys_memcpy(new_ptr, *start, old_size*sizeof(Eterm));
  167. *start = new_ptr;
  168. }
  169. *end = *start + new_size;
  170. *sp = *start + sp_offs;
  171. }
  172. /*
  173. * Helper function for the ESTACK macros defined in global.h.
  174. */
  175. void
  176. erl_grow_wstack(UWord** start, UWord** sp, UWord** end)
  177. {
  178. Uint old_size = (*end - *start);
  179. Uint new_size = old_size * 2;
  180. Uint sp_offs = *sp - *start;
  181. if (new_size > 2 * DEF_ESTACK_SIZE) {
  182. *start = erts_realloc(ERTS_ALC_T_ESTACK, (void *) *start, new_size*sizeof(UWord));
  183. } else {
  184. UWord* new_ptr = erts_alloc(ERTS_ALC_T_ESTACK, new_size*sizeof(UWord));
  185. sys_memcpy(new_ptr, *start, old_size*sizeof(UWord));
  186. *start = new_ptr;
  187. }
  188. *end = *start + new_size;
  189. *sp = *start + sp_offs;
  190. }
  191. /* CTYPE macros */
  192. #define LATIN1
  193. #define IS_DIGIT(c) ((c) >= '0' && (c) <= '9')
  194. #ifdef LATIN1
  195. #define IS_LOWER(c) (((c) >= 'a' && (c) <= 'z') \
  196. || ((c) >= 128+95 && (c) <= 255 && (c) != 247))
  197. #define IS_UPPER(c) (((c) >= 'A' && (c) <= 'Z') \
  198. || ((c) >= 128+64 && (c) <= 128+94 && (c) != 247-32))
  199. #else
  200. #define IS_LOWER(c) ((c) >= 'a' && (c) <= 'z')
  201. #define IS_UPPER(c) ((c) >= 'A' && (c) <= 'Z')
  202. #endif
  203. #define IS_ALNUM(c) (IS_DIGIT(c) || IS_LOWER(c) || IS_UPPER(c))
  204. /* We don't include 160 (non-breaking space). */
  205. #define IS_SPACE(c) (c == ' ' || c == '\n' || c == '\t' || c == '\r')
  206. #ifdef LATIN1
  207. #define IS_CNTRL(c) ((c) < ' ' || (c) == 127 \
  208. || ((c) >= 128 && (c) < 128+32))
  209. #else
  210. /* Treat all non-ASCII as control characters */
  211. #define IS_CNTRL(c) ((c) < ' ' || (c) >= 127)
  212. #endif
  213. #define IS_PRINT(c) (!IS_CNTRL(c))
  214. /*
  215. * Calculate length of a list.
  216. * Returns -1 if not a proper list (i.e. not terminated with NIL)
  217. */
  218. int
  219. list_length(Eterm list)
  220. {
  221. int i = 0;
  222. while(is_list(list)) {
  223. i++;
  224. list = CDR(list_val(list));
  225. }
  226. if (is_not_nil(list)) {
  227. return -1;
  228. }
  229. return i;
  230. }
  231. Uint erts_fit_in_bits(Uint n)
  232. {
  233. Uint i;
  234. i = 0;
  235. while (n > 0) {
  236. i++;
  237. n >>= 1;
  238. }
  239. return i;
  240. }
/*
 * Print a formatted string to the sink selected by 'to':
 *   ERTS_PRINT_STDOUT/STDERR - standard streams
 *   ERTS_PRINT_FILE          - 'arg' is a FILE *
 *   ERTS_PRINT_SBUF          - 'arg' is a char buffer
 *   ERTS_PRINT_SNBUF         - 'arg' is an erts_print_sn_buf (buf + size)
 *   ERTS_PRINT_DSBUF         - 'arg' is an erts_dsprintf_buf_t
 *   any other to >= ERTS_PRINT_MIN is used as a raw file descriptor
 * Returns the underlying erts_v*printf() result, or -EINVAL when 'to'
 * is below ERTS_PRINT_MIN or is ERTS_PRINT_INVALID.
 */
int
erts_print(int to, void *arg, char *format, ...)
{
    int res;
    va_list arg_list;
    va_start(arg_list, format);

    if (to < ERTS_PRINT_MIN)
	res = -EINVAL;
    else {
	switch (to) {
	case ERTS_PRINT_STDOUT:
	    res = erts_vprintf(format, arg_list);
	    break;
	case ERTS_PRINT_STDERR:
	    res = erts_vfprintf(stderr, format, arg_list);
	    break;
	case ERTS_PRINT_FILE:
	    res = erts_vfprintf((FILE *) arg, format, arg_list);
	    break;
	case ERTS_PRINT_SBUF:
	    res = erts_vsprintf((char *) arg, format, arg_list);
	    break;
	case ERTS_PRINT_SNBUF:
	    res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf,
				 ((erts_print_sn_buf *) arg)->size,
				 format,
				 arg_list);
	    break;
	case ERTS_PRINT_DSBUF:
	    res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list);
	    break;
	case ERTS_PRINT_INVALID:
	    res = -EINVAL;
	    break;
	default:
	    /* Remaining valid values are raw file descriptors. */
	    res = erts_vfdprintf((int) to, format, arg_list);
	    break;
	}
    }

    va_end(arg_list);
    return res;
}
/* Write the single character 'c' to the erts_print() sink (to, arg). */
int
erts_putc(int to, void *arg, char c)
{
    return erts_print(to, arg, "%c", c);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * Some Erlang term building utility functions (to be used when performance  *
 * isn't critical).                                                          *
 *                                                                           *
 * Add more functions like these here (and function prototypes in global.h)  *
 * when needed.                                                              *
 *                                                                           *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
  296. Eterm
  297. erts_bld_atom(Uint **hpp, Uint *szp, char *str)
  298. {
  299. if (hpp)
  300. return am_atom_put(str, sys_strlen(str));
  301. else
  302. return THE_NON_VALUE;
  303. }
  304. Eterm
  305. erts_bld_uint(Uint **hpp, Uint *szp, Uint ui)
  306. {
  307. Eterm res = THE_NON_VALUE;
  308. if (IS_USMALL(0, ui)) {
  309. if (hpp)
  310. res = make_small(ui);
  311. }
  312. else {
  313. if (szp)
  314. *szp += BIG_UINT_HEAP_SIZE;
  315. if (hpp) {
  316. res = uint_to_big(ui, *hpp);
  317. *hpp += BIG_UINT_HEAP_SIZE;
  318. }
  319. }
  320. return res;
  321. }
  322. /*
  323. * Erts_bld_uword is more or less similar to erts_bld_uint, but a pointer
  324. * can safely be passed.
  325. */
  326. Eterm
  327. erts_bld_uword(Uint **hpp, Uint *szp, UWord uw)
  328. {
  329. Eterm res = THE_NON_VALUE;
  330. if (IS_USMALL(0, uw)) {
  331. if (hpp)
  332. res = make_small((Uint) uw);
  333. }
  334. else {
  335. if (szp)
  336. *szp += BIG_UWORD_HEAP_SIZE(uw);
  337. if (hpp) {
  338. res = uword_to_big(uw, *hpp);
  339. *hpp += BIG_UWORD_HEAP_SIZE(uw);
  340. }
  341. }
  342. return res;
  343. }
  344. Eterm
  345. erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
  346. {
  347. Eterm res = THE_NON_VALUE;
  348. if (IS_USMALL(0, ui64)) {
  349. if (hpp)
  350. res = make_small((Uint) ui64);
  351. }
  352. else {
  353. if (szp)
  354. *szp += ERTS_UINT64_HEAP_SIZE(ui64);
  355. if (hpp)
  356. res = erts_uint64_to_big(ui64, hpp);
  357. }
  358. return res;
  359. }
  360. Eterm
  361. erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
  362. {
  363. Eterm res = THE_NON_VALUE;
  364. if (IS_SSMALL(si64)) {
  365. if (hpp)
  366. res = make_small((Sint) si64);
  367. }
  368. else {
  369. if (szp)
  370. *szp += ERTS_SINT64_HEAP_SIZE(si64);
  371. if (hpp)
  372. res = erts_sint64_to_big(si64, hpp);
  373. }
  374. return res;
  375. }
  376. Eterm
  377. erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr)
  378. {
  379. Eterm res = THE_NON_VALUE;
  380. if (szp)
  381. *szp += 2;
  382. if (hpp) {
  383. res = CONS(*hpp, car, cdr);
  384. *hpp += 2;
  385. }
  386. return res;
  387. }
/*
 * Build a tuple of 'arity' elements taken from the varargs.  Two-pass
 * protocol: with 'szp' the needed heap size (header + elements) is
 * accumulated; with 'hpp' the tuple is written at *hpp, which is
 * advanced past it.  The varargs are only consumed when hpp != NULL.
 */
Eterm
erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...)
{
    Eterm res = THE_NON_VALUE;

    /* The arity must fit in an arityval header word. */
    ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));

    if (szp)
	*szp += arity + 1;
    if (hpp) {
	res = make_tuple(*hpp);
	*((*hpp)++) = make_arityval(arity);

	if (arity > 0) {
	    Uint i;
	    va_list argp;

	    va_start(argp, arity);
	    for (i = 0; i < arity; i++) {
		*((*hpp)++) = va_arg(argp, Eterm);
	    }
	    va_end(argp);
	}
    }
    return res;
}
/*
 * Build a tuple from the 'arity' elements in terms[].  Same two-pass
 * szp/hpp protocol as erts_bld_tuple().
 */
Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[])
{
    Eterm res = THE_NON_VALUE;
    /*
     * Note callers expect that 'terms' is *not* accessed if hpp == NULL.
     */

    /* The arity must fit in an arityval header word. */
    ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));

    if (szp)
	*szp += arity + 1;
    if (hpp) {
	res = make_tuple(*hpp);
	*((*hpp)++) = make_arityval(arity);

	if (arity > 0) {
	    Uint i;

	    for (i = 0; i < arity; i++)
		*((*hpp)++) = terms[i];
	}
    }
    return res;
}
  430. Eterm
  431. erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len)
  432. {
  433. Eterm res = THE_NON_VALUE;
  434. Sint i = len;
  435. if (szp)
  436. *szp += len*2;
  437. if (hpp) {
  438. res = NIL;
  439. while (--i >= 0) {
  440. res = CONS(*hpp, make_small((byte) str[i]), res);
  441. *hpp += 2;
  442. }
  443. }
  444. return res;
  445. }
  446. Eterm
  447. erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[])
  448. {
  449. Eterm list = THE_NON_VALUE;
  450. if (szp)
  451. *szp += 2*length;
  452. if (hpp) {
  453. Sint i = length;
  454. list = NIL;
  455. while (--i >= 0) {
  456. list = CONS(*hpp, terms[i], list);
  457. *hpp += 2;
  458. }
  459. }
  460. return list;
  461. }
/*
 * Build the list [{terms1[0], terms2[0]}, ...] in index order.  Each
 * element costs 5 heap words: 3 for the 2-tuple and 2 for the cons
 * cell.  NOTE(review): terms2[] values are stored as Eterms without
 * conversion — callers are presumably expected to pass valid terms
 * (cf. erts_bld_atom_uint_2tup_list() which converts).
 */
Eterm
erts_bld_2tup_list(Uint **hpp, Uint *szp,
		   Sint length, Eterm terms1[], Uint terms2[])
{
    Eterm res = THE_NON_VALUE;

    if (szp)
	*szp += 5*length;
    if (hpp) {
	Sint i = length;

	res = NIL;
	/* Build back to front; the tuple occupies *hpp..*hpp+2 and
	 * the cons cell *hpp+3..*hpp+4. */
	while (--i >= 0) {
	    res = CONS(*hpp+3, TUPLE2(*hpp, terms1[i], terms2[i]), res);
	    *hpp += 5;
	}
    }
    return res;
}
/*
 * Build the list [{atoms[0], uints[0]}, ...] in index order.  Each
 * element needs 5 words (3-word tuple + 2-word cons) plus bignum
 * space for any uints[i] too large for a small — hence the sizing
 * pass must inspect every value.
 */
Eterm
erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
			     Sint length, Eterm atoms[], Uint uints[])
{
    Sint i;
    Eterm res = THE_NON_VALUE;

    if (szp) {
	*szp += 5*length;
	i = length;
	while (--i >= 0) {
	    if (!IS_USMALL(0, uints[i]))
		*szp += BIG_UINT_HEAP_SIZE;
	}
    }
    if (hpp) {
	i = length;
	res = NIL;
	while (--i >= 0) {
	    Eterm ui;

	    /* Convert the integer first; a bignum consumes heap. */
	    if (IS_USMALL(0, uints[i]))
		ui = make_small(uints[i]);
	    else {
		ui = uint_to_big(uints[i], *hpp);
		*hpp += BIG_UINT_HEAP_SIZE;
	    }
	    res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res);
	    *hpp += 5;
	}
    }
    return res;
}
/*
 * Build the list [{atoms[0], uints1[0], uints2[0]}, ...] in index
 * order.  Each element needs 6 words (4-word 3-tuple + 2-word cons)
 * plus bignum space for any integer too large for a small.
 */
Eterm
erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
			      Eterm atoms[], Uint uints1[], Uint uints2[])
{
    Sint i;
    Eterm res = THE_NON_VALUE;

    if (szp) {
	*szp += 6*length;
	i = length;
	while (--i >= 0) {
	    if (!IS_USMALL(0, uints1[i]))
		*szp += BIG_UINT_HEAP_SIZE;
	    if (!IS_USMALL(0, uints2[i]))
		*szp += BIG_UINT_HEAP_SIZE;
	}
    }
    if (hpp) {
	i = length;
	res = NIL;
	while (--i >= 0) {
	    Eterm ui1;
	    Eterm ui2;

	    /* Convert both integers first; bignums consume heap. */
	    if (IS_USMALL(0, uints1[i]))
		ui1 = make_small(uints1[i]);
	    else {
		ui1 = uint_to_big(uints1[i], *hpp);
		*hpp += BIG_UINT_HEAP_SIZE;
	    }
	    if (IS_USMALL(0, uints2[i]))
		ui2 = make_small(uints2[i]);
	    else {
		ui2 = uint_to_big(uints2[i], *hpp);
		*hpp += BIG_UINT_HEAP_SIZE;
	    }
	    /* The tuple occupies *hpp..*hpp+3, the cons *hpp+4..*hpp+5. */
	    res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res);
	    *hpp += 6;
	}
    }
    return res;
}
  550. /* *\
  551. * *
  552. \* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
  553. /* make a hash index from an erlang term */
  554. /*
  555. ** There are three hash functions.
  556. ** make_broken_hash: the one used for backward compatibility
  557. ** is called from the bif erlang:hash/2. Should never be used
  558. ** as it a) hashes only a part of binaries, b) hashes bignums really poorly,
  559. ** c) hashes bignums differently on different endian processors and d) hashes
  560. ** small integers with different weights on different bytes.
  561. **
  562. ** make_hash: A hash function that will give the same values for the same
  563. ** terms regardless of the internal representation. Small integers are
  564. ** hashed using the same algorithm as bignums and bignums are hashed
  565. ** independent of the CPU endianess.
  566. ** Make_hash also hashes pids, ports and references like 32 bit numbers
  567. ** (but with different constants).
  568. ** make_hash() is called from the bif erlang:phash/2
  569. **
  570. ** The idea behind the hash algorithm is to produce values suitable for
  571. ** linear dynamic hashing. We cannot choose the range at all while hashing
  572. ** (it's not even supplied to the hashing functions). The good old algorithm
  573. ** [H = H*C+X mod M, where H is the hash value, C is a "random" constant(or M),
  574. ** M is the range, preferably a prime, and X is each byte value] is therefore
  575. ** modified to:
  576. ** H = H*C+X mod 2^32, where C is a large prime. This gives acceptable
  577. ** "spreading" of the hashes, so that later modulo calculations also will give
  578. ** acceptable "spreading" in the range.
  579. ** We really need to hash on bytes, otherwise the
  580. ** upper bytes of a word will be less significant than the lower ones. That's
  581. ** not acceptable at all. For internal use one could maybe optimize by using
  582. ** another hash function, that is less strict but faster. That is, however, not
  583. ** implemented.
  584. **
  585. ** Short semi-formal description of make_hash:
  586. **
  587. ** In make_hash, the number N is treated like this:
  588. ** Abs(N) is hashed bytewise with the least significant byte, B(0), first.
  589. ** The number of bytes (J) to calculate hash on in N is
  590. ** (the number of _32_ bit words needed to store the unsigned
  591. ** value of abs(N)) * 4.
  592. ** X = FUNNY_NUMBER2
  593. ** If N < 0, Y = FUNNY_NUMBER4 else Y = FUNNY_NUMBER3.
  594. ** The hash value is Y*h(J) mod 2^32 where h(J) is calculated like
  595. ** h(0) = <initial hash>
** h(i) = h(i-1)*X + B(i-1)
  597. ** The above should hold regardless of internal representation.
** Pids are hashed like small numbers but with different constants, as are
  599. ** ports.
  600. ** References are hashed like ports but only on the least significant byte.
  601. ** Binaries are hashed on all bytes (not on the 15 first as in
  602. ** make_broken_hash()).
  603. ** Bytes in lists (possibly text strings) use a simpler multiplication inlined
  604. ** in the handling of lists, that is an optimization.
  605. ** Everything else is like in the old hash (make_broken_hash()).
  606. **
  607. ** make_hash2() is faster than make_hash, in particular for bignums
  608. ** and binaries, and produces better hash values.
  609. */
  610. /* some prime numbers just above 2 ^ 28 */
  611. #define FUNNY_NUMBER1 268440163
  612. #define FUNNY_NUMBER2 268439161
  613. #define FUNNY_NUMBER3 268435459
  614. #define FUNNY_NUMBER4 268436141
  615. #define FUNNY_NUMBER5 268438633
  616. #define FUNNY_NUMBER6 268437017
  617. #define FUNNY_NUMBER7 268438039
  618. #define FUNNY_NUMBER8 268437511
  619. #define FUNNY_NUMBER9 268439627
  620. #define FUNNY_NUMBER10 268440479
  621. #define FUNNY_NUMBER11 268440577
  622. #define FUNNY_NUMBER12 268440581
/*
 * Fold the bytes of binary 'bin' (byte size 'sz') into 'hash' using
 * the FUNNY_NUMBER1 multiplicative scheme.  Handles byte-unaligned
 * binaries (bitoffs != 0) and a trailing partial byte (bitsize > 0),
 * which is mixed in together with its bit count.
 */
static Uint32
hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
{
    byte* ptr;
    Uint bitoffs;
    Uint bitsize;

    ERTS_GET_BINARY_BYTES(bin, ptr, bitoffs, bitsize);
    if (bitoffs == 0) {
	/* Byte-aligned data: hash each byte directly. */
	while (sz--) {
	    hash = hash*FUNNY_NUMBER1 + *ptr++;
	}
	if (bitsize > 0) {
	    /* Trailing bits: right-align and mix with the bit count. */
	    byte b = *ptr;
	    b >>= 8 - bitsize;
	    hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
	}
    } else {
	/* Unaligned data: rebuild each logical byte from two
	 * consecutive storage bytes before hashing. */
	Uint previous = *ptr++;
	Uint b;
	Uint lshift = bitoffs;
	Uint rshift = 8 - lshift;

	while (sz--) {
	    b = (previous << lshift) & 0xFF;
	    previous = *ptr++;
	    b |= previous >> rshift;
	    hash = hash*FUNNY_NUMBER1 + b;
	}
	if (bitsize > 0) {
	    b = (previous << lshift) & 0xFF;
	    previous = *ptr++;
	    b |= previous >> rshift;
	    b >>= 8 - bitsize;
	    hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
	}
    }
    return hash;
}
/*
 * Portable hash of a term (used by erlang:phash/2; see the block
 * comment above).  Iterative rather than recursive: sub-terms still
 * to be hashed, together with MAKE_HASH_*_OP markers describing how
 * to resume, are kept on an explicit WSTACK.
 */
Uint32 make_hash(Eterm term_arg)
{
    DECLARE_WSTACK(stack);
    Eterm term = term_arg;
    Eterm hash = 0;
    unsigned op;
    /* Must not collide with the real tag_val_def's: */
#define MAKE_HASH_TUPLE_OP 0x10
#define MAKE_HASH_FUN_OP 0x11
#define MAKE_HASH_CDR_PRE_OP 0x12
#define MAKE_HASH_CDR_POST_OP 0x13

    /*
    ** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
    ** integer.
    ** If the endianess is known, we could be smarter here,
    ** but that gives no significant speedup (on a sparc at least)
    */
#define UINT32_HASH_STEP(Expr, Prime1) \
	do { \
	    Uint32 x = (Uint32) (Expr); \
	    hash = \
		(((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \
		((x >> 8) & 0xFF)) * (Prime1) + \
		((x >> 16) & 0xFF)) * (Prime1) + \
		(x >> 24)); \
	} while(0)

#define UINT32_HASH_RET(Expr, Prime1, Prime2) \
	UINT32_HASH_STEP(Expr, Prime1); \
	hash = hash * (Prime2); \
	break

    /*
     * Significant additions needed for real 64 bit port with larger fixnums.
     */
    /*
     * Note, for the simple 64bit port, not utilizing the
     * larger word size this function will work without modification.
     */
tail_recur:
    op = tag_val_def(term);

    for (;;) {
	switch (op) {
	case NIL_DEF:
	    hash = hash*FUNNY_NUMBER3 + 1;
	    break;
	case ATOM_DEF:
	    /* Atoms hash on the precomputed bucket hash value. */
	    hash = hash*FUNNY_NUMBER1 +
		(atom_tab(atom_val(term))->slot.bucket.hvalue);
	    break;
	case SMALL_DEF:
	    {
		/* Hash the absolute value bytewise; the sign selects
		 * the final multiplier (see block comment above). */
		Sint y1 = signed_val(term);
		Uint y2 = y1 < 0 ? -(Uint)y1 : y1;

		UINT32_HASH_STEP(y2, FUNNY_NUMBER2);
#if defined(ARCH_64) && !HALFWORD_HEAP
		if (y2 >> 32)
		    UINT32_HASH_STEP(y2 >> 32, FUNNY_NUMBER2);
#endif
		hash *= (y1 < 0 ? FUNNY_NUMBER4 : FUNNY_NUMBER3);
		break;
	    }
	case BINARY_DEF:
	    {
		/* All bytes are hashed, then the byte size is mixed in. */
		Uint sz = binary_size(term);

		hash = hash_binary_bytes(term, sz, hash);
		hash = hash*FUNNY_NUMBER4 + sz;
		break;
	    }
	case EXPORT_DEF:
	    {
		/* Hash arity (code[2]) and the module/function atoms. */
		Export* ep = *((Export **) (export_val(term) + 1));

		hash = hash * FUNNY_NUMBER11 + ep->code[2];
		hash = hash*FUNNY_NUMBER1 +
		    (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
		hash = hash*FUNNY_NUMBER1 +
		    (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
		break;
	    }
	case FUN_DEF:
	    {
		/* Hash the identifying fields, then the free variables:
		 * the first via tail_recur, the rest via the stack. */
		ErlFunThing* funp = (ErlFunThing *) fun_val(term);
		Uint num_free = funp->num_free;

		hash = hash * FUNNY_NUMBER10 + num_free;
		hash = hash*FUNNY_NUMBER1 +
		    (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
		hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
		hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
		if (num_free > 0) {
		    if (num_free > 1) {
			WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
		    }
		    term = funp->env[0];
		    goto tail_recur;
		}
		break;
	    }
	case PID_DEF:
	    UINT32_HASH_RET(internal_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
	case EXTERNAL_PID_DEF:
	    UINT32_HASH_RET(external_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
	case PORT_DEF:
	    UINT32_HASH_RET(internal_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
	case EXTERNAL_PORT_DEF:
	    UINT32_HASH_RET(external_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
	case REF_DEF:
	    /* Only the first ref number is hashed (see block comment). */
	    UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
	case EXTERNAL_REF_DEF:
	    UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
	case FLOAT_DEF:
	    {
		FloatDef ff;

		GET_DOUBLE(term, ff);
		hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
		break;
	    }
	case MAKE_HASH_CDR_PRE_OP:
	    /* Resume with a stored cdr.  A non-list cdr (improper
	     * list) is hashed as a term followed by CDR_POST_OP. */
	    term = (Eterm) WSTACK_POP(stack);
	    if (is_not_list(term)) {
		WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
		goto tail_recur;
	    }
	    /* fall through */
	case LIST_DEF:
	    {
		Eterm* list = list_val(term);

		while(is_byte(*list)) {
		    /* Optimization for strings.
		    ** Note that this hash is different from a 'small' hash,
		    ** as multiplications on a Sparc is so slow.
		    */
		    hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);
		    if (is_not_list(CDR(list))) {
			WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
			term = CDR(list);
			goto tail_recur;
		    }
		    list = list_val(CDR(list));
		}
		/* Non-byte head: hash CAR now, CDR later via the stack. */
		WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
		term = CAR(list);
		goto tail_recur;
	    }
	case MAKE_HASH_CDR_POST_OP:
	    hash *= FUNNY_NUMBER8;
	    break;
	case BIG_DEF:
	    /* Note that this is the exact same thing as the hashing of smalls.*/
	    {
		Eterm* ptr = big_val(term);
		Uint n = BIG_SIZE(ptr);
		Uint k = n-1;
		ErtsDigit d;
		int is_neg = BIG_SIGN(ptr);
		Uint i;
		int j;

		/* All digits but the most significant one are hashed
		 * byte by byte, full digit width. */
		for (i = 0; i < k; i++) {
		    d = BIG_DIGIT(ptr, i);
		    for(j = 0; j < sizeof(ErtsDigit); ++j) {
			hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
			d >>= 8;
		    }
		}
		/* The most significant digit uses only 4 bytes on a
		 * 64-bit build when its upper half is zero, keeping
		 * the result representation independent. */
		d = BIG_DIGIT(ptr, k);
		k = sizeof(ErtsDigit);
#if defined(ARCH_64) && !HALFWORD_HEAP
		if (!(d >> 32))
		    k /= 2;
#endif
		for(j = 0; j < (int)k; ++j) {
		    hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
		    d >>= 8;
		}
		hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
		break;
	    }
	case TUPLE_DEF:
	    {
		/* Push: arity (for the final mix-in), the element
		 * pointer, and the remaining-element count. */
		Eterm* ptr = tuple_val(term);
		Uint arity = arityval(*ptr);

		WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity);
		op = MAKE_HASH_TUPLE_OP;
	    }/*fall through*/
	case MAKE_HASH_TUPLE_OP:
	case MAKE_HASH_FUN_OP:
	    {
		/* Hash the next of 'i' remaining elements at 'ptr';
		 * when done, a tuple also mixes in its arity. */
		Uint i = (Uint) WSTACK_POP(stack);
		Eterm* ptr = (Eterm*) WSTACK_POP(stack);

		if (i != 0) {
		    term = *ptr;
		    WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
		    goto tail_recur;
		}
		if (op == MAKE_HASH_TUPLE_OP) {
		    Uint32 arity = (Uint32) WSTACK_POP(stack);
		    hash = hash*FUNNY_NUMBER9 + arity;
		}
		break;
	    }
	default:
	    erl_exit(1, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
	    return 0;
	}
	if (WSTACK_ISEMPTY(stack)) break;
	op = WSTACK_POP(stack);
    }
    DESTROY_WSTACK(stack);
    return hash;

#undef UINT32_HASH_STEP
#undef UINT32_HASH_RET
}
/* Hash function suggested by Bob Jenkins. */

/* Jenkins' reversible mix of three 32-bit values. */
#define MIX(a,b,c) \
do { \
    a -= b; a -= c; a ^= (c>>13); \
    b -= c; b -= a; b ^= (a<<8); \
    c -= a; c -= b; c ^= (b>>13); \
    a -= b; a -= c; a ^= (c>>12); \
    b -= c; b -= a; b ^= (a<<16); \
    c -= a; c -= b; c ^= (b>>5); \
    a -= b; a -= c; a ^= (c>>3); \
    b -= c; b -= a; b ^= (a<<10); \
    c -= a; c -= b; c ^= (b>>15); \
} while(0)

#define HCONST 0x9e3779b9UL /* the golden ratio; an arbitrary value */

/*
 * Jenkins' lookup hash over 'length' bytes at 'k', seeded with
 * 'initval'.  Bytes are consumed 12 at a time as three little-endian
 * 32-bit words; the tail and the total length are folded into the
 * final mix.
 */
Uint32
block_hash(byte *k, unsigned length, Uint32 initval)
{
    Uint32 a,b,c;
    unsigned len;

    /* Set up the internal state */
    len = length;
    a = b = HCONST;
    c = initval;		/* the previous hash value */

    while (len >= 12)
    {
	a += (k[0] +((Uint32)k[1]<<8) +((Uint32)k[2]<<16) +((Uint32)k[3]<<24));
	b += (k[4] +((Uint32)k[5]<<8) +((Uint32)k[6]<<16) +((Uint32)k[7]<<24));
	c += (k[8] +((Uint32)k[9]<<8) +((Uint32)k[10]<<16)+((Uint32)k[11]<<24));
	MIX(a,b,c);
	k += 12; len -= 12;
    }

    c += length;
    switch(len)		/* all the case statements fall through */
    {
    case 11: c+=((Uint32)k[10]<<24);
    case 10: c+=((Uint32)k[9]<<16);
    case 9 : c+=((Uint32)k[8]<<8);
    /* the first byte of c is reserved for the length */
    case 8 : b+=((Uint32)k[7]<<24);
    case 7 : b+=((Uint32)k[6]<<16);
    case 6 : b+=((Uint32)k[5]<<8);
    case 5 : b+=k[4];
    case 4 : a+=((Uint32)k[3]<<24);
    case 3 : a+=((Uint32)k[2]<<16);
    case 2 : a+=((Uint32)k[1]<<8);
    case 1 : a+=k[0];
    /* case 0: nothing left to add */
    }
    MIX(a,b,c);
    return c;
}
  920. Uint32
  921. make_hash2(Eterm term)
  922. {
  923. Uint32 hash;
  924. DeclareTmpHeapNoproc(tmp_big,2);
  925. /* (HCONST * {2, ..., 14}) mod 2^32 */
  926. #define HCONST_2 0x3c6ef372UL
  927. #define HCONST_3 0xdaa66d2bUL
  928. #define HCONST_4 0x78dde6e4UL
  929. #define HCONST_5 0x1715609dUL
  930. #define HCONST_6 0xb54cda56UL
  931. #define HCONST_7 0x5384540fUL
  932. #define HCONST_8 0xf1bbcdc8UL
  933. #define HCONST_9 0x8ff34781UL
  934. #define HCONST_10 0x2e2ac13aUL
  935. #define HCONST_11 0xcc623af3UL
  936. #define HCONST_12 0x6a99b4acUL
  937. #define HCONST_13 0x08d12e65UL
  938. #define HCONST_14 0xa708a81eUL
  939. #define HCONST_15 0x454021d7UL
  940. #define UINT32_HASH_2(Expr1, Expr2, AConst) \
  941. do { \
  942. Uint32 a,b; \
  943. a = AConst + (Uint32) (Expr1); \
  944. b = AConst + (Uint32) (Expr2); \
  945. MIX(a,b,hash); \
  946. } while(0)
  947. #define UINT32_HASH(Expr, AConst) UINT32_HASH_2(Expr, 0, AConst)
  948. #define SINT32_HASH(Expr, AConst) \
  949. do { \
  950. Sint32 y = (Sint32) (Expr); \
  951. if (y < 0) { \
  952. UINT32_HASH(-y, AConst); \
  953. /* Negative numbers are unnecessarily mixed twice. */ \
  954. } \
  955. UINT32_HASH(y, AConst); \
  956. } while(0)
  957. #define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
  958. /* Optimization. Simple cases before declaration of estack. */
  959. if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
  960. switch (term & _TAG_IMMED1_MASK) {
  961. case _TAG_IMMED1_IMMED2:
  962. switch (term & _TAG_IMMED2_MASK) {
  963. case _TAG_IMMED2_ATOM:
  964. /* Fast, but the poor hash value should be mixed. */
  965. return atom_tab(atom_val(term))->slot.bucket.hvalue;
  966. }
  967. break;
  968. case _TAG_IMMED1_SMALL:
  969. {
  970. Sint x = signed_val(term);
  971. if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
  972. term = small_to_big(x, tmp_big);
  973. break;
  974. }
  975. hash = 0;
  976. SINT32_HASH(x, HCONST);
  977. return hash;
  978. }
  979. }
  980. };
  981. {
  982. Eterm tmp;
  983. DECLARE_ESTACK(s);
  984. UseTmpHeapNoproc(2);
  985. hash = 0;
  986. for (;;) {
  987. switch (primary_tag(term)) {
  988. case TAG_PRIMARY_LIST:
  989. {
  990. int c = 0;
  991. Uint32 sh = 0;
  992. Eterm* ptr = list_val(term);
  993. while (is_byte(*ptr)) {
  994. /* Optimization for strings. */
  995. sh = (sh << 8) + unsigned_val(*ptr);
  996. if (c == 3) {
  997. UINT32_HASH(sh, HCONST_4);
  998. c = sh = 0;
  999. } else {
  1000. c++;
  1001. }
  1002. term = CDR(ptr);
  1003. if (is_not_list(term))
  1004. break;
  1005. ptr = list_val(term);
  1006. }
  1007. if (c > 0)
  1008. UINT32_HASH(sh, HCONST_4);
  1009. if (is_list(term)) {
  1010. term = *ptr;
  1011. tmp = *++ptr;
  1012. ESTACK_PUSH(s, tmp);
  1013. }
  1014. }
  1015. break;
  1016. case TAG_PRIMARY_BOXED:
  1017. {
  1018. Eterm hdr = *boxed_val(term);
  1019. ASSERT(is_header(hdr));
  1020. switch (hdr & _TAG_HEADER_MASK) {
  1021. case ARITYVAL_SUBTAG:
  1022. {
  1023. int i;
  1024. int arity = header_arity(hdr);
  1025. Eterm* elem = tuple_val(term);
  1026. UINT32_HASH(arity, HCONST_9);
  1027. if (arity == 0) /* Empty tuple */
  1028. goto hash2_common;
  1029. for (i = arity; i >= 2; i--) {
  1030. tmp = elem[i];
  1031. ESTACK_PUSH(s, tmp);
  1032. }
  1033. term = elem[1];
  1034. }
  1035. break;
  1036. case EXPORT_SUBTAG:
  1037. {
  1038. Export* ep = *((Export **) (export_val(term) + 1));
  1039. UINT32_HASH_2
  1040. (ep->code[2],
  1041. atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
  1042. HCONST);
  1043. UINT32_HASH
  1044. (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue,
  1045. HCONST_14);
  1046. goto hash2_common;
  1047. }
  1048. case FUN_SUBTAG:
  1049. {
  1050. ErlFunThing* funp = (ErlFunThing *) fun_val(term);
  1051. Uint num_free = funp->num_free;
  1052. UINT32_HASH_2
  1053. (num_free,
  1054. atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
  1055. HCONST);
  1056. UINT32_HASH_2
  1057. (funp->fe->old_index, funp->fe->old_uniq, HCONST);
  1058. if (num_free == 0) {
  1059. goto hash2_common;
  1060. } else {
  1061. Eterm* bptr = funp->env + num_free - 1;
  1062. while (num_free-- > 1) {
  1063. term = *bptr--;
  1064. ESTACK_PUSH(s, term);
  1065. }
  1066. term = *bptr;
  1067. }
  1068. }
  1069. break;
  1070. case REFC_BINARY_SUBTAG:
  1071. case HEAP_BINARY_SUBTAG:
  1072. case SUB_BINARY_SUBTAG:
  1073. {
  1074. byte* bptr;
  1075. unsigned sz = binary_size(term);
  1076. Uint32 con = HCONST_13 + hash;
  1077. Uint bitoffs;
  1078. Uint bitsize;
  1079. ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
  1080. if (sz == 0 && bitsize == 0) {
  1081. hash = con;
  1082. } else {
  1083. if (bitoffs == 0) {
  1084. hash = block_hash(bptr, sz, con);
  1085. if (bitsize > 0) {
  1086. UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
  1087. HCONST_15);
  1088. }
  1089. } else {
  1090. byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
  1091. sz + (bitsize != 0));
  1092. erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
  1093. hash = block_hash(buf, sz, con);
  1094. if (bitsize > 0) {
  1095. UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
  1096. HCONST_15);
  1097. }
  1098. erts_free(ERTS_ALC_T_TMP, (void *) buf);
  1099. }
  1100. }
  1101. goto hash2_common;
  1102. }
  1103. break;
  1104. case POS_BIG_SUBTAG:
  1105. case NEG_BIG_SUBTAG:
  1106. {
  1107. Eterm* ptr = big_val(term);
  1108. Uint i = 0;
  1109. Uint n = BIG_SIZE(ptr);
  1110. Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
  1111. #if D_EXP == 16
  1112. do {
  1113. Uint32 x, y;
  1114. x = i < n ? BIG_DIGIT(ptr, i++) : 0;
  1115. x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
  1116. y = i < n ? BIG_DIGIT(ptr, i++) : 0;
  1117. y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
  1118. UINT32_HASH_2(x, y, con);
  1119. } while (i < n);
  1120. #elif D_EXP == 32
  1121. do {
  1122. Uint32 x, y;
  1123. x = i < n ? BIG_DIGIT(ptr, i++) : 0;
  1124. y = i < n ? BIG_DIGIT(ptr, i++) : 0;
  1125. UINT32_HASH_2(x, y, con);
  1126. } while (i < n);
  1127. #elif D_EXP == 64
  1128. do {
  1129. Uint t;
  1130. Uint32 x, y;
  1131. t = i < n ? BIG_DIGIT(ptr, i++) : 0;
  1132. x = t & 0xffffffff;
  1133. y = t >> 32;
  1134. UINT32_HASH_2(x, y, con);
  1135. } while (i < n);
  1136. #else
  1137. #error "unsupported D_EXP size"
  1138. #endif
  1139. goto hash2_common;
  1140. }
  1141. break;
  1142. case REF_SUBTAG:
  1143. /* All parts of the ref should be hashed. */
  1144. UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
  1145. goto hash2_common;
  1146. break;
  1147. case EXTERNAL_REF_SUBTAG:
  1148. /* All parts of the ref should be hashed. */
  1149. UINT32_HASH(external_ref_numbers(term)[0], HCONST_7);
  1150. goto hash2_common;
  1151. break;
  1152. case EXTERNAL_PID_SUBTAG:
  1153. /* Only 15 bits are hashed. */
  1154. UINT32_HASH(external_pid_number(term), HCONST_5);
  1155. goto hash2_common;
  1156. case EXTERNAL_PORT_SUBTAG:
  1157. /* Only 15 bits are hashed. */
  1158. UINT32_HASH(external_port_number(term), HCONST_6);
  1159. goto hash2_common;
  1160. case FLOAT_SUBTAG:
  1161. {
  1162. FloatDef ff;
  1163. GET_DOUBLE(term, ff);
  1164. #if defined(WORDS_BIGENDIAN)
  1165. UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
  1166. #else
  1167. UINT32_HASH_2(ff.fw[1], ff.fw[0], HCONST_12);
  1168. #endif
  1169. goto hash2_common;
  1170. }
  1171. break;
  1172. default:
  1173. erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
  1174. }
  1175. }
  1176. break;
  1177. case TAG_PRIMARY_IMMED1:
  1178. switch (term & _TAG_IMMED1_MASK) {
  1179. case _TAG_IMMED1_PID:
  1180. /* Only 15 bits are hashed. */
  1181. UINT32_HASH(internal_pid_number(term), HCONST_5);
  1182. goto hash2_common;
  1183. case _TAG_IMMED1_PORT:
  1184. /* Only 15 bits are hashed. */
  1185. UINT32_HASH(internal_port_number(term), HCONST_6);
  1186. goto hash2_common;
  1187. case _TAG_IMMED1_IMMED2:
  1188. switch (term & _TAG_IMMED2_MASK) {
  1189. case _TAG_IMMED2_ATOM:
  1190. if (hash == 0)
  1191. /* Fast, but the poor hash value should be mixed. */
  1192. hash = atom_tab(atom_val(term))->slot.bucket.hvalue;
  1193. else
  1194. UINT32_HASH(atom_tab(atom_val(term))->slot.bucket.hvalue,
  1195. HCONST_3);
  1196. goto hash2_common;
  1197. case _TAG_IMMED2_NIL:
  1198. if (hash == 0)
  1199. hash = 3468870702UL;
  1200. else
  1201. UINT32_HASH(NIL_DEF, HCONST_2);
  1202. goto hash2_common;
  1203. default:
  1204. erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
  1205. }
  1206. case _TAG_IMMED1_SMALL:
  1207. {
  1208. Sint x = signed_val(term);
  1209. if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
  1210. term = small_to_big(x, tmp_big);
  1211. break;
  1212. }
  1213. SINT32_HASH(x, HCONST);
  1214. goto hash2_common;
  1215. }
  1216. }
  1217. break;
  1218. default:
  1219. erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
  1220. hash2_common:
  1221. if (ESTACK_ISEMPTY(s)) {
  1222. DESTROY_ESTACK(s);
  1223. UnUseTmpHeapNoproc(2);
  1224. return hash;
  1225. }
  1226. term = ESTACK_POP(s);
  1227. }
  1228. }
  1229. }
  1230. #undef UINT32_HASH_2
  1231. #undef UINT32_HASH
  1232. #undef SINT32_HASH
  1233. }
  1234. #undef HCONST
  1235. #undef MIX
/*
 * make_broken_hash() -- the old term hash, kept only for backwards
 * compatibility (it has known distribution flaws; see the newer hash
 * routines above for current use).  Traverses the term iteratively
 * with an explicit WSTACK instead of recursion: composite terms push
 * MAKE_HASH_*_OP continuation opcodes plus cursor state, and the main
 * loop pops the next opcode when a leaf has been folded into 'hash'.
 *
 * Returns the 32-bit hash of 'term'.
 */
Uint32 make_broken_hash(Eterm term)
{
    Uint32 hash = 0;
    DECLARE_WSTACK(stack);
    unsigned op;  /* either a tag_val_def() tag or a MAKE_HASH_*_OP opcode */
tail_recur:
    op = tag_val_def(term);
    for (;;) {
    switch (op) {
    case NIL_DEF:
	hash = hash*FUNNY_NUMBER3 + 1;
	break;
    case ATOM_DEF:
	hash = hash*FUNNY_NUMBER1 +
	    (atom_tab(atom_val(term))->slot.bucket.hvalue);
	break;
    case SMALL_DEF:
#if defined(ARCH_64) && !HALFWORD_HEAP
	{
	    /* On 64-bit, smalls wider than 28 bits must hash exactly like
	       the bignum they would be on a 32-bit emulator, so that the
	       hash is the same across word sizes. */
	    Sint y1 = signed_val(term);
	    Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
	    Uint32 y3 = (Uint32) (y2 >> 32);
	    int arity = 1;
#if defined(WORDS_BIGENDIAN)
	    if (!IS_SSMALL28(y1))
	    {	/* like a bignum */
		Uint32 y4 = (Uint32) y2;
		hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16));
		if (y3)
		{
		    hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16));
		    arity++;
		}
		hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
	    } else {
		hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
	    }
#else
	    if (!IS_SSMALL28(y1))
	    {	/* like a bignum */
		hash = hash*FUNNY_NUMBER2 + ((Uint32) y2);
		if (y3)
		{
		    hash = hash*FUNNY_NUMBER2 + y3;
		    arity++;
		}
		hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
	    } else {
		hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
	    }
#endif
	}
#else
	hash = hash*FUNNY_NUMBER2 + unsigned_val(term);
#endif
	break;
    case BINARY_DEF:
	{
	    /* Only the first (at most) 15 bytes plus the total size are
	       hashed -- part of why this hash is "broken". */
	    size_t sz = binary_size(term);
	    size_t i = (sz < 15) ? sz : 15;
	    hash = hash_binary_bytes(term, i, hash);
	    hash = hash*FUNNY_NUMBER4 + sz;
	    break;
	}
    case EXPORT_DEF:
	{
	    /* code[0]=module, code[1]=function, code[2]=arity. */
	    Export* ep = *((Export **) (export_val(term) + 1));
	    hash = hash * FUNNY_NUMBER11 + ep->code[2];
	    hash = hash*FUNNY_NUMBER1 +
		(atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
	    hash = hash*FUNNY_NUMBER1 +
		(atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
	    break;
	}
    case FUN_DEF:
	{
	    ErlFunThing* funp = (ErlFunThing *) fun_val(term);
	    Uint num_free = funp->num_free;
	    hash = hash * FUNNY_NUMBER10 + num_free;
	    hash = hash*FUNNY_NUMBER1 +
		(atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
	    hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
	    hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
	    if (num_free > 0) {
		/* Push the remaining env slots as a FUN_OP continuation,
		   then tail-recurse into the first free variable. */
		if (num_free > 1) {
		    WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
		}
		term = funp->env[0];
		goto tail_recur;
	    }
	    break;
	}
    case PID_DEF:
	hash = hash*FUNNY_NUMBER5 + internal_pid_number(term);
	break;
    case EXTERNAL_PID_DEF:
	hash = hash*FUNNY_NUMBER5 + external_pid_number(term);
	break;
    case PORT_DEF:
	hash = hash*FUNNY_NUMBER9 + internal_port_number(term);
	break;
    case EXTERNAL_PORT_DEF:
	hash = hash*FUNNY_NUMBER9 + external_port_number(term);
	break;
    case REF_DEF:
	/* Only the first ref number is hashed. */
	hash = hash*FUNNY_NUMBER9 + internal_ref_numbers(term)[0];
	break;
    case EXTERNAL_REF_DEF:
	hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0];
	break;
    case FLOAT_DEF:
	{
	    FloatDef ff;
	    GET_DOUBLE(term, ff);
	    hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
	}
	break;
    case MAKE_HASH_CDR_PRE_OP:
	/* Continuation: about to hash the cdr of a cons cell. */
	term = (Eterm) WSTACK_POP(stack);
	if (is_not_list(term)) {
	    /* Improper tail: hash it, then apply the CDR_POST marker. */
	    WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
	    goto tail_recur;
	}
	/*fall through*/
    case LIST_DEF:
	{
	    Eterm* list = list_val(term);
	    WSTACK_PUSH2(stack, (UWord) CDR(list),
			 (UWord) MAKE_HASH_CDR_PRE_OP);
	    term = CAR(list);
	    goto tail_recur;
	}
    case MAKE_HASH_CDR_POST_OP:
	/* Marks that an improper list tail has been consumed. */
	hash *= FUNNY_NUMBER8;
	break;
    case BIG_DEF:
	{
	    Eterm* ptr = big_val(term);
	    int is_neg = BIG_SIGN(ptr);
	    Uint arity = BIG_ARITY(ptr);
	    Uint i = arity;
	    ptr++;
#if D_EXP == 16
	    /* hash over 32 bit LE */
	    while(i--) {
		hash = hash*FUNNY_NUMBER2 + *ptr++;
	    }
#elif D_EXP == 32
#if defined(WORDS_BIGENDIAN)
	    while(i--) {
		Uint d = *ptr++;
		hash = hash*FUNNY_NUMBER2 + ((d << 16) | (d >> 16));
	    }
#else
	    while(i--) {
		hash = hash*FUNNY_NUMBER2 + *ptr++;
	    }
#endif
#elif D_EXP == 64
	    {
		/* Split each 64-bit digit into two 32-bit halves so the
		   result matches the 32-bit-digit emulator. */
		Uint32 h = 0, l;
#if defined(WORDS_BIGENDIAN)
		while(i--) {
		    Uint d = *ptr++;
		    l = d & 0xffffffff;
		    h = d >> 32;
		    hash = hash*FUNNY_NUMBER2 + ((l << 16) | (l >> 16));
		    if (h || i)
			hash = hash*FUNNY_NUMBER2 + ((h << 16) | (h >> 16));
		}
#else
		while(i--) {
		    Uint d = *ptr++;
		    l = d & 0xffffffff;
		    h = d >> 32;
		    hash = hash*FUNNY_NUMBER2 + l;
		    if (h || i)
			hash = hash*FUNNY_NUMBER2 + h;
		}
#endif
		/* adjust arity to match 32 bit mode */
		arity = (arity << 1) - (h == 0);
	    }
#else
#error "unsupported D_EXP size"
#endif
	    hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
	}
	break;
    case TUPLE_DEF:
	{
	    Eterm* ptr = tuple_val(term);
	    Uint arity = arityval(*ptr);
	    /* arity is pushed twice: once as the element counter and once
	       (deepest) so TUPLE_OP can fold it in when done. */
	    WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity);
	    op = MAKE_HASH_TUPLE_OP;
	}/*fall through*/
    case MAKE_HASH_TUPLE_OP:
    case MAKE_HASH_FUN_OP:
	{
	    /* Continuation: hash the next of 'i' remaining elements at 'ptr'. */
	    Uint i = (Uint) WSTACK_POP(stack);
	    Eterm* ptr = (Eterm*) WSTACK_POP(stack);
	    if (i != 0) {
		term = *ptr;
		WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
		goto tail_recur;
	    }
	    if (op == MAKE_HASH_TUPLE_OP) {
		Uint32 arity = (UWord) WSTACK_POP(stack);
		hash = hash*FUNNY_NUMBER9 + arity;
	    }
	    break;
	}
    default:
	erl_exit(1, "Invalid tag in make_broken_hash\n");
	return 0;
    }
    if (WSTACK_ISEMPTY(stack)) break;
    op = (Uint) WSTACK_POP(stack);
    }
    DESTROY_WSTACK(stack);
    return hash;

#undef MAKE_HASH_TUPLE_OP
#undef MAKE_HASH_FUN_OP
#undef MAKE_HASH_CDR_PRE_OP
#undef MAKE_HASH_CDR_POST_OP
}
/*
 * Build and deliver a {notify, {Tag, GL, {emulator, "~s~n", [Msg]}}}
 * message to the error_logger process.
 *
 * tag     - am_info_msg | am_warning_msg | am_error (must be an atom)
 * gleader - group leader to report, or NIL (mapped to am_noproc below)
 * buf/len - the message text; len must be > 0 (returns -1 otherwise)
 *
 * Returns 0 on success (including the no-logger fallback to stderr in
 * non-SMP builds), -1 on empty input.
 */
static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
{
    /* error_logger !
       {notify,{info_msg,gleader,{emulator,"~s~n",[<message as list>]}}} |
       {notify,{error,gleader,{emulator,"~s~n",[<message as list>]}}} |
       {notify,{warning_msg,gleader,{emulator,"~s~n",[<message as list>}]}} */
    Eterm* hp;
    Uint sz;
    Uint gl_sz;
    Eterm gl;
    Eterm list,plist,format,tuple1,tuple2,tuple3;
    ErlOffHeap *ohp;
    ErlHeapFragment *bp = NULL;
#if !defined(ERTS_SMP)
    Process *p;
#endif

    ASSERT(is_atom(tag));

    if (len <= 0) {
	return -1;
    }

#ifndef ERTS_SMP
    /* Non-SMP: if there is no usable error_logger process (or we are not
       on a scheduler thread, or the logger itself is currently running),
       fall back to printing on stderr. */
    if (
#ifdef USE_THREADS
	!erts_get_scheduler_data() || /* Must be scheduler thread */
#endif
	(p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0)) == NULL
	|| p->status == P_RUNNING) {
	/* buf *always* points to a null terminated string */
	erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
		     tag, buf);
	return 0;
    }
    /* So we have an error logger, lets build the message */
#endif
    gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
    /* Heap words needed for everything built below; must stay in sync
       with the term construction that follows. */
    sz = len * 2 /* message list */+ 2 /* cons surrounding message list */
	+ gl_sz +
	3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */ +
	8 /* "~s~n" */;
#ifndef ERTS_SMP
    /* Build directly on the logger's heap if it fits, otherwise (and
       always in SMP builds) in a fresh heap fragment. */
    if (sz <= HeapWordsLeft(p)) {
	ohp = &MSO(p);
	hp = HEAP_TOP(p);
	HEAP_TOP(p) += sz;
    } else {
#endif
	bp = new_message_buffer(sz);
	ohp = &bp->off_heap;
	hp = bp->mem;
#ifndef ERTS_SMP
    }
#endif
    gl = (is_nil(gleader)
	  ? am_noproc
	  : (IS_CONST(gleader)
	     ? gleader
	     : copy_struct(gleader,gl_sz,&hp,ohp)));
    list = buf_to_intlist(&hp, buf, len, NIL);
    plist = CONS(hp,list,NIL);
    hp += 2;
    format = buf_to_intlist(&hp, "~s~n", 4, NIL);
    tuple1 = TUPLE3(hp, am_emulator, format, plist);
    hp += 4;
    tuple2 = TUPLE3(hp, tag, gl, tuple1);
    hp += 4;
    tuple3 = TUPLE2(hp, am_notify, tuple2);
#ifdef HARDDEBUG
    erts_fprintf(stderr, "%T\n", tuple3);
#endif
#ifdef ERTS_SMP
    {
	Eterm from = erts_get_current_pid();
	if (is_not_internal_pid(from))
	    from = NIL;
	erts_queue_error_logger_message(from, tuple3, bp);
    }
#else
    erts_queue_message(p, NULL /* only used for smp build */, bp, tuple3, NIL);
#endif
    return 0;
}
  1543. static ERTS_INLINE int
  1544. send_info_to_logger(Eterm gleader, char *buf, int len)
  1545. {
  1546. return do_send_to_logger(am_info_msg, gleader, buf, len);
  1547. }
  1548. static ERTS_INLINE int
  1549. send_warning_to_logger(Eterm gleader, char *buf, int len)
  1550. {
  1551. Eterm tag;
  1552. switch (erts_error_logger_warnings) {
  1553. case am_info: tag = am_info_msg; break;
  1554. case am_warning: tag = am_warning_msg; break;
  1555. default: tag = am_error; break;
  1556. }
  1557. return do_send_to_logger(tag, gleader, buf, len);
  1558. }
  1559. static ERTS_INLINE int
  1560. send_error_to_logger(Eterm gleader, char *buf, int len)
  1561. {
  1562. return do_send_to_logger(am_error, gleader, buf, len);
  1563. }
  1564. #define LOGGER_DSBUF_INC_SZ 256
  1565. static erts_dsprintf_buf_t *
  1566. grow_logger_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1567. {
  1568. size_t size;
  1569. size_t free_size = dsbufp->size - dsbufp->str_len;
  1570. ASSERT(dsbufp && dsbufp->str);
  1571. if (need <= free_size)
  1572. return dsbufp;
  1573. size = need - free_size + LOGGER_DSBUF_INC_SZ;
  1574. size = (((size + LOGGER_DSBUF_INC_SZ - 1) / LOGGER_DSBUF_INC_SZ)
  1575. * LOGGER_DSBUF_INC_SZ);
  1576. size += dsbufp->size;
  1577. ASSERT(dsbufp->str_len + need <= size);
  1578. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_LOGGER_DSBUF,
  1579. (void *) dsbufp->str,
  1580. size);
  1581. dsbufp->size = size;
  1582. return dsbufp;
  1583. }
  1584. erts_dsprintf_buf_t *
  1585. erts_create_logger_dsbuf(void)
  1586. {
  1587. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_logger_dsbuf);
  1588. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
  1589. sizeof(erts_dsprintf_buf_t));
  1590. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  1591. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
  1592. LOGGER_DSBUF_INC_SZ);
  1593. dsbufp->str[0] = '\0';
  1594. dsbufp->size = LOGGER_DSBUF_INC_SZ;
  1595. return dsbufp;
  1596. }
  1597. static ERTS_INLINE void
  1598. destroy_logger_dsbuf(erts_dsprintf_buf_t *dsbufp)
  1599. {
  1600. ASSERT(dsbufp && dsbufp->str);
  1601. erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp->str);
  1602. erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp);
  1603. }
  1604. int
  1605. erts_send_info_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
  1606. {
  1607. int res;
  1608. res = send_info_to_logger(gleader, dsbufp->str, dsbufp->str_len);
  1609. destroy_logger_dsbuf(dsbufp);
  1610. return res;
  1611. }
  1612. int
  1613. erts_send_warning_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
  1614. {
  1615. int res;
  1616. res = send_warning_to_logger(gleader, dsbufp->str, dsbufp->str_len);
  1617. destroy_logger_dsbuf(dsbufp);
  1618. return res;
  1619. }
  1620. int
  1621. erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
  1622. {
  1623. int res;
  1624. res = send_error_to_logger(gleader, dsbufp->str, dsbufp->str_len);
  1625. destroy_logger_dsbuf(dsbufp);
  1626. return res;
  1627. }
  1628. int
  1629. erts_send_info_to_logger_str(Eterm gleader, char *str)
  1630. {
  1631. return send_info_to_logger(gleader, str, sys_strlen(str));
  1632. }
  1633. int
  1634. erts_send_warning_to_logger_str(Eterm gleader, char *str)
  1635. {
  1636. return send_warning_to_logger(gleader, str, sys_strlen(str));
  1637. }
  1638. int
  1639. erts_send_error_to_logger_str(Eterm gleader, char *str)
  1640. {
  1641. return send_error_to_logger(gleader, str, sys_strlen(str));
  1642. }
  1643. int
  1644. erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
  1645. {
  1646. return erts_send_info_to_logger(NIL, dsbuf);
  1647. }
  1648. int
  1649. erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
  1650. {
  1651. return erts_send_warning_to_logger(NIL, dsbuf);
  1652. }
  1653. int
  1654. erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
  1655. {
  1656. return erts_send_error_to_logger(NIL, dsbuf);
  1657. }
  1658. int
  1659. erts_send_info_to_logger_str_nogl(char *str)
  1660. {
  1661. return erts_send_info_to_logger_str(NIL, str);
  1662. }
  1663. int
  1664. erts_send_warning_to_logger_str_nogl(char *str)
  1665. {
  1666. return erts_send_warning_to_logger_str(NIL, str);
  1667. }
  1668. int
  1669. erts_send_error_to_logger_str_nogl(char *str)
  1670. {
  1671. return erts_send_error_to_logger_str(NIL, str);
  1672. }
  1673. #define TMP_DSBUF_INC_SZ 256
  1674. static erts_dsprintf_buf_t *
  1675. grow_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1676. {
  1677. size_t size;
  1678. size_t free_size = dsbufp->size - dsbufp->str_len;
  1679. ASSERT(dsbufp);
  1680. if (need <= free_size)
  1681. return dsbufp;
  1682. size = need - free_size + TMP_DSBUF_INC_SZ;
  1683. size = ((size + TMP_DSBUF_INC_SZ - 1)/TMP_DSBUF_INC_SZ)*TMP_DSBUF_INC_SZ;
  1684. size += dsbufp->size;
  1685. ASSERT(dsbufp->str_len + need <= size);
  1686. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_TMP_DSBUF,
  1687. (void *) dsbufp->str,
  1688. size);
  1689. dsbufp->size = size;
  1690. return dsbufp;
  1691. }
  1692. erts_dsprintf_buf_t *
  1693. erts_create_tmp_dsbuf(Uint size)
  1694. {
  1695. Uint init_size = size ? size : TMP_DSBUF_INC_SZ;
  1696. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_tmp_dsbuf);
  1697. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_TMP_DSBUF,
  1698. sizeof(erts_dsprintf_buf_t));
  1699. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  1700. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_TMP_DSBUF, init_size);
  1701. dsbufp->str[0] = '\0';
  1702. dsbufp->size = init_size;
  1703. return dsbufp;
  1704. }
  1705. void
  1706. erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp)
  1707. {
  1708. if (dsbufp->str)
  1709. erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp->str);
  1710. erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp);
  1711. }
/* eq and cmp are written as separate functions, as eq is a little faster */
  1713. /*
  1714. * Test for equality of two terms.
  1715. * Returns 0 if not equal, or a non-zero value otherwise.
  1716. */
/*
 * Structural equality of two terms (the =:= test).
 * Returns non-zero if equal, 0 otherwise.
 *
 * Iterative: pending cdr/element pairs are kept on a WSTACK instead of
 * the C stack. Tuple/fun element arrays are pushed as a triple whose
 * pointer entry is tagged with TAG_PRIMARY_HEADER so pop_next can tell
 * them apart from plain term pairs.
 *
 * In HALFWORD_HEAP builds the *_rel accessors interpret terms relative
 * to a_base/b_base; in normal builds those bases presumably collapse to
 * no-ops via macros defined elsewhere in the file -- the body is shared.
 */
#if HALFWORD_HEAP
int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base)
#else
int eq(Eterm a, Eterm b)
#endif
{
    DECLARE_WSTACK(stack);
    Sint sz;
    Eterm* aa;
    Eterm* bb;

tailrecur:
    if (is_same(a, a_base, b, b_base)) goto pop_next;
tailrecur_ne:
    switch (primary_tag(a)) {
    case TAG_PRIMARY_LIST:
	if (is_list(b)) {
	    Eterm* aval = list_val_rel(a, a_base);
	    Eterm* bval = list_val_rel(b, b_base);
	    while (1) {
		Eterm atmp = CAR(aval);
		Eterm btmp = CAR(bval);
		if (!is_same(atmp,a_base,btmp,b_base)) {
		    /* Heads differ structurally: defer the tails and
		       recurse (via goto) into the heads. */
		    WSTACK_PUSH2(stack,(UWord) CDR(bval),(UWord) CDR(aval));
		    a = atmp;
		    b = btmp;
		    goto tailrecur_ne;
		}
		atmp = CDR(aval);
		btmp = CDR(bval);
		if (is_same(atmp,a_base,btmp,b_base)) {
		    goto pop_next;
		}
		if (is_not_list(atmp) || is_not_list(btmp)) {
		    a = atmp;
		    b = btmp;
		    goto tailrecur_ne;
		}
		aval = list_val_rel(atmp, a_base);
		bval = list_val_rel(btmp, b_base);
	    }
	}
	break; /* not equal */
    case TAG_PRIMARY_BOXED:
	{
	    Eterm hdr = *boxed_val_rel(a,a_base);
	    switch (hdr & _TAG_HEADER_MASK) {
	    case ARITYVAL_SUBTAG:
		{
		    /* Tuple: compare arity words, then walk the elements. */
		    aa = tuple_val_rel(a, a_base);
		    if (!is_boxed(b) || *boxed_val_rel(b,b_base) != *aa)
			goto not_equal;
		    bb = tuple_val_rel(b,b_base);
		    if ((sz = arityval(*aa)) == 0) goto pop_next;
		    ++aa;
		    ++bb;
		    goto term_array;
		}
	    case REFC_BINARY_SUBTAG:
	    case HEAP_BINARY_SUBTAG:
	    case SUB_BINARY_SUBTAG:
		{
		    byte* a_ptr;
		    byte* b_ptr;
		    size_t a_size;
		    size_t b_size;
		    Uint a_bitsize;
		    Uint b_bitsize;
		    Uint a_bitoffs;
		    Uint b_bitoffs;

		    if (!is_binary_rel(b,b_base)) {
			goto not_equal;
		    }
		    a_size = binary_size_rel(a,a_base);
		    b_size = binary_size_rel(b,b_base);
		    if (a_size != b_size) {
			goto not_equal;
		    }
		    ERTS_GET_BINARY_BYTES_REL(a, a_ptr, a_bitoffs, a_bitsize, a_base);
		    ERTS_GET_BINARY_BYTES_REL(b, b_ptr, b_bitoffs, b_bitsize, b_base);
		    /* Byte-aligned, whole-byte binaries can use memcmp;
		       anything with bit offsets/sizes needs the bit-level
		       comparison. */
		    if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
			if (sys_memcmp(a_ptr, b_ptr, a_size) == 0) goto pop_next;
		    } else if (a_bitsize == b_bitsize) {
			if (erts_cmp_bits(a_ptr, a_bitoffs, b_ptr, b_bitoffs,
					  (a_size << 3) + a_bitsize) == 0) goto pop_next;
		    }
		    break; /* not equal */
		}
	    case EXPORT_SUBTAG:
		{
		    /* Export entries are interned: pointer equality suffices. */
		    if (is_export_rel(b,b_base)) {
			Export* a_exp = *((Export **) (export_val_rel(a,a_base) + 1));
			Export* b_exp = *((Export **) (export_val_rel(b,b_base) + 1));
			if (a_exp == b_exp) goto pop_next;
		    }
		    break; /* not equal */
		}
	    case FUN_SUBTAG:
		{
		    ErlFunThing* f1;
		    ErlFunThing* f2;

		    if (!is_fun_rel(b,b_base))
			goto not_equal;
		    f1 = (ErlFunThing *) fun_val_rel(a,a_base);
		    f2 = (ErlFunThing *) fun_val_rel(b,b_base);
		    if (f1->fe->module != f2->fe->module ||
			f1->fe->old_index != f2->fe->old_index ||
			f1->fe->old_uniq != f2->fe->old_uniq ||
			f1->num_free != f2->num_free) {
			goto not_equal;
		    }
		    /* Identity matches; now the captured environment. */
		    if ((sz = f1->num_free) == 0) goto pop_next;
		    aa = f1->env;
		    bb = f2->env;
		    goto term_array;
		}
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG: {
		ExternalThing *ap;
		ExternalThing *bp;

		if(!is_external_rel(b,b_base))
		    goto not_equal;
		ap = external_thing_ptr_rel(a,a_base);
		bp = external_thing_ptr_rel(b,b_base);
		if(ap->header == bp->header && ap->node == bp->node) {
		    ASSERT(1 == external_data_words_rel(a,a_base));
		    ASSERT(1 == external_data_words_rel(b,b_base));
		    if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next;
		}
		break; /* not equal */
	    }
	    case EXTERNAL_REF_SUBTAG: {
		/*
		 * Observe!
		 * When comparing refs we need to compare ref numbers
		 * (32-bit words) *not* ref data words.
		 */
		Uint32 *anum;
		Uint32 *bnum;
		Uint common_len;
		Uint alen;
		Uint blen;
		Uint i;
		ExternalThing* athing;
		ExternalThing* bthing;

		if(!is_external_ref_rel(b,b_base))
		    goto not_equal;
		athing = external_thing_ptr_rel(a,a_base);
		bthing = external_thing_ptr_rel(b,b_base);
		if(athing->node != bthing->node)
		    goto not_equal;
		anum = external_thing_ref_numbers(athing);
		bnum = external_thing_ref_numbers(bthing);
		alen = external_thing_ref_no_of_numbers(athing);
		blen = external_thing_ref_no_of_numbers(bthing);
		goto ref_common;
	    case REF_SUBTAG:
		/* NOTE: this case label sits inside the EXTERNAL_REF
		   block so both can share ref_common below. */
		    if (!is_internal_ref_rel(b,b_base))
			goto not_equal;
		    {
			RefThing* athing = ref_thing_ptr_rel(a,a_base);
			RefThing* bthing = ref_thing_ptr_rel(b,b_base);
			alen = internal_thing_ref_no_of_numbers(athing);
			blen = internal_thing_ref_no_of_numbers(bthing);
			anum = internal_thing_ref_numbers(athing);
			bnum = internal_thing_ref_numbers(bthing);
		    }
	    ref_common:
		    ASSERT(alen > 0 && blen > 0);
		    if (anum[0] != bnum[0])
			goto not_equal;
		    if (alen == 3 && blen == 3) {
			/* Most refs are of length 3 */
			if (anum[1] == bnum[1] && anum[2] == bnum[2]) {
			    goto pop_next;
			} else {
			    goto not_equal;
			}
		    }
		    common_len = alen;
		    if (blen < alen)
			common_len = blen;
		    for (i = 1; i < common_len; i++)
			if (anum[i] != bnum[i])
			    goto not_equal;
		    /* Different lengths are still equal if the extra
		       words of the longer ref are all zero. */
		    if(alen != blen) {
			if (alen > blen) {
			    for (i = common_len; i < alen; i++)
				if (anum[i] != 0)
				    goto not_equal;
			}
			else {
			    for (i = common_len; i < blen; i++)
				if (bnum[i] != 0)
				    goto not_equal;
			}
		    }
		    goto pop_next;
	    }
	    case POS_BIG_SUBTAG:
	    case NEG_BIG_SUBTAG:
		{
		    int i;

		    if (!is_big_rel(b,b_base))
			goto not_equal;
		    aa = big_val_rel(a,a_base);
		    bb = big_val_rel(b,b_base);
		    /* Header word (sign + arity) must match, then digits. */
		    if (*aa != *bb)
			goto not_equal;
		    i = BIG_ARITY(aa);
		    while(i--) {
			if (*++aa != *++bb)
			    goto not_equal;
		    }
		    goto pop_next;
		}
	    case FLOAT_SUBTAG:
		{
		    FloatDef af;
		    FloatDef bf;

		    if (is_float_rel(b,b_base)) {
			GET_DOUBLE_REL(a, af, a_base);
			GET_DOUBLE_REL(b, bf, b_base);
			if (af.fd == bf.fd) goto pop_next;
		    }
		    break; /* not equal */
		}
	    }
	    break;
	}
    }
    goto not_equal;

term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
    ASSERT(sz != 0);
    {
	Eterm* ap = aa;
	Eterm* bp = bb;
	Sint i = sz;
	for (;;) {
	    if (!is_same(*ap,a_base,*bp,b_base)) break;
	    if (--i == 0) goto pop_next;
	    ++ap;
	    ++bp;
	}
	a = *ap;
	b = *bp;
	if (is_both_immed(a,b)) {
	    goto not_equal;
	}
	if (i > 1) { /* push the rest */
	    WSTACK_PUSH3(stack, i-1, (UWord)(bp+1),
			 ((UWord)(ap+1)) | TAG_PRIMARY_HEADER);
	    /* We (ab)use TAG_PRIMARY_HEADER to recognize a term_array */
	}
	goto tailrecur_ne;
    }

pop_next:
    if (!WSTACK_ISEMPTY(stack)) {
	UWord something = WSTACK_POP(stack);
	if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
	    aa = (Eterm*) something;
	    bb = (Eterm*) WSTACK_POP(stack);
	    sz = WSTACK_POP(stack);
	    goto term_array;
	}
	a = something;
	b = WSTACK_POP(stack);
	goto tailrecur;
    }
    DESTROY_WSTACK(stack);
    return 1;

not_equal:
    DESTROY_WSTACK(stack);
    return 0;
}
  1991. /*
  1992. * Lexically compare two strings of bytes (string s1 length l1 and s2 l2).
  1993. *
  1994. * s1 < s2 return -1
  1995. * s1 = s2 return 0
  1996. * s1 > s2 return +1
  1997. */
  1998. static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
  1999. {
  2000. int i;
  2001. i = 0;
  2002. while((i < l1) && (i < l2)) {
  2003. if (s1[i] < s2[i]) return(-1);
  2004. if (s1[i] > s2[i]) return(1);
  2005. i++;
  2006. }
  2007. if (l1 < l2) return(-1);
  2008. if (l1 > l2) return(1);
  2009. return(0);
  2010. }
  2011. /*
  2012. * Compare objects.
  2013. * Returns 0 if equal, a negative value if a < b, or a positive number a > b.
  2014. *
 * According to the Erlang Standard, types are ordered as follows:
  2016. * numbers < (characters) < atoms < refs < funs < ports < pids <
  2017. * tuples < [] < conses < binaries.
  2018. *
  2019. * Note that characters are currently not implemented.
  2020. *
  2021. */
  2022. #define float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
  2023. static int cmp_atoms(Eterm a, Eterm b)
  2024. {
  2025. Atom *aa = atom_tab(atom_val(a));
  2026. Atom *bb = atom_tab(atom_val(b));
  2027. int diff = aa->ord0 - bb->ord0;
  2028. if (diff)
  2029. return diff;
  2030. return cmpbytes(aa->name+3, aa->len-3,
  2031. bb->name+3, bb->len-3);
  2032. }
  2033. #if HALFWORD_HEAP
  2034. Sint cmp_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base)
  2035. #else
  2036. Sint cmp(Eterm a, Eterm b)
  2037. #endif
  2038. {
  2039. DECLARE_WSTACK(stack);
  2040. Eterm* aa;
  2041. Eterm* bb;
  2042. int i;
  2043. Sint j;
  2044. int a_tag;
  2045. int b_tag;
  2046. ErlNode *anode;
  2047. ErlNode *bnode;
  2048. Uint adata;
  2049. Uint bdata;
  2050. Uint alen;
  2051. Uint blen;
  2052. Uint32 *anum;
  2053. Uint32 *bnum;
  2054. #define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; }
  2055. #define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal
  2056. #undef CMP_NODES
  2057. #define CMP_NODES(AN, BN) \
  2058. do { \
  2059. if((AN) != (BN)) { \
  2060. if((AN)->sysname != (BN)->sysname) \
  2061. RETURN_NEQ(cmp_atoms((AN)->sysname, (BN)->sysname)); \
  2062. ASSERT((AN)->creation != (BN)->creation); \
  2063. RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \
  2064. } \
  2065. } while (0)
  2066. tailrecur:
  2067. if (is_same(a,a_base,b,b_base)) { /* Equal values or pointers. */
  2068. goto pop_next;
  2069. }
  2070. tailrecur_ne:
  2071. /* deal with majority (?) cases by brute-force */
  2072. if (is_atom(a)) {
  2073. if (is_atom(b)) {
  2074. ON_CMP_GOTO(cmp_atoms(a, b));
  2075. }
  2076. } else if (is_both_small(a, b)) {
  2077. ON_CMP_GOTO(signed_val(a) - signed_val(b));
  2078. }
  2079. /*
  2080. * Take care of cases where the types are the same.
  2081. */
  2082. a_tag = 42; /* Suppress warning */
  2083. switch (primary_tag(a)) {
  2084. case TAG_PRIMARY_IMMED1:
  2085. switch ((a & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
  2086. case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE):
  2087. if (is_internal_port(b)) {
  2088. bnode = erts_this_node;
  2089. bdata = internal_port_data(b);
  2090. } else if (is_external_port_rel(b,b_base)) {
  2091. bnode = external_port_node_rel(b,b_base);
  2092. bdata = external_port_data_rel(b,b_base);
  2093. } else {
  2094. a_tag = PORT_DEF;
  2095. goto mixed_types;
  2096. }
  2097. anode = erts_this_node;
  2098. adata = internal_port_data(a);
  2099. port_common:
  2100. CMP_NODES(anode, bnode);
  2101. ON_CMP_GOTO((Sint)(adata - bdata));
  2102. case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE):
  2103. if (is_internal_pid(b)) {
  2104. bnode = erts_this_node;
  2105. bdata = internal_pid_data(b);
  2106. } else if (is_external_pid_rel(b,b_base)) {
  2107. bnode = external_pid_node_rel(b,b_base);
  2108. bdata = external_pid_data_rel(b,b_base);
  2109. } else {
  2110. a_tag = PID_DEF;
  2111. goto mixed_types;
  2112. }
  2113. anode = erts_this_node;
  2114. adata = internal_pid_data(a);
  2115. pid_common:
  2116. if (adata != bdata) {
  2117. RETURN_NEQ(adata < bdata ? -1 : 1);
  2118. }
  2119. CMP_NODES(anode, bnode);
  2120. goto pop_next;
  2121. case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
  2122. a_tag = SMALL_DEF;
  2123. goto mixed_types;
  2124. case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
  2125. switch ((a & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
  2126. case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE):
  2127. a_tag = ATOM_DEF;
  2128. goto mixed_types;
  2129. case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE):
  2130. a_tag = NIL_DEF;
  2131. goto mixed_types;
  2132. }
  2133. }
  2134. }
  2135. case TAG_PRIMARY_LIST:
  2136. if (is_not_list(b)) {
  2137. a_tag = LIST_DEF;
  2138. goto mixed_types;
  2139. }
  2140. aa = list_val_rel(a,a_base);
  2141. bb = list_val_rel(b,b_base);
  2142. while (1) {
  2143. Eterm atmp = CAR(aa);
  2144. Eterm btmp = CAR(bb);
  2145. if (!is_same(atmp,a_base,btmp,b_base)) {
  2146. WSTACK_PUSH2(stack,(UWord) CDR(bb),(UWord) CDR(aa));
  2147. a = atmp;
  2148. b = btmp;
  2149. goto tailrecur_ne;
  2150. }
  2151. atmp = CDR(aa);
  2152. btmp = CDR(bb);
  2153. if (is_same(atmp,a_base,btmp,b_base)) {
  2154. goto pop_next;
  2155. }
  2156. if (is_not_list(atmp) || is_not_list(btmp)) {
  2157. a = atmp;
  2158. b = btmp;
  2159. goto tailrecur_ne;
  2160. }
  2161. aa = list_val_rel(atmp,a_base);
  2162. bb = list_val_rel(btmp,b_base);
  2163. }
  2164. case TAG_PRIMARY_BOXED:
  2165. {
  2166. Eterm ahdr = *boxed_val_rel(a,a_base);
  2167. switch ((ahdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
  2168. case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
  2169. if (!is_tuple_rel(b,b_base)) {
  2170. a_tag = TUPLE_DEF;
  2171. goto mixed_types;
  2172. }
  2173. aa = tuple_val_rel(a,a_base);
  2174. bb = tuple_val_rel(b,b_base);
  2175. /* compare the arities */
  2176. i = arityval(ahdr); /* get the arity*/
  2177. if (i != arityval(*bb)) {
  2178. RETURN_NEQ((int)(i - arityval(*bb)));
  2179. }
  2180. if (i == 0) {
  2181. goto pop_next;
  2182. }
  2183. ++aa;
  2184. ++bb;
  2185. goto term_array;
  2186. case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
  2187. if (!is_float_rel(b,b_base)) {
  2188. a_tag = FLOAT_DEF;
  2189. goto mixed_types;
  2190. } else {
  2191. FloatDef af;
  2192. FloatDef bf;
  2193. GET_DOUBLE_REL(a, af, a_base);
  2194. GET_DOUBLE_REL(b, bf, b_base);
  2195. ON_CMP_GOTO(float_comp(af.fd, bf.fd));
  2196. }
  2197. case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
  2198. case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
  2199. if (!is_big_rel(b,b_base)) {
  2200. a_tag = BIG_DEF;
  2201. goto mixed_types;
  2202. }
  2203. ON_CMP_GOTO(big_comp(rterm2wterm(a,a_base), rterm2wterm(b,b_base)));
  2204. case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE):
  2205. if (!is_export_rel(b,b_base)) {
  2206. a_tag = EXPORT_DEF;
  2207. goto mixed_types;
  2208. } else {
  2209. Export* a_exp = *((Export **) (export_val_rel(a,a_base) + 1));
  2210. Export* b_exp = *((Export **) (export_val_rel(b,b_base) + 1));
  2211. if ((j = cmp_atoms(a_exp->code[0], b_exp->code[0])) != 0) {
  2212. RETURN_NEQ(j);
  2213. }
  2214. if ((j = cmp_atoms(a_exp->code[1], b_exp->code[1])) != 0) {
  2215. RETURN_NEQ(j);
  2216. }
  2217. ON_CMP_GOTO((Sint) a_exp->code[2] - (Sint) b_exp->code[2]);
  2218. }
  2219. break;
  2220. case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
  2221. if (!is_fun_rel(b,b_base)) {
  2222. a_tag = FUN_DEF;
  2223. goto mixed_types;
  2224. } else {
  2225. ErlFunThing* f1 = (ErlFunThing *) fun_val_rel(a,a_base);
  2226. ErlFunThing* f2 = (ErlFunThing *) fun_val_rel(b,b_base);
  2227. Sint diff;
  2228. diff = cmpbytes(atom_tab(atom_val(f1->fe->module))->name,
  2229. atom_tab(atom_val(f1->fe->module))->len,
  2230. atom_tab(atom_val(f2->fe->module))->name,
  2231. atom_tab(atom_val(f2->fe->module))->len);
  2232. if (diff != 0) {
  2233. RETURN_NEQ(diff);
  2234. }
  2235. diff = f1->fe->old_index - f2->fe->old_index;
  2236. if (diff != 0) {
  2237. RETURN_NEQ(diff);
  2238. }
  2239. diff = f1->fe->old_uniq - f2->fe->old_uniq;
  2240. if (diff != 0) {
  2241. RETURN_NEQ(diff);
  2242. }
  2243. diff = f1->num_free - f2->num_free;
  2244. if (diff != 0) {
  2245. RETURN_NEQ(diff);
  2246. }
  2247. i = f1->num_free;
  2248. if (i == 0) goto pop_next;
  2249. aa = f1->env;
  2250. bb = f2->env;
  2251. goto term_array;
  2252. }
  2253. case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE):
  2254. if (is_internal_pid(b)) {
  2255. bnode = erts_this_node;
  2256. bdata = internal_pid_data(b);
  2257. } else if (is_external_pid_rel(b,b_base)) {
  2258. bnode = external_pid_node_rel(b,b_base);
  2259. bdata = external_pid_data_rel(b,b_base);
  2260. } else {
  2261. a_tag = EXTERNAL_PID_DEF;
  2262. goto mixed_types;
  2263. }
  2264. anode = external_pid_node_rel(a,a_base);
  2265. adata = external_pid_data_rel(a,a_base);
  2266. goto pid_common;
  2267. case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE):
  2268. if (is_internal_port(b)) {
  2269. bnode = erts_this_node;
  2270. bdata = internal_port_data(b);
  2271. } else if (is_external_port_rel(b,b_base)) {
  2272. bnode = external_port_node_rel(b,b_base);
  2273. bdata = external_port_data_rel(b,b_base);
  2274. } else {
  2275. a_tag = EXTERNAL_PORT_DEF;
  2276. goto mixed_types;
  2277. }
  2278. anode = external_port_node_rel(a,a_base);
  2279. adata = external_port_data_rel(a,a_base);
  2280. goto port_common;
  2281. case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
  2282. /*
  2283. * Note! When comparing refs we need to compare ref numbers
  2284. * (32-bit words), *not* ref data words.
  2285. */
  2286. if (is_internal_ref_rel(b,b_base)) {
  2287. RefThing* bthing = ref_thing_ptr_rel(b,b_base);
  2288. bnode = erts_this_node;
  2289. bnum = internal_thing_ref_numbers(bthing);
  2290. blen = internal_thing_ref_no_of_numbers(bthing);
  2291. } else if(is_external_ref_rel(b,b_base)) {
  2292. ExternalThing* bthing = external_thing_ptr_rel(b,b_base);
  2293. bnode = bthing->node;
  2294. bnum = external_thing_ref_numbers(bthing);
  2295. blen = external_thing_ref_no_of_numbers(bthing);
  2296. } else {
  2297. a_tag = REF_DEF;
  2298. goto mixed_types;
  2299. }
  2300. {
  2301. RefThing* athing = ref_thing_ptr_rel(a,a_base);
  2302. anode = erts_this_node;
  2303. anum = internal_thing_ref_numbers(athing);
  2304. alen = internal_thing_ref_no_of_numbers(athing);
  2305. }
  2306. ref_common:
  2307. CMP_NODES(anode, bnode);
  2308. ASSERT(alen > 0 && blen > 0);
  2309. if (alen != blen) {
  2310. if (alen > blen) {
  2311. do {
  2312. if (anum[alen - 1] != 0)
  2313. RETURN_NEQ(1);
  2314. alen--;
  2315. } while (alen > blen);
  2316. }
  2317. else {
  2318. do {
  2319. if (bnum[blen - 1] != 0)
  2320. RETURN_NEQ(-1);
  2321. blen--;
  2322. } while (alen < blen);
  2323. }
  2324. }
  2325. ASSERT(alen == blen);
  2326. for (i = (Sint) alen - 1; i >= 0; i--)
  2327. if (anum[i] != bnum[i])
  2328. RETURN_NEQ((Sint32) (anum[i] - bnum[i]));
  2329. goto pop_next;
  2330. case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
  2331. if (is_internal_ref_rel(b,b_base)) {
  2332. RefThing* bthing = ref_thing_ptr_rel(b,b_base);
  2333. bnode = erts_this_node;
  2334. bnum = internal_thing_ref_numbers(bthing);
  2335. blen = internal_thing_ref_no_of_numbers(bthing);
  2336. } else if (is_external_ref_rel(b,b_base)) {
  2337. ExternalThing* bthing = external_thing_ptr_rel(b,b_base);
  2338. bnode = bthing->node;
  2339. bnum = external_thing_ref_numbers(bthing);
  2340. blen = external_thing_ref_no_of_numbers(bthing);
  2341. } else {
  2342. a_tag = EXTERNAL_REF_DEF;
  2343. goto mixed_types;
  2344. }
  2345. {
  2346. ExternalThing* athing = external_thing_ptr_rel(a,a_base);
  2347. anode = athing->node;
  2348. anum = external_thing_ref_numbers(athing);
  2349. alen = external_thing_ref_no_of_numbers(athing);
  2350. }
  2351. goto ref_common;
  2352. default:
  2353. /* Must be a binary */
  2354. ASSERT(is_binary_rel(a,a_base));
  2355. if (!is_binary_rel(b,b_base)) {
  2356. a_tag = BINARY_DEF;
  2357. goto mixed_types;
  2358. } else {
  2359. Uint a_size = binary_size_rel(a,a_base);
  2360. Uint b_size = binary_size_rel(b,b_base);
  2361. Uint a_bitsize;
  2362. Uint b_bitsize;
  2363. Uint a_bitoffs;
  2364. Uint b_bitoffs;
  2365. Uint min_size;
  2366. int cmp;
  2367. byte* a_ptr;
  2368. byte* b_ptr;
  2369. ERTS_GET_BINARY_BYTES_REL(a, a_ptr, a_bitoffs, a_bitsize, a_base);
  2370. ERTS_GET_BINARY_BYTES_REL(b, b_ptr, b_bitoffs, b_bitsize, b_base);
  2371. if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
  2372. min_size = (a_size < b_size) ? a_size : b_size;
  2373. if ((cmp = sys_memcmp(a_ptr, b_ptr, min_size)) != 0) {
  2374. RETURN_NEQ(cmp);
  2375. }
  2376. }
  2377. else {
  2378. a_size = (a_size << 3) + a_bitsize;
  2379. b_size = (b_size << 3) + b_bitsize;
  2380. min_size = (a_size < b_size) ? a_size : b_size;
  2381. if ((cmp = erts_cmp_bits(a_ptr,a_bitoffs,
  2382. b_ptr,b_bitoffs,min_size)) != 0) {
  2383. RETURN_NEQ(cmp);
  2384. }
  2385. }
  2386. ON_CMP_GOTO((Sint)(a_size - b_size));
  2387. }
  2388. }
  2389. }
  2390. }
  2391. /*
  2392. * Take care of the case that the tags are different.
  2393. */
  2394. mixed_types:
  2395. {
  2396. FloatDef f1, f2;
  2397. Eterm big;
  2398. #if HEAP_ON_C_STACK
  2399. Eterm big_buf[2]; /* If HEAP_ON_C_STACK */
  2400. #else
  2401. Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap;
  2402. #endif
  2403. #if HALFWORD_HEAP
  2404. Wterm aw = is_immed(a) ? a : rterm2wterm(a,a_base);
  2405. Wterm bw = is_immed(b) ? b : rterm2wterm(b,b_base);
  2406. #else
  2407. Eterm aw = a;
  2408. Eterm bw = b;
  2409. #endif
  2410. b_tag = tag_val_def(bw);
  2411. switch(_NUMBER_CODE(a_tag, b_tag)) {
  2412. case SMALL_BIG:
  2413. big = small_to_big(signed_val(a), big_buf);
  2414. j = big_comp(big, bw);
  2415. break;
  2416. case SMALL_FLOAT:
  2417. f1.fd = signed_val(a);
  2418. GET_DOUBLE(bw, f2);
  2419. j = float_comp(f1.fd, f2.fd);
  2420. break;
  2421. case BIG_SMALL:
  2422. big = small_to_big(signed_val(b), big_buf);
  2423. j = big_comp(aw, big);
  2424. break;
  2425. case BIG_FLOAT:
  2426. if (big_to_double(aw, &f1.fd) < 0) {
  2427. j = big_sign(a) ? -1 : 1;
  2428. } else {
  2429. GET_DOUBLE(bw, f2);
  2430. j = float_comp(f1.fd, f2.fd);
  2431. }
  2432. break;
  2433. case FLOAT_SMALL:
  2434. GET_DOUBLE(aw, f1);
  2435. f2.fd = signed_val(b);
  2436. j = float_comp(f1.fd, f2.fd);
  2437. break;
  2438. case FLOAT_BIG:
  2439. if (big_to_double(bw, &f2.fd) < 0) {
  2440. j = big_sign(b) ? 1 : -1;
  2441. } else {
  2442. GET_DOUBLE(aw, f1);
  2443. j = float_comp(f1.fd, f2.fd);
  2444. }
  2445. break;
  2446. default:
  2447. j = b_tag - a_tag;
  2448. }
  2449. }
  2450. if (j == 0) {
  2451. goto pop_next;
  2452. } else {
  2453. goto not_equal;
  2454. }
  2455. term_array: /* arrays in 'aa' and 'bb', length in 'i' */
  2456. ASSERT(i>0);
  2457. while (--i) {
  2458. a = *aa++;
  2459. b = *bb++;
  2460. if (a != b) {
  2461. if (is_atom(a) && is_atom(b)) {
  2462. if ((j = cmp_atoms(a, b)) != 0) {
  2463. goto not_equal;
  2464. }
  2465. } else if (is_both_small(a, b)) {
  2466. if ((j = signed_val(a)-signed_val(b)) != 0) {
  2467. goto not_equal;
  2468. }
  2469. } else {
  2470. /* (ab)Use TAG_PRIMARY_HEADER to recognize a term_array */
  2471. WSTACK_PUSH3(stack, i, (UWord)bb, (UWord)aa | TAG_PRIMARY_HEADER);
  2472. goto tailrecur_ne;
  2473. }
  2474. }
  2475. }
  2476. a = *aa;
  2477. b = *bb;
  2478. goto tailrecur;
  2479. pop_next:
  2480. if (!WSTACK_ISEMPTY(stack)) {
  2481. UWord something = WSTACK_POP(stack);
  2482. if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
  2483. aa = (Eterm*) something;
  2484. bb = (Eterm*) WSTACK_POP(stack);
  2485. i = WSTACK_POP(stack);
  2486. goto term_array;
  2487. }
  2488. a = (Eterm) something;
  2489. b = (Eterm) WSTACK_POP(stack);
  2490. goto tailrecur;
  2491. }
  2492. DESTROY_WSTACK(stack);
  2493. return 0;
  2494. not_equal:
  2495. DESTROY_ESTACK(stack);
  2496. return j;
  2497. #undef CMP_NODES
  2498. }
  2499. Eterm
  2500. store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
  2501. {
  2502. Uint i;
  2503. Uint size;
  2504. Uint *from_hp;
  2505. Uint *to_hp = *hpp;
  2506. ASSERT(is_external(ns) || is_internal_ref(ns));
  2507. if(is_external(ns)) {
  2508. from_hp = external_val(ns);
  2509. size = thing_arityval(*from_hp) + 1;
  2510. *hpp += size;
  2511. for(i = 0; i < size; i++)
  2512. to_hp[i] = from_hp[i];
  2513. erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
  2514. ((struct erl_off_heap_header*) to_hp)->next = oh->first;
  2515. oh->first = (struct erl_off_heap_header*) to_hp;
  2516. return make_external(to_hp);
  2517. }
  2518. /* Internal ref */
  2519. from_hp = internal_ref_val(ns);
  2520. size = thing_arityval(*from_hp) + 1;
  2521. *hpp += size;
  2522. for(i = 0; i < size; i++)
  2523. to_hp[i] = from_hp[i];
  2524. return make_internal_ref(to_hp);
  2525. }
  2526. Eterm
  2527. store_external_or_ref_in_proc_(Process *proc, Eterm ns)
  2528. {
  2529. Uint sz;
  2530. Uint *hp;
  2531. ASSERT(is_external(ns) || is_internal_ref(ns));
  2532. sz = NC_HEAP_SIZE(ns);
  2533. ASSERT(sz > 0);
  2534. hp = HAlloc(proc, sz);
  2535. return store_external_or_ref_(&hp, &MSO(proc), ns);
  2536. }
  2537. void bin_write(int to, void *to_arg, byte* buf, int sz)
  2538. {
  2539. int i;
  2540. for (i=0;i<sz;i++) {
  2541. if (IS_DIGIT(buf[i]))
  2542. erts_print(to, to_arg, "%d,", buf[i]);
  2543. else if (IS_PRINT(buf[i])) {
  2544. erts_print(to, to_arg, "%c,", buf[i]);
  2545. }
  2546. else
  2547. erts_print(to, to_arg, "%d,", buf[i]);
  2548. }
  2549. erts_putc(to, to_arg, '\n');
  2550. }
  2551. /* Fill buf with the contents of bytelist list
  2552. return number of chars in list or -1 for error */
  2553. int
  2554. intlist_to_buf(Eterm list, char *buf, int len)
  2555. {
  2556. Eterm* listptr;
  2557. int sz = 0;
  2558. if (is_nil(list))
  2559. return 0;
  2560. if (is_not_list(list))
  2561. return -1;
  2562. listptr = list_val(list);
  2563. while (sz < len) {
  2564. if (!is_byte(*listptr))
  2565. return -1;
  2566. buf[sz++] = unsigned_val(*listptr);
  2567. if (is_nil(*(listptr + 1)))
  2568. return(sz);
  2569. if (is_not_list(*(listptr + 1)))
  2570. return -1;
  2571. listptr = list_val(*(listptr + 1));
  2572. }
  2573. return -1; /* not enough space */
  2574. }
  2575. /*
  2576. ** Convert an integer to a byte list
  2577. ** return pointer to converted stuff (need not to be at start of buf!)
  2578. */
  2579. char* Sint_to_buf(Sint n, struct Sint_buf *buf)
  2580. {
  2581. char* p = &buf->s[sizeof(buf->s)-1];
  2582. int sign = 0;
  2583. *p-- = '\0'; /* null terminate */
  2584. if (n == 0)
  2585. *p-- = '0';
  2586. else if (n < 0) {
  2587. sign = 1;
  2588. n = -n;
  2589. }
  2590. while (n != 0) {
  2591. *p-- = (n % 10) + '0';
  2592. n /= 10;
  2593. }
  2594. if (sign)
  2595. *p-- = '-';
  2596. return p+1;
  2597. }
  2598. /* Build a list of integers in some safe memory area
  2599. ** Memory must be pre allocated prio call 2*len in size
  2600. ** hp is a pointer to the "heap" pointer on return
  2601. ** this pointer is updated to point after the list
  2602. */
  2603. Eterm
  2604. buf_to_intlist(Eterm** hpp, char *buf, int len, Eterm tail)
  2605. {
  2606. Eterm* hp = *hpp;
  2607. buf += (len-1);
  2608. while(len > 0) {
  2609. tail = CONS(hp, make_small((byte)*buf), tail);
  2610. hp += 2;
  2611. buf--;
  2612. len--;
  2613. }
  2614. *hpp = hp;
  2615. return tail;
  2616. }
  2617. /*
  2618. ** Write io list in to a buffer.
  2619. **
  2620. ** An iolist is defined as:
  2621. **
  2622. ** iohead ::= Binary
** | Byte (i.e. an integer in the range [0..255])
  2624. ** | iolist
  2625. ** ;
  2626. **
  2627. ** iotail ::= []
  2628. ** | Binary (added by tony)
  2629. ** | iolist
  2630. ** ;
  2631. **
  2632. ** iolist ::= []
  2633. ** | Binary
  2634. ** | [ iohead | iotail]
  2635. ** ;
  2636. **
  2637. ** Return remaining bytes in buffer on success
  2638. ** -1 on overflow
  2639. ** -2 on type error (including that result would not be a whole number of bytes)
  2640. */
/*
 * Flatten the iolist 'obj' (grammar in the comment above) into buf,
 * which has room for len bytes.  Returns the number of unused bytes
 * left in buf on success, -1 on overflow, -2 on a type error
 * (including binaries with a trailing bit fragment).
 *
 * Iterative: deferred tails are kept on an ESTACK.  Note the initial
 * 'goto L_again' jumps into the loop body so the first object is
 * processed without being pushed.
 *
 * NOTE(review): 'len < size' compares int with size_t; assumes the
 * caller passes len >= 0 -- TODO confirm.
 */
int io_list_to_buf(Eterm obj, char* buf, int len)
{
    Eterm* objp;
    DECLARE_ESTACK(s);
    goto L_again;

    while (!ESTACK_ISEMPTY(s)) {
	obj = ESTACK_POP(s);
    L_again:
	if (is_list(obj)) {
	L_iter_list:
	    objp = list_val(obj);
	    obj = CAR(objp);
	    /* --- head of the cons cell --- */
	    if (is_byte(obj)) {
		if (len == 0) {
		    goto L_overflow;
		}
		*buf++ = unsigned_val(obj);
		len--;
	    } else if (is_binary(obj)) {
		byte* bptr;
		size_t size = binary_size(obj);
		Uint bitsize;
		Uint bitoffs;
		Uint num_bits;
		if (len < size) {
		    goto L_overflow;
		}
		ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
		/* A trailing bit fragment cannot make whole bytes. */
		if (bitsize != 0) {
		    goto L_type_error;
		}
		num_bits = 8*size;
		copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
		buf += size;
		len -= size;
	    } else if (is_list(obj)) {
		/* Nested list head: defer our tail, descend into head. */
		ESTACK_PUSH(s, CDR(objp));
		goto L_iter_list; /* on head */
	    } else if (is_not_nil(obj)) {
		goto L_type_error;
	    }
	    /* --- tail of the cons cell --- */
	    obj = CDR(objp);
	    if (is_list(obj)) {
		goto L_iter_list; /* on tail */
	    } else if (is_binary(obj)) {
		byte* bptr;
		size_t size = binary_size(obj);
		Uint bitsize;
		Uint bitoffs;
		Uint num_bits;
		if (len < size) {
		    goto L_overflow;
		}
		ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
		if (bitsize != 0) {
		    goto L_type_error;
		}
		num_bits = 8*size;
		copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
		buf += size;
		len -= size;
	    } else if (is_not_nil(obj)) {
		goto L_type_error;
	    }
	} else if (is_binary(obj)) {
	    /* A bare binary (initial object or a deferred tail). */
	    byte* bptr;
	    size_t size = binary_size(obj);
	    Uint bitsize;
	    Uint bitoffs;
	    Uint num_bits;
	    if (len < size) {
		goto L_overflow;
	    }
	    ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
	    if (bitsize != 0) {
		goto L_type_error;
	    }
	    num_bits = 8*size;
	    copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
	    buf += size;
	    len -= size;
	} else if (is_not_nil(obj)) {
	    goto L_type_error;
	}
    }

    DESTROY_ESTACK(s);
    return len;

 L_type_error:
    DESTROY_ESTACK(s);
    return -2;

 L_overflow:
    DESTROY_ESTACK(s);
    return -1;
}
  2735. int io_list_len(Eterm obj)
  2736. {
  2737. Eterm* objp;
  2738. Sint len = 0;
  2739. DECLARE_ESTACK(s);
  2740. goto L_again;
  2741. while (!ESTACK_ISEMPTY(s)) {
  2742. obj = ESTACK_POP(s);
  2743. L_again:
  2744. if (is_list(obj)) {
  2745. L_iter_list:
  2746. objp = list_val(obj);
  2747. /* Head */
  2748. obj = CAR(objp);
  2749. if (is_byte(obj)) {
  2750. len++;
  2751. } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
  2752. len += binary_size(obj);
  2753. } else if (is_list(obj)) {
  2754. ESTACK_PUSH(s, CDR(objp));
  2755. goto L_iter_list; /* on head */
  2756. } else if (is_not_nil(obj)) {
  2757. goto L_type_error;
  2758. }
  2759. /* Tail */
  2760. obj = CDR(objp);
  2761. if (is_list(obj))
  2762. goto L_iter_list; /* on tail */
  2763. else if (is_binary(obj) && binary_bitsize(obj) == 0) {
  2764. len += binary_size(obj);
  2765. } else if (is_not_nil(obj)) {
  2766. goto L_type_error;
  2767. }
  2768. } else if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
  2769. len += binary_size(obj);
  2770. } else if (is_not_nil(obj)) {
  2771. goto L_type_error;
  2772. }
  2773. }
  2774. DESTROY_ESTACK(s);
  2775. return len;
  2776. L_type_error:
  2777. DESTROY_ESTACK(s);
  2778. return -1;
  2779. }
  2780. /* return 0 if item is not a non-empty flat list of bytes */
  2781. int
  2782. is_string(Eterm list)
  2783. {
  2784. int len = 0;
  2785. while(is_list(list)) {
  2786. Eterm* consp = list_val(list);
  2787. Eterm hd = CAR(consp);
  2788. if (!is_byte(hd))
  2789. return 0;
  2790. len++;
  2791. list = CDR(consp);
  2792. }
  2793. if (is_nil(list))
  2794. return len;
  2795. return 0;
  2796. }
#ifdef ERTS_SMP

/*
 * Process and Port timers in smp case
 */

/* Pre-allocated pool of 1000 ErtsSmpPTimer structs, scheduler-preferred. */
ERTS_SCHED_PREF_PRE_ALLOC_IMPL(ptimer_pre, ErtsSmpPTimer, 1000)

/* The low ERTS_PTMR_FLGS_ALLCD_SIZE bits of timer.flags record how the
 * timer struct was allocated (see free_ptimer()); the next bit marks a
 * cancelled timer. */
#define ERTS_PTMR_FLGS_ALLCD_SIZE \
  2
#define ERTS_PTMR_FLGS_ALLCD_MASK \
  ((((Uint32) 1) << ERTS_PTMR_FLGS_ALLCD_SIZE) - 1)

#define ERTS_PTMR_FLGS_PREALLCD	((Uint32) 1)
#define ERTS_PTMR_FLGS_SLALLCD	((Uint32) 2)
#define ERTS_PTMR_FLGS_LLALLCD	((Uint32) 3)
#define ERTS_PTMR_FLG_CANCELLED	(((Uint32) 1) << (ERTS_PTMR_FLGS_ALLCD_SIZE+0))

/* Initialize the pre-allocated ptimer pool (called from erts_init_utils()). */
static void
init_ptimers(void)
{
    init_ptimer_pre_alloc();
}
  2815. static ERTS_INLINE void
  2816. free_ptimer(ErtsSmpPTimer *ptimer)
  2817. {
  2818. switch (ptimer->timer.flags & ERTS_PTMR_FLGS_ALLCD_MASK) {
  2819. case ERTS_PTMR_FLGS_PREALLCD:
  2820. (void) ptimer_pre_free(ptimer);
  2821. break;
  2822. case ERTS_PTMR_FLGS_SLALLCD:
  2823. erts_free(ERTS_ALC_T_SL_PTIMER, (void *) ptimer);
  2824. break;
  2825. case ERTS_PTMR_FLGS_LLALLCD:
  2826. erts_free(ERTS_ALC_T_LL_PTIMER, (void *) ptimer);
  2827. break;
  2828. default:
  2829. erl_exit(ERTS_ABORT_EXIT,
  2830. "Internal error: Bad ptimer alloc type\n");
  2831. break;
  2832. }
  2833. }
/* Callback for process timeout cancelled: the cancelled timer just
 * needs to be freed (erts_cancel_smp_ptimer() already detached it). */
static void
ptimer_cancelled(ErtsSmpPTimer *ptimer)
{
    free_ptimer(ptimer);
}
/* Callback for process timeout.
 *
 * Looks up the owning process or port by ptimer->timer.id, and -- if it
 * is still alive and the timer was not cancelled in the meantime --
 * clears the owner's back-reference and invokes the registered timeout
 * function while holding the appropriate locks.  The timer struct is
 * always freed at the end. */
static void
ptimer_timeout(ErtsSmpPTimer *ptimer)
{
    if (is_internal_pid(ptimer->timer.id)) {
	Process *p;
	p = erts_pid2proc_opt(NULL,
			      0,
			      ptimer->timer.id,
			      ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
			      ERTS_P2P_FLG_ALLOW_OTHER_X);
	if (p) {
	    /* Skip the callback for exiting processes and for timers
	     * flagged cancelled after this timeout was scheduled. */
	    if (!p->is_exiting
		&& !(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
		ASSERT(*ptimer->timer.timer_ref == ptimer);
		*ptimer->timer.timer_ref = NULL;
		(*ptimer->timer.timeout_func)(p);
	    }
	    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
	}
    }
    else {
	Port *p;
	ASSERT(is_internal_port(ptimer->timer.id));
	p = erts_id2port_sflgs(ptimer->timer.id,
			       NULL,
			       0,
			       ERTS_PORT_SFLGS_DEAD);
	if (p) {
	    if (!(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
		ASSERT(*ptimer->timer.timer_ref == ptimer);
		*ptimer->timer.timer_ref = NULL;
		(*ptimer->timer.timeout_func)(p);
	    }
	    erts_port_release(p);
	}
    }
    free_ptimer(ptimer);
}
  2879. void
  2880. erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
  2881. Eterm id,
  2882. ErlTimeoutProc timeout_func,
  2883. Uint timeout)
  2884. {
  2885. ErtsSmpPTimer *res = ptimer_pre_alloc();
  2886. if (res)
  2887. res->timer.flags = ERTS_PTMR_FLGS_PREALLCD;
  2888. else {
  2889. if (timeout < ERTS_ALC_MIN_LONG_LIVED_TIME) {
  2890. res = erts_alloc(ERTS_ALC_T_SL_PTIMER, sizeof(ErtsSmpPTimer));
  2891. res->timer.flags = ERTS_PTMR_FLGS_SLALLCD;
  2892. }
  2893. else {
  2894. res = erts_alloc(ERTS_ALC_T_LL_PTIMER, sizeof(ErtsSmpPTimer));
  2895. res->timer.flags = ERTS_PTMR_FLGS_LLALLCD;
  2896. }
  2897. }
  2898. res->timer.timeout_func = timeout_func;
  2899. res->timer.timer_ref = timer_ref;
  2900. res->timer.id = id;
  2901. res->timer.tm.active = 0; /* MUST be initalized */
  2902. ASSERT(!*timer_ref);
  2903. *timer_ref = res;
  2904. erts_set_timer(&res->timer.tm,
  2905. (ErlTimeoutProc) ptimer_timeout,
  2906. (ErlCancelProc) ptimer_cancelled,
  2907. (void*) res,
  2908. timeout);
  2909. }
  2910. void
  2911. erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer)
  2912. {
  2913. if (ptimer) {
  2914. ASSERT(*ptimer->timer.timer_ref == ptimer);
  2915. *ptimer->timer.timer_ref = NULL;
  2916. ptimer->timer.flags |= ERTS_PTMR_FLG_CANCELLED;
  2917. erts_cancel_timer(&ptimer->timer.tm);
  2918. }
  2919. }
  2920. #endif
/* Most recently set mallopt() values (see sys_alloc_opt()); -1 means
 * "never set" (initialized in erts_init_utils_mem()). */
static Sint trim_threshold;
static Sint top_pad;
static Sint mmap_threshold;
static Sint mmap_max;

/* NOTE(review): not referenced elsewhere in this file; presumably
 * maintained by other compilation units -- confirm before removing. */
Uint tot_bin_allocated;
/* One-time initialization of this module (currently only the SMP
 * ptimer pool). */
void erts_init_utils(void)
{
#ifdef ERTS_SMP
    init_ptimers();
#endif
}
/* Initialize the recorded mallopt() values to -1 ("never set"),
 * as reported by sys_alloc_stat(). */
void erts_init_utils_mem(void)
{
    trim_threshold = -1;
    top_pad = -1;
    mmap_threshold = -1;
    mmap_max = -1;
}
/*
 * Tune the C allocator via mallopt() (when available).  'opt' is one
 * of the SYS_ALLOC_OPT_* constants; on success the value is recorded
 * for sys_alloc_stat() and 1 is returned.  Returns 0 when mallopt()
 * is unavailable, the option is unsupported by this libc (the
 * M_* macros come from <malloc.h>; see the #undef block at the top
 * of this file), or mallopt() itself fails.
 */
int
sys_alloc_opt(int opt, int value)
{
#if HAVE_MALLOPT
  Sint m_opt;
  Sint *curr_val;

  switch(opt) {
  case SYS_ALLOC_OPT_TRIM_THRESHOLD:
#ifdef M_TRIM_THRESHOLD
    m_opt = M_TRIM_THRESHOLD;
    curr_val = &trim_threshold;
    break;
#else
    return 0;
#endif
  case SYS_ALLOC_OPT_TOP_PAD:
#ifdef M_TOP_PAD
    m_opt = M_TOP_PAD;
    curr_val = &top_pad;
    break;
#else
    return 0;
#endif
  case SYS_ALLOC_OPT_MMAP_THRESHOLD:
#ifdef M_MMAP_THRESHOLD
    m_opt = M_MMAP_THRESHOLD;
    curr_val = &mmap_threshold;
    break;
#else
    return 0;
#endif
  case SYS_ALLOC_OPT_MMAP_MAX:
#ifdef M_MMAP_MAX
    m_opt = M_MMAP_MAX;
    curr_val = &mmap_max;
    break;
#else
    return 0;
#endif
  default:
    return 0;
  }

  /* mallopt() returns non-zero on success. */
  if(mallopt(m_opt, value)) {
    *curr_val = (Sint) value;
    return 1;
  }
#endif /* #if HAVE_MALLOPT */

  return 0;
}
/* Report the allocator options most recently set through
 * sys_alloc_opt(); fields are -1 when never set. */
void
sys_alloc_stat(SysAllocStat *sasp)
{
    sasp->trim_threshold = trim_threshold;
    sasp->top_pad        = top_pad;
    sasp->mmap_threshold = mmap_threshold;
    sasp->mmap_max       = mmap_max;
}
#ifdef ERTS_SMP

/* Local system block state.
 *
 * Protects the "block system" protocol: one thread (the blocker) can
 * bring all other blockable threads to a halt.  All fields are
 * protected by 'mtx' unless noted otherwise. */
struct {
    int emergency;
    long emergency_timeout;
    erts_smp_cnd_t watchdog_cnd;
    erts_smp_tid_t watchdog_tid;
    int threads_to_block;	   /* registered blockable threads not yet blocked */
    int have_blocker;		   /* non-zero while a blocker holds the system */
    erts_smp_tid_t blocker_tid;	   /* valid only when have_blocker is set */
    int recursive_block;
    Uint32 allowed_activities;
    erts_smp_tsd_key_t blockable_key; /* TSD: non-NULL marks a blockable thread */
    erts_smp_mtx_t mtx;
    erts_smp_cnd_t cnd;		   /* signalled on block/unblock transitions */
#ifdef ERTS_ENABLE_LOCK_CHECK
    int activity_changing;
    int checking;
#endif
} system_block_state;

/* Global system block state */
erts_system_block_state_t erts_system_block_state;
  3018. static ERTS_INLINE int
  3019. is_blockable_thread(void)
  3020. {
  3021. return erts_smp_tsd_get(system_block_state.blockable_key) != NULL;
  3022. }
/* True when the calling thread is the current system blocker. */
static ERTS_INLINE int
is_blocker(void)
{
    return (system_block_state.have_blocker
	    && erts_smp_equal_tids(system_block_state.blocker_tid,
				   erts_smp_thr_self()));
}
#ifdef ERTS_ENABLE_LOCK_CHECK
/* Lock-checker helper: true when the calling thread is blocking the
 * system (a pending block is in effect and we are the blocker). */
int
erts_lc_is_blocking(void)
{
    int res;
    erts_smp_mtx_lock(&system_block_state.mtx);
    res = erts_smp_pending_system_block() && is_blocker();
    erts_smp_mtx_unlock(&system_block_state.mtx);
    return res;
}
#endif
/*
 * Park the calling thread while a system block is pending.
 *
 * If a block is pending and we are not the blocker: decrement
 * threads_to_block (so the blocker can see we have stopped), emit a
 * scheduler 'inactive' profile event, wake waiters, and sleep on the
 * condition variable until the block is lifted (or, when
 * want_to_block is set, until we can take over as blocker).  On
 * wakeup the bookkeeping is undone and an 'active' event is emitted.
 *
 * prepare/resume (either may be NULL) are run with 'arg' before and
 * after, outside the sleep.  mtx_locked tells whether the caller
 * already holds system_block_state.mtx.  update_act_changing adjusts
 * the lock-checker's activity_changing counter across the sleep.
 * psmq, when profiling is on, collects at most two profile messages
 * for the caller to dispatch after the mutex is released.
 */
static ERTS_INLINE void
block_me(void (*prepare)(void *),
	 void (*resume)(void *),
	 void *arg,
	 int mtx_locked,
	 int want_to_block,
	 int update_act_changing,
	 profile_sched_msg_q *psmq)
{
    if (prepare)
	(*prepare)(arg);

    /* Locks might be held... */

    if (!mtx_locked)
	erts_smp_mtx_lock(&system_block_state.mtx);

    if (erts_smp_pending_system_block() && !is_blocker()) {
	int is_blockable = is_blockable_thread();
	ASSERT(is_blockable);

	if (is_blockable)
	    system_block_state.threads_to_block--;

	if (erts_system_profile_flags.scheduler && psmq) {
	    ErtsSchedulerData *esdp = erts_get_scheduler_data();
	    if (esdp) {
		profile_sched_msg *msg = NULL;

		ASSERT(psmq->n < 2);
		msg = &((psmq->msg)[psmq->n]);
		msg->scheduler_id = esdp->no;
		get_now(&(msg->Ms), &(msg->s), &(msg->us));
		msg->no_schedulers = 0;
		msg->state = am_inactive;
		psmq->n++;
	    }
	}

#ifdef ERTS_ENABLE_LOCK_CHECK
	if (update_act_changing)
	    system_block_state.activity_changing--;
#endif

	/* Tell the blocker we are about to stop... */
	erts_smp_cnd_broadcast(&system_block_state.cnd);

	do {
	    erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
	} while (erts_smp_pending_system_block()
		 && !(want_to_block && !system_block_state.have_blocker));

#ifdef ERTS_ENABLE_LOCK_CHECK
	if (update_act_changing)
	    system_block_state.activity_changing++;
#endif

	if (erts_system_profile_flags.scheduler && psmq) {
	    ErtsSchedulerData *esdp = erts_get_scheduler_data();
	    if (esdp) {
		profile_sched_msg *msg = NULL;

		ASSERT(psmq->n < 2);
		msg = &((psmq->msg)[psmq->n]);
		msg->scheduler_id = esdp->no;
		get_now(&(msg->Ms), &(msg->s), &(msg->us));
		msg->no_schedulers = 0;
		msg->state = am_active;
		psmq->n++;
	    }
	}

	if (is_blockable)
	    system_block_state.threads_to_block++;
    }

    if (!mtx_locked)
	erts_smp_mtx_unlock(&system_block_state.mtx);

    if (resume)
	(*resume)(arg);
}
/*
 * Public wrapper around block_me(): block the calling thread while a
 * system block is pending. prepare() is called with arg before
 * blocking and resume() after; either may be NULL. The caller must not
 * hold any locks.
 */
void
erts_block_me(void (*prepare)(void *),
              void (*resume)(void *),
              void *arg)
{
    profile_sched_msg_q psmq;
    psmq.n = 0;
    if (prepare)
        (*prepare)(arg);

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif

    block_me(NULL, NULL, NULL, 0, 0, 0, &psmq);

    /* Deliver scheduler profiling messages queued while blocked. */
    if (erts_system_profile_flags.scheduler && psmq.n > 0)
        dispatch_profile_msg_q(&psmq);

    if (resume)
        (*resume)(arg);
}
/*
 * Register the calling thread as blockable, i.e. it will participate
 * in (and be waited for by) erts_block_system(). Does nothing if the
 * thread is already registered.
 */
void
erts_register_blockable_thread(void)
{
    profile_sched_msg_q psmq;
    psmq.n = 0;
    if (!is_blockable_thread()) {
        erts_smp_mtx_lock(&system_block_state.mtx);
        system_block_state.threads_to_block++;
        /* Thread-specific data marks this thread as blockable. */
        erts_smp_tsd_set(system_block_state.blockable_key,
                         (void *) &erts_system_block_state);

        /* Someone might be waiting for us to block... */
        if (erts_smp_pending_system_block())
            block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);

        erts_smp_mtx_unlock(&system_block_state.mtx);

        if (erts_system_profile_flags.scheduler && psmq.n > 0)
            dispatch_profile_msg_q(&psmq);
    }
}
/*
 * Undo erts_register_blockable_thread(): the calling thread no longer
 * participates in system blocking. Does nothing if the thread is not
 * registered.
 */
void
erts_unregister_blockable_thread(void)
{
    if (is_blockable_thread()) {
        erts_smp_mtx_lock(&system_block_state.mtx);
        system_block_state.threads_to_block--;
        ASSERT(system_block_state.threads_to_block >= 0);
        erts_smp_tsd_set(system_block_state.blockable_key, NULL);

        /* Someone might be waiting for us to block... */
        if (erts_smp_pending_system_block())
            erts_smp_cnd_broadcast(&system_block_state.cnd);

        erts_smp_mtx_unlock(&system_block_state.mtx);
    }
}
  3157. void
  3158. erts_note_activity_begin(erts_activity_t activity)
  3159. {
  3160. erts_smp_mtx_lock(&system_block_state.mtx);
  3161. if (erts_smp_pending_system_block()) {
  3162. Uint32 broadcast = 0;
  3163. switch (activity) {
  3164. case ERTS_ACTIVITY_GC:
  3165. broadcast = (system_block_state.allowed_activities
  3166. & ERTS_BS_FLG_ALLOW_GC);
  3167. break;
  3168. case ERTS_ACTIVITY_IO:
  3169. broadcast = (system_block_state.allowed_activities
  3170. & ERTS_BS_FLG_ALLOW_IO);
  3171. break;
  3172. case ERTS_ACTIVITY_WAIT:
  3173. broadcast = 1;
  3174. break;
  3175. default:
  3176. abort();
  3177. break;
  3178. }
  3179. if (broadcast)
  3180. erts_smp_cnd_broadcast(&system_block_state.cnd);
  3181. }
  3182. erts_smp_mtx_unlock(&system_block_state.mtx);
  3183. }
/*
 * Called on an activity change (old_activity -> new_activity) to block
 * if a pending system block requires it.
 *
 * locked:  non-zero if locks will be held while blocked; blocking is
 *          then only permitted when leaving an activity the blocker
 *          explicitly allowed (GC/IO), since the blocker then promises
 *          not to touch locks held by such threads.
 * prepare/resume: optional callbacks invoked with arg before/after
 *          (only used when not locked).
 */
void
erts_check_block(erts_activity_t old_activity,
                 erts_activity_t new_activity,
                 int locked,
                 void (*prepare)(void *),
                 void (*resume)(void *),
                 void *arg)
{
    int do_block;
    profile_sched_msg_q psmq;

    psmq.n = 0;
    if (!locked && prepare)
        (*prepare)(arg);

    erts_smp_mtx_lock(&system_block_state.mtx);

    /* First check if it is ok to block... */
    if (!locked)
        do_block = 1;
    else {
        switch (old_activity) {
        case ERTS_ACTIVITY_UNDEFINED:
            do_block = 0;
            break;
        case ERTS_ACTIVITY_GC:
            /* Blocking while holding GC locks is ok only if the
               blocker allowed continued GC activity. */
            do_block = (system_block_state.allowed_activities
                        & ERTS_BS_FLG_ALLOW_GC);
            break;
        case ERTS_ACTIVITY_IO:
            do_block = (system_block_state.allowed_activities
                        & ERTS_BS_FLG_ALLOW_IO);
            break;
        case ERTS_ACTIVITY_WAIT:
            /* You are not allowed to leave activity waiting
             * without supplying the possibility to block
             * unlocked.
             */
            erts_set_activity_error(ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
                                    __FILE__, __LINE__);
            do_block = 0;
            break;
        default:
            erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
                                    __FILE__, __LINE__);
            do_block = 0;
            break;
        }
    }

    if (do_block) {
        /* ... then check if it is necessary to block... */
        switch (new_activity) {
        case ERTS_ACTIVITY_UNDEFINED:
            do_block = 1;
            break;
        case ERTS_ACTIVITY_GC:
            /* No need to block when entering an allowed activity. */
            do_block = !(system_block_state.allowed_activities
                         & ERTS_BS_FLG_ALLOW_GC);
            break;
        case ERTS_ACTIVITY_IO:
            do_block = !(system_block_state.allowed_activities
                         & ERTS_BS_FLG_ALLOW_IO);
            break;
        case ERTS_ACTIVITY_WAIT:
            /* No need to block if we are going to wait */
            do_block = 0;
            break;
        default:
            erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
                                    __FILE__, __LINE__);
            break;
        }
    }

    if (do_block) {
#ifdef ERTS_ENABLE_LOCK_CHECK
        if (!locked) {
            /* Only system_block_state.mtx should be held */
            erts_lc_check_exact(&system_block_state.mtx.lc, 1);
        }
#endif
        block_me(NULL, NULL, NULL, 1, 0, 1, &psmq);
    }

    erts_smp_mtx_unlock(&system_block_state.mtx);

    if (erts_system_profile_flags.scheduler && psmq.n > 0)
        dispatch_profile_msg_q(&psmq);

    if (!locked && resume)
        (*resume)(arg);
}
  3269. void
  3270. erts_set_activity_error(erts_activity_error_t error, char *file, int line)
  3271. {
  3272. switch (error) {
  3273. case ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED:
  3274. erl_exit(1, "%s:%d: Fatal error: Leaving activity waiting without "
  3275. "supplying the possibility to block unlocked.",
  3276. file, line);
  3277. break;
  3278. case ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY:
  3279. erl_exit(1, "%s:%d: Fatal error: Leaving unknown activity.",
  3280. file, line);
  3281. break;
  3282. case ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY:
  3283. erl_exit(1, "%s:%d: Fatal error: Leaving unknown activity.",
  3284. file, line);
  3285. break;
  3286. default:
  3287. erl_exit(1, "%s:%d: Internal error in erts_smp_set_activity()",
  3288. file, line);
  3289. break;
  3290. }
  3291. }
  3292. static ERTS_INLINE erts_aint32_t
  3293. threads_not_under_control(void)
  3294. {
  3295. erts_aint32_t res = system_block_state.threads_to_block;
  3296. /* Waiting is always an allowed activity... */
  3297. res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.wait);
  3298. if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_GC)
  3299. res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.gc);
  3300. if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_IO)
  3301. res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.io);
  3302. if (res < 0) {
  3303. ASSERT(0);
  3304. return 0;
  3305. }
  3306. return res;
  3307. }
  3308. /*
  3309. * erts_block_system() blocks all threads registered as blockable.
  3310. * It doesn't return until either all threads have blocked (0 is returned)
 * or it has timed out (ETIMEDOUT is returned).
  3312. *
  3313. * If allowed activities == 0, blocked threads will release all locks
  3314. * before blocking.
  3315. *
  3316. * If allowed_activities is != 0, erts_block_system() will allow blockable
  3317. * threads to continue executing as long as they are doing an allowed
  3318. * activity. When they are done with the allowed activity they will block,
  3319. * *but* they will block holding locks. Therefore, the thread calling
 * erts_block_system() must *not* try to acquire any locks that might be
  3321. * held by blocked threads holding locks from allowed activities.
  3322. *
  3323. * Currently allowed_activities are:
  3324. * * ERTS_BS_FLG_ALLOW_GC Thread continues with garbage
  3325. * collection and blocks with
  3326. * main process lock on current
  3327. * process locked.
  3328. * * ERTS_BS_FLG_ALLOW_IO Thread continues with I/O
  3329. */
/*
 * Block the system: register the calling thread as the blocker and
 * wait until every other blockable thread is blocked or executing an
 * activity in allowed_activities. A recursive call by the current
 * blocker just bumps a recursion counter. Must be called with no
 * locks held. See the comment above for the allowed_activities flags.
 */
void
erts_block_system(Uint32 allowed_activities)
{
    int do_block;
    profile_sched_msg_q psmq;

    psmq.n = 0;

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif

    erts_smp_mtx_lock(&system_block_state.mtx);

    do_block = erts_smp_pending_system_block();
    if (do_block
        && system_block_state.have_blocker
        && erts_smp_equal_tids(system_block_state.blocker_tid,
                               erts_smp_thr_self())) {
        /* We are already the blocker: recursive block. */
        ASSERT(system_block_state.recursive_block >= 0);
        system_block_state.recursive_block++;

        /* You are not allowed to restrict allowed activities
           in a recursive block! */
        ERTS_SMP_LC_ASSERT((system_block_state.allowed_activities
                            & ~allowed_activities) == 0);
    }
    else {
        erts_smp_atomic32_inc(&erts_system_block_state.do_block);

        /* Someone else might be waiting for us to block... */
        if (do_block) {
        do_block_me:
            block_me(NULL, NULL, NULL, 1, 1, 0, &psmq);
        }

        ASSERT(!system_block_state.have_blocker);
        system_block_state.have_blocker = 1;
        system_block_state.blocker_tid = erts_smp_thr_self();
        system_block_state.allowed_activities = allowed_activities;

        /* The blocker itself does not have to block. */
        if (is_blockable_thread())
            system_block_state.threads_to_block--;

        while (threads_not_under_control() && !system_block_state.emergency)
            erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);

        /* An emergency block (crash dump) takes precedence: give up
           the blocker slot, block until it is over, then retry. */
        if (system_block_state.emergency) {
            system_block_state.have_blocker = 0;
            goto do_block_me;
        }
    }

    erts_smp_mtx_unlock(&system_block_state.mtx);

    if (erts_system_profile_flags.scheduler && psmq.n > 0)
        dispatch_profile_msg_q(&psmq);
}
  3376. /*
  3377. * erts_emergency_block_system() should only be called when we are
  3378. * about to write a crash dump...
  3379. */
/*
 * Emergency variant of erts_block_system(); steals the blocker role
 * from any current blocker.
 *
 * timeout < 0:  wait indefinitely for all threads to come under
 *               control.
 * timeout >= 0: arm the emergency watchdog with that many milliseconds
 *               and stop waiting when it fires (the watchdog resets
 *               emergency_timeout and broadcasts on cnd).
 *
 * Returns 0 on success, EINVAL if an emergency block is already in
 * effect.
 */
int
erts_emergency_block_system(long timeout, Uint32 allowed_activities)
{
    int res = 0;
    long another_blocker;

    erts_smp_mtx_lock(&system_block_state.mtx);

    if (system_block_state.emergency) {
        /* Argh... */
        res = EINVAL;
        goto done;
    }

    another_blocker = erts_smp_pending_system_block();
    system_block_state.emergency = 1;
    erts_smp_atomic32_inc(&erts_system_block_state.do_block);

    if (another_blocker) {
        if (is_blocker()) {
            /* We already are the blocker; undo our extra increment. */
            erts_smp_atomic32_dec(&erts_system_block_state.do_block);
            res = 0;
            goto done;
        }
        /* kick the other blocker */
        erts_smp_cnd_broadcast(&system_block_state.cnd);
        while (system_block_state.have_blocker)
            erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
    }

    ASSERT(!system_block_state.have_blocker);
    system_block_state.have_blocker = 1;
    system_block_state.blocker_tid = erts_smp_thr_self();
    system_block_state.allowed_activities = allowed_activities;

    if (is_blockable_thread())
        system_block_state.threads_to_block--;

    if (timeout < 0) {
        while (threads_not_under_control())
            erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
    }
    else {
        /* Arm the watchdog; it sets emergency_timeout back to -1 and
           broadcasts when the timeout expires. */
        system_block_state.emergency_timeout = timeout;
        erts_smp_cnd_signal(&system_block_state.watchdog_cnd);

        while (system_block_state.emergency_timeout >= 0
               && threads_not_under_control()) {
            erts_smp_cnd_wait(&system_block_state.cnd,
                              &system_block_state.mtx);
        }
    }
 done:
    erts_smp_mtx_unlock(&system_block_state.mtx);
    return res;
}
/*
 * Release a system block taken by erts_block_system() (or pop one
 * level of a recursive block). Must be called by the blocking thread
 * with no locks held.
 */
void
erts_release_system(void)
{
    long do_block;
    profile_sched_msg_q psmq;

    psmq.n = 0;

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif

    erts_smp_mtx_lock(&system_block_state.mtx);
    ASSERT(is_blocker());
    ASSERT(system_block_state.recursive_block >= 0);

    if (system_block_state.recursive_block)
        system_block_state.recursive_block--; /* Just pop one level */
    else {
        /* do_block is non-zero if other blockers are still pending. */
        do_block = erts_smp_atomic32_dectest(&erts_system_block_state.do_block);
        system_block_state.have_blocker = 0;
        if (is_blockable_thread())
            system_block_state.threads_to_block++;
        else
            do_block = 0;

        /* Someone else might be waiting for us to block... */
        if (do_block)
            block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);
        else
            erts_smp_cnd_broadcast(&system_block_state.cnd);
    }

    erts_smp_mtx_unlock(&system_block_state.mtx);

    if (erts_system_profile_flags.scheduler && psmq.n > 0)
        dispatch_profile_msg_q(&psmq);
}
  3459. #ifdef ERTS_ENABLE_LOCK_CHECK
/*
 * (Lock checking only.) Note that an activity change is in progress;
 * erts_is_system_blocked() waits for the counter to reach zero before
 * verifying thread control.
 */
void
erts_lc_activity_change_begin(void)
{
    erts_smp_mtx_lock(&system_block_state.mtx);
    system_block_state.activity_changing++;
    erts_smp_mtx_unlock(&system_block_state.mtx);
}
/*
 * (Lock checking only.) Note that an activity change has completed;
 * wakes erts_is_system_blocked() if it is waiting for changes to
 * settle.
 */
void
erts_lc_activity_change_end(void)
{
    erts_smp_mtx_lock(&system_block_state.mtx);
    system_block_state.activity_changing--;
    if (system_block_state.checking && !system_block_state.activity_changing)
        erts_smp_cnd_broadcast(&system_block_state.cnd);
    erts_smp_mtx_unlock(&system_block_state.mtx);
}
  3476. #endif
/*
 * Return non-zero if the system is currently blocked by the calling
 * thread and the block covers everything except allowed_activities.
 * With lock checking enabled, additionally wait for in-flight activity
 * changes to settle and verify that all threads really are under
 * control.
 */
int
erts_is_system_blocked(erts_activity_t allowed_activities)
{
    int blkd;

    erts_smp_mtx_lock(&system_block_state.mtx);
    blkd = (erts_smp_pending_system_block()
            && system_block_state.have_blocker
            && erts_smp_equal_tids(system_block_state.blocker_tid,
                                   erts_smp_thr_self())
            && !(system_block_state.allowed_activities & ~allowed_activities));
#ifdef ERTS_ENABLE_LOCK_CHECK
    if (blkd) {
        system_block_state.checking = 1;
        while (system_block_state.activity_changing)
            erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
        system_block_state.checking = 0;
        blkd = !threads_not_under_control();
    }
#endif
    erts_smp_mtx_unlock(&system_block_state.mtx);
    return blkd;
}
  3499. static void *
  3500. emergency_watchdog(void *unused)
  3501. {
  3502. erts_smp_mtx_lock(&system_block_state.mtx);
  3503. while (1) {
  3504. long timeout;
  3505. while (system_block_state.emergency_timeout < 0)
  3506. erts_smp_cnd_wait(&system_block_state.watchdog_cnd, &system_block_state.mtx);
  3507. timeout = system_block_state.emergency_timeout;
  3508. erts_smp_mtx_unlock(&system_block_state.mtx);
  3509. if (erts_disable_tolerant_timeofday)
  3510. erts_milli_sleep(timeout);
  3511. else {
  3512. SysTimeval to;
  3513. erts_get_timeval(&to);
  3514. to.tv_sec += timeout / 1000;
  3515. to.tv_usec += timeout % 1000;
  3516. while (1) {
  3517. SysTimeval curr;
  3518. erts_milli_sleep(timeout);
  3519. erts_get_timeval(&curr);
  3520. if (curr.tv_sec > to.tv_sec
  3521. || (curr.tv_sec == to.tv_sec && curr.tv_usec >= to.tv_usec)) {
  3522. break;
  3523. }
  3524. timeout = (to.tv_sec - curr.tv_sec)*1000;
  3525. timeout += (to.tv_usec - curr.tv_usec)/1000;
  3526. }
  3527. }
  3528. erts_smp_mtx_lock(&system_block_state.mtx);
  3529. system_block_state.emergency_timeout = -1;
  3530. erts_smp_cnd_broadcast(&system_block_state.cnd);
  3531. }
  3532. erts_smp_mtx_unlock(&system_block_state.mtx);
  3533. return NULL;
  3534. }
/*
 * One-time initialization of the system-block machinery: local
 * bookkeeping, synchronization primitives, the emergency watchdog
 * thread, and the global atomic activity counters.
 */
void
erts_system_block_init(void)
{
    erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;

    /* Local state... */
    system_block_state.emergency = 0;
    system_block_state.emergency_timeout = -1; /* < 0: watchdog disarmed */
    erts_smp_cnd_init(&system_block_state.watchdog_cnd);
    system_block_state.threads_to_block = 0;
    system_block_state.have_blocker = 0;
    /* system_block_state.block_tid */
    system_block_state.recursive_block = 0;
    system_block_state.allowed_activities = 0;
    erts_smp_tsd_key_create(&system_block_state.blockable_key);
    erts_smp_mtx_init(&system_block_state.mtx, "system_block");
    erts_smp_cnd_init(&system_block_state.cnd);
#ifdef ERTS_ENABLE_LOCK_CHECK
    system_block_state.activity_changing = 0;
    system_block_state.checking = 0;
#endif

    /* Small stack for the watchdog thread.
       NOTE(review): assumes suggested_stack_size is in kilowords --
       confirm against the thread-options definition. */
    thr_opts.suggested_stack_size = 8;
    erts_smp_thr_create(&system_block_state.watchdog_tid,
                        emergency_watchdog,
                        NULL,
                        &thr_opts);

    /* Global state... */
    erts_smp_atomic32_init(&erts_system_block_state.do_block, 0);
    erts_smp_atomic32_init(&erts_system_block_state.in_activity.wait, 0);
    erts_smp_atomic32_init(&erts_system_block_state.in_activity.gc, 0);
    erts_smp_atomic32_init(&erts_system_block_state.in_activity.io, 0);

    /* Make sure blockable threads unregister when exiting... */
    erts_smp_install_exit_handler(erts_unregister_blockable_thread);
}
  3568. #endif /* #ifdef ERTS_SMP */
  3569. char *
  3570. erts_read_env(char *key)
  3571. {
  3572. size_t value_len = 256;
  3573. char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
  3574. int res;
  3575. while (1) {
  3576. res = erts_sys_getenv(key, value, &value_len);
  3577. if (res <= 0)
  3578. break;
  3579. value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
  3580. }
  3581. if (res != 0) {
  3582. erts_free(ERTS_ALC_T_TMP, value);
  3583. return NULL;
  3584. }
  3585. return value;
  3586. }
/*
 * Free a buffer returned by erts_read_env(). NULL is ignored.
 */
void
erts_free_read_env(void *value)
{
    if (value)
        erts_free(ERTS_ALC_T_TMP, value);
}
  3593. int
  3594. erts_write_env(char *key, char *value)
  3595. {
  3596. int ix, res;
  3597. size_t key_len = sys_strlen(key), value_len = sys_strlen(value);
  3598. char *key_value = erts_alloc_fnf(ERTS_ALC_T_TMP,
  3599. key_len + 1 + value_len + 1);
  3600. if (!key_value) {
  3601. errno = ENOMEM;
  3602. return -1;
  3603. }
  3604. sys_memcpy((void *) key_value, (void *) key, key_len);
  3605. ix = key_len;
  3606. key_value[ix++] = '=';
  3607. sys_memcpy((void *) key_value, (void *) value, value_len);
  3608. ix += value_len;
  3609. key_value[ix] = '\0';
  3610. res = erts_sys_putenv(key_value, key_len);
  3611. erts_free(ERTS_ALC_T_TMP, key_value);
  3612. return res;
  3613. }
  3614. /*
  3615. * To be used to silence unused result warnings, but do not abuse it.
  3616. */
void erts_silence_warn_unused_result(long unused)
{
    /* Intentionally empty: passing a result here consumes it, which
       silences warn_unused_result-style compiler warnings. */
}
  3620. #ifdef DEBUG
  3621. /*
  3622. * Handy functions when using a debugger - don't use in the code!
  3623. */
  3624. void upp(buf,sz)
  3625. byte* buf;
  3626. int sz;
  3627. {
  3628. bin_write(ERTS_PRINT_STDERR,NULL,buf,sz);
  3629. }
/* Debugger helper: print the name of an atom term to stderr. */
void pat(Eterm atom)
{
    upp(atom_tab(atom_val(atom))->name,
        atom_tab(atom_val(atom))->len);
}
  3635. void pinfo()
  3636. {
  3637. process_info(ERTS_PRINT_STDOUT, NULL);
  3638. }
  3639. void pp(p)
  3640. Process *p;
  3641. {
  3642. if(p)
  3643. print_process_info(ERTS_PRINT_STDERR, NULL, p);
  3644. }
/* Debugger helper: look up pid (without locking) and print its info. */
void ppi(Eterm pid)
{
    pp(erts_pid2proc_unlocked(pid));
}
/* Debugger helper: print term x to stderr, followed by a newline. */
void td(Eterm x)
{
    erts_fprintf(stderr, "%T\n", x);
}
  3653. void
  3654. ps(Process* p, Eterm* stop)
  3655. {
  3656. Eterm* sp = STACK_START(p) - 1;
  3657. if (stop <= STACK_END(p)) {
  3658. stop = STACK_END(p) + 1;
  3659. }
  3660. while(sp >= stop) {
  3661. erts_printf("%p: %.75T\n", sp, *sp);
  3662. sp--;
  3663. }
  3664. }
  3665. #endif