
/erts/emulator/beam/beam_emu.c

https://github.com/system/erlang-otp
  1. /* ``The contents of this file are subject to the Erlang Public License,
  2. * Version 1.1, (the "License"); you may not use this file except in
  3. * compliance with the License. You should have received a copy of the
  4. * Erlang Public License along with this software. If not, it can be
  5. * retrieved via the world wide web at http://www.erlang.org/.
  6. *
  7. * Software distributed under the License is distributed on an "AS IS"
  8. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  9. * the License for the specific language governing rights and limitations
  10. * under the License.
  11. *
  12. * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
  13. * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
  14. * AB. All Rights Reserved.''
  15. *
  16. * $Id$
  17. */
  18. #ifdef HAVE_CONFIG_H
  19. # include "config.h"
  20. #endif
  21. #include <stddef.h> /* offsetof() */
  22. #include "sys.h"
  23. #include "erl_vm.h"
  24. #include "global.h"
  25. #include "erl_process.h"
  26. #include "erl_nmgc.h"
  27. #include "error.h"
  28. #include "bif.h"
  29. #include "big.h"
  30. #include "beam_load.h"
  31. #include "erl_binary.h"
  32. #include "erl_bits.h"
  33. #include "dist.h"
  34. #include "beam_bp.h"
  35. #include "beam_catches.h"
  36. #ifdef HIPE
  37. #include "hipe_mode_switch.h"
  38. #include "hipe_bif1.h"
  39. #endif
  40. /* #define HARDDEBUG 1 */
  41. #if defined(NO_JUMP_TABLE)
  42. # define OpCase(OpCode) case op_##OpCode: lb_##OpCode
  43. # define CountCase(OpCode) case op_count_##OpCode
  44. # define OpCode(OpCode) ((Uint*)op_##OpCode)
  45. # define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
  46. # define LabelAddr(Addr) &&##Addr
  47. #else
  48. # define OpCase(OpCode) lb_##OpCode
  49. # define CountCase(OpCode) lb_count_##OpCode
  50. # define Goto(Rel) goto *(Rel)
  51. # define LabelAddr(Label) &&Label
  52. # define OpCode(OpCode) (&&lb_##OpCode)
  53. #endif
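/*
 * Illustration (added; not part of the original file): with a jump table,
 * OpCode() takes the address of a label using GCC's labels-as-values
 * extension and Goto() is a single indirect jump, which is what makes the
 * dispatch loop "threaded". A minimal standalone sketch of that technique,
 * assuming GCC or Clang; all names below are invented for the example.
 */
#if 0 /* illustrative sketch only -- never compiled into the emulator */
#include <stdio.h>

static long run(const int *prog)
{
    /* One label address per opcode, just like beam_ops below. */
    static const void *labels[] = { &&op_inc, &&op_dec, &&op_halt };
    long acc = 0;
    const int *pc = prog;

    goto *labels[*pc++];             /* enter the threaded loop */
op_inc:  acc++; goto *labels[*pc++]; /* each opcode ends in a dispatch */
op_dec:  acc--; goto *labels[*pc++];
op_halt: return acc;
}

int main(void)
{
    const int prog[] = { 0, 0, 1, 2 };   /* inc, inc, dec, halt */
    printf("%ld\n", run(prog));          /* prints 1 */
    return 0;
}
#endif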
  54. #ifdef ERTS_ENABLE_LOCK_CHECK
  55. # ifdef ERTS_SMP
  56. # define PROCESS_MAIN_CHK_LOCKS(P) \
  57. do { \
  58. if ((P)) \
  59. erts_proc_lc_chk_only_proc_main((P)); \
  60. else \
  61. erts_lc_check_exact(NULL, 0); \
  62. ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
  63. } while (0)
  64. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
  65. if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
  66. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
  67. if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
  68. # else
  69. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  70. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  71. # define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
  72. # endif
  73. #else
  74. # define PROCESS_MAIN_CHK_LOCKS(P)
  75. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  76. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  77. #endif
  78. /*
  79. * Shallow copy to heap if possible; otherwise,
  80. * move to heap via garbage collection.
  81. */
  82. #define MV_MSG_MBUF_INTO_PROC(M) \
  83. do { \
  84. if ((M)->bp) { \
  85. Uint need = (M)->bp->size; \
  86. if (E - HTOP >= need) { \
  87. Uint *htop = HTOP; \
  88. erts_move_msg_mbuf_to_heap(&htop, &MSO(c_p), (M)); \
  89. ASSERT(htop - HTOP == need); \
  90. HTOP = htop; \
  91. } \
  92. else { \
  93. SWAPOUT; \
  94. reg[0] = r(0); \
  95. PROCESS_MAIN_CHK_LOCKS(c_p); \
  96. FCALLS -= erts_garbage_collect(c_p, 0, NULL, 0); \
  97. PROCESS_MAIN_CHK_LOCKS(c_p); \
  98. r(0) = reg[0]; \
  99. SWAPIN; \
  100. ASSERT(!(M)->bp); \
  101. } \
  102. } \
  103. ASSERT(!(M)->bp); \
  104. } while (0)
  105. /*
  106. * Define macros for deep checking of terms.
  107. */
  108. #if defined(HARDDEBUG)
  109. # define CHECK_TERM(T) size_object(T)
  110. # define CHECK_ARGS(PC) \
  111. do { \
  112. int i_; \
  113. int Arity_ = PC[-1]; \
  114. if (Arity_ > 0) { \
  115. CHECK_TERM(r(0)); \
  116. } \
  117. for (i_ = 1; i_ < Arity_; i_++) { \
  118. CHECK_TERM(x(i_)); \
  119. } \
  120. } while (0)
  121. #else
  122. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  123. # define CHECK_ARGS(T)
  124. #endif
  125. #ifndef MAX
  126. #define MAX(x, y) (((x) > (y)) ? (x) : (y))
  127. #endif
  128. #define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
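/*
 * Illustration (added): GET_BIF_ADDRESS recovers a C function pointer from
 * a word slot in an export entry. A hedged standalone sketch of that
 * technique only; the Demo* names are invented, and the real Export layout
 * is whatever beam_load.h defines. Round-tripping a function pointer
 * through an integer word is implementation-defined C, but it is the trick
 * the macro relies on.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>
#include <stdint.h>

typedef long (*DemoBif)(long);

typedef struct { uintptr_t code[5]; } DemoExport;  /* stand-in for Export */

static long twice(long x) { return 2 * x; }

int main(void)
{
    DemoExport e;
    e.code[4] = (uintptr_t) twice;       /* store the address as a word */
    DemoBif f = (DemoBif) e.code[4];     /* recover it, as the macro does */
    printf("%ld\n", f(21));              /* prints 42 */
    return 0;
}
#endif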
  129. /*
130. * We reuse some of the fields in the save area of the process structure.
131. * This is safe to do, since this space is only actively used when
  132. * the process is switched out.
  133. */
  134. #define REDS_IN(p) ((p)->def_arg_reg[5])
  135. /*
136. * Add a byte offset to a pointer to Eterm. This is useful when
137. * the loader has precalculated a byte offset.
  138. */
  139. #define ADD_BYTE_OFFSET(ptr, offset) \
  140. ((Eterm *) (((unsigned char *)ptr) + (offset)))
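/*
 * Illustration (added): the offsets the loader precomputes are in bytes,
 * so the macro must step through unsigned char * -- adding the offset to
 * an Eterm * directly would scale it by sizeof(Eterm) a second time.
 * A minimal standalone sketch:
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

int main(void)
{
    unsigned long heap[4] = { 10, 20, 30, 40 };
    unsigned long *p = heap;

    /* A byte offset of 2*sizeof(*p) should land on heap[2]. */
    size_t byte_off = 2 * sizeof(unsigned long);
    unsigned long *q = (unsigned long *) ((unsigned char *) p + byte_off);
    printf("%lu\n", *q);   /* prints 30; p + byte_off would overshoot */
    return 0;
}
#endif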
  141. /* We don't check the range if an ordinary switch is used */
  142. #ifdef NO_JUMP_TABLE
  143. #define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
  144. #else
  145. #define VALID_INSTR(IP) \
  146. ((Sint)LabelAddr(emulator_loop) <= (Sint)(IP) && \
  147. (Sint)(IP) < (Sint)LabelAddr(end_emulator_loop))
  148. #endif /* NO_JUMP_TABLE */
  149. #define SET_CP(p, ip) \
  150. ASSERT(VALID_INSTR(*(ip))); \
  151. (p)->cp = (ip)
  152. #define SET_I(ip) \
  153. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  154. I = (ip)
  155. #define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
  156. /*
  157. * Store a result into a register given a destination descriptor.
  158. */
  159. #define StoreResult(Result, DestDesc) \
  160. do { \
  161. Eterm stb_reg; \
  162. stb_reg = (DestDesc); \
  163. CHECK_TERM(Result); \
  164. switch (beam_reg_tag(stb_reg)) { \
  165. case R_REG_DEF: \
  166. r(0) = (Result); break; \
  167. case X_REG_DEF: \
  168. xb(x_reg_offset(stb_reg)) = (Result); break; \
  169. default: \
  170. yb(y_reg_offset(stb_reg)) = (Result); break; \
  171. } \
  172. } while (0)
  173. #define StoreSimpleDest(Src, Dest) Dest = (Src)
  174. /*
  175. * Store a result into a register and execute the next instruction.
  176. * Dst points to the word with a destination descriptor, which MUST
  177. * be just before the next instruction.
  178. */
  179. #define StoreBifResult(Dst, Result) \
  180. do { \
  181. Eterm* stb_next; \
  182. Eterm stb_reg; \
  183. stb_reg = Arg(Dst); \
  184. I += (Dst) + 2; \
  185. stb_next = (Eterm *) *I; \
  186. CHECK_TERM(Result); \
  187. switch (beam_reg_tag(stb_reg)) { \
  188. case R_REG_DEF: \
  189. r(0) = (Result); Goto(stb_next); \
  190. case X_REG_DEF: \
  191. xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  192. default: \
  193. yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  194. } \
  195. } while (0)
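/*
 * Illustration (added): StoreResult/StoreBifResult switch on a tag packed
 * into the destination descriptor to pick a register bank, then use the
 * offset packed beside it. A standalone sketch of the idea; the tag values
 * and the 2-bit packing below are invented for the example (the real
 * encoding comes from beam_reg_tag()/x_reg_offset()).
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

#define TAG_R 0u
#define TAG_X 1u
#define TAG_Y 2u
#define DESC(tag, idx) (((idx) << 2) | (tag))   /* invented packing */

int main(void)
{
    long r0 = 0, xregs[4] = {0}, yregs[4] = {0};
    unsigned desc = DESC(TAG_X, 2);             /* "store into x(2)" */
    long result = 42;

    switch (desc & 3u) {                        /* like beam_reg_tag() */
    case TAG_R: r0 = result; break;
    case TAG_X: xregs[desc >> 2] = result; break;
    default:    yregs[desc >> 2] = result; break;
    }
    printf("%ld %ld %ld\n", r0, xregs[2], yregs[0]);   /* prints 0 42 0 */
    return 0;
}
#endif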
  196. #define ClauseFail() goto lb_jump_f
  197. #define SAVE_CP(X) *(X) = make_cp(c_p->cp)
  198. #define RESTORE_CP(X) SET_CP(c_p, cp_val(*(X)))
  199. #define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
  200. /*
  201. * Special Beam instructions.
  202. */
  203. Eterm beam_apply[2];
  204. Eterm beam_exit[1];
  205. Eterm* em_call_error_handler;
  206. Eterm* em_apply_bif;
  207. Eterm* em_call_traced_function;
  208. /* NOTE These should be the only variables containing trace instructions.
209. ** Sometimes tests are made against the instruction value, and sometimes
210. ** against the referring variable (one of these), and rogue references
  211. ** will most likely cause chaos.
  212. */
  213. Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  214. Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
  215. Eterm beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  216. /*
  217. * All Beam instructions in numerical order.
  218. */
  219. #ifndef NO_JUMP_TABLE
  220. void** beam_ops;
  221. #endif
  222. #ifndef ERTS_SMP /* Not supported with smp emulator */
  223. extern int count_instructions;
  224. #endif
  225. #if defined(HYBRID)
  226. #define SWAPIN \
  227. g_htop = global_htop; \
  228. g_hend = global_hend; \
  229. HTOP = HEAP_TOP(c_p); \
  230. E = c_p->stop
  231. #define SWAPOUT \
  232. global_htop = g_htop; \
  233. global_hend = g_hend; \
  234. HEAP_TOP(c_p) = HTOP; \
  235. c_p->stop = E
  236. #else
  237. #define SWAPIN \
  238. HTOP = HEAP_TOP(c_p); \
  239. E = c_p->stop
  240. #define SWAPOUT \
  241. HEAP_TOP(c_p) = HTOP; \
  242. c_p->stop = E
  243. #endif
  244. #define PRE_BIF_SWAPOUT(P) \
  245. HEAP_TOP((P)) = HTOP; \
  246. (P)->stop = E; \
  247. PROCESS_MAIN_CHK_LOCKS((P)); \
  248. ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
  249. #if defined(HYBRID)
  250. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  251. if ((_p)->mbuf) { \
  252. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  253. } \
  254. SWAPIN
  255. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  256. if ((_p)->mbuf) { \
  257. _regs[0] = r(0); \
  258. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  259. r(0) = _regs[0]; \
  260. } \
  261. SWAPIN
  262. #else
  263. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  264. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  265. PROCESS_MAIN_CHK_LOCKS((_p)); \
  266. if ((_p)->mbuf) { \
  267. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  268. E = (_p)->stop; \
  269. } \
  270. HTOP = HEAP_TOP((_p))
  271. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  272. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  273. PROCESS_MAIN_CHK_LOCKS((_p)); \
  274. if ((_p)->mbuf) { \
  275. _regs[0] = r(0); \
  276. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  277. r(0) = _regs[0]; \
  278. E = (_p)->stop; \
  279. } \
  280. HTOP = HEAP_TOP((_p))
  281. #endif
  282. #define SAVE_HTOP HEAP_TOP(c_p) = HTOP
  283. #define db(N) (N)
  284. #define tb(N) (N)
  285. #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
  286. #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
  287. #define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  288. #define x(N) reg[N]
  289. #define y(N) E[N]
  290. #define r(N) x##N
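/*
 * Illustration (added): r(N) pastes its argument onto the identifier x,
 * so r(0) names the dedicated variable x0 (pinned in a machine register
 * on some platforms) rather than indexing the reg[] array. A standalone
 * sketch of the token-pasting trick:
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

#define r(N) x##N   /* r(0) expands to the identifier x0 */

int main(void)
{
    int x0 = 7;           /* stand-in for X register zero       */
    int reg[8] = { 0 };   /* stand-in for the other X registers */

    r(0) += 1;            /* touches x0, not reg[0] */
    printf("%d %d\n", r(0), reg[0]);   /* prints 8 0 */
    return 0;
}
#endif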
  291. /*
  292. * Makes sure that there are StackNeed + HeapNeed + 1 words available
  293. * on the combined heap/stack segment, then allocates StackNeed + 1
  294. * words on the stack and saves CP.
  295. *
  296. * M is number of live registers to preserve during garbage collection
  297. */
  298. #define AH(StackNeed, HeapNeed, M) \
  299. do { \
  300. int needed; \
  301. needed = (StackNeed) + 1; \
  302. if (E - HTOP < (needed + (HeapNeed))) { \
  303. SWAPOUT; \
  304. reg[0] = r(0); \
  305. PROCESS_MAIN_CHK_LOCKS(c_p); \
  306. FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
  307. PROCESS_MAIN_CHK_LOCKS(c_p); \
  308. r(0) = reg[0]; \
  309. SWAPIN; \
  310. } \
  311. E -= needed; \
  312. SAVE_CP(E); \
  313. } while (0)
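/*
 * Illustration (added): the stack and heap share one memory block, with
 * HTOP growing upwards and E downwards, so a single E - HTOP comparison
 * covers both the stack need and the heap need. A standalone sketch of
 * the shared-arena check; the "would GC" branch stands in for the
 * SWAPOUT / erts_garbage_collect() / SWAPIN sequence above.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

#define ARENA_WORDS 16

int main(void)
{
    unsigned long arena[ARENA_WORDS];
    unsigned long *htop = arena;               /* heap grows up    */
    unsigned long *e = arena + ARENA_WORDS;    /* stack grows down */
    long stack_need = 3, heap_need = 5;

    if (e - htop < stack_need + heap_need) {
        printf("would GC: only %ld words free\n", (long)(e - htop));
    }
    e -= stack_need;                           /* claim the stack slots */
    printf("%ld words left between stack and heap\n", (long)(e - htop));
    return 0;   /* prints: 13 words left between stack and heap */
}
#endif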
  314. #define Allocate(Ns, Live) AH(Ns, 0, Live)
  315. #define AllocateZero(Ns, Live) \
  316. do { Eterm* ptr; \
  317. int i = (Ns); \
  318. AH(i, 0, Live); \
  319. for (ptr = E + i; ptr > E; ptr--) { \
  320. make_blank(*ptr); \
  321. } \
  322. } while (0)
  323. #define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
  324. #define AllocateHeapZero(Ns, Nh, Live) \
  325. do { Eterm* ptr; \
  326. int i = (Ns); \
  327. AH(i, Nh, Live); \
  328. for (ptr = E + i; ptr > E; ptr--) { \
  329. make_blank(*ptr); \
  330. } \
  331. } while (0)
  332. #define AllocateInit(Ns, Live, Y) \
  333. do { AH(Ns, 0, Live); make_blank(Y); } while (0)
  334. /*
  335. * Like the AH macro, but allocates no additional heap space.
  336. */
  337. #define A(StackNeed, M) AH(StackNeed, 0, M)
  338. #define D(N) \
  339. RESTORE_CP(E); \
  340. E += (N) + 1;
  341. /*
  342. * Check if Nh words of heap are available; if not, do a garbage collection.
  343. * Live is number of active argument registers to be preserved.
  344. */
  345. #define TestHeap(Nh, Live) \
  346. do { \
  347. unsigned need = (Nh); \
  348. if (E - HTOP < need) { \
  349. SWAPOUT; \
  350. reg[0] = r(0); \
  351. PROCESS_MAIN_CHK_LOCKS(c_p); \
  352. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  353. PROCESS_MAIN_CHK_LOCKS(c_p); \
  354. r(0) = reg[0]; \
  355. SWAPIN; \
  356. } \
  357. } while (0)
  358. /*
  359. * Check if Nh words of heap are available; if not, do a garbage collection.
  360. * Live is number of active argument registers to be preserved.
  361. * Takes special care to preserve Extra if a garbage collection occurs.
  362. */
  363. #define TestHeapPreserve(Nh, Live, Extra) \
  364. do { \
  365. unsigned need = (Nh); \
  366. if (E - HTOP < need) { \
  367. SWAPOUT; \
  368. reg[0] = r(0); \
  369. reg[Live] = Extra; \
  370. PROCESS_MAIN_CHK_LOCKS(c_p); \
  371. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
  372. PROCESS_MAIN_CHK_LOCKS(c_p); \
  373. if (Live > 0) { \
  374. r(0) = reg[0]; \
  375. } \
  376. Extra = reg[Live]; \
  377. SWAPIN; \
  378. } \
  379. } while (0)
  380. #ifdef HYBRID
  381. #ifdef INCREMENTAL
  382. #define TestGlobalHeap(Nh, Live, hp) \
  383. do { \
  384. unsigned need = (Nh); \
  385. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  386. SWAPOUT; \
  387. reg[0] = r(0); \
  388. FCALLS -= need; \
  389. (hp) = IncAlloc(c_p,need,reg,(Live)); \
  390. r(0) = reg[0]; \
  391. SWAPIN; \
  392. } while (0)
  393. #else
  394. #define TestGlobalHeap(Nh, Live, hp) \
  395. do { \
  396. unsigned need = (Nh); \
  397. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  398. if (g_hend - g_htop < need) { \
  399. SWAPOUT; \
  400. reg[0] = r(0); \
  401. FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
  402. r(0) = reg[0]; \
  403. SWAPIN; \
  404. } \
  405. (hp) = global_htop; \
  406. } while (0)
  407. #endif
  408. #endif /* HYBRID */
  409. #define Init(N) make_blank(yb(N))
  410. #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
  411. #define Init3(Y1, Y2, Y3) \
  412. do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
  413. #define MakeFun(FunP, NumFree) \
  414. do { \
  415. SWAPOUT; \
  416. reg[0] = r(0); \
  417. r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
  418. SWAPIN; \
  419. } while (0)
  420. /*
421. * Check that we haven't used up the reductions, and jump to the function pointed to by
  422. * the I register. If we are out of reductions, do a context switch.
  423. */
  424. #define DispatchMacro() \
  425. do { \
  426. Eterm* dis_next; \
  427. dis_next = (Eterm *) *I; \
  428. CHECK_ARGS(I); \
  429. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  430. FCALLS--; \
  431. Goto(dis_next); \
  432. } else { \
  433. goto context_switch; \
  434. } \
  435. } while (0)
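/*
 * Illustration (added): every call-like instruction pays one reduction,
 * and the process is context switched when the budget runs out. The
 * FCALLS > neg_o_reds alternative covers call saving, where FCALLS starts
 * at zero and counts down into the negatives. A standalone sketch of the
 * budget pattern with call saving inactive:
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

int main(void)
{
    int fcalls = 5;       /* reductions left in this time slice      */
    int neg_o_reds = 0;   /* nonzero only when call saving is active */
    int executed = 0;

    for (;;) {
        if (fcalls > 0 || fcalls > neg_o_reds) {
            fcalls--;     /* pay one reduction, run the instruction */
            executed++;
        } else {
            break;        /* out of reductions: context switch      */
        }
    }
    printf("ran %d instructions before yielding\n", executed);  /* 5 */
    return 0;
}
#endif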
  436. #define DispatchMacroFun() \
  437. do { \
  438. Eterm* dis_next; \
  439. dis_next = (Eterm *) *I; \
  440. CHECK_ARGS(I); \
  441. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  442. FCALLS--; \
  443. Goto(dis_next); \
  444. } else { \
  445. goto context_switch_fun; \
  446. } \
  447. } while (0)
  448. #define DispatchMacrox() \
  449. do { \
  450. if (FCALLS > 0) { \
  451. Eterm* dis_next; \
  452. SET_I(((Export *) Arg(0))->address); \
  453. dis_next = (Eterm *) *I; \
  454. FCALLS--; \
  455. CHECK_ARGS(I); \
  456. Goto(dis_next); \
  457. } else if (c_p->ct != NULL && FCALLS > neg_o_reds) { \
  458. goto save_calls1; \
  459. } else { \
  460. SET_I(((Export *) Arg(0))->address); \
  461. CHECK_ARGS(I); \
  462. goto context_switch; \
  463. } \
  464. } while (0)
  465. #ifdef DEBUG
  466. /*
  467. * To simplify breakpoint setting, put the code in one place only and jump to it.
  468. */
  469. # define Dispatch() goto do_dispatch
  470. # define Dispatchx() goto do_dispatchx
  471. # define Dispatchfun() goto do_dispatchfun
  472. #else
  473. /*
  474. * Inline for speed.
  475. */
  476. # define Dispatch() DispatchMacro()
  477. # define Dispatchx() DispatchMacrox()
  478. # define Dispatchfun() DispatchMacroFun()
  479. #endif
  480. #define Self(R) R = c_p->id
  481. #define Node(R) R = erts_this_node->sysname
  482. #define Arg(N) I[(N)+1]
  483. #define Next(N) \
  484. I += (N) + 1; \
  485. ASSERT(VALID_INSTR(*I)); \
  486. Goto(*I)
  487. #define PreFetch(N, Dst) do { Dst = (Eterm *) *(I + N + 1); } while (0)
  488. #define NextPF(N, Dst) \
  489. I += N + 1; \
  490. ASSERT(VALID_INSTR(Dst)); \
  491. Goto(Dst)
  492. #define GetR(pos, tr) \
  493. do { \
  494. tr = Arg(pos); \
  495. switch (beam_reg_tag(tr)) { \
  496. case R_REG_DEF: tr = r(0); break; \
  497. case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
  498. case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
  499. } \
  500. CHECK_TERM(tr); \
  501. } while (0)
  502. #define GetArg1(N, Dst) GetR((N), Dst)
  503. #define GetArg2(N, Dst1, Dst2) \
  504. do { \
  505. GetR(N, Dst1); \
  506. GetR((N)+1, Dst2); \
  507. } while (0)
  508. #define PutList(H, T, Dst, Store) \
  509. do { \
  510. HTOP[0] = (H); HTOP[1] = (T); \
  511. Store(make_list(HTOP), Dst); \
  512. HTOP += 2; \
  513. } while (0)
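/*
 * Illustration (added): a list cell is two consecutive heap words, head
 * then tail, and the "list" value is a tagged pointer to them. A
 * standalone sketch that builds [1,2] back to front the way repeated
 * PutList does; plain unsigned longs stand in for tagged Eterms, 0 plays
 * the role of NIL, and pointers are assumed to fit in an unsigned long,
 * as Eterm does here.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

int main(void)
{
    unsigned long heap[4];
    unsigned long *htop = heap;
    unsigned long list = 0;                          /* NIL */

    htop[0] = 2; htop[1] = list;                     /* CAR, CDR   */
    list = (unsigned long) htop; htop += 2;          /* cons [2]   */
    htop[0] = 1; htop[1] = list;
    list = (unsigned long) htop; htop += 2;          /* cons [1,2] */

    for (unsigned long *p = (unsigned long *) list; p;
         p = (unsigned long *) p[1])
        printf("%lu ", p[0]);                        /* prints 1 2 */
    printf("\n");
    return 0;
}
#endif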
  514. #define Move(Src, Dst, Store) \
  515. do { \
  516. Eterm term = (Src); \
  517. Store(term, Dst); \
  518. } while (0)
  519. #define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
  520. #define MoveGenDest(src, dstp) \
  521. if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
  522. #define MoveReturn(Src, Dest) \
  523. (Dest) = (Src); \
  524. I = c_p->cp; \
  525. ASSERT(VALID_INSTR(*c_p->cp)); \
  526. CHECK_TERM(r(0)); \
  527. Goto(*I)
  528. #define DeallocateReturn(Deallocate) \
  529. do { \
  530. int words_to_pop = (Deallocate); \
  531. SET_I(cp_val(*E)); \
  532. E = ADD_BYTE_OFFSET(E, words_to_pop); \
  533. CHECK_TERM(r(0)); \
  534. Goto(*I); \
  535. } while (0)
  536. #define MoveDeallocateReturn(Src, Dest, Deallocate) \
  537. (Dest) = (Src); \
  538. DeallocateReturn(Deallocate)
  539. #define MoveCall(Src, Dest, CallDest, Size) \
  540. (Dest) = (Src); \
  541. SET_CP(c_p, I+Size+1); \
  542. SET_I((Eterm *) CallDest); \
  543. Dispatch();
  544. #define MoveCallLast(Src, Dest, CallDest, Deallocate) \
  545. (Dest) = (Src); \
  546. RESTORE_CP(E); \
  547. E = ADD_BYTE_OFFSET(E, (Deallocate)); \
  548. SET_I((Eterm *) CallDest); \
  549. Dispatch();
  550. #define MoveCallOnly(Src, Dest, CallDest) \
  551. (Dest) = (Src); \
  552. SET_I((Eterm *) CallDest); \
  553. Dispatch();
  554. #define GetList(Src, H, T) do { \
  555. Eterm* tmp_ptr = list_val(Src); \
  556. H = CAR(tmp_ptr); \
  557. T = CDR(tmp_ptr); } while (0)
  558. #define GetTupleElement(Src, Element, Dest) \
  559. do { \
  560. tmp_arg1 = (Eterm) (((unsigned char *) tuple_val(Src)) + (Element)); \
  561. (Dest) = (*(Eterm *)tmp_arg1); \
  562. } while (0)
  563. #define ExtractNextElement(Dest) \
  564. tmp_arg1 += sizeof(Eterm); \
  565. (Dest) = (* (Eterm *) (((unsigned char *) tmp_arg1)))
  566. #define ExtractNextElement2(Dest) \
  567. do { \
  568. Eterm* ene_dstp = &(Dest); \
  569. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  570. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  571. tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
  572. } while (0)
  573. #define ExtractNextElement3(Dest) \
  574. do { \
  575. Eterm* ene_dstp = &(Dest); \
  576. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  577. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  578. ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
  579. tmp_arg1 += 3*sizeof(Eterm); \
  580. } while (0)
  581. #define ExtractNextElement4(Dest) \
  582. do { \
  583. Eterm* ene_dstp = &(Dest); \
  584. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  585. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  586. ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
  587. ene_dstp[3] = ((Eterm *) tmp_arg1)[4]; \
  588. tmp_arg1 += 4*sizeof(Eterm); \
  589. } while (0)
  590. #define ExtractElement(Element, Dest) \
  591. do { \
  592. tmp_arg1 += (Element); \
  593. (Dest) = (* (Eterm *) tmp_arg1); \
  594. } while (0)
  595. #define PutTuple(Arity, Src, Dest) \
  596. ASSERT(is_arity_value(Arity)); \
  597. Dest = make_tuple(HTOP); \
  598. HTOP[0] = (Arity); \
  599. HTOP[1] = (Src); \
  600. HTOP += 2
  601. #define Put(Word) *HTOP++ = (Word)
  602. #define EqualImmed(X, Y, Action) if (X != Y) { Action; }
  603. #define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
  604. #define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
  605. #define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
  606. #define IsConstant(X, Fail) if (is_list(X) || is_nil(X) || is_tuple(X)) { Fail; }
  607. #define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
  608. #define IsIntegerAllocate(Src, Need, Alive, Fail) \
  609. if (is_not_integer(Src)) { Fail; } \
  610. A(Need, Alive)
  611. #define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
  612. #define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
  613. #define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
  614. #define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
  615. if (is_not_list(Src)) { Fail; } \
  616. A(Need, Alive)
  617. #define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
  618. if (is_not_list(Src)) { Fail; } \
  619. TestHeap(Need, Alive)
  620. #define IsTuple(X, Action) if (is_not_tuple(X)) Action
  621. #define IsArity(Pointer, Arity, Fail) \
  622. if (*(Eterm *)(tmp_arg1 = (Eterm)tuple_val(Pointer)) != (Arity)) { Fail; }
  623. #define IsFunction(X, Action) \
  624. do { \
  625. if ( !(is_any_fun(X)) ) { \
  626. Action; \
  627. } \
  628. } while (0)
  629. #define IsFunction2(F, A, Action) \
  630. do { \
  631. if (is_function_2(c_p, F, A) != am_true ) {\
  632. Action; \
  633. } \
  634. } while (0)
  635. #define IsTupleOfArity(Src, Arity, Fail) \
  636. do { \
  637. if (is_not_tuple(Src) || *(Eterm *)(tmp_arg1 = (Eterm) tuple_val(Src)) != Arity) { \
  638. Fail; \
  639. } \
  640. } while (0)
  641. #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
  642. #define IsBinary(Src, Fail) \
  643. if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
  644. #define IsBitstring(Src, Fail) \
  645. if (is_not_binary(Src)) { Fail; }
  646. #ifdef ARCH_64
  647. #define BsSafeMul(A, B, Fail, Target) \
  648. do { Uint64 _res = (A) * (B); \
  649. if (_res / B != A) { Fail; } \
  650. Target = _res; \
  651. } while (0)
  652. #else
  653. #define BsSafeMul(A, B, Fail, Target) \
  654. do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
  655. if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
  656. Target = _res; \
  657. } while (0)
  658. #endif
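/*
 * Illustration (added): both branches of BsSafeMul reject an overflowing
 * size * unit product -- the 64-bit branch by dividing back, the 32-bit
 * branch by checking the high word of a 64-bit product. A standalone
 * sketch of the divide-back check, with an explicit b != 0 guard that the
 * macro gets away without because a unit of zero does not occur there:
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

/* Returns 0 and stores a*b in *out, or -1 if the product overflowed. */
static int safe_mul(unsigned long a, unsigned long b, unsigned long *out)
{
    unsigned long res = a * b;       /* may wrap around */
    if (b != 0 && res / b != a)
        return -1;                   /* wrapped: reject */
    *out = res;
    return 0;
}

int main(void)
{
    unsigned long r;
    printf("%d\n", safe_mul(1000, 1000, &r));   /* 0, r == 1000000 */
    printf("%d\n", safe_mul(~0UL, 2, &r));      /* -1 (overflow)   */
    return 0;
}
#endif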
  659. #define BsGetFieldSize(Bits, Unit, Fail, Target) \
  660. do { \
  661. Sint _signed_size; Uint _uint_size; \
  662. if (is_small(Bits)) { \
  663. _signed_size = signed_val(Bits); \
  664. if (_signed_size < 0) { Fail; } \
  665. _uint_size = (Uint) _signed_size; \
  666. } else { \
  667. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  668. _uint_size = temp_bits; \
  669. } \
  670. BsSafeMul(_uint_size, Unit, Fail, Target); \
  671. } while (0)
  672. #define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
  673. do { \
  674. Sint _signed_size; Uint _uint_size; \
  675. if (is_small(Bits)) { \
  676. _signed_size = signed_val(Bits); \
  677. if (_signed_size < 0) { Fail; } \
  678. _uint_size = (Uint) _signed_size; \
  679. } else { \
  680. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  681. _uint_size = (Uint) temp_bits; \
  682. } \
  683. Target = _uint_size * Unit; \
  684. } while (0)
  685. #define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  686. do { \
  687. ErlBinMatchBuffer *_mb; \
  688. Eterm _result; Sint _size; \
  689. if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
  690. _size *= ((Flags) >> 3); \
  691. TestHeap(FLOAT_SIZE_OBJECT, Live); \
  692. _mb = ms_matchbuffer(Ms); \
  693. SWAPOUT; \
  694. _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
  695. HTOP = HEAP_TOP(c_p); \
  696. if (is_non_value(_result)) { Fail; } \
  697. else { Store(_result, Dst); } \
  698. } while (0)
  699. #define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  700. do { \
  701. ErlBinMatchBuffer *_mb; \
  702. Eterm _result; \
  703. TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
  704. _mb = ms_matchbuffer(Ms); \
  705. SWAPOUT; \
  706. _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
  707. HTOP = HEAP_TOP(c_p); \
  708. if (is_non_value(_result)) { Fail; } \
  709. else { Store(_result, Dst); } \
  710. } while (0)
  711. #define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  712. do { \
  713. ErlBinMatchBuffer *_mb; \
  714. Eterm _result; Uint _size; \
  715. BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
  716. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  717. _mb = ms_matchbuffer(Ms); \
  718. SWAPOUT; \
  719. _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
  720. HTOP = HEAP_TOP(c_p); \
  721. if (is_non_value(_result)) { Fail; } \
  722. else { Store(_result, Dst); } \
  723. } while (0)
  724. #define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
  725. do { \
  726. ErlBinMatchBuffer *_mb; \
  727. Eterm _result; \
  728. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  729. _mb = ms_matchbuffer(Ms); \
  730. if (((_mb->size - _mb->offset) % Unit) == 0) { \
  731. SWAPOUT; \
  732. _result = erts_bs_get_binary_all_2(c_p, _mb); \
  733. HTOP = HEAP_TOP(c_p); \
  734. ASSERT(is_value(_result)); \
  735. Store(_result, Dst); \
  736. } else { Fail; } \
  737. } while (0)
  738. #define BsSkipBits2(Ms, Bits, Unit, Fail) \
  739. do { \
  740. ErlBinMatchBuffer *_mb; \
  741. size_t new_offset; \
  742. Uint _size; \
  743. _mb = ms_matchbuffer(Ms); \
  744. BsGetFieldSize(Bits, Unit, Fail, _size); \
  745. new_offset = _mb->offset + _size; \
  746. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  747. else { Fail; } \
  748. } while (0)
  749. #define BsSkipBitsAll2(Ms, Unit, Fail) \
  750. do { \
  751. ErlBinMatchBuffer *_mb; \
  752. _mb = ms_matchbuffer(Ms); \
  753. if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
  754. else { Fail; } \
  755. } while (0)
  756. #define BsSkipBitsImm2(Ms, Bits, Fail) \
  757. do { \
  758. ErlBinMatchBuffer *_mb; \
  759. size_t new_offset; \
  760. _mb = ms_matchbuffer(Ms); \
  761. new_offset = _mb->offset + (Bits); \
  762. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  763. else { Fail; } \
  764. } while (0)
  765. #define NewBsPutIntegerImm(Sz, Flags, Src) \
  766. do { \
  767. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
  768. } while (0)
  769. #define NewBsPutInteger(Sz, Flags, Src) \
  770. do { \
  771. Sint _size; \
  772. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  773. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
  774. { goto badarg; } \
  775. } while (0)
  776. #define NewBsPutFloatImm(Sz, Flags, Src) \
  777. do { \
  778. if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
  779. } while (0)
  780. #define NewBsPutFloat(Sz, Flags, Src) \
  781. do { \
  782. Sint _size; \
  783. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  784. if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
  785. } while (0)
  786. #define NewBsPutBinary(Sz, Flags, Src) \
  787. do { \
  788. Sint _size; \
  789. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  790. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
  791. } while (0)
  792. #define NewBsPutBinaryImm(Sz, Src) \
  793. do { \
  794. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
  795. } while (0)
  796. #define NewBsPutBinaryAll(Src, Unit) \
  797. do { \
  798. if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
  799. } while (0)
  800. #define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
  801. #define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
  802. #define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
  803. static BifFunction translate_gc_bif(void* gcf);
  804. static Eterm* handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf);
  805. static Eterm* next_catch(Process* c_p, Eterm *reg);
  806. static void terminate_proc(Process* c_p, Eterm Value);
  807. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  808. static void save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg,
  809. BifFunction bf, Eterm args);
  810. static struct StackTrace * get_trace_from_exc(Eterm exc);
  811. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  812. static Eterm call_error_handler(Process* p, Eterm* ip, Eterm* reg);
  813. static Eterm call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg);
  814. static Uint* fixed_apply(Process* p, Eterm* reg, Uint arity);
  815. static Eterm* apply(Process* p, Eterm module, Eterm function,
  816. Eterm args, Eterm* reg);
  817. static int hibernate(Process* c_p, Eterm module, Eterm function,
  818. Eterm args, Eterm* reg);
  819. static Eterm* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
  820. static Eterm* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
  821. static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free);
  822. #if defined(_OSE_) || defined(VXWORKS)
  823. static int init_done;
  824. #endif
  825. void
  826. init_emulator(void)
  827. {
  828. #if defined(_OSE_) || defined(VXWORKS)
  829. init_done = 0;
  830. #endif
  831. process_main();
  832. }
  833. /*
  834. * On certain platforms, make sure that the main variables really are placed
  835. * in registers.
  836. */
  837. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  838. # define REG_x0 asm("%l0")
  839. # define REG_xregs asm("%l1")
  840. # define REG_htop asm("%l2")
  841. # define REG_stop asm("%l3")
  842. # define REG_I asm("%l4")
  843. # define REG_fcalls asm("%l5")
  844. # define REG_tmp_arg1 asm("%l6")
  845. # define REG_tmp_arg2 asm("%l7")
  846. #else
  847. # define REG_x0
  848. # define REG_xregs
  849. # define REG_htop
  850. # define REG_stop
  851. # define REG_I
  852. # define REG_fcalls
  853. # define REG_tmp_arg1
  854. # define REG_tmp_arg2
  855. #endif
  856. /*
  857. * process_main() is called twice:
  858. * The first call performs some initialisation, including exporting
  859. * the instructions' C labels to the loader.
  860. * The second call starts execution of BEAM code. This call never returns.
  861. */
  862. void process_main(void)
  863. {
  864. #if !defined(_OSE_) && !defined(VXWORKS)
  865. static int init_done = 0;
  866. #endif
  867. Process* c_p = NULL;
  868. int reds_used;
  869. /*
  870. * X register zero; also called r(0)
  871. */
  872. register Eterm x0 REG_x0 = NIL;
873. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC;
  874. * in all other cases x0 is used.
  875. */
  876. register Eterm* reg REG_xregs = NULL;
  877. /*
  878. * Top of heap (next free location); grows upwards.
  879. */
  880. register Eterm* HTOP REG_htop = NULL;
  881. #ifdef HYBRID
  882. Eterm *g_htop;
  883. Eterm *g_hend;
  884. #endif
  885. /* Stack pointer. Grows downwards; points
  886. * to last item pushed (normally a saved
  887. * continuation pointer).
  888. */
  889. register Eterm* E REG_stop = NULL;
  890. /*
  891. * Pointer to next threaded instruction.
  892. */
  893. register Eterm *I REG_I = NULL;
  894. /* Number of reductions left. This function
  895. * returns to the scheduler when FCALLS reaches zero.
  896. */
  897. register Sint FCALLS REG_fcalls = 0;
  898. /*
  899. * Temporaries used for picking up arguments for instructions.
  900. */
  901. register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
  902. register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
  903. Eterm tmp_big[2]; /* Temporary buffer for small bignums. */
  904. #ifndef ERTS_SMP
  905. static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
  906. /* X registers -- not used directly, but
  907. * through 'reg', because using it directly
  908. * needs two instructions on a SPARC,
  909. * while using it through reg needs only
  910. * one.
  911. */
  912. /*
  913. * Floating point registers.
  914. */
  915. static FloatDef freg[MAX_REG];
  916. #else
917. /* X registers and floating point registers are located in
918. * scheduler-specific data.
  919. */
  920. register FloatDef *freg;
  921. #endif
  922. /*
  923. * For keeping the negative old value of 'reds' when call saving is active.
  924. */
  925. int neg_o_reds = 0;
  926. Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
  927. #ifndef NO_JUMP_TABLE
  928. static void* opcodes[] = { DEFINE_OPCODES };
  929. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  930. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  931. #endif
  932. #else
  933. int Go;
  934. #endif
  935. Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
  936. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  937. /*
  938. * Note: In this function, we attempt to place rarely executed code towards
  939. * the end of the function, in the hope that the cache hit rate will be better.
  940. * The initialization code is only run once, so it is at the very end.
  941. *
  942. * Note: c_p->arity must be set to reflect the number of useful terms in
  943. * c_p->arg_reg before calling the scheduler.
  944. */
  945. if (!init_done) {
  946. init_done = 1;
  947. goto init_emulator;
  948. }
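/*
 * Illustration (added): as the comment above process_main() says, the
 * function is entered twice -- the first call only publishes the label
 * addresses (via the init_emulator code at the end of the function) and
 * returns, while the second call starts executing BEAM code. A minimal
 * standalone sketch of that two-phase protocol, assuming GCC's
 * labels-as-values; demo_main and its single opcode are invented, and
 * unlike the real second call this one returns.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

static const void **table;           /* stands in for beam_ops */

static void demo_main(void)
{
    static int init_done = 0;
    static const void *labels[1];

    if (!init_done) {
        init_done = 1;
        labels[0] = &&op_hello;      /* export label addresses */
        table = labels;
        return;                      /* pass 1: initialise only */
    }
    goto *table[0];                  /* pass 2: start executing */
op_hello:
    printf("executing\n");
}

int main(void)
{
    demo_main();                     /* publishes the dispatch table */
    demo_main();                     /* runs */
    return 0;
}
#endif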
  949. #ifndef ERTS_SMP
  950. reg = save_reg; /* XXX: probably wastes a register on x86 */
  951. #endif
  952. c_p = NULL;
  953. reds_used = 0;
  954. goto do_schedule1;
  955. do_schedule:
  956. reds_used = REDS_IN(c_p) - FCALLS;
  957. do_schedule1:
  958. PROCESS_MAIN_CHK_LOCKS(c_p);
  959. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  960. c_p = schedule(c_p, reds_used);
  961. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  962. PROCESS_MAIN_CHK_LOCKS(c_p);
  963. #ifdef ERTS_SMP
  964. reg = c_p->scheduler_data->save_reg;
  965. freg = c_p->scheduler_data->freg;
  966. #endif
  967. ERL_BITS_RELOAD_STATEP(c_p);
  968. {
  969. int reds;
  970. Eterm* argp;
  971. Eterm* next;
  972. int i;
  973. argp = c_p->arg_reg;
  974. for (i = c_p->arity - 1; i > 0; i--) {
  975. reg[i] = argp[i];
  976. CHECK_TERM(reg[i]);
  977. }
  978. /*
  979. * We put the original reduction count in the process structure, to reduce
  980. * the code size (referencing a field in a struct through a pointer stored
  981. * in a register gives smaller code than referencing a global variable).
  982. */
  983. SET_I(c_p->i);
  984. reds = c_p->fcalls;
  985. if (c_p->ct != NULL && (c_p->trace_flags & F_SENSITIVE) == 0) {
  986. neg_o_reds = -reds;
  987. FCALLS = REDS_IN(c_p) = 0;
  988. } else {
  989. neg_o_reds = 0;
  990. FCALLS = REDS_IN(c_p) = reds;
  991. }
  992. next = (Eterm *) *I;
  993. r(0) = c_p->arg_reg[0];
  994. #ifdef HARDDEBUG
  995. if (c_p->arity > 0) {
  996. CHECK_TERM(r(0));
  997. }
  998. #endif
  999. SWAPIN;
  1000. ASSERT(VALID_INSTR(next));
  1001. Goto(next);
  1002. }
  1003. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  1004. emulator_loop:
  1005. #endif
  1006. #ifdef NO_JUMP_TABLE
  1007. switch (Go) {
  1008. #endif
  1009. #include "beam_hot.h"
  1010. #define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
  1011. #define ARITH_FUNC(name) erts_gc_##name
  1012. OpCase(i_plus_jId):
  1013. {
  1014. Eterm result;
  1015. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1016. Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
  1017. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1018. if (MY_IS_SSMALL(i)) {
  1019. result = make_small(i);
  1020. STORE_ARITH_RESULT(result);
  1021. }
  1022. }
  1023. arith_func = ARITH_FUNC(mixed_plus);
  1024. goto do_big_arith2;
  1025. }
  1026. OpCase(i_minus_jId):
  1027. {
  1028. Eterm result;
  1029. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1030. Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
  1031. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1032. if (MY_IS_SSMALL(i)) {
  1033. result = make_small(i);
  1034. STORE_ARITH_RESULT(result);
  1035. }
  1036. }
  1037. arith_func = ARITH_FUNC(mixed_minus);
  1038. goto do_big_arith2;
  1039. }
  1040. OpCase(i_is_lt_f):
  1041. if (CMP_GE(tmp_arg1, tmp_arg2)) {
  1042. ClauseFail();
  1043. }
  1044. Next(1);
  1045. OpCase(i_is_ge_f):
  1046. if (CMP_LT(tmp_arg1, tmp_arg2)) {
  1047. ClauseFail();
  1048. }
  1049. Next(1);
  1050. OpCase(i_is_eq_f):
  1051. if (CMP_NE(tmp_arg1, tmp_arg2)) {
  1052. ClauseFail();
  1053. }
  1054. Next(1);
  1055. OpCase(i_is_ne_f):
  1056. if (CMP_EQ(tmp_arg1, tmp_arg2)) {
  1057. ClauseFail();
  1058. }
  1059. Next(1);
  1060. OpCase(i_is_eq_exact_f):
  1061. if (!EQ(tmp_arg1, tmp_arg2)) {
  1062. ClauseFail();
  1063. }
  1064. Next(1);
  1065. OpCase(i_move_call_only_fcr): {
  1066. r(0) = Arg(1);
  1067. }
  1068. /* FALL THROUGH */
  1069. OpCase(i_call_only_f): {
  1070. SET_I((Eterm *) Arg(0));
  1071. Dispatch();
  1072. }
  1073. OpCase(i_move_call_last_fPcr): {
  1074. r(0) = Arg(2);
  1075. }
  1076. /* FALL THROUGH */
  1077. OpCase(i_call_last_fP): {
  1078. RESTORE_CP(E);
  1079. E = ADD_BYTE_OFFSET(E, Arg(1));
  1080. SET_I((Eterm *) Arg(0));
  1081. Dispatch();
  1082. }
  1083. OpCase(i_move_call_crf): {
  1084. r(0) = Arg(0);
  1085. I++;
  1086. }
  1087. /* FALL THROUGH */
  1088. OpCase(i_call_f): {
  1089. SET_CP(c_p, I+2);
  1090. SET_I((Eterm *) Arg(0));
  1091. Dispatch();
  1092. }
  1093. OpCase(i_move_call_ext_last_ePcr): {
  1094. r(0) = Arg(2);
  1095. }
  1096. /* FALL THROUGH */
  1097. OpCase(i_call_ext_last_eP):
  1098. RESTORE_CP(E);
  1099. E = ADD_BYTE_OFFSET(E, Arg(1));
  1100. /*
  1101. * Note: The pointer to the export entry is never NULL; if the module
  1102. * is not loaded, it points to code which will invoke the error handler
  1103. * (see lb_call_error_handler below).
  1104. */
  1105. Dispatchx();
  1106. OpCase(i_move_call_ext_cre): {
  1107. r(0) = Arg(0);
  1108. I++;
  1109. }
  1110. /* FALL THROUGH */
  1111. OpCase(i_call_ext_e):
  1112. SET_CP(c_p, I+2);
  1113. Dispatchx();
  1114. OpCase(i_move_call_ext_only_ecr): {
  1115. r(0) = Arg(1);
  1116. }
  1117. /* FALL THROUGH */
  1118. OpCase(i_call_ext_only_e):
  1119. Dispatchx();
  1120. OpCase(init_y): {
  1121. Eterm* next;
  1122. PreFetch(1, next);
  1123. make_blank(yb(Arg(0)));
  1124. NextPF(1, next);
  1125. }
  1126. OpCase(i_trim_I): {
  1127. Eterm* next;
  1128. Uint words;
  1129. Uint cp;
  1130. words = Arg(0);
  1131. cp = E[0];
  1132. PreFetch(1, next);
  1133. E += words;
  1134. E[0] = cp;
  1135. NextPF(1, next);
  1136. }
  1137. OpCase(return):
  1138. SET_I(c_p->cp);
  1139. CHECK_TERM(r(0));
  1140. Goto(*I);
  1141. OpCase(test_heap_1_put_list_Iy): {
  1142. Eterm* next;
  1143. PreFetch(2, next);
  1144. TestHeap(Arg(0), 1);
  1145. PutList(yb(Arg(1)), r(0), r(0), StoreSimpleDest);
  1146. CHECK_TERM(r(0));
  1147. NextPF(2, next);
  1148. }
  1149. OpCase(put_string_IId):
  1150. {
  1151. unsigned char* s;
  1152. int len;
  1153. Eterm result;
  1154. len = Arg(0); /* Length. */
  1155. result = NIL;
1156. for (s = (unsigned char *) Arg(1) + len - 1; len > 0; s--, len--) {
  1157. PutList(make_small(*s), result, result, StoreSimpleDest);
  1158. }
  1159. StoreBifResult(2, result);
  1160. }
  1161. /*
  1162. * Send is almost a standard call-BIF with two arguments, except for:
  1163. * 1) It cannot be traced.
  1164. * 2) There is no pointer to the send_2 function stored in
  1165. * the instruction.
  1166. */
  1167. OpCase(send): {
  1168. Eterm* next;
  1169. Eterm result;
  1170. PRE_BIF_SWAPOUT(c_p);
  1171. c_p->fcalls = FCALLS - 1;
  1172. result = send_2(c_p, r(0), x(1));
  1173. PreFetch(0, next);
  1174. POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
  1175. FCALLS = c_p->fcalls;
  1176. if (is_value(result)) {
  1177. r(0) = result;
  1178. CHECK_TERM(r(0));
  1179. NextPF(0, next);
  1180. } else if (c_p->freason == RESCHEDULE) {
  1181. Eterm* argp;
  1182. c_p->arity = 2;
  1183. /*
1184. * Moving c_p->arg_reg to a register is shorter than using c_p->arg_reg
  1185. * directly, since c_p->arg_reg is a pointer (not an array)
  1186. * and the compiler generates code to fetch the pointer every time.
  1187. */
  1188. argp = c_p->arg_reg;
  1189. argp[0] = r(0);
  1190. argp[1] = x(1);
  1191. SWAPOUT;
  1192. c_p->i = I;
  1193. c_p->current = NULL;
  1194. goto do_schedule;
  1195. } else if (c_p->freason == TRAP) {
  1196. SET_CP(c_p, I+1);
  1197. SET_I(((Export *)(c_p->def_arg_reg[3]))->address);
  1198. SWAPIN;
  1199. r(0) = c_p->def_arg_reg[0];
  1200. x(1) = c_p->def_arg_reg[1];
  1201. Dispatch();
  1202. }
  1203. goto find_func_info;
  1204. }
  1205. OpCase(i_element_jssd): {
  1206. Eterm index;
  1207. Eterm tuple;
  1208. /*
  1209. * Inlined version of element/2 for speed.
  1210. */
  1211. GetArg2(1, index, tuple);
  1212. if (is_small(index) && is_tuple(tuple)) {
  1213. Eterm* tp = tuple_val(tuple);
  1214. if ((signed_val(index) >= 1) &&
  1215. (signed_val(index) <= arityval(*tp))) {
  1216. Eterm result = tp[signed_val(index)];
  1217. StoreBifResult(3, result);
  1218. }
  1219. }
  1220. }
  1221. /* Fall through */
  1222. OpCase(badarg_j):
  1223. badarg:
  1224. c_p->freason = BADARG;
  1225. goto lb_Cl_error;
  1226. OpCase(i_fast_element_jIsd): {
  1227. Eterm tuple;
  1228. /*
  1229. * Inlined version of element/2 for even more speed.
  1230. * The first argument is an untagged integer >= 1.
  1231. * The second argument is guaranteed to be a register operand.
  1232. */
  1233. GetArg1(2, tuple);
  1234. if (is_tuple(tuple)) {
  1235. Eterm* tp = tuple_val(tuple);
  1236. tmp_arg2 = Arg(1);
  1237. if (tmp_arg2 <= arityval(*tp)) {
  1238. Eterm result = tp[tmp_arg2];
  1239. StoreBifResult(3, result);
  1240. }
  1241. }
  1242. goto badarg;
  1243. }
  1244. OpCase(catch_yf):
  1245. c_p->catches++;
  1246. yb(Arg(0)) = Arg(1);
  1247. Next(2);
  1248. OpCase(catch_end_y): {
  1249. c_p->catches--;
  1250. make_blank(yb(Arg(0)));
  1251. if (is_non_value(r(0))) {
  1252. if (x(1) == am_throw) {
  1253. r(0) = x(2);
  1254. } else {
  1255. if (x(1) == am_error) {
  1256. SWAPOUT;
  1257. x(2) = add_stacktrace(c_p, x(2), x(3));
  1258. SWAPIN;
  1259. }
  1260. /* only x(2) is included in the rootset here */
  1261. if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
  1262. * created heap fragments */
  1263. SWAPOUT;
  1264. PROCESS_MAIN_CHK_LOCKS(c_p);
  1265. FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
  1266. PROCESS_MAIN_CHK_LOCKS(c_p);
  1267. SWAPIN;
  1268. }
  1269. r(0) = TUPLE2(HTOP, am_EXIT, x(2));
  1270. HTOP += 3;
  1271. }
  1272. }
  1273. CHECK_TERM(r(0));
  1274. Next(1);
  1275. }
  1276. OpCase(try_end_y): {
  1277. c_p->catches--;
  1278. make_blank(yb(Arg(0)));
  1279. if (is_non_value(r(0))) {
  1280. r(0) = x(1);
  1281. x(1) = x(2);
  1282. x(2) = x(3);
  1283. }
  1284. Next(1);
  1285. }
  1286. /*
  1287. * Skeleton for receive statement:
  1288. *
  1289. * L1: <-------------------+
  1290. * <-----------+ |
  1291. * | |
  1292. * loop_rec L2 ------+---+ |
  1293. * ... | | |
  1294. * remove_message | | |
  1295. * jump L3 | | |
  1296. * ... | | |
  1297. * loop_rec_end L1 --+ | |
  1298. * L2: <---------------+ |
  1299. * wait L1 -----------------+ or wait_timeout
  1300. * timeout
  1301. *
  1302. * L3: Code after receive...
  1303. *
  1304. *
  1305. */
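/*
 * Illustration (added): loop_rec peeks at the save pointer's position in
 * the message queue, loop_rec_end advances it past a message that matched
 * no clause, and remove_message/timeout reset it (JOIN_MESSAGE) so the
 * next receive starts from the front again. A standalone sketch of that
 * save-pointer scan over a toy queue; the msg type and the ">= 20" match
 * condition are invented for the example.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

typedef struct msg { int term; struct msg *next; } msg;

int main(void)
{
    msg c = { 30, 0 }, b = { 20, &c }, a = { 10, &b };
    msg *queue = &a;
    msg *save = queue;          /* scan position, like the save pointer */

    /* Receive matching terms >= 20: 10 is rejected once and the save
     * pointer moves past it, so it is not examined again. */
    while (save && save->term < 20)
        save = save->next;      /* loop_rec_end: advance, rescan */
    printf("matched %d\n", save ? save->term : -1);    /* matched 20 */

    save = queue;               /* remove_message/timeout: JOIN_MESSAGE */
    printf("rescan starts at %d\n", save->term);       /* 10 */
    return 0;
}
#endif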
  1306. /*
  1307. * Pick up the next message and place it in x(0).
  1308. * If no message, jump to a wait or wait_timeout instruction.
  1309. */
  1310. OpCase(i_loop_rec_fr):
  1311. {
  1312. Eterm* next;
  1313. ErlMessage* msgp;
  1314. loop_rec__:
  1315. PROCESS_MAIN_CHK_LOCKS(c_p);
  1316. msgp = PEEK_MESSAGE(c_p);
  1317. if (!msgp) {
  1318. #ifdef ERTS_SMP
  1319. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
1320. /* Make sure messages won't pass exit signals... */
  1321. if (ERTS_PROC_PENDING_EXIT(c_p)) {
  1322. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1323. SWAPOUT;
  1324. goto do_schedule; /* Will be rescheduled for exit */
  1325. }
  1326. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  1327. msgp = PEEK_MESSAGE(c_p);
  1328. if (msgp)
  1329. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1330. else {
  1331. #endif
  1332. SET_I((Eterm *) Arg(0));
  1333. Goto(*I); /* Jump to a wait or wait_timeout instruction */
  1334. #ifdef ERTS_SMP
  1335. }
  1336. #endif
  1337. }
  1338. MV_MSG_MBUF_INTO_PROC(msgp);
  1339. PreFetch(1, next);
  1340. r(0) = ERL_MESSAGE_TERM(msgp);
  1341. NextPF(1, next);
  1342. }
  1343. /*
  1344. * Remove a (matched) message from the message queue.
  1345. */
  1346. OpCase(remove_message): {
  1347. Eterm* next;
  1348. ErlMessage* msgp;
  1349. PROCESS_MAIN_CHK_LOCKS(c_p);
  1350. PreFetch(0, next);
  1351. msgp = PEEK_MESSAGE(c_p);
  1352. if (c_p->ct != NULL) {
  1353. save_calls(c_p, &exp_receive);
  1354. }
  1355. if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
  1356. SEQ_TRACE_TOKEN(c_p) = NIL;
  1357. } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
  1358. Eterm msg;
  1359. SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
  1360. ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
  1361. ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
  1362. ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
  1363. ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
  1364. ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
  1365. ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
  1366. c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1367. if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
  1368. c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1369. }
  1370. msg = ERL_MESSAGE_TERM(msgp);
  1371. seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
  1372. c_p->id, c_p);
  1373. }
  1374. UNLINK_MESSAGE(c_p, msgp);
  1375. JOIN_MESSAGE(c_p);
  1376. CANCEL_TIMER(c_p);
  1377. free_message(msgp);
  1378. PROCESS_MAIN_CHK_LOCKS(c_p);
  1379. NextPF(0, next);
  1380. }
  1381. /*
  1382. * Advance the save pointer to the next message (the current
  1383. * message didn't match), then jump to the loop_rec instruction.
  1384. */
  1385. OpCase(loop_rec_end_f): {
  1386. SET_I((Eterm *) Arg(0));
  1387. SAVE_MESSAGE(c_p);
  1388. goto loop_rec__;
  1389. }
  1390. /*
  1391. * Prepare to wait for a message or a timeout, whichever occurs first.
  1392. *
  1393. * Note: In order to keep the compatibility between 32 and 64 bits
  1394. * emulators, only timeout values that can be represented in 32 bits
  1395. * (unsigned) or less are allowed.
  1396. */
  1397. OpCase(i_wait_timeout_fs): {
  1398. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1399. /* Fall through */
  1400. }
  1401. OpCase(i_wait_timeout_locked_fs): {
  1402. Eterm timeout_value;
  1403. /*
  1404. * If we have already set the timer, we must NOT set it again. Therefore,
  1405. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1406. */
  1407. if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) {
  1408. goto wait2;
  1409. }
  1410. GetArg1(1, timeout_value);
  1411. if (timeout_value != make_small(0)) {
  1412. #if !defined(ARCH_64)
  1413. Uint time_val;
  1414. #endif
  1415. if (is_small(timeout_value) && signed_val(timeout_value) > 0 &&
  1416. #if defined(ARCH_64)
  1417. ((unsigned_val(timeout_value) >> 32) == 0)
  1418. #else
  1419. 1
  1420. #endif
  1421. ) {
  1422. /*
1423. * The timer routine will set c_p->i to the value in
  1424. * c_p->def_arg_reg[0]. Note that it is safe to use this
  1425. * location because there are no living x registers in
  1426. * a receive statement.
  1427. */
  1428. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1429. set_timer(c_p, unsigned_val(timeout_value));
  1430. } else if (timeout_value == am_infinity) {
  1431. c_p->flags |= F_TIMO;
  1432. #if !defined(ARCH_64)
  1433. } else if (term_to_Uint(timeout_value, &time_val)) {
  1434. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1435. set_timer(c_p, time_val);
  1436. #endif
  1437. } else { /* Wrong time */
  1438. OpCase(i_wait_error_locked): {
  1439. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1440. /* Fall through */
  1441. }
  1442. OpCase(i_wait_error): {
  1443. c_p->freason = EXC_TIMEOUT_VALUE;
  1444. goto find_func_info;
  1445. }
  1446. }
  1447. /*
  1448. * Prepare to wait indefinitely for a new message to arrive
1449. * (or until the timer set above fires, if we fell through from above).
1450. *
1451. * When a new message arrives, control will be transferred to
1452. * the loop_rec instruction (at label L1). In case of
1453. * timeout, control will be transferred to the timeout
  1454. * instruction following the wait_timeout instruction.
  1455. */
  1456. OpCase(wait_locked_f):
  1457. OpCase(wait_f):
  1458. wait2: {
  1459. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1460. c_p->i = (Eterm *) Arg(0); /* L1 */
  1461. SWAPOUT;
  1462. c_p->arity = 0;
  1463. c_p->status = P_WAITING;
  1464. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1465. c_p->current = NULL;
  1466. goto do_schedule;
  1467. }
  1468. OpCase(wait_unlocked_f): {
  1469. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1470. goto wait2;
  1471. }
  1472. }
  1473. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1474. Next(2);
  1475. }
  1476. OpCase(i_wait_timeout_fI): {
  1477. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1478. }
  1479. OpCase(i_wait_timeout_locked_fI):
  1480. {
  1481. /*
  1482. * If we have already set the timer, we must NOT set it again. Therefore,
  1483. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1484. */
  1485. if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
  1486. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1487. set_timer(c_p, Arg(1));
  1488. }
  1489. goto wait2;
  1490. }
  1491. /*
  1492. * A timeout has occurred. Reset the save pointer so that the next
  1493. * receive statement will examine the first message first.
  1494. */
  1495. OpCase(timeout_locked): {
  1496. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1497. }
  1498. OpCase(timeout): {
  1499. Eterm* next;
  1500. PreFetch(0, next);
  1501. if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
  1502. trace_receive(c_p, am_timeout);
  1503. }
  1504. if (c_p->ct != NULL) {
  1505. save_calls(c_p, &exp_timeout);
  1506. }
  1507. c_p->flags &= ~F_TIMO;
  1508. JOIN_MESSAGE(c_p);
  1509. NextPF(0, next);
  1510. }
  1511. OpCase(i_select_val_sfI):
  1512. GetArg1(0, tmp_arg1);
  1513. do_binary_search:
  1514. {
  1515. struct Pairs {
  1516. Eterm val;
  1517. Eterm* addr;
  1518. };
  1519. struct Pairs* low;
  1520. struct Pairs* high;
  1521. struct Pairs* mid;
  1522. int bdiff; /* int not long because the arrays aren't that large */
  1523. low = (struct Pairs *) &Arg(3);
  1524. high = low + Arg(2);
  1525. /* The pointer subtraction (high-low) below must produce
  1526. * a signed result, because high could be < low. That
  1527. * requires the compiler to insert quite a bit of code.
  1528. *
  1529. * However, high will be > low so the result will be
  1530. * positive. We can use that knowledge to optimise the
  1531. * entire sequence, from the initial comparison to the
  1532. * computation of mid.
  1533. *
  1534. * -- Mikael Pettersson, Acumem AB
  1535. *
  1536. * Original loop control code:
  1537. *
  1538. * while (low < high) {
  1539. * mid = low + (high-low) / 2;
  1540. *
  1541. */
  1542. while ((bdiff = (int)((char*)high - (char*)low)) > 0) {
  1543. unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1);
  1544. mid = (struct Pairs*)((char*)low + boffset);
  1545. if (tmp_arg1 < mid->val) {
  1546. high = mid;
  1547. } else if (tmp_arg1 > mid->val) {
  1548. low = mid + 1;
  1549. } else {
  1550. SET_I(mid->addr);
  1551. Goto(*I);
  1552. }
  1553. }
  1554. SET_I((Eterm *) Arg(1));
  1555. Goto(*I);
  1556. }
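/*
 * Illustration (added): the loop above replaces the classic
 * mid = low + (high-low)/2 with an unsigned halving of the *byte*
 * distance, masked back down to a multiple of the element size, which
 * spares the compiler the signed division that plain pointer subtraction
 * requires. A standalone sketch of the same shape over a sorted pair
 * table; lookup() and its table are invented, and the mask trick assumes
 * sizeof(struct pair) is a power of two (true here: 8 or 16 bytes).
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

struct pair { unsigned long val; long payload; };

static long lookup(const struct pair *tab, int n, unsigned long key)
{
    const struct pair *low = tab, *high = tab + n, *mid;
    int bdiff;

    while ((bdiff = (int)((const char *)high - (const char *)low)) > 0) {
        size_t boff = ((unsigned)bdiff >> 1) & ~(sizeof(struct pair) - 1);
        mid = (const struct pair *)((const char *)low + boff);
        if (key < mid->val)       high = mid;
        else if (key > mid->val)  low = mid + 1;
        else                      return mid->payload;
    }
    return -1;                 /* the "fail label" case */
}

int main(void)
{
    struct pair tab[] = { {1,10}, {4,40}, {9,90}, {16,160} };
    printf("%ld %ld\n", lookup(tab, 4, 9), lookup(tab, 4, 5)); /* 90 -1 */
    return 0;
}
#endif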
  1557. OpCase(i_jump_on_val_zero_sfI):
  1558. {
  1559. Eterm index;
  1560. GetArg1(0, index);
  1561. if (is_small(index)) {
  1562. index = signed_val(index);
  1563. if (index < Arg(2)) {
  1564. SET_I((Eterm *) (&Arg(3))[index]);
  1565. Goto(*I);
  1566. }
  1567. }
  1568. SET_I((Eterm *) Arg(1));
  1569. Goto(*I);
  1570. }
  1571. OpCase(i_jump_on_val_sfII):
  1572. {
  1573. Eterm index;
  1574. GetArg1(0, index);
  1575. if (is_small(index)) {
  1576. index = (Uint) (signed_val(index) - Arg(3));
  1577. if (index < Arg(2)) {
  1578. SET_I((Eterm *) (&Arg(4))[index]);
  1579. Goto(*I);
  1580. }
  1581. }
  1582. SET_I((Eterm *) Arg(1));
  1583. Goto(*I);
  1584. }
  1585. /*
  1586. * All guards with zero arguments have special instructions:
  1587. * self/0
  1588. * node/0
  1589. *
  1590. * All other guard BIFs take one or two arguments.
  1591. */
  1592. /*
  1593. * Guard BIF in head. On failure, ignore the error and jump
  1594. * to the code for the next clause. We don't support tracing
  1595. * of guard BIFs.
  1596. */
  1597. OpCase(bif1_fbsd):
  1598. {
  1599. Eterm (*bf)(Process*, Eterm);
  1600. Eterm arg;
  1601. Eterm result;
  1602. GetArg1(2, arg);
  1603. bf = (BifFunction) Arg(1);
  1604. SAVE_HTOP;
  1605. c_p->fcalls = FCALLS;
  1606. PROCESS_MAIN_CHK_LOCKS(c_p);
  1607. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1608. result = (*bf)(c_p, arg);
  1609. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  1610. PROCESS_MAIN_CHK_LOCKS(c_p);
  1611. ERTS_HOLE_CHECK(c_p);
  1612. FCALLS = c_p->fcalls;
  1613. if (is_value(result)) {
  1614. StoreBifResult(3, result);
  1615. }
  1616. SET_I((Eterm *) Arg(0));
  1617. Goto(*I);
  1618. }
  1619. /*
  1620. * Guard BIF in body. It can fail like any BIF. No trace support.
  1621. */
  1622. OpCase(bif1_body_bsd):
  1623. {
  1624. Eterm (*bf)(Process*, Eterm);
  1625. Eterm arg;
  1626. Eterm result;
  1627. GetArg1(1, arg);
  1628. bf = (BifFunction) Arg(0);
  1629. SAVE_HTOP;
  1630. c_p->fcalls = FCALLS;
  1631. PROCESS_MAIN_CHK_LOCKS(c_p);
  1632. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1633. result = (*bf)(c_p, arg);
  1634. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  1635. PROCESS_MAIN_CHK_LOCKS(c_p);
  1636. ERTS_HOLE_CHECK(c_p);
  1637. FCALLS = c_p->fcalls;
  1638. if (is_value(result)) {
  1639. StoreBifResult(2, result);
  1640. }
  1641. reg[0] = arg;
  1642. SWAPOUT;
  1643. I = handle_error(c_p, I, reg, bf);
  1644. goto post_error_handling;
  1645. }
  1646. OpCase(i_gc_bif1_jIsId):
  1647. {
  1648. typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
  1649. GcBifFunction bf;
  1650. Eterm arg;
  1651. Eterm result;
  1652. Uint live = Arg(3);
  1653. GetArg1(2, arg);
  1654. reg[0] = r(0);
  1655. reg[live] = arg;
  1656. bf = (GcBifFunction) Arg(1);
  1657. c_p->fcalls = FCALLS;
  1658. SWAPOUT;
  1659. PROCESS_MAIN_CHK_LOCKS(c_p);
  1660. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  1661. result = (*bf)(c_p, reg, live);
  1662. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  1663. PROCESS_MAIN_CHK_LOCKS(c_p);
  1664. SWAPIN;
  1665. r(0) = reg[0];
  1666. ERTS_HOLE_CHECK(c_p);
  1667. FCALLS = c_p->fcalls;
  1668. if (is_value(result)) {
  1669. StoreBifResult(4, result);
  1670. }
  1671. if (Arg(0) != 0) {
  1672. SET_I((Eterm *) Arg(0));
  1673. Goto(*I);
  1674. }
  1675. reg[0] = arg;
  1676. I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
  1677. goto post_error_handling;
  1678. }
  1679. /*
1680. * Guard BIFs and, or, xor in guards.
  1681. */
  1682. OpCase(i_bif2_fbd):
  1683. {
  1684. Eterm (*bf)(Process*, Eterm, Eterm);
  1685. Eterm result;
  1686. bf = (BifFunction) Arg(1);
  1687. SAVE_HTOP;
  1688. c_p->fcalls = FCALLS;
  1689. PROCESS_MAIN_CHK_LOCKS(c_p);
  1690. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1691. result = (*bf)(c_p, tmp_arg1, tmp_arg2);
  1692. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  1693. PROCESS_MAIN_CHK_LOCKS(c_p);
  1694. ERTS_HOLE_CHECK(c_p);
  1695. FCALLS = c_p->fcalls;
  1696. if (is_value(result)) {
  1697. StoreBifResult(2, result);
  1698. }
  1699. SET_I((Eterm *) Arg(0));
  1700. Goto(*I);
  1701. }
  1702. /*
1703. * Guard BIFs and, or, xor, and relational operators in body.
  1704. */
  1705. OpCase(i_bif2_body_bd):
  1706. {
  1707. Eterm (*bf)(Process*, Eterm, Eterm);
  1708. Eterm result;
  1709. bf = (BifFunction) Arg(0);
  1710. SAVE_HTOP;
  1711. PROCESS_MAIN_CHK_LOCKS(c_p);
  1712. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1713. result = (*bf)(c_p, tmp_arg1, tmp_arg2);
  1714. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  1715. PROCESS_MAIN_CHK_LOCKS(c_p);
  1716. ERTS_HOLE_CHECK(c_p);
  1717. if (is_value(result)) {
  1718. ASSERT(!is_CP(result));
  1719. StoreBifResult(1, result);
  1720. }
  1721. reg[0] = tmp_arg1;
  1722. reg[1] = tmp_arg2;
  1723. SWAPOUT;
  1724. I = handle_error(c_p, I, reg, bf);
  1725. goto post_error_handling;
  1726. }
  1727. /*
  1728. * The most general BIF call. The BIF may build any amount of data
  1729. * on the heap. The result is always returned in r(0).
  1730. */
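/*
 * The call_bif* instructions below all follow one result protocol; a
 * minimal sketch (illustrative only, never compiled -- the names are
 * the ones used in this file):
 */
#if 0
result = (*bf)(c_p, /* 0-3 argument registers, */ I);
if (is_value(result)) {
    r(0) = result;                       /* success */
} else if (c_p->freason == RESCHEDULE) {
    goto suspend_bif;                    /* suspend; retry the BIF later */
} else if (c_p->freason == TRAP) {
    goto call_bif_trap3;                 /* tail-call the trap export */
} else {
    I = handle_error(c_p, I, reg, bf);   /* raise the exception */
}
#endif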
  1731. OpCase(call_bif0_e):
  1732. {
  1733. Eterm (*bf)(Process*, Uint*) = GET_BIF_ADDRESS(Arg(0));
  1734. PRE_BIF_SWAPOUT(c_p);
  1735. c_p->fcalls = FCALLS - 1;
  1736. if (FCALLS <= 0) {
  1737. save_calls(c_p, (Export *) Arg(0));
  1738. }
  1739. /*
  1740. * A BIF with no arguments cannot fail (especially not with badarg).
  1741. */
  1742. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1743. r(0) = (*bf)(c_p, I);
  1744. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(r(0)));
  1745. ERTS_HOLE_CHECK(c_p);
  1746. POST_BIF_GC_SWAPIN_0(c_p, r(0));
  1747. FCALLS = c_p->fcalls;
  1748. if (is_value(r(0))) {
  1749. CHECK_TERM(r(0));
  1750. Next(1);
  1751. }
  1752. else {
  1753. ASSERT(c_p->freason == TRAP);
  1754. goto call_bif_trap3;
  1755. }
  1756. }
  1757. OpCase(call_bif1_e):
  1758. {
  1759. Eterm (*bf)(Process*, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
  1760. Eterm result;
  1761. Eterm* next;
  1762. c_p->fcalls = FCALLS - 1;
  1763. if (FCALLS <= 0) {
  1764. save_calls(c_p, (Export *) Arg(0));
  1765. }
  1766. PreFetch(1, next);
  1767. PRE_BIF_SWAPOUT(c_p);
  1768. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1769. result = (*bf)(c_p, r(0), I);
  1770. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  1771. ERTS_HOLE_CHECK(c_p);
  1772. POST_BIF_GC_SWAPIN(c_p, result, reg, 1);
  1773. FCALLS = c_p->fcalls;
  1774. if (is_value(result)) {
  1775. r(0) = result;
  1776. CHECK_TERM(r(0));
  1777. NextPF(1, next);
  1778. } else if (c_p->freason == RESCHEDULE) {
  1779. c_p->arity = 1;
  1780. goto suspend_bif;
  1781. } else if (c_p->freason == TRAP) {
  1782. goto call_bif_trap3;
  1783. }
  1784. /*
  1785. * Error handling. SWAPOUT is not needed because it was done above.
  1786. */
  1787. ASSERT(c_p->stop == E);
  1788. reg[0] = r(0);
  1789. I = handle_error(c_p, I, reg, bf);
  1790. goto post_error_handling;
  1791. }
  1792. OpCase(call_bif2_e):
  1793. {
  1794. Eterm (*bf)(Process*, Eterm, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
  1795. Eterm result;
  1796. Eterm* next;
  1797. PRE_BIF_SWAPOUT(c_p);
  1798. c_p->fcalls = FCALLS - 1;
  1799. if (FCALLS <= 0) {
  1800. save_calls(c_p, (Export *) Arg(0));
  1801. }
  1802. PreFetch(1, next);
  1803. CHECK_TERM(r(0));
  1804. CHECK_TERM(x(1));
  1805. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1806. result = (*bf)(c_p, r(0), x(1), I);
  1807. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  1808. ERTS_HOLE_CHECK(c_p);
  1809. POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
  1810. FCALLS = c_p->fcalls;
  1811. if (is_value(result)) {
  1812. r(0) = result;
  1813. CHECK_TERM(r(0));
  1814. NextPF(1, next);
  1815. } else if (c_p->freason == RESCHEDULE) {
  1816. c_p->arity = 2;
  1817. goto suspend_bif;
  1818. } else if (c_p->freason == TRAP) {
  1819. goto call_bif_trap3;
  1820. }
  1821. /*
  1822. * Error handling. SWAPOUT is not needed because it was done above.
  1823. */
  1824. ASSERT(c_p->stop == E);
  1825. reg[0] = r(0);
  1826. I = handle_error(c_p, I, reg, bf);
  1827. goto post_error_handling;
  1828. }
  1829. OpCase(call_bif3_e):
  1830. {
  1831. Eterm (*bf)(Process*, Eterm, Eterm, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
  1832. Eterm result;
  1833. Eterm* next;
  1834. PRE_BIF_SWAPOUT(c_p);
  1835. c_p->fcalls = FCALLS - 1;
  1836. if (FCALLS <= 0) {
  1837. save_calls(c_p, (Export *) Arg(0));
  1838. }
  1839. PreFetch(1, next);
  1840. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1841. result = (*bf)(c_p, r(0), x(1), x(2), I);
  1842. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  1843. ERTS_HOLE_CHECK(c_p);
  1844. POST_BIF_GC_SWAPIN(c_p, result, reg, 3);
  1845. FCALLS = c_p->fcalls;
  1846. if (is_value(result)) {
  1847. r(0) = result;
  1848. CHECK_TERM(r(0));
  1849. NextPF(1, next);
  1850. } else if (c_p->freason == RESCHEDULE) {
  1851. c_p->arity = 3;
  1852. goto suspend_bif;
  1853. } else if (c_p->freason == TRAP) {
  1854. call_bif_trap3:
  1855. SET_CP(c_p, I+2);
  1856. SET_I(((Export *)(c_p->def_arg_reg[3]))->address);
  1857. SWAPIN;
  1858. r(0) = c_p->def_arg_reg[0];
  1859. x(1) = c_p->def_arg_reg[1];
  1860. x(2) = c_p->def_arg_reg[2];
  1861. Dispatch();
  1862. }
  1863. /*
  1864. * Error handling. SWAPOUT is not needed because it was done above.
  1865. */
  1866. ASSERT(c_p->stop == E);
  1867. reg[0] = r(0);
  1868. I = handle_error(c_p, I, reg, bf);
  1869. goto post_error_handling;
  1870. }
  1871. /*
  1872. * Arithmetic operations.
  1873. */
  1874. OpCase(i_times_jId):
  1875. {
  1876. arith_func = ARITH_FUNC(mixed_times);
  1877. goto do_big_arith2;
  1878. }
  1879. OpCase(i_m_div_jId):
  1880. {
  1881. arith_func = ARITH_FUNC(mixed_div);
  1882. goto do_big_arith2;
  1883. }
  1884. OpCase(i_int_div_jId):
  1885. {
  1886. Eterm result;
  1887. if (tmp_arg2 == SMALL_ZERO) {
  1888. goto badarith;
  1889. } else if (is_both_small(tmp_arg1, tmp_arg2)) {
  1890. Sint ires = signed_val(tmp_arg1) / signed_val(tmp_arg2);
  1891. if (MY_IS_SSMALL(ires)) {
  1892. result = make_small(ires);
  1893. STORE_ARITH_RESULT(result);
  1894. }
  1895. }
  1896. arith_func = ARITH_FUNC(int_div);
  1897. goto do_big_arith2;
  1898. }
  1899. OpCase(i_rem_jId):
  1900. {
  1901. Eterm result;
  1902. if (tmp_arg2 == SMALL_ZERO) {
  1903. goto badarith;
  1904. } else if (is_both_small(tmp_arg1, tmp_arg2)) {
  1905. result = make_small(signed_val(tmp_arg1) % signed_val(tmp_arg2));
  1906. STORE_ARITH_RESULT(result);
  1907. } else {
  1908. arith_func = ARITH_FUNC(int_rem);
  1909. goto do_big_arith2;
  1910. }
  1911. }
  1912. OpCase(i_band_jId):
  1913. {
  1914. Eterm result;
  1915. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1916. /*
  1917. * No need to untag -- TAG & TAG == TAG.
  1918. */
  1919. result = tmp_arg1 & tmp_arg2;
  1920. STORE_ARITH_RESULT(result);
  1921. }
  1922. arith_func = ARITH_FUNC(band);
  1923. goto do_big_arith2;
  1924. }
  1925. do_big_arith2:
  1926. {
  1927. Eterm result;
  1928. Uint live = Arg(1);
  1929. SWAPOUT;
  1930. reg[0] = r(0);
  1931. reg[live] = tmp_arg1;
  1932. reg[live+1] = tmp_arg2;
  1933. result = arith_func(c_p, reg, live);
  1934. r(0) = reg[0];
  1935. SWAPIN;
  1936. ERTS_HOLE_CHECK(c_p);
  1937. if (is_value(result)) {
  1938. STORE_ARITH_RESULT(result);
  1939. }
  1940. goto lb_Cl_error;
  1941. }
  1942. /*
1943. * An error occurred in an arithmetic operation or test that could
  1944. * appear either in a head or in a body.
  1945. * In a head, execution should continue at failure address in Arg(0).
  1946. * In a body, Arg(0) == 0 and an exception should be raised.
  1947. */
  1948. lb_Cl_error: {
  1949. if (Arg(0) != 0) {
  1950. OpCase(jump_f): {
  1951. SET_I((Eterm *) Arg(0));
  1952. Goto(*I);
  1953. }
  1954. }
  1955. ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
  1956. goto find_func_info;
  1957. }
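/*
 * Restated as a sketch (illustrative only): a nonzero Arg(0) is the
 * failure label of a head, zero marks a body.
 */
#if 0
if (Arg(0) != 0) {
    SET_I((Eterm *) Arg(0)); /* head: continue at the next clause */
    Goto(*I);
}
goto find_func_info;         /* body: raise the exception */
#endif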
  1958. OpCase(i_bor_jId):
  1959. {
  1960. Eterm result;
  1961. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1962. /*
  1963. * No need to untag -- TAG | TAG == TAG.
  1964. */
  1965. result = tmp_arg1 | tmp_arg2;
  1966. STORE_ARITH_RESULT(result);
  1967. }
  1968. arith_func = ARITH_FUNC(bor);
  1969. goto do_big_arith2;
  1970. }
  1971. OpCase(i_bxor_jId):
  1972. {
  1973. Eterm result;
  1974. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1975. /*
  1976. * We could extract the tag from one argument, but a tag extraction
  1977. * could mean a shift. Therefore, play it safe here.
  1978. */
  1979. result = make_small(signed_val(tmp_arg1) ^ signed_val(tmp_arg2));
  1980. STORE_ARITH_RESULT(result);
  1981. }
  1982. arith_func = ARITH_FUNC(bxor);
  1983. goto do_big_arith2;
  1984. }
  1985. {
  1986. Sint i;
  1987. Sint ires;
  1988. Eterm* bigp;
  1989. OpCase(i_bsr_jId):
  1990. if (is_small(tmp_arg2)) {
  1991. i = -signed_val(tmp_arg2);
  1992. if (is_small(tmp_arg1)) {
  1993. goto small_shift;
  1994. } else if (is_big(tmp_arg1)) {
  1995. if (i == 0) {
  1996. StoreBifResult(2, tmp_arg1);
  1997. }
  1998. goto big_shift;
  1999. }
  2000. }
  2001. goto badarith;
  2002. OpCase(i_bsl_jId):
  2003. if (is_small(tmp_arg2)) {
  2004. i = signed_val(tmp_arg2);
  2005. if (is_small(tmp_arg1)) {
  2006. small_shift:
  2007. ires = signed_val(tmp_arg1);
  2008. if (i == 0 || ires == 0) {
  2009. StoreBifResult(2, tmp_arg1);
  2010. } else if (i < 0) { /* Right shift */
  2011. i = -i;
  2012. if (i >= SMALL_BITS-1) {
  2013. tmp_arg1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
  2014. } else {
  2015. tmp_arg1 = make_small(ires >> i);
  2016. }
  2017. StoreBifResult(2, tmp_arg1);
  2018. } else if (i < SMALL_BITS-1) { /* Left shift */
  2019. if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) ||
  2020. ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) {
  2021. tmp_arg1 = make_small(ires << i);
  2022. StoreBifResult(2, tmp_arg1);
  2023. }
  2024. }
  2025. tmp_arg1 = small_to_big(ires, tmp_big);
  2026. big_shift:
  2027. if (i > 0) { /* Left shift. */
  2028. ires = big_size(tmp_arg1) + (i / D_EXP);
  2029. } else { /* Right shift. */
  2030. ires = big_size(tmp_arg1);
  2031. if (ires <= (-i / D_EXP))
  2032. ires = 3; /* ??? */
  2033. else
  2034. ires -= (-i / D_EXP);
  2035. }
  2036. {
  2037. Uint live = Arg(1);
  2038. ires = BIG_NEED_SIZE(ires+1);
  2039. SWAPOUT;
  2040. reg[0] = r(0);
  2041. reg[live] = tmp_arg1;
  2042. PROCESS_MAIN_CHK_LOCKS(c_p);
  2043. FCALLS -= erts_garbage_collect(c_p, ires, reg, live+1);
  2044. PROCESS_MAIN_CHK_LOCKS(c_p);
  2045. r(0) = reg[0];
  2046. SWAPIN;
  2047. bigp = HTOP;
  2048. tmp_arg1 = big_lshift(reg[live], i, bigp);
  2049. if (is_big(tmp_arg1)) {
  2050. HTOP += bignum_header_arity(*HTOP) + 1;
  2051. }
  2052. if (is_nil(tmp_arg1)) {
  2053. c_p->freason = SYSTEM_LIMIT;
  2054. goto lb_Cl_error;
  2055. }
  2056. ERTS_HOLE_CHECK(c_p);
  2057. StoreBifResult(2, tmp_arg1);
  2058. }
  2059. } else if (is_big(tmp_arg1)) {
  2060. if (i == 0) {
  2061. StoreBifResult(2, tmp_arg1);
  2062. }
  2063. goto big_shift;
  2064. }
  2065. }
  2066. goto badarith;
  2067. }
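/*
 * The fits-in-a-small test used for the left shift above can be read
 * in isolation: the mask selects the bits that would be shifted out
 * of the (SMALL_BITS-1)-bit small range, plus the sign bit. A sketch
 * (illustrative only):
 */
#if 0
Uint mask = ~(Uint)0 << ((SMALL_BITS-1) - i);
int fits = (ires > 0)
    ? ((mask & (Uint)ires) == 0)   /* positive: no significant bits lost */
    : ((mask & ~(Uint)ires) == 0); /* negative: shifted-out bits all ones */
#endif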
  2068. OpCase(i_int_bnot_jsId):
  2069. {
  2070. GetArg1(1, tmp_arg1);
  2071. if (is_small(tmp_arg1)) {
  2072. tmp_arg1 = make_small(~signed_val(tmp_arg1));
  2073. } else {
  2074. Uint live = Arg(2);
  2075. SWAPOUT;
  2076. reg[0] = r(0);
  2077. reg[live] = tmp_arg1;
  2078. tmp_arg1 = erts_gc_bnot(c_p, reg, live);
  2079. r(0) = reg[0];
  2080. SWAPIN;
  2081. ERTS_HOLE_CHECK(c_p);
  2082. if (is_nil(tmp_arg1)) {
  2083. goto lb_Cl_error;
  2084. }
  2085. }
  2086. StoreBifResult(3, tmp_arg1);
  2087. }
  2088. badarith:
  2089. c_p->freason = BADARITH;
  2090. goto lb_Cl_error;
  2091. OpCase(i_apply): {
  2092. Eterm* next;
  2093. SWAPOUT;
  2094. next = apply(c_p, r(0), x(1), x(2), reg);
  2095. SWAPIN;
  2096. if (next != NULL) {
  2097. r(0) = reg[0];
  2098. SET_CP(c_p, I+1);
  2099. SET_I(next);
  2100. Dispatch();
  2101. }
  2102. I = handle_error(c_p, I, reg, apply_3);
  2103. goto post_error_handling;
  2104. }
  2105. OpCase(i_apply_last_P): {
  2106. Eterm* next;
  2107. SWAPOUT;
  2108. next = apply(c_p, r(0), x(1), x(2), reg);
  2109. SWAPIN;
  2110. if (next != NULL) {
  2111. r(0) = reg[0];
  2112. SET_CP(c_p, (Eterm *) E[0]);
  2113. E = ADD_BYTE_OFFSET(E, Arg(0));
  2114. SET_I(next);
  2115. Dispatch();
  2116. }
  2117. I = handle_error(c_p, I, reg, apply_3);
  2118. goto post_error_handling;
  2119. }
  2120. OpCase(i_apply_only): {
  2121. Eterm* next;
  2122. SWAPOUT;
  2123. next = apply(c_p, r(0), x(1), x(2), reg);
  2124. SWAPIN;
  2125. if (next != NULL) {
  2126. r(0) = reg[0];
  2127. SET_I(next);
  2128. Dispatch();
  2129. }
  2130. I = handle_error(c_p, I, reg, apply_3);
  2131. goto post_error_handling;
  2132. }
  2133. OpCase(apply_I): {
  2134. Eterm* next;
  2135. reg[0] = r(0);
  2136. SWAPOUT;
  2137. next = fixed_apply(c_p, reg, Arg(0));
  2138. SWAPIN;
  2139. if (next != NULL) {
  2140. r(0) = reg[0];
  2141. SET_CP(c_p, I+2);
  2142. SET_I(next);
  2143. Dispatch();
  2144. }
  2145. I = handle_error(c_p, I, reg, apply_3);
  2146. goto post_error_handling;
  2147. }
  2148. OpCase(apply_last_IP): {
  2149. Eterm* next;
  2150. reg[0] = r(0);
  2151. SWAPOUT;
  2152. next = fixed_apply(c_p, reg, Arg(0));
  2153. SWAPIN;
  2154. if (next != NULL) {
  2155. r(0) = reg[0];
  2156. SET_CP(c_p, (Eterm *) E[0]);
  2157. E = ADD_BYTE_OFFSET(E, Arg(1));
  2158. SET_I(next);
  2159. Dispatch();
  2160. }
  2161. I = handle_error(c_p, I, reg, apply_3);
  2162. goto post_error_handling;
  2163. }
  2164. OpCase(i_apply_fun): {
  2165. Eterm* next;
  2166. SWAPOUT;
  2167. next = apply_fun(c_p, r(0), x(1), reg);
  2168. SWAPIN;
  2169. if (next != NULL) {
  2170. r(0) = reg[0];
  2171. SET_CP(c_p, I+1);
  2172. SET_I(next);
  2173. Dispatchfun();
  2174. }
  2175. goto find_func_info;
  2176. }
  2177. OpCase(i_apply_fun_last_P): {
  2178. Eterm* next;
  2179. SWAPOUT;
  2180. next = apply_fun(c_p, r(0), x(1), reg);
  2181. SWAPIN;
  2182. if (next != NULL) {
  2183. r(0) = reg[0];
  2184. SET_CP(c_p, (Eterm *) E[0]);
  2185. E = ADD_BYTE_OFFSET(E, Arg(0));
  2186. SET_I(next);
  2187. Dispatchfun();
  2188. }
  2189. goto find_func_info;
  2190. }
  2191. OpCase(i_apply_fun_only): {
  2192. Eterm* next;
  2193. SWAPOUT;
  2194. next = apply_fun(c_p, r(0), x(1), reg);
  2195. SWAPIN;
  2196. if (next != NULL) {
  2197. r(0) = reg[0];
  2198. SET_I(next);
  2199. Dispatchfun();
  2200. }
  2201. goto find_func_info;
  2202. }
  2203. OpCase(i_call_fun_I): {
  2204. Eterm* next;
  2205. SWAPOUT;
  2206. reg[0] = r(0);
  2207. next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
  2208. SWAPIN;
  2209. if (next != NULL) {
  2210. r(0) = reg[0];
  2211. SET_CP(c_p, I+2);
  2212. SET_I(next);
  2213. Dispatchfun();
  2214. }
  2215. goto find_func_info;
  2216. }
  2217. OpCase(i_call_fun_last_IP): {
  2218. Eterm* next;
  2219. SWAPOUT;
  2220. reg[0] = r(0);
  2221. next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
  2222. SWAPIN;
  2223. if (next != NULL) {
  2224. r(0) = reg[0];
  2225. SET_CP(c_p, (Eterm *) E[0]);
  2226. E = ADD_BYTE_OFFSET(E, Arg(1));
  2227. SET_I(next);
  2228. Dispatchfun();
  2229. }
  2230. goto find_func_info;
  2231. }
  2232. #ifdef DEBUG
  2233. /*
  2234. * Set a breakpoint here to get control just after a call instruction.
  2235. * I points to the first instruction in the called function.
  2236. *
  2237. * In gdb, use 'call dis(I-5, 1)' to show the name of the function.
  2238. */
  2239. do_dispatch:
  2240. DispatchMacro();
  2241. do_dispatchx:
  2242. DispatchMacrox();
  2243. do_dispatchfun:
  2244. DispatchMacroFun();
  2245. #endif
  2246. /*
  2247. * Jumped to from the Dispatch() macro when the reductions are used up.
  2248. *
  2249. * Since the I register points just beyond the FuncBegin instruction, we
  2250. * can get the module, function, and arity for the function being
  2251. * called from I[-3], I[-2], and I[-1] respectively.
  2252. */
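/*
 * Illustrative only, restating the layout described above: the three
 * words immediately before I identify the callee.
 */
#if 0
Eterm mod = (Eterm) I[-3]; /* Module */
Eterm fun = (Eterm) I[-2]; /* Function */
Uint arity = (Uint) I[-1]; /* Arity */
#endif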
  2253. context_switch_fun:
  2254. c_p->arity = I[-1] + 1;
  2255. goto context_switch2;
  2256. context_switch:
  2257. c_p->arity = I[-1];
  2258. context_switch2: /* Entry for fun calls. */
  2259. c_p->current = I-3; /* Pointer to Mod, Func, Arity */
  2260. {
  2261. Eterm* argp;
  2262. int i;
  2263. /*
  2264. * Make sure that there is enough room for the argument registers to be saved.
  2265. */
  2266. if (c_p->arity > c_p->max_arg_reg) {
  2267. /*
  2268. * Yes, this is an expensive operation, but you only pay it the first
2269. * time you call a function with more than 6 arguments that is
  2270. * scheduled out. This is better than paying for 26 words of wasted
  2271. * space for most processes which never call functions with more than
  2272. * 6 arguments.
  2273. */
  2274. Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
  2275. if (c_p->arg_reg != c_p->def_arg_reg) {
  2276. ERTS_PROC_LESS_MEM(c_p->max_arg_reg * sizeof(c_p->arg_reg[0]));
  2277. ERTS_PROC_MORE_MEM(size);
  2278. c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
  2279. (void *) c_p->arg_reg,
  2280. size);
  2281. } else {
  2282. ERTS_PROC_MORE_MEM(size);
  2283. c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
  2284. }
  2285. c_p->max_arg_reg = c_p->arity;
  2286. }
  2287. /*
  2288. * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it
  2289. * now before saving registers.
  2290. *
  2291. * The '+ 1' compensates for the last increment which was not done
2292. * (because the code for the Dispatch() macro becomes shorter that way).
  2293. */
  2294. reds_used = REDS_IN(c_p) - FCALLS + 1;
  2295. /*
  2296. * Save the argument registers and everything else.
  2297. */
  2298. argp = c_p->arg_reg;
  2299. for (i = c_p->arity - 1; i > 0; i--) {
  2300. argp[i] = reg[i];
  2301. }
  2302. c_p->arg_reg[0] = r(0);
  2303. SWAPOUT;
  2304. c_p->i = I;
  2305. erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
  2306. add_to_schedule_q(c_p);
  2307. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
  2308. goto do_schedule1;
  2309. }
  2310. OpCase(i_select_tuple_arity_sfI):
  2311. {
  2312. GetArg1(0, tmp_arg1);
  2313. if (is_tuple(tmp_arg1)) {
  2314. tmp_arg1 = *tuple_val(tmp_arg1);
  2315. goto do_binary_search;
  2316. }
  2317. SET_I((Eterm *) Arg(1));
  2318. Goto(*I);
  2319. }
  2320. OpCase(i_select_big_sf):
  2321. {
  2322. Eterm* bigp;
  2323. Uint arity;
  2324. Eterm* given;
  2325. Uint given_arity;
  2326. Uint given_size;
  2327. GetArg1(0, tmp_arg1);
  2328. if (is_big(tmp_arg1)) {
  2329. /*
  2330. * The loader has sorted the bignumbers in descending order
  2331. * on the arity word. Therefore, we know that the search
  2332. * has failed as soon as we encounter an arity word less than
  2333. * the arity word of the given number. There is a zero word
  2334. * (less than any valid arity word) stored after the last bignumber.
  2335. */
  2336. given = big_val(tmp_arg1);
  2337. given_arity = given[0];
  2338. given_size = thing_arityval(given_arity);
  2339. bigp = &Arg(2);
  2340. while ((arity = bigp[0]) > given_arity) {
  2341. bigp += thing_arityval(arity) + 2;
  2342. }
  2343. while (bigp[0] == given_arity) {
  2344. if (memcmp(bigp+1, given+1, sizeof(Eterm)*given_size) == 0) {
  2345. SET_I((Eterm *) bigp[given_size+1]);
  2346. Goto(*I);
  2347. }
  2348. bigp += thing_arityval(arity) + 2;
  2349. }
  2350. }
  2351. /*
  2352. * Failed.
  2353. */
  2354. SET_I((Eterm *) Arg(1));
  2355. Goto(*I);
  2356. }
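/*
 * The zero-terminated, descending-order scan above is a general
 * sentinel-search technique. A self-contained sketch (illustrative
 * only; entry_size(), payload_matches() and label_of() are
 * hypothetical helpers, not ERTS functions):
 */
#if 0
while (table[0] > key) {           /* skip entries with larger keys */
    table += entry_size(table[0]);
}
while (table[0] == key) {          /* keys equal: compare the payloads */
    if (payload_matches(table)) return label_of(table);
    table += entry_size(table[0]);
}
/* here table[0] < key -- possibly the zero sentinel: no match */
#endif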
  2357. #ifdef ARCH_64
  2358. OpCase(i_select_float_sfI):
  2359. {
  2360. Uint f;
  2361. int n;
  2362. struct ValLabel {
  2363. Uint f;
  2364. Eterm* addr;
  2365. };
  2366. struct ValLabel* ptr;
  2367. GetArg1(0, tmp_arg1);
  2368. ASSERT(is_float(tmp_arg1));
  2369. f = float_val(tmp_arg1)[1];
  2370. n = Arg(2);
  2371. ptr = (struct ValLabel *) &Arg(3);
  2372. while (n-- > 0) {
  2373. if (ptr->f == f) {
  2374. SET_I(ptr->addr);
  2375. Goto(*I);
  2376. }
  2377. ptr++;
  2378. }
  2379. SET_I((Eterm *) Arg(1));
  2380. Goto(*I);
  2381. }
  2382. #else
  2383. OpCase(i_select_float_sfI):
  2384. {
  2385. Uint fpart1;
  2386. Uint fpart2;
  2387. int n;
  2388. struct ValLabel {
  2389. Uint fpart1;
  2390. Uint fpart2;
  2391. Eterm* addr;
  2392. };
  2393. struct ValLabel* ptr;
  2394. GetArg1(0, tmp_arg1);
  2395. ASSERT(is_float(tmp_arg1));
  2396. fpart1 = float_val(tmp_arg1)[1];
  2397. fpart2 = float_val(tmp_arg1)[2];
  2398. n = Arg(2);
  2399. ptr = (struct ValLabel *) &Arg(3);
  2400. while (n-- > 0) {
  2401. if (ptr->fpart1 == fpart1 && ptr->fpart2 == fpart2) {
  2402. SET_I(ptr->addr);
  2403. Goto(*I);
  2404. }
  2405. ptr++;
  2406. }
  2407. SET_I((Eterm *) Arg(1));
  2408. Goto(*I);
  2409. }
  2410. #endif
  2411. OpCase(set_tuple_element_sdP): {
  2412. Eterm element;
  2413. Eterm tuple;
  2414. Eterm* next;
  2415. Eterm* p;
  2416. PreFetch(3, next);
  2417. GetArg2(0, element, tuple);
  2418. ASSERT(is_tuple(tuple));
  2419. p = (Eterm *) ((unsigned char *) tuple_val(tuple) + Arg(2));
  2420. *p = element;
  2421. NextPF(3, next);
  2422. }
  2423. OpCase(i_is_ne_exact_f):
  2424. if (EQ(tmp_arg1, tmp_arg2)) {
  2425. ClauseFail();
  2426. }
  2427. Next(1);
  2428. OpCase(normal_exit): {
  2429. SWAPOUT;
  2430. c_p->freason = EXC_NORMAL;
  2431. c_p->arity = 0; /* In case this process will ever be garbed again. */
  2432. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  2433. erts_do_exit_process(c_p, am_normal);
  2434. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  2435. goto do_schedule;
  2436. }
  2437. /*
  2438. * Suspend BIF and prepare BIF to be rescheduled.
  2439. */
  2440. suspend_bif: {
  2441. Eterm* argp = c_p->arg_reg;
  2442. argp[0] = r(0);
  2443. argp[1] = x(1);
  2444. argp[2] = x(2);
  2445. SWAPOUT;
  2446. c_p->i = I;
  2447. c_p->current = NULL;
  2448. goto do_schedule;
  2449. }
  2450. OpCase(raise_ss): {
  2451. /* This was not done very well in R10-0; then, we passed the tag in
  2452. the first argument and hoped that the existing c_p->ftrace was
  2453. still correct. But the ftrace-object already includes the tag
  2454. (or rather, the freason). Now, we pass the original ftrace in
  2455. the first argument. We also handle atom tags in the first
  2456. argument for backwards compatibility.
  2457. */
  2458. GetArg2(0, tmp_arg1, tmp_arg2);
  2459. c_p->fvalue = tmp_arg2;
  2460. if (c_p->freason == EXC_NULL) {
  2461. /* a safety check for the R10-0 case; should not happen */
  2462. c_p->ftrace = NIL;
  2463. c_p->freason = EXC_ERROR;
  2464. }
  2465. /* for R10-0 code, keep existing c_p->ftrace and hope it's correct */
  2466. switch (tmp_arg1) {
  2467. case am_throw:
  2468. c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
  2469. break;
  2470. case am_error:
  2471. c_p->freason = EXC_ERROR & ~EXF_SAVETRACE;
  2472. break;
  2473. case am_exit:
  2474. c_p->freason = EXC_EXIT & ~EXF_SAVETRACE;
  2475. break;
  2476. default:
  2477. {/* R10-1 and later
  2478. XXX note: should do sanity check on given trace if it can be
  2479. passed from a user! Currently only expecting generated calls.
  2480. */
  2481. struct StackTrace *s;
  2482. c_p->ftrace = tmp_arg1;
  2483. s = get_trace_from_exc(tmp_arg1);
  2484. if (s == NULL) {
  2485. c_p->freason = EXC_ERROR;
  2486. } else {
  2487. c_p->freason = PRIMARY_EXCEPTION(s->freason);
  2488. }
  2489. }
  2490. }
  2491. goto find_func_info;
  2492. }
  2493. OpCase(badmatch_s): {
  2494. GetArg1(0, tmp_arg1);
  2495. c_p->fvalue = tmp_arg1;
  2496. c_p->freason = BADMATCH;
  2497. }
  2498. /* Fall through here */
  2499. find_func_info: {
  2500. reg[0] = r(0);
  2501. SWAPOUT;
  2502. I = handle_error(c_p, I, reg, NULL);
  2503. goto post_error_handling;
  2504. }
  2505. OpCase(call_error_handler):
  2506. /*
  2507. * At this point, I points to the code[3] in the export entry for
  2508. * a function which is not loaded.
  2509. *
  2510. * code[0]: Module
  2511. * code[1]: Function
  2512. * code[2]: Arity
  2513. * code[3]: &&call_error_handler
  2514. * code[4]: Not used
  2515. */
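/*
 * Recovering the Export entry from I is a fixed-offset computation;
 * the traced-call instruction later in this file uses exactly this
 * expression. Sketched here for reference (illustrative only):
 */
#if 0
Export* ep = (Export *) (((char *)I) - (offsetof(Export, code) + 3*sizeof(Eterm)));
/* ep->code[0] == Module, ep->code[1] == Function, ep->code[2] == Arity */
#endif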
  2516. SWAPOUT;
  2517. reg[0] = r(0);
  2518. tmp_arg1 = call_error_handler(c_p, I-3, reg);
  2519. r(0) = reg[0];
  2520. SWAPIN;
  2521. if (tmp_arg1) {
  2522. SET_I(c_p->i);
  2523. Dispatch();
  2524. }
  2525. /* Fall through */
  2526. OpCase(error_action_code): {
  2527. no_error_handler:
  2528. reg[0] = r(0);
  2529. SWAPOUT;
  2530. I = handle_error(c_p, NULL, reg, NULL);
  2531. post_error_handling:
  2532. if (I == 0) {
  2533. goto do_schedule;
  2534. } else {
  2535. r(0) = reg[0];
  2536. ASSERT(!is_value(r(0)));
  2537. if (c_p->mbuf) {
  2538. erts_garbage_collect(c_p, 0, reg+1, 3);
  2539. }
  2540. SWAPIN;
  2541. Goto(*I);
  2542. }
  2543. }
  2544. OpCase(apply_bif):
  2545. /*
  2546. * At this point, I points to the code[3] in the export entry for
  2547. * the BIF:
  2548. *
  2549. * code[0]: Module
  2550. * code[1]: Function
  2551. * code[2]: Arity
  2552. * code[3]: &&apply_bif
  2553. * code[4]: Function pointer to BIF function
  2554. */
  2555. {
  2556. BifFunction vbf;
  2557. c_p->current = I-3; /* In case we apply process_info/1,2. */
  2558. c_p->i = I; /* In case we apply check_process_code/2. */
  2559. c_p->arity = 0; /* To allow garbage collection on ourselves
  2560. * (check_process_code/2).
  2561. */
  2562. SWAPOUT;
  2563. c_p->fcalls = FCALLS - 1;
  2564. vbf = (BifFunction) Arg(0);
  2565. PROCESS_MAIN_CHK_LOCKS(c_p);
  2566. tmp_arg2 = I[-1];
  2567. ASSERT(tmp_arg2 <= 3);
  2568. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  2569. switch (tmp_arg2) {
  2570. case 3:
  2571. {
  2572. Eterm (*bf)(Process*, Eterm, Eterm, Eterm, Uint*) = vbf;
  2573. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2574. tmp_arg1 = (*bf)(c_p, r(0), x(1), x(2), I);
  2575. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
  2576. PROCESS_MAIN_CHK_LOCKS(c_p);
  2577. }
  2578. break;
  2579. case 2:
  2580. {
  2581. Eterm (*bf)(Process*, Eterm, Eterm, Uint*) = vbf;
  2582. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2583. tmp_arg1 = (*bf)(c_p, r(0), x(1), I);
  2584. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
  2585. PROCESS_MAIN_CHK_LOCKS(c_p);
  2586. }
  2587. break;
  2588. case 1:
  2589. {
  2590. Eterm (*bf)(Process*, Eterm, Uint*) = vbf;
  2591. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2592. tmp_arg1 = (*bf)(c_p, r(0), I);
  2593. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
  2594. PROCESS_MAIN_CHK_LOCKS(c_p);
  2595. }
  2596. break;
  2597. case 0:
  2598. {
  2599. Eterm (*bf)(Process*, Uint*) = vbf;
  2600. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2601. tmp_arg1 = (*bf)(c_p, I);
  2602. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
  2603. PROCESS_MAIN_CHK_LOCKS(c_p);
  2604. break;
  2605. }
  2606. }
  2607. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  2608. ERTS_HOLE_CHECK(c_p);
  2609. if (c_p->mbuf) {
  2610. reg[0] = r(0);
  2611. tmp_arg1 = erts_gc_after_bif_call(c_p, tmp_arg1, reg, tmp_arg2);
  2612. r(0) = reg[0];
  2613. }
  2614. SWAPIN; /* There might have been a garbage collection. */
  2615. FCALLS = c_p->fcalls;
  2616. if (is_value(tmp_arg1)) {
  2617. r(0) = tmp_arg1;
  2618. CHECK_TERM(r(0));
  2619. SET_I(c_p->cp);
  2620. Goto(*I);
  2621. } else if (c_p->freason == RESCHEDULE) {
  2622. Eterm* argp = c_p->arg_reg;
  2623. c_p->arity = I[-1];
  2624. argp[0] = r(0);
  2625. argp[1] = x(1);
  2626. argp[2] = x(2);
  2627. SWAPOUT;
  2628. c_p->i = I;
  2629. c_p->current = NULL;
  2630. goto do_schedule;
  2631. } else if (c_p->freason == TRAP) {
  2632. SET_I(((Export *)(c_p->def_arg_reg[3]))->address);
  2633. r(0) = c_p->def_arg_reg[0];
  2634. x(1) = c_p->def_arg_reg[1];
  2635. x(2) = c_p->def_arg_reg[2];
  2636. Dispatch();
  2637. }
  2638. reg[0] = r(0);
  2639. I = handle_error(c_p, c_p->cp, reg, vbf);
  2640. goto post_error_handling;
  2641. }
  2642. OpCase(i_get_sd):
  2643. {
  2644. Eterm arg;
  2645. Eterm result;
  2646. GetArg1(0, arg);
  2647. result = erts_pd_hash_get(c_p, arg);
  2648. StoreBifResult(1, result);
  2649. }
  2650. OpCase(i_put_tuple_only_Ad): {
  2651. tmp_arg1 = make_tuple(HTOP);
  2652. *HTOP++ = Arg(0);
  2653. StoreBifResult(1, tmp_arg1);
  2654. }
  2655. OpCase(case_end_s):
  2656. GetArg1(0, tmp_arg1);
  2657. c_p->fvalue = tmp_arg1;
  2658. c_p->freason = EXC_CASE_CLAUSE;
  2659. goto find_func_info;
  2660. OpCase(if_end):
  2661. c_p->freason = EXC_IF_CLAUSE;
  2662. goto find_func_info;
  2663. OpCase(i_func_info_IaaI): {
  2664. c_p->freason = EXC_FUNCTION_CLAUSE;
  2665. c_p->current = I + 2;
  2666. goto lb_error_action_code;
  2667. }
  2668. OpCase(try_case_end_s):
  2669. GetArg1(0, tmp_arg1);
  2670. c_p->fvalue = tmp_arg1;
  2671. c_p->freason = EXC_TRY_CLAUSE;
  2672. goto find_func_info;
  2673. /*
  2674. * Construction of binaries using new instructions.
  2675. */
  2676. {
  2677. Eterm new_binary;
  2678. Eterm num_bits_term;
  2679. Uint num_bits;
  2680. Uint alloc;
  2681. Uint num_bytes;
  2682. OpCase(i_bs_init_bits_heap_IIId): {
  2683. num_bits = Arg(0);
  2684. alloc = Arg(1);
  2685. I++;
  2686. goto do_bs_init_bits_known;
  2687. }
  2688. OpCase(i_bs_init_bits_IId): {
  2689. num_bits = Arg(0);
  2690. alloc = 0;
  2691. goto do_bs_init_bits_known;
  2692. }
  2693. OpCase(i_bs_init_bits_fail_heap_IjId): {
  2694. /* tmp_arg1 was fetched by an i_fetch instruction */
  2695. num_bits_term = tmp_arg1;
  2696. alloc = Arg(0);
  2697. I++;
  2698. goto do_bs_init_bits;
  2699. }
  2700. OpCase(i_bs_init_bits_fail_rjId): {
  2701. num_bits_term = r(0);
  2702. alloc = 0;
  2703. goto do_bs_init_bits;
  2704. }
  2705. OpCase(i_bs_init_bits_fail_yjId): {
  2706. num_bits_term = yb(Arg(0));
  2707. I++;
  2708. alloc = 0;
  2709. goto do_bs_init_bits;
  2710. }
  2711. OpCase(i_bs_init_bits_fail_xjId): {
  2712. num_bits_term = xb(Arg(0));
  2713. I++;
  2714. alloc = 0;
  2715. /* FALL THROUGH */
  2716. }
  2717. /* num_bits_term = Term for number of bits to build (small/big)
  2718. * alloc = Number of words to allocate on heap
  2719. * Operands: Fail Live Dst
  2720. */
  2721. do_bs_init_bits:
  2722. if (is_small(num_bits_term)) {
  2723. Sint size = signed_val(num_bits_term);
  2724. if (size < 0) {
  2725. goto badarg;
  2726. }
  2727. num_bits = (Uint) size;
  2728. } else if (is_big(num_bits_term) &&
  2729. !bignum_header_is_neg(*big_val(num_bits_term))) {
  2730. Uint bits;
  2731. if (!term_to_Uint(num_bits_term, &bits)) {
  2732. goto system_limit;
  2733. }
  2734. num_bits = (Eterm) bits;
  2735. } else {
  2736. goto badarg;
  2737. }
  2738. /* num_bits = Number of bits to build
  2739. * alloc = Number of extra words to allocate on heap
  2740. * Operands: NotUsed Live Dst
  2741. */
  2742. do_bs_init_bits_known:
  2743. num_bytes = (num_bits+7) >> 3;
  2744. if (num_bits & 7) {
  2745. alloc += ERL_SUB_BIN_SIZE;
  2746. }
  2747. if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
  2748. alloc += heap_bin_size(num_bytes);
  2749. } else {
  2750. alloc += PROC_BIN_SIZE;
  2751. }
  2752. TestHeap(alloc, Arg(1));
  2753. /* num_bits = Number of bits to build
  2754. * num_bytes = Number of bytes to allocate in the binary
  2755. * alloc = Total number of words to allocate on heap
  2756. * Operands: NotUsed NotUsed Dst
  2757. */
  2758. if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
  2759. ErlHeapBin* hb;
  2760. erts_bin_offset = 0;
  2761. erts_writable_bin = 0;
  2762. hb = (ErlHeapBin *) HTOP;
  2763. HTOP += heap_bin_size(num_bytes);
  2764. hb->thing_word = header_heap_bin(num_bytes);
  2765. hb->size = num_bytes;
  2766. erts_current_bin = (byte *) hb->data;
  2767. new_binary = make_binary(hb);
  2768. do_bits_sub_bin:
  2769. if (num_bits & 7) {
  2770. ErlSubBin* sb;
  2771. sb = (ErlSubBin *) HTOP;
  2772. HTOP += ERL_SUB_BIN_SIZE;
  2773. sb->thing_word = HEADER_SUB_BIN;
  2774. sb->size = num_bytes - 1;
  2775. sb->bitsize = num_bits & 7;
  2776. sb->offs = 0;
  2777. sb->bitoffs = 0;
  2778. sb->is_writable = 0;
  2779. sb->orig = new_binary;
  2780. new_binary = make_binary(sb);
  2781. }
  2782. StoreBifResult(2, new_binary);
  2783. } else {
  2784. Binary* bptr;
  2785. ProcBin* pb;
  2786. erts_bin_offset = 0;
  2787. erts_writable_bin = 0;
  2788. /*
  2789. * Allocate the binary struct itself.
  2790. */
  2791. bptr = erts_bin_nrml_alloc(num_bytes);
  2792. bptr->flags = 0;
  2793. bptr->orig_size = num_bytes;
  2794. erts_refc_init(&bptr->refc, 1);
  2795. erts_current_bin = (byte *) bptr->orig_bytes;
  2796. /*
  2797. * Now allocate the ProcBin on the heap.
  2798. */
  2799. pb = (ProcBin *) HTOP;
  2800. HTOP += PROC_BIN_SIZE;
  2801. pb->thing_word = HEADER_PROC_BIN;
  2802. pb->size = num_bytes;
  2803. pb->next = MSO(c_p).mso;
  2804. MSO(c_p).mso = pb;
  2805. pb->val = bptr;
  2806. pb->bytes = (byte*) bptr->orig_bytes;
  2807. pb->flags = 0;
  2808. MSO(c_p).overhead += pb->size / BINARY_OVERHEAD_FACTOR / sizeof(Eterm);
  2809. new_binary = make_binary(pb);
  2810. goto do_bits_sub_bin;
  2811. }
  2812. }
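/*
 * The allocation policy above, gathered in one place (illustrative
 * only; the names are the ones used in this file): small binaries
 * live on the process heap as heap binaries, larger ones off-heap
 * behind a ProcBin, and a bit count that is not a multiple of 8
 * additionally needs an ErlSubBin.
 */
#if 0
Uint num_bytes = (num_bits + 7) >> 3;
Uint words = (num_bits & 7) ? ERL_SUB_BIN_SIZE : 0;
words += (num_bytes <= ERL_ONHEAP_BIN_LIMIT)
    ? heap_bin_size(num_bytes)   /* heap binary */
    : PROC_BIN_SIZE;             /* ProcBin referencing off-heap data */
#endif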
  2813. {
  2814. OpCase(i_bs_init_fail_heap_IjId): {
  2815. /* tmp_arg1 was fetched by an i_fetch instruction */
  2816. tmp_arg2 = Arg(0);
  2817. I++;
  2818. goto do_bs_init;
  2819. }
  2820. OpCase(i_bs_init_fail_rjId): {
  2821. tmp_arg1 = r(0);
  2822. tmp_arg2 = 0;
  2823. goto do_bs_init;
  2824. }
  2825. OpCase(i_bs_init_fail_yjId): {
  2826. tmp_arg1 = yb(Arg(0));
  2827. tmp_arg2 = 0;
  2828. I++;
  2829. goto do_bs_init;
  2830. }
  2831. OpCase(i_bs_init_fail_xjId): {
  2832. tmp_arg1 = xb(Arg(0));
  2833. tmp_arg2 = 0;
  2834. I++;
  2835. }
  2836. /* FALL THROUGH */
  2837. do_bs_init:
  2838. if (is_small(tmp_arg1)) {
  2839. Sint size = signed_val(tmp_arg1);
  2840. if (size < 0) {
  2841. goto badarg;
  2842. }
  2843. tmp_arg1 = (Eterm) size;
  2844. } else if (is_big(tmp_arg1) && !bignum_header_is_neg(*big_val(tmp_arg1))) {
  2845. Sint bytes;
  2846. if (!term_to_Sint(tmp_arg1, &bytes)) {
  2847. goto system_limit;
  2848. }
  2849. if (bytes < 0) {
  2850. goto badarg;
  2851. }
  2852. if ((bytes >> (8*sizeof(Uint)-3)) != 0) {
  2853. goto system_limit;
  2854. }
  2855. tmp_arg1 = (Eterm) bytes;
  2856. } else {
  2857. goto badarg;
  2858. }
  2859. if (tmp_arg1 <= ERL_ONHEAP_BIN_LIMIT) {
  2860. goto do_heap_bin_alloc;
  2861. } else {
  2862. goto do_proc_bin_alloc;
  2863. }
  2864. OpCase(i_bs_init_heap_IIId): {
  2865. tmp_arg1 = Arg(0);
  2866. tmp_arg2 = Arg(1);
  2867. I++;
  2868. goto do_proc_bin_alloc;
  2869. }
  2870. OpCase(i_bs_init_IId): {
  2871. tmp_arg1 = Arg(0);
  2872. tmp_arg2 = 0;
  2873. }
  2874. /* FALL THROUGH */
  2875. do_proc_bin_alloc: {
  2876. Binary* bptr;
  2877. ProcBin* pb;
  2878. erts_bin_offset = 0;
  2879. erts_writable_bin = 0;
  2880. TestHeap(tmp_arg2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, Arg(1));
  2881. /*
  2882. * Allocate the binary struct itself.
  2883. */
  2884. bptr = erts_bin_nrml_alloc(tmp_arg1);
  2885. bptr->flags = 0;
  2886. bptr->orig_size = tmp_arg1;
  2887. erts_refc_init(&bptr->refc, 1);
  2888. erts_current_bin = (byte *) bptr->orig_bytes;
  2889. /*
  2890. * Now allocate the ProcBin on the heap.
  2891. */
  2892. pb = (ProcBin *) HTOP;
  2893. HTOP += PROC_BIN_SIZE;
  2894. pb->thing_word = HEADER_PROC_BIN;
  2895. pb->size = tmp_arg1;
  2896. pb->next = MSO(c_p).mso;
  2897. MSO(c_p).mso = pb;
  2898. pb->val = bptr;
  2899. pb->bytes = (byte*) bptr->orig_bytes;
  2900. pb->flags = 0;
  2901. MSO(c_p).overhead += pb->size / BINARY_OVERHEAD_FACTOR / sizeof(Eterm);
  2902. StoreBifResult(2, make_binary(pb));
  2903. }
  2904. OpCase(i_bs_init_heap_bin_heap_IIId): {
  2905. tmp_arg1 = Arg(0);
  2906. tmp_arg2 = Arg(1);
  2907. I++;
  2908. goto do_heap_bin_alloc;
  2909. }
  2910. OpCase(i_bs_init_heap_bin_IId): {
  2911. tmp_arg1 = Arg(0);
  2912. tmp_arg2 = 0;
  2913. }
  2914. /* Fall through */
  2915. do_heap_bin_alloc:
  2916. {
  2917. ErlHeapBin* hb;
  2918. Uint bin_need;
  2919. bin_need = heap_bin_size(tmp_arg1);
  2920. erts_bin_offset = 0;
  2921. erts_writable_bin = 0;
  2922. TestHeap(bin_need+tmp_arg2+ERL_SUB_BIN_SIZE, Arg(1));
  2923. hb = (ErlHeapBin *) HTOP;
  2924. HTOP += bin_need;
  2925. hb->thing_word = header_heap_bin(tmp_arg1);
  2926. hb->size = tmp_arg1;
  2927. erts_current_bin = (byte *) hb->data;
  2928. tmp_arg1 = make_binary(hb);
  2929. StoreBifResult(2, tmp_arg1);
  2930. }
  2931. }
  2932. OpCase(i_bs_bits_to_bytes_rjd): {
  2933. tmp_arg1 = r(0);
  2934. goto do_bits_to_bytes;
  2935. }
  2936. OpCase(i_bs_bits_to_bytes_yjd): {
  2937. tmp_arg1 = yb(Arg(0));
  2938. I++;
  2939. goto do_bits_to_bytes;
  2940. OpCase(i_bs_bits_to_bytes_xjd): {
  2941. tmp_arg1 = xb(Arg(0));
  2942. I++;
  2943. }
  2944. do_bits_to_bytes:
  2945. {
  2946. if (is_valid_bit_size(tmp_arg1)) {
  2947. tmp_arg1 = make_small(unsigned_val(tmp_arg1) >> 3);
  2948. } else {
  2949. Uint bytes;
  2950. if (!term_to_Uint(tmp_arg1, &bytes)) {
  2951. goto badarg;
  2952. }
  2953. tmp_arg1 = bytes;
  2954. if ((tmp_arg1 & 0x07) != 0) {
  2955. goto badarg;
  2956. }
  2957. SWAPOUT;
  2958. tmp_arg1 = erts_make_integer(tmp_arg1 >> 3, c_p);
  2959. HTOP = HEAP_TOP(c_p);
  2960. }
  2961. StoreBifResult(1, tmp_arg1);
  2962. }
  2963. }
  2964. OpCase(i_bs_add_jId): {
  2965. Uint Unit = Arg(1);
  2966. if (is_both_small(tmp_arg1, tmp_arg2)) {
  2967. Sint Arg1 = signed_val(tmp_arg1);
  2968. Sint Arg2 = signed_val(tmp_arg2);
  2969. if (Arg1 >= 0 && Arg2 >= 0) {
  2970. BsSafeMul(Arg2, Unit, goto system_limit, tmp_arg1);
  2971. if ((Sint) tmp_arg1 < 0) {
  2972. goto system_limit;
  2973. }
  2974. tmp_arg1 += Arg1;
  2975. store_bs_add_result:
  2976. if (MY_IS_SSMALL((Sint) tmp_arg1)) {
  2977. tmp_arg1 = make_small(tmp_arg1);
  2978. } else {
  2979. /*
  2980. * May generate a heap fragment, but in this particular case it is OK,
  2981. * since the value will be stored into an x register (the GC will
  2982. * scan x registers for references to heap fragments) and there is no
2983. * risk that the value will be stored into a location that is not scanned
  2984. * for heap-fragment references (such as the heap).
  2985. */
  2986. SWAPOUT;
  2987. tmp_arg1 = erts_make_integer(tmp_arg1, c_p);
  2988. HTOP = HEAP_TOP(c_p);
  2989. }
  2990. StoreBifResult(2, tmp_arg1);
  2991. }
  2992. } else {
  2993. Uint a;
  2994. Uint b;
  2995. if (term_to_Uint(tmp_arg1, &a) && term_to_Uint(tmp_arg2, &b)) {
  2996. BsSafeMul(b, Unit, goto system_limit, tmp_arg1);
  2997. if ((Sint) tmp_arg1 < 0) {
  2998. goto system_limit;
  2999. }
  3000. tmp_arg1 += a;
  3001. goto store_bs_add_result;
  3002. }
  3003. }
  3004. /*
  3005. * Fall through to here if there were any errors in the arguments.
  3006. */
  3007. goto badarg;
  3008. }
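/*
 * BsSafeMul guards the Size*Unit product against overflow. A sketch
 * of the standard multiply-then-verify-by-division idiom (illustrative
 * only, assuming unit > 0; the actual macro lives in the bit-syntax
 * headers):
 */
#if 0
Uint res = size * unit;
if (res / unit != size) {
    goto system_limit;   /* the product overflowed a Uint */
}
#endif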
  3009. OpCase(bs_put_string_II):
  3010. {
  3011. Eterm* next;
  3012. PreFetch(2, next);
  3013. erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0)));
  3014. NextPF(2, next);
  3015. }
  3016. /*
  3017. * tmp_arg1 = Number of bytes to build
  3018. * tmp_arg2 = Source binary
  3019. * Operands: Fail ExtraHeap Live Unit Dst
  3020. */
  3021. OpCase(i_bs_append_jIIId): {
  3022. Uint live = Arg(2);
  3023. Uint res;
  3024. SWAPOUT;
  3025. reg[0] = r(0);
  3026. reg[live] = tmp_arg2;
  3027. res = erts_bs_append(c_p, reg, live, tmp_arg1, Arg(1), Arg(3));
  3028. r(0) = reg[0];
  3029. SWAPIN;
  3030. if (is_non_value(res)) {
  3031. /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
  3032. goto lb_Cl_error;
  3033. }
  3034. StoreBifResult(4, res);
  3035. }
  3036. /*
  3037. * tmp_arg1 = Number of bytes to build
  3038. * tmp_arg2 = Source binary
  3039. * Operands: Fail Unit Dst
  3040. */
  3041. OpCase(i_bs_private_append_jId): {
  3042. Eterm res;
  3043. res = erts_bs_private_append(c_p, tmp_arg2, tmp_arg1, Arg(1));
  3044. if (is_non_value(res)) {
  3045. goto system_limit;
  3046. }
  3047. StoreBifResult(2, res);
  3048. }
  3049. /*
  3050. * tmp_arg1 = Initial size of writable binary
  3051. * Operands: Live Dst
  3052. */
  3053. OpCase(bs_init_writable): {
  3054. SWAPOUT;
  3055. r(0) = erts_bs_init_writable(c_p, r(0));
  3056. SWAPIN;
  3057. Next(0);
  3058. }
  3059. /*
  3060. * Matching of binaries.
  3061. */
  3062. {
  3063. Eterm header;
  3064. Eterm* next;
  3065. Uint slots;
  3066. OpCase(i_bs_start_match2_rfIId): {
  3067. tmp_arg1 = r(0);
  3068. do_start_match:
  3069. slots = Arg(2);
  3070. if (!is_boxed(tmp_arg1)) {
  3071. ClauseFail();
  3072. }
  3073. PreFetch(4, next);
  3074. header = *boxed_val(tmp_arg1);
  3075. if (header_is_bin_matchstate(header)) {
  3076. ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
  3077. Uint actual_slots = HEADER_NUM_SLOTS(header);
  3078. ms->save_offset[0] = ms->mb.offset;
  3079. if (actual_slots < slots) {
  3080. ErlBinMatchState* dst;
  3081. Uint live = Arg(1);
  3082. Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
  3083. TestHeapPreserve(wordsneeded, live, tmp_arg1);
  3084. ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
  3085. dst = (ErlBinMatchState *) HTOP;
  3086. *dst = *ms;
  3087. *HTOP = HEADER_BIN_MATCHSTATE(slots);
  3088. HTOP += wordsneeded;
  3089. StoreResult(make_matchstate(dst), Arg(3));
  3090. }
  3091. } else if (is_binary_header(header)) {
  3092. Eterm result;
  3093. Uint live = Arg(1);
  3094. Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
  3095. TestHeapPreserve(wordsneeded, live, tmp_arg1);
  3096. SWAPOUT;
  3097. result = erts_bs_start_match_2(c_p, tmp_arg1, slots);
  3098. HTOP = HEAP_TOP(c_p);
  3099. if (is_non_value(result)) {
  3100. ClauseFail();
  3101. } else {
  3102. StoreResult(result, Arg(3));
  3103. }
  3104. } else {
  3105. ClauseFail();
  3106. }
  3107. NextPF(4, next);
  3108. }
  3109. OpCase(i_bs_start_match2_xfIId): {
  3110. tmp_arg1 = xb(Arg(0));
  3111. I++;
  3112. goto do_start_match;
  3113. }
  3114. OpCase(i_bs_start_match2_yfIId): {
  3115. tmp_arg1 = yb(Arg(0));
  3116. I++;
  3117. goto do_start_match;
  3118. }
  3119. }
  3120. OpCase(bs_test_zero_tail2_fr): {
  3121. Eterm* next;
  3122. ErlBinMatchBuffer *_mb;
  3123. PreFetch(1, next);
  3124. _mb = (ErlBinMatchBuffer*) ms_matchbuffer(r(0));
  3125. if (_mb->size != _mb->offset) {
  3126. ClauseFail();
  3127. }
  3128. NextPF(1, next);
  3129. }
  3130. OpCase(bs_test_zero_tail2_fx): {
  3131. Eterm* next;
  3132. ErlBinMatchBuffer *_mb;
  3133. PreFetch(2, next);
  3134. _mb = (ErlBinMatchBuffer*) ms_matchbuffer(xb(Arg(1)));
  3135. if (_mb->size != _mb->offset) {
  3136. ClauseFail();
  3137. }
  3138. NextPF(2, next);
  3139. }
  3140. OpCase(bs_test_tail_imm2_frI): {
  3141. Eterm* next;
  3142. ErlBinMatchBuffer *_mb;
  3143. PreFetch(2, next);
  3144. _mb = ms_matchbuffer(r(0));
  3145. if (_mb->size - _mb->offset != Arg(1)) {
  3146. ClauseFail();
  3147. }
  3148. NextPF(2, next);
  3149. }
  3150. OpCase(bs_test_tail_imm2_fxI): {
  3151. Eterm* next;
  3152. ErlBinMatchBuffer *_mb;
  3153. PreFetch(3, next);
  3154. _mb = ms_matchbuffer(xb(Arg(1)));
  3155. if (_mb->size - _mb->offset != Arg(2)) {
  3156. ClauseFail();
  3157. }
  3158. NextPF(3, next);
  3159. }
  3160. OpCase(bs_test_unit_frI): {
  3161. Eterm* next;
  3162. ErlBinMatchBuffer *_mb;
  3163. PreFetch(2, next);
  3164. _mb = ms_matchbuffer(r(0));
  3165. if ((_mb->size - _mb->offset) % Arg(1)) {
  3166. ClauseFail();
  3167. }
  3168. NextPF(2, next);
  3169. }
  3170. OpCase(bs_test_unit_fxI): {
  3171. Eterm* next;
  3172. ErlBinMatchBuffer *_mb;
  3173. PreFetch(3, next);
  3174. _mb = ms_matchbuffer(xb(Arg(1)));
  3175. if ((_mb->size - _mb->offset) % Arg(2)) {
  3176. ClauseFail();
  3177. }
  3178. NextPF(3, next);
  3179. }
  3180. OpCase(bs_test_unit8_fr): {
  3181. Eterm* next;
  3182. ErlBinMatchBuffer *_mb;
  3183. PreFetch(1, next);
  3184. _mb = ms_matchbuffer(r(0));
  3185. if ((_mb->size - _mb->offset) & 7) {
  3186. ClauseFail();
  3187. }
  3188. NextPF(1, next);
  3189. }
  3190. OpCase(bs_test_unit8_fx): {
  3191. Eterm* next;
  3192. ErlBinMatchBuffer *_mb;
  3193. PreFetch(2, next);
  3194. _mb = ms_matchbuffer(xb(Arg(1)));
  3195. if ((_mb->size - _mb->offset) & 7) {
  3196. ClauseFail();
  3197. }
  3198. NextPF(2, next);
  3199. }
  3200. OpCase(i_bs_get_integer_8_rfd): {
  3201. tmp_arg1 = r(0);
  3202. goto do_bs_get_integer_8;
  3203. }
  3204. OpCase(i_bs_get_integer_8_xfd): {
  3205. tmp_arg1 = xb(Arg(0));
  3206. I++;
  3207. }
  3208. do_bs_get_integer_8: {
  3209. ErlBinMatchBuffer *_mb;
  3210. Eterm _result;
  3211. _mb = ms_matchbuffer(tmp_arg1);
  3212. if (_mb->size - _mb->offset < 8) {
  3213. ClauseFail();
  3214. }
  3215. if (BIT_OFFSET(_mb->offset) != 0) {
  3216. _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
  3217. } else {
  3218. _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
  3219. _mb->offset += 8;
  3220. }
  3221. StoreBifResult(1, _result);
  3222. }
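/*
 * The byte-aligned fast path above turns an 8-bit extraction into a
 * single indexed load. Conceptually (illustrative only) the bit
 * offset splits as:
 */
#if 0
byte_index = offset >> 3;   /* BYTE_OFFSET(offset) */
bit_remainder = offset & 7; /* BIT_OFFSET(offset) */
#endif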
  3223. OpCase(i_bs_get_integer_16_rfd): {
  3224. tmp_arg1 = r(0);
  3225. goto do_bs_get_integer_16;
  3226. }
  3227. OpCase(i_bs_get_integer_16_xfd): {
  3228. tmp_arg1 = xb(Arg(0));
  3229. I++;
  3230. }
  3231. do_bs_get_integer_16: {
  3232. ErlBinMatchBuffer *_mb;
  3233. Eterm _result;
  3234. _mb = ms_matchbuffer(tmp_arg1);
  3235. if (_mb->size - _mb->offset < 16) {
  3236. ClauseFail();
  3237. }
  3238. if (BIT_OFFSET(_mb->offset) != 0) {
  3239. _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
  3240. } else {
  3241. _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
  3242. _mb->offset += 16;
  3243. }
  3244. StoreBifResult(1, _result);
  3245. }
  3246. OpCase(i_bs_get_integer_32_rfId): {
  3247. tmp_arg1 = r(0);
  3248. goto do_bs_get_integer_32;
  3249. }
  3250. OpCase(i_bs_get_integer_32_xfId): {
  3251. tmp_arg1 = xb(Arg(0));
  3252. I++;
  3253. }
  3254. do_bs_get_integer_32: {
  3255. ErlBinMatchBuffer *_mb;
  3256. Uint32 _integer;
  3257. Eterm _result;
  3258. _mb = ms_matchbuffer(tmp_arg1);
  3259. if (_mb->size - _mb->offset < 32) { ClauseFail(); }
  3260. if (BIT_OFFSET(_mb->offset) != 0) {
  3261. _integer = erts_bs_get_unaligned_uint32(c_p, _mb);
  3262. } else {
  3263. _integer = get_int32(_mb->base + _mb->offset/8);
  3264. }
  3265. _mb->offset += 32;
  3266. #ifndef ARCH_64
  3267. if (IS_USMALL(0, _integer)) {
  3268. #endif
  3269. _result = make_small(_integer);
  3270. #ifndef ARCH_64
  3271. } else {
  3272. TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
  3273. _result = uint_to_big((Uint) _integer, HTOP);
  3274. HTOP += BIG_UINT_HEAP_SIZE;
  3275. }
  3276. #endif
  3277. StoreBifResult(2, _result);
  3278. }
  3279. /* Operands: Size Live Fail Flags Dst */
  3280. OpCase(i_bs_get_integer_imm_rIIfId): {
  3281. tmp_arg1 = r(0);
  3282. /* Operands: Size Live Fail Flags Dst */
  3283. goto do_bs_get_integer_imm_test_heap;
  3284. }
  3285. /* Operands: x(Reg) Size Live Fail Flags Dst */
  3286. OpCase(i_bs_get_integer_imm_xIIfId): {
  3287. tmp_arg1 = xb(Arg(0));
  3288. I++;
  3289. /* Operands: Size Live Fail Flags Dst */
  3290. goto do_bs_get_integer_imm_test_heap;
  3291. }
  3292. /*
  3293. * tmp_arg1 = match context
  3294. * Operands: Size Live Fail Flags Dst
  3295. */
  3296. do_bs_get_integer_imm_test_heap: {
  3297. Uint wordsneeded;
  3298. tmp_arg2 = Arg(0);
  3299. wordsneeded = 1+WSIZE(NBYTES(tmp_arg2));
  3300. TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
  3301. I += 2;
  3302. /* Operands: Fail Flags Dst */
  3303. goto do_bs_get_integer_imm;
  3304. }
  3305. /* Operands: Size Fail Flags Dst */
  3306. OpCase(i_bs_get_integer_small_imm_rIfId): {
  3307. tmp_arg1 = r(0);
  3308. tmp_arg2 = Arg(0);
  3309. I++;
  3310. /* Operands: Fail Flags Dst */
  3311. goto do_bs_get_integer_imm;
  3312. }
  3313. /* Operands: x(Reg) Size Fail Flags Dst */
  3314. OpCase(i_bs_get_integer_small_imm_xIfId): {
  3315. tmp_arg1 = xb(Arg(0));
  3316. tmp_arg2 = Arg(1);
  3317. I += 2;
  3318. /* Operands: Fail Flags Dst */
  3319. goto do_bs_get_integer_imm;
  3320. }
  3321. /*
  3322. * tmp_arg1 = match context
  3323. * tmp_arg2 = size of field
  3324. * Operands: Fail Flags Dst
  3325. */
  3326. do_bs_get_integer_imm: {
  3327. ErlBinMatchBuffer* mb;
  3328. Eterm result;
  3329. mb = ms_matchbuffer(tmp_arg1);
  3330. SWAPOUT;
  3331. result = erts_bs_get_integer_2(c_p, tmp_arg2, Arg(1), mb);
  3332. HTOP = HEAP_TOP(c_p);
  3333. if (is_non_value(result)) {
  3334. ClauseFail();
  3335. }
  3336. StoreBifResult(2, result);
  3337. }
  3338. /*
  3339. * tmp_arg1 = Match context
  3340. * tmp_arg2 = Size field
  3341. * Operands: Fail Live FlagsAndUnit Dst
  3342. */
  3343. OpCase(i_bs_get_integer_fIId): {
  3344. Uint flags;
  3345. Uint size;
  3346. ErlBinMatchBuffer* mb;
  3347. Eterm result;
  3348. flags = Arg(2);
  3349. BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size);
  3350. if (size >= SMALL_BITS) {
  3351. Uint wordsneeded = 1+WSIZE(NBYTES((Uint) size));
  3352. TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
  3353. }
  3354. mb = ms_matchbuffer(tmp_arg1);
  3355. SWAPOUT;
  3356. result = erts_bs_get_integer_2(c_p, size, flags, mb);
  3357. HTOP = HEAP_TOP(c_p);
  3358. if (is_non_value(result)) {
  3359. ClauseFail();
  3360. }
  3361. StoreBifResult(3, result);
  3362. }
  3363. {
  3364. ErlBinMatchBuffer* mb;
  3365. ErlSubBin* sb;
  3366. Uint size;
  3367. Uint offs;
  3368. Uint orig;
  3369. Uint hole_size;
  3370. OpCase(bs_context_to_binary_r): {
  3371. tmp_arg1 = x0;
  3372. I -= 2;
  3373. goto do_context_to_binary;
  3374. }
  3375. /* Unfortunately, inlining can generate this instruction. */
  3376. OpCase(bs_context_to_binary_y): {
  3377. tmp_arg1 = yb(Arg(0));
  3378. goto do_context_to_binary0;
  3379. }
  3380. OpCase(bs_context_to_binary_x): {
  3381. tmp_arg1 = xb(Arg(0));
  3382. do_context_to_binary0:
  3383. I--;
  3384. }
  3385. do_context_to_binary:
  3386. if (is_boxed(tmp_arg1) && header_is_bin_matchstate(*boxed_val(tmp_arg1))) {
  3387. ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
  3388. mb = &ms->mb;
  3389. offs = ms->save_offset[0];
  3390. size = mb->size - offs;
  3391. goto do_bs_get_binary_all_reuse_common;
  3392. }
  3393. Next(2);
  3394. OpCase(i_bs_get_binary_all_reuse_rfI): {
  3395. tmp_arg1 = x0;
  3396. goto do_bs_get_binary_all_reuse;
  3397. }
  3398. OpCase(i_bs_get_binary_all_reuse_xfI): {
  3399. tmp_arg1 = xb(Arg(0));
  3400. I++;
  3401. }
  3402. do_bs_get_binary_all_reuse:
  3403. mb = ms_matchbuffer(tmp_arg1);
  3404. size = mb->size - mb->offset;
  3405. if (size % Arg(1) != 0) {
  3406. ClauseFail();
  3407. }
  3408. offs = mb->offset;
  3409. do_bs_get_binary_all_reuse_common:
  3410. orig = mb->orig;
  3411. sb = (ErlSubBin *) boxed_val(tmp_arg1);
  3412. hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
  3413. sb->thing_word = HEADER_SUB_BIN;
  3414. sb->size = BYTE_OFFSET(size);
  3415. sb->bitsize = BIT_OFFSET(size);
  3416. sb->offs = BYTE_OFFSET(offs);
  3417. sb->bitoffs = BIT_OFFSET(offs);
  3418. sb->is_writable = 0;
  3419. sb->orig = orig;
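/*
 * A match state is wider than the sub binary that now overwrites it;
 * the leftover words are plugged below with a dummy positive-bignum
 * header so that heap traversals skip the hole.
 */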
  3420. if (hole_size) {
  3421. sb[1].thing_word = make_pos_bignum_header(hole_size-1);
  3422. }
  3423. Next(2);
  3424. }
  3425. {
  3426. OpCase(i_bs_match_string_rfII): {
  3427. tmp_arg1 = r(0);
  3428. goto do_bs_match_string;
  3429. }
  3430. OpCase(i_bs_match_string_xfII): {
  3431. tmp_arg1 = xb(Arg(0));
  3432. I++;
  3433. }
  3434. do_bs_match_string:
  3435. {
  3436. Eterm* next;
  3437. byte* bytes;
  3438. Uint bits;
  3439. ErlBinMatchBuffer* mb;
  3440. Uint offs;
  3441. PreFetch(3, next);
  3442. bits = Arg(1);
  3443. bytes = (byte *) Arg(2);
  3444. mb = ms_matchbuffer(tmp_arg1);
  3445. if (mb->size - mb->offset < bits) {
  3446. ClauseFail();
  3447. }
  3448. offs = mb->offset & 7;
  3449. if (offs == 0 && (bits & 7) == 0) {
  3450. if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) {
  3451. ClauseFail();
  3452. }
  3453. } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) {
  3454. ClauseFail();
  3455. }
  3456. mb->offset += bits;
  3457. NextPF(3, next);
  3458. }
  3459. }
  3460. OpCase(i_bs_save2_rI): {
  3461. Eterm* next;
  3462. ErlBinMatchState *_ms;
  3463. PreFetch(1, next);
  3464. _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
  3465. _ms->save_offset[Arg(0)] = _ms->mb.offset;
  3466. NextPF(1, next);
  3467. }
  3468. OpCase(i_bs_save2_xI): {
  3469. Eterm* next;
  3470. ErlBinMatchState *_ms;
  3471. PreFetch(2, next);
  3472. _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
  3473. _ms->save_offset[Arg(1)] = _ms->mb.offset;
  3474. NextPF(2, next);
  3475. }
  3476. OpCase(i_bs_restore2_rI): {
  3477. Eterm* next;
  3478. ErlBinMatchState *_ms;
  3479. PreFetch(1, next);
  3480. _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
  3481. _ms->mb.offset = _ms->save_offset[Arg(0)];
  3482. NextPF(1, next);
  3483. }
  3484. OpCase(i_bs_restore2_xI): {
  3485. Eterm* next;
  3486. ErlBinMatchState *_ms;
  3487. PreFetch(2, next);
  3488. _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
  3489. _ms->mb.offset = _ms->save_offset[Arg(1)];
  3490. NextPF(2, next);
  3491. }
  3492. #include "beam_cold.h"
  3493. /*
3494. * This instruction is probably never used (because it is combined with
3495. * a return). However, a future compiler might for some reason emit a
  3496. * deallocate not followed by a return, and that should work.
  3497. */
  3498. OpCase(deallocate_I): {
  3499. Eterm* next;
  3500. PreFetch(1, next);
  3501. D(Arg(0));
  3502. NextPF(1, next);
  3503. }
  3504. /*
  3505. * Trace and debugging support.
  3506. */
  3507. /*
  3508. * At this point, I points to the code[3] in the export entry for
  3509. * a trace-enabled function.
  3510. *
  3511. * code[0]: Module
  3512. * code[1]: Function
  3513. * code[2]: Arity
  3514. * code[3]: &&call_traced_function
  3515. * code[4]: Address of function.
  3516. */
  3517. OpCase(call_traced_function): {
  3518. if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
  3519. unsigned offset = offsetof(Export, code) + 3*sizeof(Eterm);
  3520. Export* ep = (Export *) (((char *)I)-offset);
  3521. Uint32 flags;
  3522. SWAPOUT;
  3523. reg[0] = r(0);
  3524. PROCESS_MAIN_CHK_LOCKS(c_p);
  3525. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3526. flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
  3527. 0, &c_p->tracer_proc);
  3528. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3529. PROCESS_MAIN_CHK_LOCKS(c_p);
  3530. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  3531. SWAPIN;
  3532. if (flags & MATCH_SET_RX_TRACE) {
  3533. ASSERT(c_p->htop <= E && E <= c_p->hend);
  3534. if (E - 3 < HTOP) {
  3535. /* SWAPOUT, SWAPIN was done and r(0) was saved above */
  3536. PROCESS_MAIN_CHK_LOCKS(c_p);
  3537. FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
  3538. PROCESS_MAIN_CHK_LOCKS(c_p);
  3539. r(0) = reg[0];
  3540. SWAPIN;
  3541. }
  3542. E -= 3;
  3543. ASSERT(c_p->htop <= E && E <= c_p->hend);
  3544. ASSERT(is_CP((Eterm)(ep->code)));
  3545. ASSERT(is_internal_pid(c_p->tracer_proc) ||
  3546. is_internal_port(c_p->tracer_proc));
  3547. E[2] = make_cp(c_p->cp);
  3548. E[1] = am_true; /* Process tracer */
  3549. E[0] = make_cp(ep->code);
  3550. c_p->cp = (Eterm*)
  3551. make_cp(flags & MATCH_SET_EXCEPTION_TRACE
  3552. ? beam_exception_trace : beam_return_trace);
  3553. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
  3554. c_p->trace_flags |= F_EXCEPTION_TRACE;
  3555. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
  3556. }
  3557. }
  3558. SET_I((Uint *) Arg(0));
  3559. Dispatch();
  3560. }
  3561. OpCase(return_trace): {
  3562. Uint* code = (Uint *) E[0];
  3563. SWAPOUT; /* Needed for shared heap */
  3564. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3565. erts_trace_return(c_p, code, r(0), E+1/*Process tracer*/);
  3566. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3567. SWAPIN;
  3568. c_p->cp = NULL;
  3569. SET_I((Eterm *) E[2]);
  3570. E += 3;
  3571. Goto(*I);
  3572. }
  3573. OpCase(i_count_breakpoint): {
  3574. Uint real_I;
  3575. ErtsCountBreak((Uint *) I, &real_I);
  3576. ASSERT(VALID_INSTR(real_I));
  3577. Goto(real_I);
  3578. }
  3579. OpCase(i_trace_breakpoint):
  3580. if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
  3581. Uint real_I;
  3582. ErtsBreakSkip((Uint *) I, &real_I);
  3583. Goto(real_I);
  3584. }
  3585. /* Fall through to next case */
  3586. OpCase(i_mtrace_breakpoint): {
  3587. Uint real_I;
  3588. Uint32 flags;
  3589. Eterm tracer_pid;
  3590. Uint *cpp;
  3591. int return_to_trace = 0, need = 0;
  3592. flags = 0;
  3593. SWAPOUT;
  3594. reg[0] = r(0);
  3595. if (*cp_val((Eterm)c_p->cp)
  3596. == (Uint) OpCode(return_trace)) {
  3597. cpp = (Uint*)&E[2];
  3598. } else if (*cp_val((Eterm)c_p->cp)
  3599. == (Uint) OpCode(i_return_to_trace)) {
  3600. return_to_trace = !0;
  3601. cpp = (Uint*)&E[0];
  3602. } else {
  3603. cpp = NULL;
  3604. }
  3605. if (cpp) {
3606. /* This _IS_ a tail-recursive call; if there are
3607. * return_trace and/or i_return_to_trace stack frames
3608. * on the stack, they are not intermixed with y registers.
  3609. */
  3610. Eterm *cp_save = c_p->cp;
  3611. for (;;) {
  3612. ASSERT(is_CP(*cpp));
  3613. if (*cp_val(*cpp) == (Uint) OpCode(return_trace)) {
  3614. cpp += 3;
  3615. } else if (*cp_val(*cpp) == (Uint) OpCode(i_return_to_trace)) {
  3616. return_to_trace = !0;
  3617. cpp += 1;
  3618. } else
  3619. break;
  3620. }
  3621. c_p->cp = (Eterm *) *cpp;
  3622. ASSERT(is_CP((Eterm)c_p->cp));
  3623. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3624. real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
  3625. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3626. SWAPIN; /* Needed by shared heap. */
  3627. c_p->cp = cp_save;
  3628. } else {
  3629. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3630. real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
  3631. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3632. SWAPIN; /* Needed by shared heap. */
  3633. }
  3634. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  3635. if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
  3636. need += 1;
  3637. }
  3638. if (flags & MATCH_SET_RX_TRACE) {
  3639. need += 3;
  3640. }
  3641. if (need) {
  3642. ASSERT(c_p->htop <= E && E <= c_p->hend);
  3643. if (E - need < HTOP) {
  3644. /* SWAPOUT was done and r(0) was saved above */
  3645. PROCESS_MAIN_CHK_LOCKS(c_p);
  3646. FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]);
  3647. PROCESS_MAIN_CHK_LOCKS(c_p);
  3648. r(0) = reg[0];
  3649. SWAPIN;
  3650. }
  3651. }
  3652. if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
  3653. E -= 1;
  3654. ASSERT(c_p->htop <= E && E <= c_p->hend);
  3655. E[0] = make_cp(c_p->cp);
  3656. c_p->cp = (Eterm *) make_cp(beam_return_to_trace);
  3657. }
  3658. if (flags & MATCH_SET_RX_TRACE) {
  3659. E -= 3;
  3660. ASSERT(c_p->htop <= E && E <= c_p->hend);
  3661. ASSERT(is_CP((Eterm) (I - 3)));
  3662. ASSERT(am_true == tracer_pid ||
  3663. is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
  3664. E[2] = make_cp(c_p->cp);
  3665. E[1] = tracer_pid;
3666. E[0] = make_cp(I - 3); /* We ARE at the beginning of an
3667. instruction;
3668. the func_info is above I. */
  3669. c_p->cp = (Eterm*)
  3670. make_cp(flags & MATCH_SET_EXCEPTION_TRACE
  3671. ? beam_exception_trace : beam_return_trace);
  3672. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
  3673. c_p->trace_flags |= F_EXCEPTION_TRACE;
  3674. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
  3675. }
  3676. Goto(real_I);
  3677. }
  3678. OpCase(i_return_to_trace): {
  3679. if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
  3680. Uint *cpp = (Uint*) E;
  3681. for(;;) {
  3682. ASSERT(is_CP(*cpp));
  3683. if (*cp_val(*cpp) == (Uint) OpCode(return_trace)) {
  3684. do ++cpp; while(is_not_CP(*cpp));
  3685. cpp += 2;
  3686. } else if (*cp_val(*cpp) == (Uint) OpCode(i_return_to_trace)) {
  3687. do ++cpp; while(is_not_CP(*cpp));
  3688. } else break;
  3689. }
  3690. SWAPOUT; /* Needed for shared heap */
  3691. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3692. erts_trace_return_to(c_p, cp_val(*cpp));
  3693. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3694. SWAPIN;
  3695. }
  3696. c_p->cp = NULL;
  3697. SET_I((Eterm *) E[0]);
  3698. E += 1;
  3699. Goto(*I);
  3700. }
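/*
 * Sketch of the stack walk above (illustrative only): starting at E, any
 * stacked return_trace frames (a CP plus two parameter words) and
 * i_return_to_trace frames (a lone CP) are stepped over; the first
 * continuation pointer that belongs to ordinary code is the location
 * reported through erts_trace_return_to().
 */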
  3701. /*
  3702. * Instructions for module_info/0,1.
  3703. */
  3704. OpCase(i_module_info_0): {
  3705. SWAPOUT;
  3706. r(0) = erts_module_info_0(c_p, I[-3]);
  3707. HTOP = HEAP_TOP(c_p);
  3708. SET_I(c_p->cp);
  3709. Goto(*I);
  3710. }
  3711. OpCase(i_module_info_1): {
  3712. Eterm res;
  3713. SWAPOUT;
  3714. res = erts_module_info_1(c_p, I[-3], r(0));
  3715. HTOP = HEAP_TOP(c_p);
  3716. if (is_value(res)) {
  3717. r(0) = res;
  3718. SET_I(c_p->cp);
  3719. Goto(*I);
  3720. }
  3721. c_p->freason = EXC_FUNCTION_CLAUSE;
  3722. c_p->current = I-3;
  3723. goto lb_error_action_code;
  3724. }
  3725. /*
  3726. * Instructions for allocating on the message area.
  3727. */
  3728. OpCase(i_global_cons):
  3729. {
  3730. Eterm *next;
  3731. #ifdef HYBRID
  3732. Eterm *hp;
  3733. PreFetch(0,next);
  3734. TestGlobalHeap(2,2,hp);
  3735. hp[0] = r(0);
  3736. hp[1] = x(1);
  3737. r(0) = make_list(hp);
  3738. #ifndef INCREMENTAL
  3739. global_htop += 2;
  3740. #endif
  3741. NextPF(0,next);
  3742. #else
  3743. PreFetch(0,next);
  3744. c_p->freason = EXC_INTERNAL_ERROR;
  3745. goto find_func_info;
  3746. #endif
  3747. }
  3748. OpCase(i_global_tuple):
  3749. {
  3750. Eterm *next;
  3751. int len;
  3752. #ifdef HYBRID
  3753. Eterm list;
  3754. Eterm *hp;
  3755. #endif
  3756. if ((len = list_length(r(0))) < 0) {
  3757. goto badarg;
  3758. }
  3759. PreFetch(0,next);
  3760. #ifdef HYBRID
  3761. TestGlobalHeap(len + 1,1,hp);
  3762. list = r(0);
  3763. r(0) = make_tuple(hp);
  3764. *hp++ = make_arityval(len);
  3765. while(is_list(list))
  3766. {
  3767. Eterm* cons = list_val(list);
  3768. *hp++ = CAR(cons);
  3769. list = CDR(cons);
  3770. }
  3771. #ifndef INCREMENTAL
  3772. global_htop += len + 1;
  3773. #endif
  3774. NextPF(0,next);
  3775. #else
  3776. c_p->freason = EXC_INTERNAL_ERROR;
  3777. goto find_func_info;
  3778. #endif
  3779. }
  3780. OpCase(i_global_copy):
  3781. {
  3782. Eterm *next;
  3783. PreFetch(0,next);
  3784. #ifdef HYBRID
  3785. if (!IS_CONST(r(0)))
  3786. {
  3787. BM_SWAP_TIMER(system,copy);
  3788. SWAPOUT;
  3789. reg[0] = r(0);
  3790. reg[1] = NIL;
  3791. r(0) = copy_struct_lazy(c_p,r(0),0);
  3792. ASSERT(ma_src_top == 0);
  3793. ASSERT(ma_dst_top == 0);
  3794. ASSERT(ma_offset_top == 0);
  3795. SWAPIN;
  3796. BM_SWAP_TIMER(copy,system);
  3797. }
  3798. NextPF(0,next);
  3799. #else
  3800. c_p->freason = EXC_INTERNAL_ERROR;
  3801. goto find_func_info;
  3802. #endif
  3803. }
  3804. /*
  3805. * New floating point instructions.
  3806. */
  3807. OpCase(fmove_ql): {
  3808. Eterm fr = Arg(1);
  3809. Eterm* next;
  3810. PreFetch(2, next);
  3811. GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
  3812. NextPF(2, next);
  3813. }
  3814. OpCase(fmove_dl): {
  3815. Eterm targ1;
  3816. Eterm fr = Arg(1);
  3817. Eterm* next;
  3818. PreFetch(2, next);
  3819. GetR(0, targ1);
  3820. /* Arg(0) == HEADER_FLONUM */
  3821. GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
  3822. NextPF(2, next);
  3823. }
  3824. OpCase(fmove_new_ld): {
  3825. Eterm fr = Arg(0);
  3826. Eterm dest = make_float(HTOP);
  3827. PUT_DOUBLE(*(FloatDef*)ADD_BYTE_OFFSET(freg, fr), HTOP);
  3828. HTOP += FLOAT_SIZE_OBJECT;
  3829. StoreBifResult(1, dest);
  3830. }
  3831. OpCase(fconv_dl): {
  3832. Eterm targ1;
  3833. Eterm fr = Arg(1);
  3834. Eterm* next;
  3835. GetR(0, targ1);
  3836. PreFetch(2, next);
  3837. if (is_small(targ1)) {
  3838. fb(fr) = (double) signed_val(targ1);
  3839. } else if (is_big(targ1)) {
  3840. if (big_to_double(targ1, &fb(fr)) < 0) {
  3841. goto fbadarith;
  3842. }
  3843. } else if (is_float(targ1)) {
  3844. GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
  3845. } else {
  3846. goto fbadarith;
  3847. }
  3848. NextPF(2, next);
  3849. }
  3850. /*
  3851. * Old allocating fmove.
  3852. */
  3853. #ifdef NO_FPE_SIGNALS
  3854. OpCase(fclearerror):
  3855. OpCase(i_fcheckerror):
  3856. erl_exit(1, "fclearerror/i_fcheckerror without fpe signals (beam_emu)");
  3857. #else
  3858. OpCase(fclearerror): {
  3859. Eterm* next;
  3860. PreFetch(0, next);
  3861. ERTS_FP_CHECK_INIT(c_p);
  3862. NextPF(0, next);
  3863. }
  3864. OpCase(i_fcheckerror): {
  3865. Eterm* next;
  3866. PreFetch(0, next);
  3867. ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith);
  3868. NextPF(0, next);
  3869. }
  3870. # undef ERTS_FP_CHECK_INIT
  3871. # undef ERTS_FP_ERROR
  3872. # define ERTS_FP_CHECK_INIT(p)
  3873. # define ERTS_FP_ERROR(p, a, b)
  3874. #endif
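/*
 * The guard pattern used by the arithmetic instructions below, shown in
 * isolation (a sketch; fb() indexes the floating point registers):
 *
 *   ERTS_FP_CHECK_INIT(c_p);                         -- arm error detection
 *   fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1));            -- the operation
 *   ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);  -- branch on error
 *
 * When the emulator is built with FPE signals, the fclearerror and
 * i_fcheckerror instructions above do the checking instead, and the two
 * macros have just been redefined to no-ops so the inline checks vanish.
 */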
  3875. OpCase(i_fadd_lll): {
  3876. Eterm* next;
  3877. PreFetch(3, next);
  3878. ERTS_FP_CHECK_INIT(c_p);
  3879. fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1));
  3880. ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  3881. NextPF(3, next);
  3882. }
  3883. OpCase(i_fsub_lll): {
  3884. Eterm* next;
  3885. PreFetch(3, next);
  3886. ERTS_FP_CHECK_INIT(c_p);
  3887. fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1));
  3888. ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  3889. NextPF(3, next);
  3890. }
  3891. OpCase(i_fmul_lll): {
  3892. Eterm* next;
  3893. PreFetch(3, next);
  3894. ERTS_FP_CHECK_INIT(c_p);
  3895. fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1));
  3896. ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  3897. NextPF(3, next);
  3898. }
  3899. OpCase(i_fdiv_lll): {
  3900. Eterm* next;
  3901. PreFetch(3, next);
  3902. ERTS_FP_CHECK_INIT(c_p);
  3903. fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1));
  3904. ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  3905. NextPF(3, next);
  3906. }
  3907. OpCase(i_fnegate_ll): {
  3908. Eterm* next;
  3909. PreFetch(2, next);
  3910. ERTS_FP_CHECK_INIT(c_p);
  3911. fb(Arg(1)) = -fb(Arg(0));
  3912. ERTS_FP_ERROR(c_p, fb(Arg(1)), goto fbadarith);
  3913. NextPF(2, next);
  3914. fbadarith:
  3915. c_p->freason = BADARITH;
  3916. goto find_func_info;
  3917. }
  3918. #ifdef HIPE
  3919. {
  3920. unsigned cmd;
  3921. OpCase(hipe_trap_call): {
  3922. /*
  3923. * I[-5]: &&lb_i_func_info_IaaI
  3924. * I[-4]: Native code callee (inserted by HiPE)
  3925. * I[-3]: Module (tagged atom)
  3926. * I[-2]: Function (tagged atom)
  3927. * I[-1]: Arity (untagged integer)
  3928. * I[ 0]: &&lb_hipe_trap_call
  3929. * ... remainder of original BEAM code
  3930. */
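/*
 * Illustrative sketch: the mode-switch command and the callee's arity
 * are packed into one word, low byte = command, remaining bits = arity
 * taken from the func_info word at I[-1]:
 *
 *   cmd   = HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8);
 *   arity = cmd >> 8;    -- hypothetical unpacking on the HiPE side
 */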
  3931. ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
  3932. c_p->hipe.ncallee = (void(*)(void)) I[-4];
  3933. cmd = HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8);
  3934. ++hipe_trap_count;
  3935. goto L_hipe_mode_switch;
  3936. }
  3937. OpCase(hipe_trap_call_closure): {
  3938. ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
  3939. c_p->hipe.ncallee = (void(*)(void)) I[-4];
  3940. cmd = HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8);
  3941. ++hipe_trap_count;
  3942. goto L_hipe_mode_switch;
  3943. }
  3944. OpCase(hipe_trap_return): {
  3945. cmd = HIPE_MODE_SWITCH_CMD_RETURN;
  3946. goto L_hipe_mode_switch;
  3947. }
  3948. OpCase(hipe_trap_throw): {
  3949. cmd = HIPE_MODE_SWITCH_CMD_THROW;
  3950. goto L_hipe_mode_switch;
  3951. }
  3952. OpCase(hipe_trap_resume): {
  3953. cmd = HIPE_MODE_SWITCH_CMD_RESUME;
  3954. goto L_hipe_mode_switch;
  3955. }
  3956. OpCase(hipe_trap_reschedule): {
  3957. cmd = HIPE_MODE_SWITCH_CMD_RESCHEDULE;
  3958. goto L_hipe_mode_switch;
  3959. }
  3960. L_hipe_mode_switch:
  3961. /* XXX: this abuse of def_arg_reg[] is horrid! */
  3962. SWAPOUT;
  3963. c_p->fcalls = FCALLS;
  3964. c_p->def_arg_reg[4] = -neg_o_reds;
  3965. reg[0] = r(0);
  3966. c_p = hipe_mode_switch(c_p, cmd, reg);
  3967. #ifdef ERTS_SMP
  3968. reg = c_p->scheduler_data->save_reg;
  3969. freg = c_p->scheduler_data->freg;
  3970. #endif
  3971. ERL_BITS_RELOAD_STATEP(c_p);
  3972. neg_o_reds = -c_p->def_arg_reg[4];
  3973. FCALLS = c_p->fcalls;
  3974. SWAPIN;
  3975. switch( c_p->def_arg_reg[3] ) {
  3976. case HIPE_MODE_SWITCH_RES_RETURN:
  3977. ASSERT(is_value(reg[0]));
  3978. MoveReturn(reg[0], r(0));
  3979. case HIPE_MODE_SWITCH_RES_CALL:
  3980. SET_I(c_p->i);
  3981. r(0) = reg[0];
  3982. Dispatch();
  3983. case HIPE_MODE_SWITCH_RES_CALL_CLOSURE:
  3984. /* This can be used to call any function value, but currently it's
  3985. only used to call closures referring to unloaded modules. */
  3986. {
  3987. Eterm *next;
  3988. next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE);
  3989. SWAPIN;
  3990. if (next != NULL) {
  3991. r(0) = reg[0];
  3992. SET_I(next);
  3993. Dispatchfun();
  3994. }
  3995. goto find_func_info;
  3996. }
  3997. case HIPE_MODE_SWITCH_RES_THROW:
  3998. c_p->cp = NULL;
  3999. I = handle_error(c_p, I, reg, NULL);
  4000. goto post_error_handling;
  4001. default:
  4002. erl_exit(1, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]);
  4003. }
  4004. }
  4005. OpCase(hipe_call_count): {
  4006. /*
  4007. * I[-5]: &&lb_i_func_info_IaaI
  4008. * I[-4]: pointer to struct hipe_call_count (inserted by HiPE)
  4009. * I[-3]: Module (tagged atom)
  4010. * I[-2]: Function (tagged atom)
  4011. * I[-1]: Arity (untagged integer)
  4012. * I[ 0]: &&lb_hipe_call_count
  4013. * ... remainder of original BEAM code
  4014. */
  4015. struct hipe_call_count *hcc = (struct hipe_call_count*)I[-4];
  4016. ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
  4017. ASSERT(hcc != NULL);
  4018. ASSERT(VALID_INSTR(hcc->opcode));
  4019. ++(hcc->count);
  4020. Goto(hcc->opcode);
  4021. }
  4022. #endif /* HIPE */
  4023. OpCase(i_yield):
  4024. {
  4025. /* This is safe as long as REDS_IN(c_p) is never stored
  4026. * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5],
  4027. * which may be c_p->arg_reg[5], which is close, but no banana.
  4028. */
  4029. c_p->arg_reg[0] = am_true;
  4030. c_p->arity = 1; /* One living register (the 'true' return value) */
  4031. SWAPOUT;
  4032. c_p->i = I + 1; /* Next instruction */
  4033. erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
  4034. add_to_schedule_q(c_p);
  4035. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
  4036. c_p->current = NULL;
  4037. goto do_schedule;
  4038. }
  4039. OpCase(i_hibernate): {
  4040. SWAPOUT;
  4041. if (hibernate(c_p, r(0), x(1), x(2), reg)) {
  4042. goto do_schedule;
  4043. } else {
  4044. I = handle_error(c_p, I, reg, hibernate_3);
  4045. goto post_error_handling;
  4046. }
  4047. }
  4048. OpCase(i_debug_breakpoint): {
  4049. SWAPOUT;
  4050. reg[0] = r(0);
  4051. tmp_arg1 = call_breakpoint_handler(c_p, I-3, reg);
  4052. r(0) = reg[0];
  4053. SWAPIN;
  4054. if (tmp_arg1) {
  4055. SET_I(c_p->i);
  4056. Dispatch();
  4057. }
  4058. goto no_error_handler;
  4059. }
  4060. OpCase(system_limit_j):
  4061. system_limit:
  4062. c_p->freason = SYSTEM_LIMIT;
  4063. goto lb_Cl_error;
  4064. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  4065. DEFINE_COUNTING_LABELS;
  4066. #endif
  4067. #ifndef NO_JUMP_TABLE
  4068. #ifdef DEBUG
  4069. end_emulator_loop:
  4070. #endif
  4071. #endif
  4072. OpCase(int_code_end):
  4073. OpCase(label_L):
  4074. OpCase(too_old_compiler):
  4075. erl_exit(1, "meta op\n");
  4076. /*
  4077. * One-time initialization of Beam emulator.
  4078. */
  4079. init_emulator:
  4080. {
  4081. int i;
  4082. Export* ep;
  4083. #ifndef NO_JUMP_TABLE
  4084. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  4085. /* Are tables correctly generated by beam_makeops? */
  4086. ASSERT(sizeof(counting_opcodes) == sizeof(opcodes));
  4087. if (count_instructions) {
  4088. #ifdef DEBUG
  4089. counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
  4090. #endif
  4091. counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
  4092. beam_ops = counting_opcodes;
  4093. }
  4094. else
4095. #endif /* ERTS_OPCODE_COUNTER_SUPPORT */
  4096. {
  4097. beam_ops = opcodes;
  4098. }
  4099. #endif /* NO_JUMP_TABLE */
  4100. em_call_error_handler = OpCode(call_error_handler);
  4101. em_call_traced_function = OpCode(call_traced_function);
  4102. em_apply_bif = OpCode(apply_bif);
  4103. beam_apply[0] = (Eterm) OpCode(i_apply);
  4104. beam_apply[1] = (Eterm) OpCode(normal_exit);
  4105. beam_exit[0] = (Eterm) OpCode(error_action_code);
  4106. beam_return_to_trace[0] = (Eterm) OpCode(i_return_to_trace);
  4107. beam_return_trace[0] = (Eterm) OpCode(return_trace);
  4108. beam_exception_trace[0] = (Eterm) OpCode(return_trace); /* UGLY */
  4109. /*
  4110. * Enter all BIFs into the export table.
  4111. */
  4112. for (i = 0; i < BIF_SIZE; i++) {
  4113. ep = erts_export_put(bif_table[i].module,
  4114. bif_table[i].name,
  4115. bif_table[i].arity);
  4116. bif_export[i] = ep;
  4117. ep->code[3] = (Eterm) OpCode(apply_bif);
  4118. ep->code[4] = (Eterm) bif_table[i].f;
  4119. }
  4120. return;
  4121. }
  4122. #ifdef NO_JUMP_TABLE
  4123. default:
  4124. erl_exit(1, "unexpected op code %d\n",Go);
  4125. }
  4126. #endif
  4127. return; /* Never executed */
  4128. save_calls1:
  4129. {
  4130. Eterm* dis_next;
  4131. save_calls(c_p, (Export *) Arg(0));
  4132. SET_I(((Export *) Arg(0))->address);
  4133. dis_next = (Eterm *) *I;
  4134. FCALLS--;
  4135. Goto(dis_next);
  4136. }
  4137. }
  4138. static BifFunction
  4139. translate_gc_bif(void* gcf)
  4140. {
  4141. if (gcf == erts_gc_length_1) {
  4142. return length_1;
  4143. } else if (gcf == erts_gc_size_1) {
  4144. return size_1;
  4145. } else if (gcf == erts_gc_bit_size_1) {
  4146. return bit_size_1;
  4147. } else if (gcf == erts_gc_byte_size_1) {
  4148. return byte_size_1;
  4149. } else if (gcf == erts_gc_abs_1) {
  4150. return abs_1;
  4151. } else if (gcf == erts_gc_float_1) {
  4152. return float_1;
  4153. } else if (gcf == erts_gc_round_1) {
  4154. return round_1;
  4155. } else if (gcf == erts_gc_trunc_1) {
4156. return trunc_1;
  4157. } else {
  4158. erl_exit(1, "bad gc bif");
  4159. }
  4160. }
  4161. /*
  4162. * Mapping from the error code 'class tag' to atoms.
  4163. */
  4164. Eterm exception_tag[NUMBER_EXC_TAGS] = {
  4165. am_error, /* 0 */
  4166. am_exit, /* 1 */
  4167. am_throw, /* 2 */
  4168. };
  4169. /*
  4170. * Mapping from error code 'index' to atoms.
  4171. */
  4172. Eterm error_atom[NUMBER_EXIT_CODES] = {
  4173. am_internal_error, /* 0 */
  4174. am_normal, /* 1 */
  4175. am_internal_error, /* 2 */
  4176. am_badarg, /* 3 */
  4177. am_badarith, /* 4 */
  4178. am_badmatch, /* 5 */
  4179. am_function_clause, /* 6 */
  4180. am_case_clause, /* 7 */
  4181. am_if_clause, /* 8 */
  4182. am_undef, /* 9 */
  4183. am_badfun, /* 10 */
  4184. am_badarity, /* 11 */
  4185. am_timeout_value, /* 12 */
  4186. am_noproc, /* 13 */
  4187. am_notalive, /* 14 */
  4188. am_system_limit, /* 15 */
  4189. am_try_clause, /* 16 */
  4190. };
  4191. /*
  4192. * To fully understand the error handling, one must keep in mind that
  4193. * when an exception is thrown, the search for a handler can jump back
  4194. * and forth between Beam and native code. Upon each mode switch, a
  4195. * dummy handler is inserted so that if an exception reaches that point,
  4196. * the handler is invoked (like any handler) and transfers control so
  4197. * that the search for a real handler is continued in the other mode.
  4198. * Therefore, c_p->freason and c_p->fvalue must still hold the exception
  4199. * info when the handler is executed, but normalized so that creation of
  4200. * error terms and saving of the stack trace is only done once, even if
  4201. * we pass through the error handling code several times.
  4202. *
  4203. * When a new exception is raised, the current stack trace information
  4204. * is quick-saved in a small structure allocated on the heap. Depending
  4205. * on how the exception is eventually caught (perhaps by causing the
  4206. * current process to terminate), the saved information may be used to
  4207. * create a symbolic (human-readable) representation of the stack trace
  4208. * at the point of the original exception.
  4209. */
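/*
 * A worked example of the flow described above (a sketch, not emulator
 * code): a throw that reaches the top level is promoted to an error.
 *
 *   c_p->freason = EXC_THROWN;   -- EXF_THROWN and EXF_SAVETRACE set
 *   c_p->fvalue  = Value;
 *   -- with no active catch, handle_error() below rewrites it as
 *   --   Value   := {nocatch, Value}
 *   --   freason := EXC_ERROR
 *
 * and only then searches the stack for a handler or terminates the
 * process.
 */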
  4210. static Eterm*
  4211. handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf)
  4212. {
  4213. Eterm* hp;
  4214. Eterm Value = c_p->fvalue;
  4215. Eterm Args = am_true;
  4216. c_p->i = pc; /* In case we call erl_exit(). */
  4217. ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
  4218. ASSERT(c_p->freason != RESCHEDULE); /* Should have been handled earlier. */
  4219. /*
  4220. * Check if we have an arglist for the top level call. If so, this
  4221. * is encoded in Value, so we have to dig out the real Value as well
  4222. * as the Arglist.
  4223. */
  4224. if (c_p->freason & EXF_ARGLIST) {
  4225. Eterm* tp;
  4226. ASSERT(is_tuple(Value));
  4227. tp = tuple_val(Value);
  4228. Value = tp[1];
  4229. Args = tp[2];
  4230. }
  4231. /*
  4232. * Save the stack trace info if the EXF_SAVETRACE flag is set. The
  4233. * main reason for doing this separately is to allow throws to later
  4234. * become promoted to errors without losing the original stack
  4235. * trace, even if they have passed through one or more catch and
  4236. * rethrow. It also makes the creation of symbolic stack traces much
  4237. * more modular.
  4238. */
  4239. if (c_p->freason & EXF_SAVETRACE) {
  4240. save_stacktrace(c_p, pc, reg, bf, Args);
  4241. }
  4242. /*
  4243. * Throws that are not caught are turned into 'nocatch' errors
  4244. */
  4245. if ((c_p->freason & EXF_THROWN) && (c_p->catches <= 0) ) {
  4246. hp = HAlloc(c_p, 3);
  4247. Value = TUPLE2(hp, am_nocatch, Value);
  4248. c_p->freason = EXC_ERROR;
  4249. }
  4250. /* Get the fully expanded error term */
  4251. Value = expand_error_value(c_p, c_p->freason, Value);
  4252. /* Save final error term and stabilize the exception flags so no
  4253. further expansion is done. */
  4254. c_p->fvalue = Value;
  4255. c_p->freason = PRIMARY_EXCEPTION(c_p->freason);
  4256. /* Find a handler or die */
  4257. if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
  4258. && !(c_p->freason & EXF_PANIC)) {
  4259. Eterm *new_pc;
  4260. /* The Beam handler code (catch_end or try_end) checks reg[0]
  4261. for THE_NON_VALUE to see if the previous code finished
  4262. abnormally. If so, reg[1], reg[2] and reg[3] should hold the
  4263. exception class, term and trace, respectively. (If the
  4264. handler is just a trap to native code, these registers will
  4265. be ignored.) */
  4266. reg[0] = THE_NON_VALUE;
  4267. reg[1] = exception_tag[GET_EXC_CLASS(c_p->freason)];
  4268. reg[2] = Value;
  4269. reg[3] = c_p->ftrace;
  4270. if ( (new_pc = next_catch(c_p, reg))) return new_pc;
  4271. if (c_p->catches > 0) erl_exit(1, "Catch not found");
  4272. }
  4273. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  4274. terminate_proc(c_p, Value);
  4275. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  4276. return NULL;
  4277. }
  4278. /*
  4279. * Find the nearest catch handler
  4280. */
  4281. static Eterm*
  4282. next_catch(Process* c_p, Eterm *reg) {
  4283. int active_catches = c_p->catches > 0;
  4284. int have_return_to_trace = 0;
  4285. Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
  4286. Uint i_return_trace = beam_return_trace[0];
  4287. Uint i_return_to_trace = beam_return_to_trace[0];
  4288. ptr = prev = c_p->stop;
  4289. ASSERT(is_CP(*ptr));
  4290. ASSERT(ptr <= STACK_START(c_p));
  4291. if (ptr == STACK_START(c_p)) return NULL;
  4292. if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
  4293. *cp_val(*ptr) != i_return_to_trace))
  4294. && c_p->cp) {
4295. /* Cannot follow cp here - code may be unloaded */
  4296. Uint *cpp = cp_val((Eterm) c_p->cp);
  4297. if (cpp == beam_exception_trace) {
  4298. erts_trace_exception(c_p, (Eterm*) ptr[0],
  4299. reg[1], reg[2], ptr+1);
  4300. /* Skip return_trace parameters */
  4301. ptr += 2;
  4302. } else if (cpp == beam_return_trace) {
  4303. /* Skip return_trace parameters */
  4304. ptr += 2;
  4305. } else if (cpp == beam_return_to_trace) {
  4306. have_return_to_trace = !0; /* Record next cp */
  4307. }
  4308. }
  4309. while (ptr < STACK_START(c_p)) {
  4310. if (is_catch(*ptr)) {
  4311. if (active_catches) goto found_catch;
  4312. ptr++;
  4313. }
  4314. else if (is_CP(*ptr)) {
  4315. prev = ptr;
  4316. if (*cp_val(*prev) == i_return_trace) {
  4317. /* Skip stack frame variables */
  4318. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  4319. if (is_catch(*ptr) && active_catches) goto found_catch;
  4320. }
  4321. if (cp_val(*prev) == beam_exception_trace) {
  4322. erts_trace_exception(c_p, (Eterm*) ptr[0],
  4323. reg[1], reg[2], ptr+1);
  4324. }
  4325. /* Skip return_trace parameters */
  4326. ptr += 2;
  4327. } else if (*cp_val(*prev) == i_return_to_trace) {
  4328. /* Skip stack frame variables */
  4329. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  4330. if (is_catch(*ptr) && active_catches) goto found_catch;
  4331. }
  4332. have_return_to_trace = !0; /* Record next cp */
  4333. return_to_trace_ptr = NULL;
  4334. } else {
  4335. if (have_return_to_trace) {
  4336. /* Record this cp as possible return_to trace cp */
  4337. have_return_to_trace = 0;
  4338. return_to_trace_ptr = ptr;
  4339. } else return_to_trace_ptr = NULL;
  4340. ptr++;
  4341. }
  4342. } else ptr++;
  4343. }
  4344. return NULL;
  4345. found_catch:
  4346. ASSERT(ptr < STACK_START(c_p));
  4347. c_p->stop = prev;
  4348. if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO) && return_to_trace_ptr) {
4349. /* The stack frame closest to the catch contained a
4350. * return_to_trace entry, so since execution now
4351. * continues after the catch, a return_to trace message
4352. * is appropriate.
4353. */
  4354. erts_trace_return_to(c_p, cp_val(*return_to_trace_ptr));
  4355. }
  4356. return catch_pc(*ptr);
  4357. }
  4358. /*
  4359. * Terminating the process when an exception is not caught
  4360. */
  4361. static void
  4362. terminate_proc(Process* c_p, Eterm Value)
  4363. {
  4364. /* Add a stacktrace if this is an error. */
  4365. if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
  4366. Value = add_stacktrace(c_p, Value, c_p->ftrace);
  4367. }
  4368. /* EXF_LOG is a primary exception flag */
  4369. if (c_p->freason & EXF_LOG) {
  4370. erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
  4371. erts_dsprintf(dsbufp, "Error in process %T ", c_p->id);
  4372. if (erts_is_alive())
  4373. erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
  4374. erts_dsprintf(dsbufp,"with exit value: %0.*T\n", display_items, Value);
  4375. erts_send_error_to_logger(c_p->group_leader, dsbufp);
  4376. }
  4377. /*
  4378. * If we use a shared heap, the process will be garbage-collected.
  4379. * Must zero c_p->arity to indicate that there are no live registers.
  4380. */
  4381. c_p->arity = 0;
  4382. erts_do_exit_process(c_p, Value);
  4383. }
  4384. /*
  4385. * Build and add a symbolic stack trace to the error value.
  4386. */
  4387. static Eterm
  4388. add_stacktrace(Process* c_p, Eterm Value, Eterm exc) {
  4389. Eterm Where = build_stacktrace(c_p, exc);
  4390. Eterm* hp = HAlloc(c_p, 3);
  4391. return TUPLE2(hp, Value, Where);
  4392. }
  4393. /*
  4394. * Forming the correct error value from the internal error code.
  4395. * This does not update c_p->fvalue or c_p->freason.
  4396. */
  4397. Eterm
  4398. expand_error_value(Process* c_p, Uint freason, Eterm Value) {
  4399. Eterm* hp;
  4400. Uint r;
  4401. r = GET_EXC_INDEX(freason);
  4402. ASSERT(r < NUMBER_EXIT_CODES); /* range check */
  4403. ASSERT(is_value(Value));
  4404. switch (r) {
  4405. case (GET_EXC_INDEX(EXC_PRIMARY)):
  4406. /* Primary exceptions use fvalue as it is */
  4407. break;
  4408. case (GET_EXC_INDEX(EXC_BADMATCH)):
  4409. case (GET_EXC_INDEX(EXC_CASE_CLAUSE)):
  4410. case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
  4411. case (GET_EXC_INDEX(EXC_BADFUN)):
  4412. case (GET_EXC_INDEX(EXC_BADARITY)):
  4413. /* Some common exceptions: value -> {atom, value} */
  4414. ASSERT(is_value(Value));
  4415. hp = HAlloc(c_p, 3);
  4416. Value = TUPLE2(hp, error_atom[r], Value);
  4417. break;
  4418. default:
  4419. /* Other exceptions just use an atom as descriptor */
  4420. Value = error_atom[r];
  4421. break;
  4422. }
  4423. #ifdef DEBUG
  4424. ASSERT(Value != am_internal_error);
  4425. #endif
  4426. return Value;
  4427. }
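/*
 * Examples of the mapping above (a sketch): a badmatch on value V
 * expands to {badmatch, V} and a badarity to {badarity, V}, while e.g.
 * badarg expands to just the atom 'badarg'.
 */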
  4428. /*
  4429. * Quick-saving the stack trace in an internal form on the heap. Note
  4430. * that c_p->ftrace will point to a cons cell which holds the given args
  4431. * and the saved data (encoded as a bignum).
  4432. *
  4433. * (It would be much better to put the arglist - when it exists - in the
  4434. * error value instead of in the actual trace; e.g. '{badarg, Args}'
  4435. * instead of using 'badarg' with Args in the trace. The arglist may
  4436. * contain very large values, and right now they will be kept alive as
  4437. * long as the stack trace is live. Preferably, the stack trace should
  4438. * always be small, so that it does not matter if it is long-lived.
  4439. * However, it is probably not possible to ever change the format of
  4440. * error terms.)
  4441. */
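/*
 * Sketch of the container built below (illustrative): for a backtrace
 * depth of 8, the StackTrace struct is sized in words as
 *
 *   sz = (offsetof(struct StackTrace, trace) + 8*sizeof(Eterm)
 *         + sizeof(Eterm) - 1) / sizeof(Eterm);
 *
 * and hidden inside a bignum header so that the garbage collector
 * treats it as opaque data:
 *
 *   c_p->ftrace = CONS(hp, args, make_big((Eterm *) s));
 */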
  4442. static void
  4443. save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
  4444. Eterm args) {
  4445. struct StackTrace* s;
  4446. int sz;
  4447. int depth = erts_backtrace_depth; /* max depth (never negative) */
  4448. if (depth > 0) {
  4449. /* There will always be a current function */
  4450. depth --;
  4451. }
  4452. /* Create a container for the exception data */
  4453. sz = (offsetof(struct StackTrace, trace) + sizeof(Eterm)*depth
  4454. + sizeof(Eterm) - 1) / sizeof(Eterm);
  4455. s = (struct StackTrace *) HAlloc(c_p, 1 + sz);
  4456. /* The following fields are inside the bignum */
  4457. s->header = make_pos_bignum_header(sz);
  4458. s->freason = c_p->freason;
  4459. s->depth = 0;
  4460. /*
  4461. * If the failure was in a BIF other than 'error', 'exit' or
  4462. * 'throw', find the bif-table index and save the argument
  4463. * registers by consing up an arglist.
  4464. */
  4465. if (bf != NULL && bf != error_1 && bf != error_2
  4466. && bf != fault_1 && bf != fault_2
  4467. && bf != exit_1 && bf != throw_1) {
  4468. int i;
  4469. int a = 0;
  4470. for (i = 0; i < BIF_SIZE; i++) {
  4471. if (bf == bif_table[i].f || bf == bif_table[i].traced) {
  4472. Export *ep = bif_export[i];
  4473. s->current = ep->code;
  4474. a = bif_table[i].arity;
  4475. break;
  4476. }
  4477. }
  4478. if (i >= BIF_SIZE) {
  4479. /*
4480. * The BIF does not really exist (no BIF-table entry). It is a
4481. * TRAP, and traps are called through apply_bif, which also
4482. * sets c_p->current (luckily).
  4483. */
  4484. ASSERT(c_p->current);
  4485. s->current = c_p->current;
  4486. a = s->current[2];
  4487. ASSERT(s->current[2] <= 3);
  4488. }
  4489. /* Save first stack entry */
  4490. ASSERT(pc);
  4491. if (depth > 0) {
  4492. s->trace[s->depth++] = pc;
  4493. depth--;
  4494. }
  4495. s->pc = NULL;
  4496. args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
  4497. } else {
  4498. s->current = c_p->current;
  4499. /*
  4500. * For a function_clause error, the arguments are in the beam
  4501. * registers, c_p->cp is valid, and c_p->current is set.
  4502. */
  4503. if ( (GET_EXC_INDEX(s->freason)) ==
  4504. (GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
  4505. int a;
  4506. ASSERT(s->current);
  4507. a = s->current[2];
  4508. args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
  4509. /* Save first stack entry */
  4510. ASSERT(c_p->cp);
  4511. if (depth > 0) {
  4512. s->trace[s->depth++] = c_p->cp;
  4513. depth--;
  4514. }
  4515. s->pc = NULL; /* Ignore pc */
  4516. } else {
  4517. s->pc = pc;
  4518. }
  4519. }
  4520. /* Package args and stack trace */
  4521. {
  4522. Eterm *hp;
  4523. hp = HAlloc(c_p, 2);
  4524. c_p->ftrace = CONS(hp, args, make_big((Eterm *) s));
  4525. }
  4526. /* Save the actual stack trace */
  4527. if (depth > 0) {
  4528. Eterm *ptr, *prev = s->depth ? s->trace[s->depth-1] : NULL;
  4529. Uint i_return_trace = beam_return_trace[0];
  4530. Uint i_return_to_trace = beam_return_to_trace[0];
  4531. /*
  4532. * Traverse the stack backwards and add all unique continuation
  4533. * pointers to the buffer, up to the maximum stack trace size.
  4534. *
  4535. * Skip trace stack frames.
  4536. */
  4537. ptr = c_p->stop;
  4538. if (ptr < STACK_START(c_p)
4539. && (is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
  4540. *cp_val(*ptr) != i_return_to_trace))
  4541. && c_p->cp) {
4542. /* Cannot follow cp here - code may be unloaded */
  4543. Uint *cpp = cp_val((Eterm) c_p->cp);
  4544. if (cpp == beam_exception_trace || cpp == beam_return_trace) {
  4545. /* Skip return_trace parameters */
  4546. ptr += 2;
  4547. } else if (cpp == beam_return_to_trace) {
  4548. /* Skip return_to_trace parameters */
  4549. ptr += 1;
  4550. }
  4551. }
  4552. while (ptr < STACK_START(c_p) && depth > 0) {
  4553. if (is_CP(*ptr)) {
  4554. if (*cp_val(*ptr) == i_return_trace) {
  4555. /* Skip stack frame variables */
  4556. do ++ptr; while (is_not_CP(*ptr));
  4557. /* Skip return_trace parameters */
  4558. ptr += 2;
  4559. } else if (*cp_val(*ptr) == i_return_to_trace) {
  4560. /* Skip stack frame variables */
  4561. do ++ptr; while (is_not_CP(*ptr));
  4562. } else {
  4563. Eterm *cp = (Eterm *)(*ptr);
  4564. if (cp != prev) {
  4565. /* Record non-duplicates only */
  4566. prev = cp;
  4567. s->trace[s->depth++] = cp;
  4568. depth--;
  4569. }
  4570. ptr++;
  4571. }
  4572. } else ptr++;
  4573. }
  4574. }
  4575. }
  4576. /*
  4577. * Getting the relevant fields from the term pointed to by ftrace
  4578. */
  4579. static struct StackTrace *get_trace_from_exc(Eterm exc) {
  4580. if (exc == NIL) {
  4581. return NULL;
  4582. } else {
  4583. ASSERT(is_list(exc));
  4584. return (struct StackTrace *) big_val(CDR(list_val(exc)));
  4585. }
  4586. }
  4587. static Eterm get_args_from_exc(Eterm exc) {
  4588. if (exc == NIL) {
  4589. return NIL;
  4590. } else {
  4591. ASSERT(is_list(exc));
  4592. return CAR(list_val(exc));
  4593. }
  4594. }
  4595. static int is_raised_exc(Eterm exc) {
  4596. if (exc == NIL) {
  4597. return 0;
  4598. } else {
  4599. ASSERT(is_list(exc));
  4600. return bignum_header_is_neg(*big_val(CDR(list_val(exc))));
  4601. }
  4602. }
  4603. /*
  4604. * Creating a list with the argument registers
  4605. */
  4606. static Eterm
  4607. make_arglist(Process* c_p, Eterm* reg, int a) {
  4608. Eterm args = NIL;
  4609. Eterm* hp = HAlloc(c_p, 2*a);
  4610. while (a > 0) {
  4611. args = CONS(hp, reg[a-1], args);
  4612. hp += 2;
  4613. a--;
  4614. }
  4615. return args;
  4616. }
  4617. /*
  4618. * Building a symbolic representation of a saved stack trace. Note that
  4619. * the exception object 'exc', unless NIL, points to a cons cell which
  4620. * holds the given args and the quick-saved data (encoded as a bignum).
  4621. *
  4622. * If the bignum is negative, the given args is a complete stacktrace.
  4623. */
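/*
 * The symbolic form produced below is a list of MFA tuples, e.g.
 * (hypothetical sketch) for a badarg in lists:reverse/1 called from
 * foo:bar/0:
 *
 *   [{lists,reverse,[Arg]}, {foo,bar,0}, ...]
 *
 * where the first entry may carry the saved argument list instead of
 * the arity.
 */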
  4624. Eterm
  4625. build_stacktrace(Process* c_p, Eterm exc) {
  4626. struct StackTrace* s;
  4627. Eterm args;
  4628. int depth;
  4629. Eterm* current;
  4630. Eterm Where = NIL;
  4631. Eterm* next_p = &Where;
  4632. if (! (s = get_trace_from_exc(exc))) {
  4633. return NIL;
  4634. }
  4635. #ifdef HIPE
  4636. if (s->freason & EXF_NATIVE) {
  4637. return hipe_build_stacktrace(c_p, s);
  4638. }
  4639. #endif
  4640. if (is_raised_exc(exc)) {
  4641. return get_args_from_exc(exc);
  4642. }
  4643. /*
  4644. * Find the current function. If the saved s->pc is null, then the
  4645. * saved s->current should already contain the proper value.
  4646. */
  4647. if (s->pc != NULL) {
  4648. current = find_function_from_pc(s->pc);
  4649. } else {
  4650. current = s->current;
  4651. }
  4652. /*
  4653. * If current is still NULL, default to the initial function
  4654. * (e.g. spawn_link(erlang, abs, [1])).
  4655. */
  4656. if (current == NULL) {
  4657. current = c_p->initial;
  4658. args = am_true; /* Just in case */
  4659. } else {
  4660. args = get_args_from_exc(exc);
  4661. }
  4662. depth = s->depth;
  4663. /*
  4664. * Add the {M,F,A} for the current function
  4665. * (where A is arity or [Argument]).
  4666. */
  4667. {
  4668. int i;
  4669. Eterm mfa;
  4670. Uint heap_size = 6*(depth+1);
  4671. Eterm* hp = HAlloc(c_p, heap_size);
  4672. Eterm* hp_end = hp + heap_size;
  4673. if (args != am_true) {
  4674. /* We have an arglist - use it */
  4675. mfa = TUPLE3(hp, current[0], current[1], args);
  4676. } else {
  4677. Eterm arity = make_small(current[2]);
  4678. mfa = TUPLE3(hp, current[0], current[1], arity);
  4679. }
  4680. hp += 4;
  4681. ASSERT(*next_p == NIL);
  4682. *next_p = CONS(hp, mfa, NIL);
  4683. next_p = &CDR(list_val(*next_p));
  4684. hp += 2;
  4685. /*
  4686. * Finally, we go through the saved continuation pointers.
  4687. */
  4688. for (i = 0; i < depth; i++) {
  4689. Eterm *fi = find_function_from_pc((Eterm *) s->trace[i]);
  4690. if (fi == NULL) continue;
  4691. mfa = TUPLE3(hp, fi[0], fi[1], make_small(fi[2]));
  4692. hp += 4;
  4693. ASSERT(*next_p == NIL);
  4694. *next_p = CONS(hp, mfa, NIL);
  4695. next_p = &CDR(list_val(*next_p));
  4696. hp += 2;
  4697. }
  4698. ASSERT(hp <= hp_end);
  4699. HRelease(c_p, hp_end, hp);
  4700. }
  4701. return Where;
  4702. }
  4703. static Eterm
  4704. call_error_handler(Process* p, Eterm* fi, Eterm* reg)
  4705. {
  4706. Eterm* hp;
  4707. Export* ep;
  4708. int arity;
  4709. Eterm args;
  4710. Uint sz;
  4711. int i;
  4712. /*
  4713. * Search for the error_handler module.
  4714. */
  4715. ep = erts_find_function(p->error_handler, am_undefined_function, 3);
  4716. if (ep == NULL) { /* No error handler */
  4717. p->current = fi;
  4718. p->freason = EXC_UNDEF;
  4719. return 0;
  4720. }
  4721. p->i = ep->address;
  4722. /*
  4723. * Create a list with all arguments in the x registers.
  4724. */
  4725. arity = fi[2];
  4726. sz = 2 * arity;
  4727. if (HeapWordsLeft(p) < sz) {
  4728. erts_garbage_collect(p, sz, reg, arity);
  4729. }
  4730. hp = HEAP_TOP(p);
  4731. HEAP_TOP(p) += sz;
  4732. args = NIL;
  4733. for (i = arity-1; i >= 0; i--) {
  4734. args = CONS(hp, reg[i], args);
  4735. hp += 2;
  4736. }
  4737. /*
  4738. * Set up registers for call to error_handler:undefined_function/3.
  4739. */
  4740. reg[0] = fi[0];
  4741. reg[1] = fi[1];
  4742. reg[2] = args;
  4743. return 1;
  4744. }
  4745. static Eterm
  4746. call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg)
  4747. {
  4748. Eterm* hp;
  4749. Export* ep;
  4750. int arity;
  4751. Eterm args;
  4752. Uint sz;
  4753. int i;
  4754. /*
  4755. * Search for error handler module.
  4756. */
  4757. ep = erts_find_function(p->error_handler, am_breakpoint, 3);
  4758. if (ep == NULL) { /* No error handler */
  4759. p->current = fi;
  4760. p->freason = EXC_UNDEF;
  4761. return 0;
  4762. }
  4763. p->i = ep->address;
  4764. /*
  4765. * Create a list with all arguments in the x registers.
  4766. */
  4767. arity = fi[2];
  4768. sz = 2 * arity;
  4769. if (HeapWordsLeft(p) < sz) {
  4770. erts_garbage_collect(p, sz, reg, arity);
  4771. }
  4772. hp = HEAP_TOP(p);
  4773. HEAP_TOP(p) += sz;
  4774. args = NIL;
  4775. for (i = arity-1; i >= 0; i--) {
  4776. args = CONS(hp, reg[i], args);
  4777. hp += 2;
  4778. }
  4779. /*
  4780. * Set up registers for call to error_handler:breakpoint/3.
  4781. */
  4782. reg[0] = fi[0];
  4783. reg[1] = fi[1];
  4784. reg[2] = args;
  4785. return 1;
  4786. }
  4787. static Export*
  4788. apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
  4789. {
  4790. Export* ep;
  4791. /*
  4792. * Find the export table index for the error handler. Return NULL if
  4793. * there is no error handler module.
  4794. */
  4795. if ((ep = erts_find_export_entry(p->error_handler,
  4796. am_undefined_function, 3)) == NULL) {
  4797. return NULL;
  4798. } else {
  4799. int i;
  4800. Uint sz = 2*arity;
  4801. Eterm* hp;
  4802. Eterm args = NIL;
  4803. /*
  4804. * Always copy args from registers to a new list; this ensures
4805. * that we have the same behaviour whether this was
4806. * called from apply or fixed_apply (any additional last
  4807. * THIS-argument will be included, assuming that arity has been
  4808. * properly adjusted).
  4809. */
  4810. if (HeapWordsLeft(p) < sz) {
  4811. erts_garbage_collect(p, sz, reg, arity);
  4812. }
  4813. hp = HEAP_TOP(p);
  4814. HEAP_TOP(p) += sz;
  4815. for (i = arity-1; i >= 0; i--) {
  4816. args = CONS(hp, reg[i], args);
  4817. hp += 2;
  4818. }
  4819. reg[0] = module;
  4820. reg[1] = function;
  4821. reg[2] = args;
  4822. }
  4823. return ep;
  4824. }
  4825. static Uint*
  4826. apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
  4827. {
  4828. int arity;
  4829. Export* ep;
  4830. Eterm tmp, this;
  4831. /*
  4832. * Check the arguments which should be of the form apply(Module,
  4833. * Function, Arguments) where Function is an atom and
  4834. * Arguments is an arity long list of terms.
  4835. */
  4836. if (is_not_atom(function)) {
  4837. /*
  4838. * No need to test args here -- done below.
  4839. */
  4840. error:
  4841. p->freason = BADARG;
  4842. error2:
  4843. reg[0] = module;
  4844. reg[1] = function;
  4845. reg[2] = args;
  4846. return 0;
  4847. }
  4848. /* The module argument may be either an atom or an abstract module
  4849. * (currently implemented using tuples, but this might change).
  4850. */
  4851. this = THE_NON_VALUE;
  4852. if (is_not_atom(module)) {
  4853. Eterm* tp;
  4854. if (is_not_tuple(module)) goto error;
  4855. tp = tuple_val(module);
  4856. if (arityval(tp[0]) < 1) goto error;
  4857. this = module;
  4858. module = tp[1];
  4859. if (is_not_atom(module)) goto error;
  4860. }
  4861. /*
  4862. * Walk down the 3rd parameter of apply (the argument list) and copy
  4863. * the parameters to the x registers (reg[]). If the module argument
  4864. * was an abstract module, add 1 to the function arity and put the
  4865. * module argument in the n+1st x register as a THIS reference.
  4866. */
  4867. tmp = args;
  4868. arity = 0;
  4869. while (is_list(tmp)) {
  4870. if (arity < (MAX_REG - 1)) {
  4871. reg[arity++] = CAR(list_val(tmp));
  4872. tmp = CDR(list_val(tmp));
  4873. } else {
  4874. p->freason = SYSTEM_LIMIT;
  4875. goto error2;
  4876. }
  4877. }
  4878. if (is_not_nil(tmp)) { /* Must be well-formed list */
  4879. goto error;
  4880. }
  4881. if (this != THE_NON_VALUE) {
  4882. reg[arity++] = this;
  4883. }
  4884. /*
  4885. * Get the index into the export table, or failing that the export
  4886. * entry for the error handler.
  4887. *
  4888. * Note: All BIFs have export entries; thus, no special case is needed.
  4889. */
  4890. if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
  4891. if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL) goto error;
  4892. } else if (p->ct != NULL) {
  4893. save_calls(p, ep);
  4894. }
  4895. return ep->address;
  4896. }
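/*
 * Illustrative example of the register setup performed above (a sketch):
 * apply(m, f, [A,B]) leaves reg[0]=A, reg[1]=B and returns the address
 * of m:f/2, while apply({m,State}, f, [A,B]) leaves reg[0]=A, reg[1]=B,
 * reg[2]={m,State} (the abstract-module tuple appended as THIS) and
 * looks up m:f/3 instead.
 */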
  4897. static Uint*
  4898. fixed_apply(Process* p, Eterm* reg, Uint arity)
  4899. {
  4900. Export* ep;
  4901. Eterm module;
  4902. Eterm function;
  4903. module = reg[arity]; /* The THIS pointer already in place */
  4904. function = reg[arity+1];
  4905. if (is_not_atom(function)) {
  4906. error:
  4907. p->freason = BADARG;
  4908. reg[0] = module;
  4909. reg[1] = function;
  4910. reg[2] = NIL;
  4911. return 0;
  4912. }
  4913. /* The module argument may be either an atom or an abstract module
  4914. * (currently implemented using tuples, but this might change).
  4915. */
  4916. if (is_not_atom(module)) {
  4917. Eterm* tp;
  4918. if (is_not_tuple(module)) goto error;
  4919. tp = tuple_val(module);
  4920. if (arityval(tp[0]) < 1) goto error;
  4921. module = tp[1];
  4922. if (is_not_atom(module)) goto error;
  4923. ++arity;
  4924. }
  4925. /*
  4926. * Get the index into the export table, or failing that the export
  4927. * entry for the error handler module.
  4928. *
  4929. * Note: All BIFs have export entries; thus, no special case is needed.
  4930. */
  4931. if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
  4932. if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
  4933. goto error;
  4934. } else if (p->ct != NULL) {
  4935. save_calls(p, ep);
  4936. }
  4937. return ep->address;
  4938. }
  4939. static int
  4940. hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
  4941. {
  4942. int arity;
  4943. Eterm tmp;
  4944. if (is_not_atom(module) || is_not_atom(function)) {
  4945. /*
  4946. * No need to test args here -- done below.
  4947. */
  4948. error:
  4949. c_p->freason = BADARG;
  4950. error2:
  4951. reg[0] = module;
  4952. reg[1] = function;
  4953. reg[2] = args;
  4954. return 0;
  4955. }
  4956. arity = 0;
  4957. tmp = args;
  4958. while (is_list(tmp)) {
  4959. if (arity < MAX_REG) {
  4960. tmp = CDR(list_val(tmp));
  4961. arity++;
  4962. } else {
  4963. c_p->freason = SYSTEM_LIMIT;
  4964. goto error2;
  4965. }
  4966. }
  4967. if (is_not_nil(tmp)) { /* Must be well-formed list */
  4968. goto error;
  4969. }
  4970. /*
  4971. * At this point, arguments are known to be good.
  4972. */
  4973. if (c_p->arg_reg != c_p->def_arg_reg) {
  4974. /* Save some memory */
  4975. ERTS_PROC_LESS_MEM(c_p->max_arg_reg * sizeof(c_p->arg_reg[0]));
  4976. erts_free(ERTS_ALC_T_ARG_REG, c_p->arg_reg);
  4977. c_p->arg_reg = c_p->def_arg_reg;
  4978. c_p->max_arg_reg = sizeof(c_p->def_arg_reg)/sizeof(c_p->def_arg_reg[0]);
  4979. }
  4980. /*
  4981. * Arrange for the process to be resumed at the given MFA with
  4982. * the stack cleared.
  4983. */
  4984. c_p->arity = 3;
  4985. c_p->arg_reg[0] = module;
  4986. c_p->arg_reg[1] = function;
  4987. c_p->arg_reg[2] = args;
  4988. c_p->stop = STACK_START(c_p);
  4989. c_p->catches = 0;
  4990. c_p->i = beam_apply;
  4991. c_p->cp = (Eterm *) beam_apply+1;
  4992. /*
  4993. * If there are no waiting messages, garbage collect and
  4994. * shrink the heap.
  4995. */
  4996. erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
  4997. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  4998. if (c_p->msg.len > 0) {
  4999. add_to_schedule_q(c_p);
  5000. } else {
  5001. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
  5002. c_p->fvalue = NIL;
  5003. PROCESS_MAIN_CHK_LOCKS(c_p);
  5004. erts_garbage_collect_hibernate(c_p);
  5005. PROCESS_MAIN_CHK_LOCKS(c_p);
  5006. erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
  5007. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  5008. c_p->status = P_WAITING;
  5009. #ifdef ERTS_SMP
  5010. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  5011. if (c_p->msg.len > 0)
  5012. add_to_schedule_q(c_p);
  5013. #endif
  5014. }
  5015. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
  5016. c_p->current = bif_export[BIF_hibernate_3]->code;
  5017. return 1;
  5018. }
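/*
 * Net effect of hibernate() (a sketch): the process is left as if it
 * had been spawned fresh with apply(module, function, args):
 *
 *   c_p->i    = beam_apply;        -- restart through the apply trampoline
 *   c_p->stop = STACK_START(c_p);  -- stack discarded
 *   c_p->catches = 0;              -- no surviving catch contexts
 *
 * and the heap is shrunk via erts_garbage_collect_hibernate() unless
 * messages are already waiting.
 */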
  5019. static Uint*
  5020. call_fun(Process* p, /* Current process. */
  5021. int arity, /* Number of arguments for Fun. */
  5022. Eterm* reg, /* Contents of registers. */
  5023. Eterm args) /* THE_NON_VALUE or pre-built list of arguments. */
  5024. {
  5025. Eterm fun = reg[arity];
  5026. Eterm hdr;
  5027. int i;
  5028. Eterm function;
  5029. Eterm* hp;
  5030. if (!is_boxed(fun)) {
  5031. goto badfun;
  5032. }
  5033. hdr = *boxed_val(fun);
  5034. if (is_fun_header(hdr)) {
  5035. ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
  5036. ErlFunEntry* fe;
  5037. Eterm* code_ptr;
  5038. Eterm* var_ptr;
  5039. int actual_arity;
  5040. unsigned num_free;
  5041. fe = funp->fe;
  5042. num_free = funp->num_free;
  5043. code_ptr = fe->address;
  5044. actual_arity = (int) code_ptr[-1];
  5045. if (actual_arity == arity+num_free) {
  5046. if (num_free == 0) {
  5047. return code_ptr;
  5048. } else {
  5049. var_ptr = funp->env;
  5050. reg += arity;
  5051. i = 0;
  5052. do {
  5053. reg[i] = var_ptr[i];
  5054. i++;
  5055. } while (i < num_free);
  5056. reg[i] = fun;
  5057. return code_ptr;
  5058. }
  5060. } else {
  5061. /*
  5062. * Something wrong here. First build a list of the arguments.
  5063. */
  5064. if (is_non_value(args)) {
  5065. Uint sz = 2 * arity;
  5066. args = NIL;
  5067. if (HeapWordsLeft(p) < sz) {
  5068. erts_garbage_collect(p, sz, reg, arity+1);
  5069. fun = reg[arity];
  5070. }
  5071. hp = HEAP_TOP(p);
  5072. HEAP_TOP(p) += sz;
  5073. for (i = arity-1; i >= 0; i--) {
  5074. args = CONS(hp, reg[i], args);
  5075. hp += 2;
  5076. }
  5077. }
  5078. if (actual_arity >= 0) {
  5079. /*
  5080. * There is a fun defined, but the call has the wrong arity.
  5081. */
  5082. hp = HAlloc(p, 3);
  5083. p->freason = EXC_BADARITY;
  5084. p->fvalue = TUPLE2(hp, fun, args);
  5085. return NULL;
  5086. } else {
  5087. Export* ep;
  5088. Module* modp;
  5089. Eterm module;
  5090. /*
  5091. * No arity. There is no module loaded that defines the fun,
  5092. * either because the fun is newly created from the external
  5093. * representation (the module has never been loaded),
  5094. * or the module defining the fun has been unloaded.
  5095. */
  5096. module = fe->module;
  5097. if ((modp = erts_get_module(module)) != NULL && modp->code != NULL) {
  5098. /*
  5099. * There is a module loaded, but obviously the fun is not
  5100. * defined in it. We must not call the error_handler
  5101. * (or we will get into an infinite loop).
  5102. */
  5103. goto badfun;
  5104. }
  5105. /*
  5106. * No current code for this module. Call the error_handler module
  5107. * to attempt loading the module.
  5108. */
  5109. ep = erts_find_function(p->error_handler, am_undefined_lambda, 3);
  5110. if (ep == NULL) { /* No error handler */
  5111. p->current = NULL;
  5112. p->freason = EXC_UNDEF;
  5113. return NULL;
  5114. }
  5115. reg[0] = module;
  5116. reg[1] = fun;
  5117. reg[2] = args;
  5118. return ep->address;
  5119. }
  5120. }
  5121. } else if (is_export_header(hdr)) {
  5122. Export* ep = (Export *) (export_val(fun))[1];
  5123. int actual_arity = (int) ep->code[2];
  5124. if (arity == actual_arity) {
  5125. return ep->address;
  5126. } else {
  5127. /*
  5128. * Wrong arity. First build a list of the arguments.
  5129. */
  5130. if (is_non_value(args)) {
  5131. args = NIL;
  5132. hp = HAlloc(p, arity*2);
  5133. for (i = arity-1; i >= 0; i--) {
  5134. args = CONS(hp, reg[i], args);
  5135. hp += 2;
  5136. }
  5137. }
  5138. hp = HAlloc(p, 3);
  5139. p->freason = EXC_BADARITY;
  5140. p->fvalue = TUPLE2(hp, fun, args);
  5141. return NULL;
  5142. }
  5143. } else if (hdr == make_arityval(2)) {
  5144. Eterm* tp;
  5145. Export* ep;
  5146. Eterm module;
  5147. tp = tuple_val(fun);
  5148. module = tp[1];
  5149. function = tp[2];
  5150. if (!is_atom(module) || !is_atom(function)) {
  5151. goto badfun;
  5152. }
  5153. if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
  5154. ep = erts_find_export_entry(p->error_handler, am_undefined_function, 3);
  5155. if (ep == NULL) {
  5156. p->freason = EXC_UNDEF;
  5157. return 0;
  5158. }
  5159. if (is_non_value(args)) {
  5160. Uint sz = 2 * arity;
  5161. if (HeapWordsLeft(p) < sz) {
  5162. erts_garbage_collect(p, sz, reg, arity);
  5163. }
  5164. hp = HEAP_TOP(p);
  5165. HEAP_TOP(p) += sz;
  5166. args = NIL;
  5167. while (arity-- > 0) {
  5168. args = CONS(hp, reg[arity], args);
  5169. hp += 2;
  5170. }
  5171. }
  5172. reg[0] = module;
  5173. reg[1] = function;
  5174. reg[2] = args;
  5175. }
  5176. return ep->address;
  5177. } else {
  5178. badfun:
  5179. p->current = NULL;
  5180. p->freason = EXC_BADFUN;
  5181. p->fvalue = fun;
  5182. return NULL;
  5183. }
  5184. }
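/*
 * Register layout handled by call_fun() above (illustrative): for a fun
 * F with two free variables V1,V2 called as F(A1,A2), the caller loads
 *
 *   reg[0]=A1, reg[1]=A2, reg[2]=F
 *
 * and call_fun() appends the environment so the callee sees
 *
 *   reg[0]=A1, reg[1]=A2, reg[2]=V1, reg[3]=V2, reg[4]=F
 *
 * before jumping to the fun's code, whose real arity is arity+num_free.
 */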
  5185. static Eterm*
  5186. apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
  5187. {
  5188. int arity;
  5189. Eterm tmp;
  5190. /*
  5191. * Walk down the 3rd parameter of apply (the argument list) and copy
  5192. * the parameters to the x registers (reg[]).
  5193. */
  5194. tmp = args;
  5195. arity = 0;
  5196. while (is_list(tmp)) {
  5197. if (arity < MAX_REG-1) {
  5198. reg[arity++] = CAR(list_val(tmp));
  5199. tmp = CDR(list_val(tmp));
  5200. } else {
  5201. p->freason = SYSTEM_LIMIT;
  5202. return NULL;
  5203. }
  5204. }
  5205. if (is_not_nil(tmp)) { /* Must be well-formed list */
  5206. p->freason = EXC_UNDEF;
  5207. return NULL;
  5208. }
  5209. reg[arity] = fun;
  5210. return call_fun(p, arity, reg, args);
  5211. }
  5212. static Eterm
  5213. new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
  5214. {
  5215. unsigned needed = ERL_FUN_SIZE + num_free;
  5216. ErlFunThing* funp;
  5217. Eterm* hp;
  5218. int i;
  5219. if (HEAP_LIMIT(p) - HEAP_TOP(p) <= needed) {
  5220. PROCESS_MAIN_CHK_LOCKS(p);
  5221. erts_garbage_collect(p, needed, reg, num_free);
  5222. PROCESS_MAIN_CHK_LOCKS(p);
  5223. }
  5224. hp = p->htop;
  5225. p->htop = hp + needed;
  5226. funp = (ErlFunThing *) hp;
  5227. hp = funp->env;
  5228. erts_refc_inc(&fe->refc, 2);
  5229. funp->thing_word = HEADER_FUN;
  5230. #ifndef HYBRID /* FIND ME! */
  5231. funp->next = MSO(p).funs;
  5232. MSO(p).funs = funp;
  5233. #endif
  5234. funp->fe = fe;
  5235. funp->num_free = num_free;
  5236. funp->creator = p->id;
  5237. #ifdef HIPE
  5238. funp->native_address = fe->native_address;
  5239. #endif
  5240. funp->arity = (int)fe->address[-1] - num_free;
  5241. for (i = 0; i < num_free; i++) {
  5242. *hp++ = reg[i];
  5243. }
  5244. return make_fun(funp);
  5245. }
  5246. int catchlevel(Process *p)
  5247. {
  5248. return p->catches;
  5249. }
  5250. /*
  5251. * Check if the given function is built-in (i.e. a BIF implemented in C).
  5252. *
  5253. * Returns 0 if not built-in, and a non-zero value if built-in.
  5254. */
  5255. int
  5256. erts_is_builtin(Eterm Mod, Eterm Name, int arity)
  5257. {
  5258. Export e;
  5259. Export* ep;
  5260. e.code[0] = Mod;
  5261. e.code[1] = Name;
  5262. e.code[2] = arity;
  5263. if ((ep = export_get(&e)) == NULL) {
  5264. return 0;
  5265. }
  5266. return ep->address == ep->code+3 && (ep->code[3] == (Uint) em_apply_bif);
  5267. }
  5268. /*
  5269. * Return the current number of reductions for the given process.
  5270. * To get the total number of reductions, p->reds must be added.
  5271. */
  5272. Uint
  5273. erts_current_reductions(Process *current, Process *p)
  5274. {
  5275. if (current != p) {
  5276. return 0;
  5277. } else if (current->fcalls < 0 && current->ct != NULL) {
  5278. return -current->fcalls;
  5279. } else {
  5280. return REDS_IN(current) - current->fcalls;
  5281. }
  5282. }