
/erts/emulator/beam/beam_emu.c

https://github.com/system/erlang-otp
C | 5921 lines | 4453 code | 636 blank | 832 comment | 852 complexity | a1d0c5b6e5b28b84d7bfa68f8386ca83 MD5
Possible License(s): GPL-2.0, LGPL-2.0, LGPL-2.1

Large files are truncated; only the beginning of the file is shown.

  1. /* ``The contents of this file are subject to the Erlang Public License,
  2. * Version 1.1, (the "License"); you may not use this file except in
  3. * compliance with the License. You should have received a copy of the
  4. * Erlang Public License along with this software. If not, it can be
  5. * retrieved via the world wide web at http://www.erlang.org/.
  6. *
  7. * Software distributed under the License is distributed on an "AS IS"
  8. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  9. * the License for the specific language governing rights and limitations
  10. * under the License.
  11. *
  12. * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
  13. * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
  14. * AB. All Rights Reserved.''
  15. *
  16. * $Id$
  17. */
  18. #ifdef HAVE_CONFIG_H
  19. # include "config.h"
  20. #endif
  21. #include <stddef.h> /* offsetof() */
  22. #include "sys.h"
  23. #include "erl_vm.h"
  24. #include "global.h"
  25. #include "erl_process.h"
  26. #include "erl_nmgc.h"
  27. #include "error.h"
  28. #include "bif.h"
  29. #include "big.h"
  30. #include "beam_load.h"
  31. #include "erl_binary.h"
  32. #include "erl_bits.h"
  33. #include "dist.h"
  34. #include "beam_bp.h"
  35. #include "beam_catches.h"
  36. #ifdef HIPE
  37. #include "hipe_mode_switch.h"
  38. #include "hipe_bif1.h"
  39. #endif
  40. /* #define HARDDEBUG 1 */
  41. #if defined(NO_JUMP_TABLE)
  42. # define OpCase(OpCode) case op_##OpCode: lb_##OpCode
  43. # define CountCase(OpCode) case op_count_##OpCode
  44. # define OpCode(OpCode) ((Uint*)op_##OpCode)
  45. # define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
  46. # define LabelAddr(Addr) &&##Addr
  47. #else
  48. # define OpCase(OpCode) lb_##OpCode
  49. # define CountCase(OpCode) lb_count_##OpCode
  50. # define Goto(Rel) goto *(Rel)
  51. # define LabelAddr(Label) &&Label
  52. # define OpCode(OpCode) (&&lb_##OpCode)
  53. #endif
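/*
 * Illustrative sketch, not part of the original file: with the jump
 * table enabled, each loaded instruction word holds the address of the
 * C label that implements it (published via OpCode() above), so Goto()
 * dispatches with a single indirect jump instead of a switch. A minimal
 * standalone model of this "threaded code" technique, using the
 * hypothetical name example_threaded_dispatch:
 */
static long example_threaded_dispatch(void)
{
    static void* prog[4];     /* a tiny "loaded" program */
    long acc = 0;
    int pc = 0;

    prog[0] = &&do_inc;       /* the loader would store these addresses */
    prog[1] = &&do_inc;
    prog[2] = &&do_dbl;
    prog[3] = &&do_halt;

    goto *prog[pc++];         /* dispatch: one indirect jump per opcode */
do_inc:  acc += 1; goto *prog[pc++];
do_dbl:  acc *= 2; goto *prog[pc++];
do_halt: return acc;          /* ((0 + 1) + 1) * 2 == 4 */
}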
  54. #ifdef ERTS_ENABLE_LOCK_CHECK
  55. # ifdef ERTS_SMP
  56. # define PROCESS_MAIN_CHK_LOCKS(P) \
  57. do { \
  58. if ((P)) \
  59. erts_proc_lc_chk_only_proc_main((P)); \
  60. else \
  61. erts_lc_check_exact(NULL, 0); \
  62. ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
  63. } while (0)
  64. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
  65. if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
  66. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
  67. if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
  68. # else
  69. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  70. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  71. # define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
  72. # endif
  73. #else
  74. # define PROCESS_MAIN_CHK_LOCKS(P)
  75. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  76. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  77. #endif
  78. /*
  79. * Shallow copy to heap if possible; otherwise,
  80. * move to heap via garbage collection.
  81. */
  82. #define MV_MSG_MBUF_INTO_PROC(M) \
  83. do { \
  84. if ((M)->bp) { \
  85. Uint need = (M)->bp->size; \
  86. if (E - HTOP >= need) { \
  87. Uint *htop = HTOP; \
  88. erts_move_msg_mbuf_to_heap(&htop, &MSO(c_p), (M)); \
  89. ASSERT(htop - HTOP == need); \
  90. HTOP = htop; \
  91. } \
  92. else { \
  93. SWAPOUT; \
  94. reg[0] = r(0); \
  95. PROCESS_MAIN_CHK_LOCKS(c_p); \
  96. FCALLS -= erts_garbage_collect(c_p, 0, NULL, 0); \
  97. PROCESS_MAIN_CHK_LOCKS(c_p); \
  98. r(0) = reg[0]; \
  99. SWAPIN; \
  100. ASSERT(!(M)->bp); \
  101. } \
  102. } \
  103. ASSERT(!(M)->bp); \
  104. } while (0)
  105. /*
  106. * Define macros for deep checking of terms.
  107. */
  108. #if defined(HARDDEBUG)
  109. # define CHECK_TERM(T) size_object(T)
  110. # define CHECK_ARGS(PC) \
  111. do { \
  112. int i_; \
  113. int Arity_ = PC[-1]; \
  114. if (Arity_ > 0) { \
  115. CHECK_TERM(r(0)); \
  116. } \
  117. for (i_ = 1; i_ < Arity_; i_++) { \
  118. CHECK_TERM(x(i_)); \
  119. } \
  120. } while (0)
  121. #else
  122. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  123. # define CHECK_ARGS(T)
  124. #endif
  125. #ifndef MAX
  126. #define MAX(x, y) (((x) > (y)) ? (x) : (y))
  127. #endif
  128. #define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
  129. /*
  130. * We reuse some of the fields in the save area in the process structure.
  131. * This is safe to do, since this space is only actively used when
  132. * the process is switched out.
  133. */
  134. #define REDS_IN(p) ((p)->def_arg_reg[5])
  135. /*
  136. * Add a byte offset to a pointer to Eterm. This is useful when
  137. * the loader has precalculated a byte offset.
  138. */
  139. #define ADD_BYTE_OFFSET(ptr, offset) \
  140. ((Eterm *) (((unsigned char *)ptr) + (offset)))
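/*
 * Illustrative sketch, not part of the original file (example_read_y2
 * is a hypothetical name): because the loader precomputes byte offsets,
 * a Y-register access becomes pointer-plus-offset with no
 * index-to-byte scaling at run time. Both loads below address the same
 * stack slot:
 */
static Eterm example_read_y2(Eterm* E)
{
    Eterm via_index  = E[2];                                   /* word index */
    Eterm via_offset = *ADD_BYTE_OFFSET(E, 2 * sizeof(Eterm)); /* byte offset */
    ASSERT(via_index == via_offset);
    return via_offset;
}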
  141. /* We don't check the range if an ordinary switch is used */
  142. #ifdef NO_JUMP_TABLE
  143. #define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
  144. #else
  145. #define VALID_INSTR(IP) \
  146. ((Sint)LabelAddr(emulator_loop) <= (Sint)(IP) && \
  147. (Sint)(IP) < (Sint)LabelAddr(end_emulator_loop))
  148. #endif /* NO_JUMP_TABLE */
  149. #define SET_CP(p, ip) \
  150. ASSERT(VALID_INSTR(*(ip))); \
  151. (p)->cp = (ip)
  152. #define SET_I(ip) \
  153. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  154. I = (ip)
  155. #define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
  156. /*
  157. * Store a result into a register given a destination descriptor.
  158. */
  159. #define StoreResult(Result, DestDesc) \
  160. do { \
  161. Eterm stb_reg; \
  162. stb_reg = (DestDesc); \
  163. CHECK_TERM(Result); \
  164. switch (beam_reg_tag(stb_reg)) { \
  165. case R_REG_DEF: \
  166. r(0) = (Result); break; \
  167. case X_REG_DEF: \
  168. xb(x_reg_offset(stb_reg)) = (Result); break; \
  169. default: \
  170. yb(y_reg_offset(stb_reg)) = (Result); break; \
  171. } \
  172. } while (0)
  173. #define StoreSimpleDest(Src, Dest) Dest = (Src)
  174. /*
  175. * Store a result into a register and execute the next instruction.
  176. * Dst points to the word with a destination descriptor, which MUST
  177. * be just before the next instruction.
  178. */
  179. #define StoreBifResult(Dst, Result) \
  180. do { \
  181. Eterm* stb_next; \
  182. Eterm stb_reg; \
  183. stb_reg = Arg(Dst); \
  184. I += (Dst) + 2; \
  185. stb_next = (Eterm *) *I; \
  186. CHECK_TERM(Result); \
  187. switch (beam_reg_tag(stb_reg)) { \
  188. case R_REG_DEF: \
  189. r(0) = (Result); Goto(stb_next); \
  190. case X_REG_DEF: \
  191. xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  192. default: \
  193. yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  194. } \
  195. } while (0)
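/*
 * Illustrative sketch, not part of the original file (example_store is
 * a hypothetical name): a destination descriptor packs a register-class
 * tag together with a loader-precomputed byte offset. This is
 * StoreResult() above with the xb()/yb() accessors written out:
 */
static void example_store(Eterm desc, Eterm result,
                          Eterm* x0, Eterm* reg, Eterm* E)
{
    switch (beam_reg_tag(desc)) {
    case R_REG_DEF:   /* x(0): kept in a machine register */
        *x0 = result;
        break;
    case X_REG_DEF:   /* x(1)..x(N): indexed off the X register array */
        *(Eterm *)(((unsigned char *)reg) + x_reg_offset(desc)) = result;
        break;
    default:          /* Y register: indexed off the stack pointer E */
        *(Eterm *)(((unsigned char *)E) + y_reg_offset(desc)) = result;
        break;
    }
}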
  196. #define ClauseFail() goto lb_jump_f
  197. #define SAVE_CP(X) *(X) = make_cp(c_p->cp)
  198. #define RESTORE_CP(X) SET_CP(c_p, cp_val(*(X)))
  199. #define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
  200. /*
  201. * Special Beam instructions.
  202. */
  203. Eterm beam_apply[2];
  204. Eterm beam_exit[1];
  205. Eterm* em_call_error_handler;
  206. Eterm* em_apply_bif;
  207. Eterm* em_call_traced_function;
  208. /* NOTE These should be the only variables containing trace instructions.
  209. ** Sometimes tests are for the instruction value, and sometimes
  210. ** for the referring variable (one of these), and rogue references
  211. ** will most likely cause chaos.
  212. */
  213. Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  214. Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
  215. Eterm beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  216. /*
  217. * All Beam instructions in numerical order.
  218. */
  219. #ifndef NO_JUMP_TABLE
  220. void** beam_ops;
  221. #endif
  222. #ifndef ERTS_SMP /* Not supported with smp emulator */
  223. extern int count_instructions;
  224. #endif
  225. #if defined(HYBRID)
  226. #define SWAPIN \
  227. g_htop = global_htop; \
  228. g_hend = global_hend; \
  229. HTOP = HEAP_TOP(c_p); \
  230. E = c_p->stop
  231. #define SWAPOUT \
  232. global_htop = g_htop; \
  233. global_hend = g_hend; \
  234. HEAP_TOP(c_p) = HTOP; \
  235. c_p->stop = E
  236. #else
  237. #define SWAPIN \
  238. HTOP = HEAP_TOP(c_p); \
  239. E = c_p->stop
  240. #define SWAPOUT \
  241. HEAP_TOP(c_p) = HTOP; \
  242. c_p->stop = E
  243. #endif
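/*
 * Note on the macros above: SWAPIN caches the process's heap top and
 * stack pointer in the local variables HTOP and E (plus the global
 * heap pointers in the HYBRID case); SWAPOUT writes them back to the
 * process structure. Any code that may garbage collect, call a BIF,
 * or yield must SWAPOUT first so that c_p is consistent, and SWAPIN
 * afterwards because the heap may have moved.
 */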
  244. #define PRE_BIF_SWAPOUT(P) \
  245. HEAP_TOP((P)) = HTOP; \
  246. (P)->stop = E; \
  247. PROCESS_MAIN_CHK_LOCKS((P)); \
  248. ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
  249. #if defined(HYBRID)
  250. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  251. if ((_p)->mbuf) { \
  252. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  253. } \
  254. SWAPIN
  255. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  256. if ((_p)->mbuf) { \
  257. _regs[0] = r(0); \
  258. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  259. r(0) = _regs[0]; \
  260. } \
  261. SWAPIN
  262. #else
  263. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  264. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  265. PROCESS_MAIN_CHK_LOCKS((_p)); \
  266. if ((_p)->mbuf) { \
  267. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  268. E = (_p)->stop; \
  269. } \
  270. HTOP = HEAP_TOP((_p))
  271. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  272. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  273. PROCESS_MAIN_CHK_LOCKS((_p)); \
  274. if ((_p)->mbuf) { \
  275. _regs[0] = r(0); \
  276. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  277. r(0) = _regs[0]; \
  278. E = (_p)->stop; \
  279. } \
  280. HTOP = HEAP_TOP((_p))
  281. #endif
  282. #define SAVE_HTOP HEAP_TOP(c_p) = HTOP
  283. #define db(N) (N)
  284. #define tb(N) (N)
  285. #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
  286. #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
  287. #define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  288. #define x(N) reg[N]
  289. #define y(N) E[N]
  290. #define r(N) x##N
  291. /*
  292. * Makes sure that there are StackNeed + HeapNeed + 1 words available
  293. * on the combined heap/stack segment, then allocates StackNeed + 1
  294. * words on the stack and saves CP.
  295. *
  296. * M is number of live registers to preserve during garbage collection
  297. */
  298. #define AH(StackNeed, HeapNeed, M) \
  299. do { \
  300. int needed; \
  301. needed = (StackNeed) + 1; \
  302. if (E - HTOP < (needed + (HeapNeed))) { \
  303. SWAPOUT; \
  304. reg[0] = r(0); \
  305. PROCESS_MAIN_CHK_LOCKS(c_p); \
  306. FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
  307. PROCESS_MAIN_CHK_LOCKS(c_p); \
  308. r(0) = reg[0]; \
  309. SWAPIN; \
  310. } \
  311. E -= needed; \
  312. SAVE_CP(E); \
  313. } while (0)
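/*
 * Worked example of the macro above: Allocate(2, 1) expands to
 * AH(2, 0, 1), which ensures at least 3 words (2 stack slots plus the
 * saved CP) are free between HTOP and E, garbage collecting with one
 * live X register if they are not, then grows the stack downwards by
 * 3 words and stores the continuation pointer at E[0].
 */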
  314. #define Allocate(Ns, Live) AH(Ns, 0, Live)
  315. #define AllocateZero(Ns, Live) \
  316. do { Eterm* ptr; \
  317. int i = (Ns); \
  318. AH(i, 0, Live); \
  319. for (ptr = E + i; ptr > E; ptr--) { \
  320. make_blank(*ptr); \
  321. } \
  322. } while (0)
  323. #define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
  324. #define AllocateHeapZero(Ns, Nh, Live) \
  325. do { Eterm* ptr; \
  326. int i = (Ns); \
  327. AH(i, Nh, Live); \
  328. for (ptr = E + i; ptr > E; ptr--) { \
  329. make_blank(*ptr); \
  330. } \
  331. } while (0)
  332. #define AllocateInit(Ns, Live, Y) \
  333. do { AH(Ns, 0, Live); make_blank(Y); } while (0)
  334. /*
  335. * Like the AH macro, but allocates no additional heap space.
  336. */
  337. #define A(StackNeed, M) AH(StackNeed, 0, M)
  338. #define D(N) \
  339. RESTORE_CP(E); \
  340. E += (N) + 1;
  341. /*
  342. * Check if Nh words of heap are available; if not, do a garbage collection.
  343. * Live is number of active argument registers to be preserved.
  344. */
  345. #define TestHeap(Nh, Live) \
  346. do { \
  347. unsigned need = (Nh); \
  348. if (E - HTOP < need) { \
  349. SWAPOUT; \
  350. reg[0] = r(0); \
  351. PROCESS_MAIN_CHK_LOCKS(c_p); \
  352. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  353. PROCESS_MAIN_CHK_LOCKS(c_p); \
  354. r(0) = reg[0]; \
  355. SWAPIN; \
  356. } \
  357. } while (0)
  358. /*
  359. * Check if Nh words of heap are available; if not, do a garbage collection.
  360. * Live is number of active argument registers to be preserved.
  361. * Takes special care to preserve Extra if a garbage collection occurs.
  362. */
  363. #define TestHeapPreserve(Nh, Live, Extra) \
  364. do { \
  365. unsigned need = (Nh); \
  366. if (E - HTOP < need) { \
  367. SWAPOUT; \
  368. reg[0] = r(0); \
  369. reg[Live] = Extra; \
  370. PROCESS_MAIN_CHK_LOCKS(c_p); \
  371. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
  372. PROCESS_MAIN_CHK_LOCKS(c_p); \
  373. if (Live > 0) { \
  374. r(0) = reg[0]; \
  375. } \
  376. Extra = reg[Live]; \
  377. SWAPIN; \
  378. } \
  379. } while (0)
  380. #ifdef HYBRID
  381. #ifdef INCREMENTAL
  382. #define TestGlobalHeap(Nh, Live, hp) \
  383. do { \
  384. unsigned need = (Nh); \
  385. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  386. SWAPOUT; \
  387. reg[0] = r(0); \
  388. FCALLS -= need; \
  389. (hp) = IncAlloc(c_p,need,reg,(Live)); \
  390. r(0) = reg[0]; \
  391. SWAPIN; \
  392. } while (0)
  393. #else
  394. #define TestGlobalHeap(Nh, Live, hp) \
  395. do { \
  396. unsigned need = (Nh); \
  397. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  398. if (g_hend - g_htop < need) { \
  399. SWAPOUT; \
  400. reg[0] = r(0); \
  401. FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
  402. r(0) = reg[0]; \
  403. SWAPIN; \
  404. } \
  405. (hp) = global_htop; \
  406. } while (0)
  407. #endif
  408. #endif /* HYBRID */
  409. #define Init(N) make_blank(yb(N))
  410. #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
  411. #define Init3(Y1, Y2, Y3) \
  412. do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
  413. #define MakeFun(FunP, NumFree) \
  414. do { \
  415. SWAPOUT; \
  416. reg[0] = r(0); \
  417. r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
  418. SWAPIN; \
  419. } while (0)
  420. /*
  421. * Check that we haven't used up all reductions, and jump to the function
  422. * pointed to by the I register. If we are out of reductions, do a context switch.
  423. */
  424. #define DispatchMacro() \
  425. do { \
  426. Eterm* dis_next; \
  427. dis_next = (Eterm *) *I; \
  428. CHECK_ARGS(I); \
  429. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  430. FCALLS--; \
  431. Goto(dis_next); \
  432. } else { \
  433. goto context_switch; \
  434. } \
  435. } while (0)
  436. #define DispatchMacroFun() \
  437. do { \
  438. Eterm* dis_next; \
  439. dis_next = (Eterm *) *I; \
  440. CHECK_ARGS(I); \
  441. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  442. FCALLS--; \
  443. Goto(dis_next); \
  444. } else { \
  445. goto context_switch_fun; \
  446. } \
  447. } while (0)
  448. #define DispatchMacrox() \
  449. do { \
  450. if (FCALLS > 0) { \
  451. Eterm* dis_next; \
  452. SET_I(((Export *) Arg(0))->address); \
  453. dis_next = (Eterm *) *I; \
  454. FCALLS--; \
  455. CHECK_ARGS(I); \
  456. Goto(dis_next); \
  457. } else if (c_p->ct != NULL && FCALLS > neg_o_reds) { \
  458. goto save_calls1; \
  459. } else { \
  460. SET_I(((Export *) Arg(0))->address); \
  461. CHECK_ARGS(I); \
  462. goto context_switch; \
  463. } \
  464. } while (0)
  465. #ifdef DEBUG
  466. /*
  467. * To simplify breakpoint setting, put the code in one place only and jump to it.
  468. */
  469. # define Dispatch() goto do_dispatch
  470. # define Dispatchx() goto do_dispatchx
  471. # define Dispatchfun() goto do_dispatchfun
  472. #else
  473. /*
  474. * Inline for speed.
  475. */
  476. # define Dispatch() DispatchMacro()
  477. # define Dispatchx() DispatchMacrox()
  478. # define Dispatchfun() DispatchMacroFun()
  479. #endif
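/*
 * Illustrative sketch, not part of the original file
 * (example_dispatch_step is a hypothetical name): each dispatched call
 * costs one reduction, and a process that runs out is context-switched
 * back to the scheduler. The test against neg_o_reds lets execution
 * continue into negative counts while call saving is active:
 */
static int example_dispatch_step(Sint* fcalls, Sint neg_o_reds)
{
    if (*fcalls > 0 || *fcalls > neg_o_reds) {
        (*fcalls)--;
        return 1;   /* keep going in the emulator loop */
    }
    return 0;       /* out of reductions: context switch */
}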
  480. #define Self(R) R = c_p->id
  481. #define Node(R) R = erts_this_node->sysname
  482. #define Arg(N) I[(N)+1]
  483. #define Next(N) \
  484. I += (N) + 1; \
  485. ASSERT(VALID_INSTR(*I)); \
  486. Goto(*I)
  487. #define PreFetch(N, Dst) do { Dst = (Eterm *) *(I + N + 1); } while (0)
  488. #define NextPF(N, Dst) \
  489. I += N + 1; \
  490. ASSERT(VALID_INSTR(Dst)); \
  491. Goto(Dst)
  492. #define GetR(pos, tr) \
  493. do { \
  494. tr = Arg(pos); \
  495. switch (beam_reg_tag(tr)) { \
  496. case R_REG_DEF: tr = r(0); break; \
  497. case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
  498. case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
  499. } \
  500. CHECK_TERM(tr); \
  501. } while (0)
  502. #define GetArg1(N, Dst) GetR((N), Dst)
  503. #define GetArg2(N, Dst1, Dst2) \
  504. do { \
  505. GetR(N, Dst1); \
  506. GetR((N)+1, Dst2); \
  507. } while (0)
  508. #define PutList(H, T, Dst, Store) \
  509. do { \
  510. HTOP[0] = (H); HTOP[1] = (T); \
  511. Store(make_list(HTOP), Dst); \
  512. HTOP += 2; \
  513. } while (0)
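/*
 * Note on PutList above: a cons cell is two adjacent heap words, CAR
 * followed by CDR, and make_list() tags the pointer to them as a list.
 * Building [H|T] is therefore two stores and a 2-word heap bump; the
 * caller must already have reserved the heap space (e.g. via TestHeap).
 */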
  514. #define Move(Src, Dst, Store) \
  515. do { \
  516. Eterm term = (Src); \
  517. Store(term, Dst); \
  518. } while (0)
  519. #define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
  520. #define MoveGenDest(src, dstp) \
  521. if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
  522. #define MoveReturn(Src, Dest) \
  523. (Dest) = (Src); \
  524. I = c_p->cp; \
  525. ASSERT(VALID_INSTR(*c_p->cp)); \
  526. CHECK_TERM(r(0)); \
  527. Goto(*I)
  528. #define DeallocateReturn(Deallocate) \
  529. do { \
  530. int words_to_pop = (Deallocate); \
  531. SET_I(cp_val(*E)); \
  532. E = ADD_BYTE_OFFSET(E, words_to_pop); \
  533. CHECK_TERM(r(0)); \
  534. Goto(*I); \
  535. } while (0)
  536. #define MoveDeallocateReturn(Src, Dest, Deallocate) \
  537. (Dest) = (Src); \
  538. DeallocateReturn(Deallocate)
  539. #define MoveCall(Src, Dest, CallDest, Size) \
  540. (Dest) = (Src); \
  541. SET_CP(c_p, I+Size+1); \
  542. SET_I((Eterm *) CallDest); \
  543. Dispatch();
  544. #define MoveCallLast(Src, Dest, CallDest, Deallocate) \
  545. (Dest) = (Src); \
  546. RESTORE_CP(E); \
  547. E = ADD_BYTE_OFFSET(E, (Deallocate)); \
  548. SET_I((Eterm *) CallDest); \
  549. Dispatch();
  550. #define MoveCallOnly(Src, Dest, CallDest) \
  551. (Dest) = (Src); \
  552. SET_I((Eterm *) CallDest); \
  553. Dispatch();
  554. #define GetList(Src, H, T) do { \
  555. Eterm* tmp_ptr = list_val(Src); \
  556. H = CAR(tmp_ptr); \
  557. T = CDR(tmp_ptr); } while (0)
  558. #define GetTupleElement(Src, Element, Dest) \
  559. do { \
  560. tmp_arg1 = (Eterm) (((unsigned char *) tuple_val(Src)) + (Element)); \
  561. (Dest) = (*(Eterm *)tmp_arg1); \
  562. } while (0)
  563. #define ExtractNextElement(Dest) \
  564. tmp_arg1 += sizeof(Eterm); \
  565. (Dest) = (* (Eterm *) (((unsigned char *) tmp_arg1)))
  566. #define ExtractNextElement2(Dest) \
  567. do { \
  568. Eterm* ene_dstp = &(Dest); \
  569. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  570. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  571. tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
  572. } while (0)
  573. #define ExtractNextElement3(Dest) \
  574. do { \
  575. Eterm* ene_dstp = &(Dest); \
  576. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  577. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  578. ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
  579. tmp_arg1 += 3*sizeof(Eterm); \
  580. } while (0)
  581. #define ExtractNextElement4(Dest) \
  582. do { \
  583. Eterm* ene_dstp = &(Dest); \
  584. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  585. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  586. ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
  587. ene_dstp[3] = ((Eterm *) tmp_arg1)[4]; \
  588. tmp_arg1 += 4*sizeof(Eterm); \
  589. } while (0)
  590. #define ExtractElement(Element, Dest) \
  591. do { \
  592. tmp_arg1 += (Element); \
  593. (Dest) = (* (Eterm *) tmp_arg1); \
  594. } while (0)
  595. #define PutTuple(Arity, Src, Dest) \
  596. ASSERT(is_arity_value(Arity)); \
  597. Dest = make_tuple(HTOP); \
  598. HTOP[0] = (Arity); \
  599. HTOP[1] = (Src); \
  600. HTOP += 2
  601. #define Put(Word) *HTOP++ = (Word)
  602. #define EqualImmed(X, Y, Action) if (X != Y) { Action; }
  603. #define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
  604. #define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
  605. #define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
  606. #define IsConstant(X, Fail) if (is_list(X) || is_nil(X) || is_tuple(X)) { Fail; }
  607. #define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
  608. #define IsIntegerAllocate(Src, Need, Alive, Fail) \
  609. if (is_not_integer(Src)) { Fail; } \
  610. A(Need, Alive)
  611. #define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
  612. #define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
  613. #define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
  614. #define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
  615. if (is_not_list(Src)) { Fail; } \
  616. A(Need, Alive)
  617. #define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
  618. if (is_not_list(Src)) { Fail; } \
  619. TestHeap(Need, Alive)
  620. #define IsTuple(X, Action) if (is_not_tuple(X)) Action
  621. #define IsArity(Pointer, Arity, Fail) \
  622. if (*(Eterm *)(tmp_arg1 = (Eterm)tuple_val(Pointer)) != (Arity)) { Fail; }
  623. #define IsFunction(X, Action) \
  624. do { \
  625. if ( !(is_any_fun(X)) ) { \
  626. Action; \
  627. } \
  628. } while (0)
  629. #define IsFunction2(F, A, Action) \
  630. do { \
  631. if (is_function_2(c_p, F, A) != am_true ) {\
  632. Action; \
  633. } \
  634. } while (0)
  635. #define IsTupleOfArity(Src, Arity, Fail) \
  636. do { \
  637. if (is_not_tuple(Src) || *(Eterm *)(tmp_arg1 = (Eterm) tuple_val(Src)) != Arity) { \
  638. Fail; \
  639. } \
  640. } while (0)
  641. #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
  642. #define IsBinary(Src, Fail) \
  643. if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
  644. #define IsBitstring(Src, Fail) \
  645. if (is_not_binary(Src)) { Fail; }
  646. #ifdef ARCH_64
  647. #define BsSafeMul(A, B, Fail, Target) \
  648. do { Uint64 _res = (A) * (B); \
  649. if (_res / B != A) { Fail; } \
  650. Target = _res; \
  651. } while (0)
  652. #else
  653. #define BsSafeMul(A, B, Fail, Target) \
  654. do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
  655. if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
  656. Target = _res; \
  657. } while (0)
  658. #endif
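/*
 * Illustrative sketch, not part of the original file (example_safe_mul
 * is a hypothetical name): both branches above detect overflow of the
 * multiplication. The 64-bit variant divides back and compares; the
 * 32-bit variant multiplies in 64 bits and checks the high word. A
 * standalone form of the divide-back check:
 */
static int example_safe_mul(Uint a, Uint b, Uint* out)
{
    Uint res = a * b;
    if (b != 0 && res / b != a) {
        return 0;   /* overflow */
    }
    *out = res;
    return 1;
}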
  659. #define BsGetFieldSize(Bits, Unit, Fail, Target) \
  660. do { \
  661. Sint _signed_size; Uint _uint_size; \
  662. if (is_small(Bits)) { \
  663. _signed_size = signed_val(Bits); \
  664. if (_signed_size < 0) { Fail; } \
  665. _uint_size = (Uint) _signed_size; \
  666. } else { \
  667. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  668. _uint_size = temp_bits; \
  669. } \
  670. BsSafeMul(_uint_size, Unit, Fail, Target); \
  671. } while (0)
  672. #define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
  673. do { \
  674. Sint _signed_size; Uint _uint_size; \
  675. if (is_small(Bits)) { \
  676. _signed_size = signed_val(Bits); \
  677. if (_signed_size < 0) { Fail; } \
  678. _uint_size = (Uint) _signed_size; \
  679. } else { \
  680. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  681. _uint_size = (Uint) temp_bits; \
  682. } \
  683. Target = _uint_size * Unit; \
  684. } while (0)
  685. #define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  686. do { \
  687. ErlBinMatchBuffer *_mb; \
  688. Eterm _result; Sint _size; \
  689. if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
  690. _size *= ((Flags) >> 3); \
  691. TestHeap(FLOAT_SIZE_OBJECT, Live); \
  692. _mb = ms_matchbuffer(Ms); \
  693. SWAPOUT; \
  694. _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
  695. HTOP = HEAP_TOP(c_p); \
  696. if (is_non_value(_result)) { Fail; } \
  697. else { Store(_result, Dst); } \
  698. } while (0)
  699. #define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  700. do { \
  701. ErlBinMatchBuffer *_mb; \
  702. Eterm _result; \
  703. TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
  704. _mb = ms_matchbuffer(Ms); \
  705. SWAPOUT; \
  706. _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
  707. HTOP = HEAP_TOP(c_p); \
  708. if (is_non_value(_result)) { Fail; } \
  709. else { Store(_result, Dst); } \
  710. } while (0)
  711. #define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  712. do { \
  713. ErlBinMatchBuffer *_mb; \
  714. Eterm _result; Uint _size; \
  715. BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
  716. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  717. _mb = ms_matchbuffer(Ms); \
  718. SWAPOUT; \
  719. _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
  720. HTOP = HEAP_TOP(c_p); \
  721. if (is_non_value(_result)) { Fail; } \
  722. else { Store(_result, Dst); } \
  723. } while (0)
  724. #define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
  725. do { \
  726. ErlBinMatchBuffer *_mb; \
  727. Eterm _result; \
  728. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  729. _mb = ms_matchbuffer(Ms); \
  730. if (((_mb->size - _mb->offset) % Unit) == 0) { \
  731. SWAPOUT; \
  732. _result = erts_bs_get_binary_all_2(c_p, _mb); \
  733. HTOP = HEAP_TOP(c_p); \
  734. ASSERT(is_value(_result)); \
  735. Store(_result, Dst); \
  736. } else { Fail; } \
  737. } while (0)
  738. #define BsSkipBits2(Ms, Bits, Unit, Fail) \
  739. do { \
  740. ErlBinMatchBuffer *_mb; \
  741. size_t new_offset; \
  742. Uint _size; \
  743. _mb = ms_matchbuffer(Ms); \
  744. BsGetFieldSize(Bits, Unit, Fail, _size); \
  745. new_offset = _mb->offset + _size; \
  746. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  747. else { Fail; } \
  748. } while (0)
  749. #define BsSkipBitsAll2(Ms, Unit, Fail) \
  750. do { \
  751. ErlBinMatchBuffer *_mb; \
  752. _mb = ms_matchbuffer(Ms); \
  753. if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
  754. else { Fail; } \
  755. } while (0)
  756. #define BsSkipBitsImm2(Ms, Bits, Fail) \
  757. do { \
  758. ErlBinMatchBuffer *_mb; \
  759. size_t new_offset; \
  760. _mb = ms_matchbuffer(Ms); \
  761. new_offset = _mb->offset + (Bits); \
  762. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  763. else { Fail; } \
  764. } while (0)
  765. #define NewBsPutIntegerImm(Sz, Flags, Src) \
  766. do { \
  767. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
  768. } while (0)
  769. #define NewBsPutInteger(Sz, Flags, Src) \
  770. do { \
  771. Sint _size; \
  772. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  773. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
  774. { goto badarg; } \
  775. } while (0)
  776. #define NewBsPutFloatImm(Sz, Flags, Src) \
  777. do { \
  778. if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
  779. } while (0)
  780. #define NewBsPutFloat(Sz, Flags, Src) \
  781. do { \
  782. Sint _size; \
  783. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  784. if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
  785. } while (0)
  786. #define NewBsPutBinary(Sz, Flags, Src) \
  787. do { \
  788. Sint _size; \
  789. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  790. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
  791. } while (0)
  792. #define NewBsPutBinaryImm(Sz, Src) \
  793. do { \
  794. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
  795. } while (0)
  796. #define NewBsPutBinaryAll(Src, Unit) \
  797. do { \
  798. if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
  799. } while (0)
  800. #define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
  801. #define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
  802. #define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
  803. static BifFunction translate_gc_bif(void* gcf);
  804. static Eterm* handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf);
  805. static Eterm* next_catch(Process* c_p, Eterm *reg);
  806. static void terminate_proc(Process* c_p, Eterm Value);
  807. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  808. static void save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg,
  809. BifFunction bf, Eterm args);
  810. static struct StackTrace * get_trace_from_exc(Eterm exc);
  811. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  812. static Eterm call_error_handler(Process* p, Eterm* ip, Eterm* reg);
  813. static Eterm call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg);
  814. static Uint* fixed_apply(Process* p, Eterm* reg, Uint arity);
  815. static Eterm* apply(Process* p, Eterm module, Eterm function,
  816. Eterm args, Eterm* reg);
  817. static int hibernate(Process* c_p, Eterm module, Eterm function,
  818. Eterm args, Eterm* reg);
  819. static Eterm* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
  820. static Eterm* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
  821. static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free);
  822. #if defined(_OSE_) || defined(VXWORKS)
  823. static int init_done;
  824. #endif
  825. void
  826. init_emulator(void)
  827. {
  828. #if defined(_OSE_) || defined(VXWORKS)
  829. init_done = 0;
  830. #endif
  831. process_main();
  832. }
  833. /*
  834. * On certain platforms, make sure that the main variables really are placed
  835. * in registers.
  836. */
  837. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  838. # define REG_x0 asm("%l0")
  839. # define REG_xregs asm("%l1")
  840. # define REG_htop asm("%l2")
  841. # define REG_stop asm("%l3")
  842. # define REG_I asm("%l4")
  843. # define REG_fcalls asm("%l5")
  844. # define REG_tmp_arg1 asm("%l6")
  845. # define REG_tmp_arg2 asm("%l7")
  846. #else
  847. # define REG_x0
  848. # define REG_xregs
  849. # define REG_htop
  850. # define REG_stop
  851. # define REG_I
  852. # define REG_fcalls
  853. # define REG_tmp_arg1
  854. # define REG_tmp_arg2
  855. #endif
  856. /*
  857. * process_main() is called twice:
  858. * The first call performs some initialisation, including exporting
  859. * the instructions' C labels to the loader.
  860. * The second call starts execution of BEAM code. This call never returns.
  861. */
  862. void process_main(void)
  863. {
  864. #if !defined(_OSE_) && !defined(VXWORKS)
  865. static int init_done = 0;
  866. #endif
  867. Process* c_p = NULL;
  868. int reds_used;
  869. /*
  870. * X register zero; also called r(0)
  871. */
  872. register Eterm x0 REG_x0 = NIL;
  873. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
  874. * in all other cases x0 is used.
  875. */
  876. register Eterm* reg REG_xregs = NULL;
  877. /*
  878. * Top of heap (next free location); grows upwards.
  879. */
  880. register Eterm* HTOP REG_htop = NULL;
  881. #ifdef HYBRID
  882. Eterm *g_htop;
  883. Eterm *g_hend;
  884. #endif
  885. /* Stack pointer. Grows downwards; points
  886. * to last item pushed (normally a saved
  887. * continuation pointer).
  888. */
  889. register Eterm* E REG_stop = NULL;
  890. /*
  891. * Pointer to next threaded instruction.
  892. */
  893. register Eterm *I REG_I = NULL;
  894. /* Number of reductions left. This function
  895. * returns to the scheduler when FCALLS reaches zero.
  896. */
  897. register Sint FCALLS REG_fcalls = 0;
  898. /*
  899. * Temporaries used for picking up arguments for instructions.
  900. */
  901. register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
  902. register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
  903. Eterm tmp_big[2]; /* Temporary buffer for small bignums. */
  904. #ifndef ERTS_SMP
  905. static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
  906. /* X registers -- not used directly, but
  907. * through 'reg', because using it directly
  908. * needs two instructions on a SPARC,
  909. * while using it through reg needs only
  910. * one.
  911. */
  912. /*
  913. * Floating point registers.
  914. */
  915. static FloatDef freg[MAX_REG];
  916. #else
  917. /* X registers and floating point registers are located in
  918. * scheduler-specific data.
  919. */
  920. register FloatDef *freg;
  921. #endif
  922. /*
  923. * For keeping the negative old value of 'reds' when call saving is active.
  924. */
  925. int neg_o_reds = 0;
  926. Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
  927. #ifndef NO_JUMP_TABLE
  928. static void* opcodes[] = { DEFINE_OPCODES };
  929. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  930. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  931. #endif
  932. #else
  933. int Go;
  934. #endif
  935. Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
  936. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  937. /*
  938. * Note: In this function, we attempt to place rarely executed code towards
  939. * the end of the function, in the hope that the cache hit rate will be better.
  940. * The initialization code is only run once, so it is at the very end.
  941. *
  942. * Note: c_p->arity must be set to reflect the number of useful terms in
  943. * c_p->arg_reg before calling the scheduler.
  944. */
  945. if (!init_done) {
  946. init_done = 1;
  947. goto init_emulator;
  948. }
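/*
 * First of the two calls described in the comment above the function:
 * jump to the one-time initialization code at the init_emulator label,
 * placed at the very end of the function with other rarely executed
 * code.
 */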
  949. #ifndef ERTS_SMP
  950. reg = save_reg; /* XXX: probably wastes a register on x86 */
  951. #endif
  952. c_p = NULL;
  953. reds_used = 0;
  954. goto do_schedule1;
  955. do_schedule:
  956. reds_used = REDS_IN(c_p) - FCALLS;
  957. do_schedule1:
  958. PROCESS_MAIN_CHK_LOCKS(c_p);
  959. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  960. c_p = schedule(c_p, reds_used);
  961. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  962. PROCESS_MAIN_CHK_LOCKS(c_p);
  963. #ifdef ERTS_SMP
  964. reg = c_p->scheduler_data->save_reg;
  965. freg = c_p->scheduler_data->freg;
  966. #endif
  967. ERL_BITS_RELOAD_STATEP(c_p);
  968. {
  969. int reds;
  970. Eterm* argp;
  971. Eterm* next;
  972. int i;
  973. argp = c_p->arg_reg;
  974. for (i = c_p->arity - 1; i > 0; i--) {
  975. reg[i] = argp[i];
  976. CHECK_TERM(reg[i]);
  977. }
  978. /*
  979. * We put the original reduction count in the process structure, to reduce
  980. * the code size (referencing a field in a struct through a pointer stored
  981. * in a register gives smaller code than referencing a global variable).
  982. */
  983. SET_I(c_p->i);
  984. reds = c_p->fcalls;
  985. if (c_p->ct != NULL && (c_p->trace_flags & F_SENSITIVE) == 0) {
  986. neg_o_reds = -reds;
  987. FCALLS = REDS_IN(c_p) = 0;
  988. } else {
  989. neg_o_reds = 0;
  990. FCALLS = REDS_IN(c_p) = reds;
  991. }
  992. next = (Eterm *) *I;
  993. r(0) = c_p->arg_reg[0];
  994. #ifdef HARDDEBUG
  995. if (c_p->arity > 0) {
  996. CHECK_TERM(r(0));
  997. }
  998. #endif
  999. SWAPIN;
  1000. ASSERT(VALID_INSTR(next));
  1001. Goto(next);
  1002. }
  1003. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  1004. emulator_loop:
  1005. #endif
  1006. #ifdef NO_JUMP_TABLE
  1007. switch (Go) {
  1008. #endif
  1009. #include "beam_hot.h"
  1010. #define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
  1011. #define ARITH_FUNC(name) erts_gc_##name
  1012. OpCase(i_plus_jId):
  1013. {
  1014. Eterm result;
  1015. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1016. Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
  1017. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1018. if (MY_IS_SSMALL(i)) {
  1019. result = make_small(i);
  1020. STORE_ARITH_RESULT(result);
  1021. }
  1022. }
  1023. arith_func = ARITH_FUNC(mixed_plus);
  1024. goto do_big_arith2;
  1025. }
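/*
 * Note: the fast path above applies only when both operands are tagged
 * small integers and the untagged sum still fits in a small; all other
 * cases fall through to do_big_arith2, which calls the mixed-type
 * arithmetic routine selected in arith_func.
 */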
  1026. OpCase(i_minus_jId):
  1027. {
  1028. Eterm result;
  1029. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1030. Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
  1031. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1032. if (MY_IS_SSMALL(i)) {
  1033. result = make_small(i);
  1034. STORE_ARITH_RESULT(result);
  1035. }
  1036. }
  1037. arith_func = ARITH_FUNC(mixed_minus);
  1038. goto do_big_arith2;
  1039. }
  1040. OpCase(i_is_lt_f):
  1041. if (CMP_GE(tmp_arg1, tmp_arg2)) {
  1042. ClauseFail();
  1043. }
  1044. Next(1);
  1045. OpCase(i_is_ge_f):
  1046. if (CMP_LT(tmp_arg1, tmp_arg2)) {
  1047. ClauseFail();
  1048. }
  1049. Next(1);
  1050. OpCase(i_is_eq_f):
  1051. if (CMP_NE(tmp_arg1, tmp_arg2)) {
  1052. ClauseFail();
  1053. }
  1054. Next(1);
  1055. OpCase(i_is_ne_f):
  1056. if (CMP_EQ(tmp_arg1, tmp_arg2)) {
  1057. ClauseFail();
  1058. }
  1059. Next(1);
  1060. OpCase(i_is_eq_exact_f):
  1061. if (!EQ(tmp_arg1, tmp_arg2)) {
  1062. ClauseFail();
  1063. }
  1064. Next(1);
  1065. OpCase(i_move_call_only_fcr): {
  1066. r(0) = Arg(1);
  1067. }
  1068. /* FALL THROUGH */
  1069. OpCase(i_call_only_f): {
  1070. SET_I((Eterm *) Arg(0));
  1071. Dispatch();
  1072. }
  1073. OpCase(i_move_call_last_fPcr): {
  1074. r(0) = Arg(2);
  1075. }
  1076. /* FALL THROUGH */
  1077. OpCase(i_call_last_fP): {
  1078. RESTORE_CP(E);
  1079. E = ADD_BYTE_OFFSET(E, Arg(1));
  1080. SET_I((Eterm *) Arg(0));
  1081. Dispatch();
  1082. }
  1083. OpCase(i_move_call_crf): {
  1084. r(0) = Arg(0);
  1085. I++;
  1086. }
  1087. /* FALL THROUGH */
  1088. OpCase(i_call_f): {
  1089. SET_CP(c_p, I+2);
  1090. SET_I((Eterm *) Arg(0));
  1091. Dispatch();
  1092. }
  1093. OpCase(i_move_call_ext_last_ePcr): {
  1094. r(0) = Arg(2);
  1095. }
  1096. /* FALL THROUGH */
  1097. OpCase(i_call_ext_last_eP):
  1098. RESTORE_CP(E);
  1099. E = ADD_BYTE_OFFSET(E, Arg(1));
  1100. /*
  1101. * Note: The pointer to the export entry is never NULL; if the module
  1102. * is not loaded, it points to code which will invoke the error handler
  1103. * (see lb_call_error_handler below).
  1104. */
  1105. Dispatchx();
  1106. OpCase(i_move_call_ext_cre): {
  1107. r(0) = Arg(0);
  1108. I++;
  1109. }
  1110. /* FALL THROUGH */
  1111. OpCase(i_call_ext_e):
  1112. SET_CP(c_p, I+2);
  1113. Dispatchx();
  1114. OpCase(i_move_call_ext_only_ecr): {
  1115. r(0) = Arg(1);
  1116. }
  1117. /* FALL THROUGH */
  1118. OpCase(i_call_ext_only_e):
  1119. Dispatchx();
  1120. OpCase(init_y): {
  1121. Eterm* next;
  1122. PreFetch(1, next);
  1123. make_blank(yb(Arg(0)));
  1124. NextPF(1, next);
  1125. }
  1126. OpCase(i_trim_I): {
  1127. Eterm* next;
  1128. Uint words;
  1129. Uint cp;
  1130. words = Arg(0);
  1131. cp = E[0];
  1132. PreFetch(1, next);
  1133. E += words;
  1134. E[0] = cp;
  1135. NextPF(1, next);
  1136. }
  1137. OpCase(return):
  1138. SET_I(c_p->cp);
  1139. CHECK_TERM(r(0));
  1140. Goto(*I);
  1141. OpCase(test_heap_1_put_list_Iy): {
  1142. Eterm* next;
  1143. PreFetch(2, next);
  1144. TestHeap(Arg(0), 1);
  1145. PutList(yb(Arg(1)), r(0), r(0), StoreSimpleDest);
  1146. CHECK_TERM(r(0));
  1147. NextPF(2, next);
  1148. }
  1149. OpCase(put_string_IId):
  1150. {
  1151. unsigned char* s;
  1152. int len;
  1153. Eterm result;
  1154. len = Arg(0); /* Length. */
  1155. result = NIL;
  1156. for (s = (unsigned char *) Arg(1) + len - 1; len > 0; s--, len--) {
  1157. PutList(make_small(*s), result, result, StoreSimpleDest);
  1158. }
  1159. StoreBifResult(2, result);
  1160. }
  1161. /*
  1162. * Send is almost a standard call-BIF with two arguments, except for:
  1163. * 1) It cannot be traced.
  1164. * 2) There is no pointer to the send_2 function stored in
  1165. * the instruction.
  1166. */
  1167. OpCase(send): {
  1168. Eterm* next;
  1169. Eterm result;
  1170. PRE_BIF_SWAPOUT(c_p);
  1171. c_p->fcalls = FCALLS - 1;
  1172. result = send_2(c_p, r(0), x(1));
  1173. PreFetch(0, next);
  1174. POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
  1175. FCALLS = c_p->fcalls;
  1176. if (is_value(result)) {
  1177. r(0) = result;
  1178. CHECK_TERM(r(0));
  1179. NextPF(0, next);
  1180. } else if (c_p->freason == RESCHEDULE) {
  1181. Eterm* argp;
  1182. c_p->arity = 2;
  1183. /*
  1184. * Moving c_p->arg to a register is shorter than using c_p->arg_reg
  1185. * directly, since c_p->arg_reg is a pointer (not an array)
  1186. * and the compiler generates code to fetch the pointer every time.
  1187. */
  1188. argp = c_p->arg_reg;
  1189. argp[0] = r(0);
  1190. argp[1] = x(1);
  1191. SWAPOUT;
  1192. c_p->i = I;
  1193. c_p->current = NULL;
  1194. goto do_schedule;
  1195. } else if (c_p->freason == TRAP) {
  1196. SET_CP(c_p, I+1);
  1197. SET_I(((Export *)(c_p->def_arg_reg[3]))->address);
  1198. SWAPIN;
  1199. r(0) = c_p->def_arg_reg[0];
  1200. x(1) = c_p->def_arg_reg[1];
  1201. Dispatch();
  1202. }
  1203. goto find_func_info;
  1204. }
  1205. OpCase(i_element_jssd): {
  1206. Eterm index;
  1207. Eterm tuple;
  1208. /*
  1209. * Inlined version of element/2 for speed.
  1210. */
  1211. GetArg2(1, index, tuple);
  1212. if (is_small(index) && is_tuple(tuple)) {
  1213. Eterm* tp = tuple_val(tuple);
  1214. if ((signed_val(index) >= 1) &&
  1215. (signed_val(index) <= arityval(*tp))) {
  1216. Eterm result = tp[signed_val(index)];
  1217. StoreBifResult(3, result);
  1218. }
  1219. }
  1220. }
  1221. /* Fall through */
  1222. OpCase(badarg_j):
  1223. badarg:
  1224. c_p->freason = BADARG;
  1225. goto lb_Cl_error;
  1226. OpCase(i_fast_element_jIsd): {
  1227. Eterm tuple;
  1228. /*
  1229. * Inlined version of element/2 for even more speed.
  1230. * The first argument is an untagged integer >= 1.
  1231. * The second argument is guaranteed to be a register operand.
  1232. */
  1233. GetArg1(2, tuple);
  1234. if (is_tuple(tuple)) {
  1235. Eterm* tp = tuple_val(tuple);
  1236. tmp_arg2 = Arg(1);
  1237. if (tmp_arg2 <= arityval(*tp)) {
  1238. Eterm result = tp[tmp_arg2];
  1239. StoreBifResult(3, result);
  1240. }
  1241. }
  1242. goto badarg;
  1243. }
  1244. OpCase(catch_yf):
  1245. c_p->catches++;
  1246. yb(Arg(0)) = Arg(1);
  1247. Next(2);
  1248. OpCase(catch_end_y): {
  1249. c_p->catches--;
  1250. make_blank(yb(Arg(0)));
  1251. if (is_non_value(r(0))) {
  1252. if (x(1) == am_throw) {
  1253. r(0) = x(2);
  1254. } else {
  1255. if (x(1) == am_error) {
  1256. SWAPOUT;
  1257. x(2) = add_stacktrace(c_p, x(2), x(3));
  1258. SWAPIN;
  1259. }
  1260. /* only x(2) is included in the rootset here */
  1261. if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
  1262. * created heap fragments */
  1263. SWAPOUT;
  1264. PROCESS_MAIN_CHK_LOCKS(c_p);
  1265. FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
  1266. PROCESS_MAIN_CHK_LOCKS(c_p);
  1267. SWAPIN;
  1268. }
  1269. r(0) = TUPLE2(HTOP, am_EXIT, x(2));
  1270. HTOP += 3;
  1271. }
  1272. }
  1273. CHECK_TERM(r(0));
  1274. Next(1);
  1275. }
  1276. OpCase(try_end_y): {
  1277. c_p->catches--;
  1278. make_blank(yb(Arg(0)));
  1279. if (is_non_value(r(0))) {
  1280. r(0) = x(1);
  1281. x(1) = x(2);
  1282. x(2) = x(3);
  1283. }
  1284. Next(1);
  1285. }
  1286. /*
  1287. * Skeleton for receive statement:
  1288. *
  1289. * L1: <-------------------+
  1290. * <-----------+ |
  1291. * | |
  1292. * loop_rec L2 ------+---+ |
  1293. * ... | | |
  1294. * remove_message | | |
  1295. * jump L3 | | |
  1296. * ... | | |
  1297. * loop_rec_end L1 --+ | |
  1298. * L2: <---------------+ |
  1299. * wait L1 -----------------+ or wait_timeout
  1300. * timeout
  1301. *
  1302. * L3: Code after receive...
  1303. *
  1304. *
  1305. */
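/*
 * For illustration (Erlang source, not part of the original file): a
 * receive such as
 *
 *     receive Msg -> handle(Msg) after 1000 -> expired end
 *
 * compiles to this skeleton: loop_rec picks up the next message, a
 * failed match reaches loop_rec_end (advance the save pointer, retry),
 * and wait_timeout parks the process until a message arrives or the
 * timer fires the timeout instruction.
 */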
  1306. /*
  1307. * Pick up the next message and place it in x(0).
  1308. * If no message, jump to a wait or wait_timeout instruction.
  1309. */
  1310. OpCase(i_loop_rec_fr):
  1311. {
  1312. Eterm* next;
  1313. ErlMessage* msgp;
  1314. loop_rec__:
  1315. PROCESS_MAIN_CHK_LOCKS(c_p);
  1316. msgp = PEEK_MESSAGE(c_p);
  1317. if (!msgp) {
  1318. #ifdef ERTS_SMP
  1319. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1320. /* Make sure messages won't pass exit signals... */
  1321. if (ERTS_PROC_PENDING_EXIT(c_p)) {
  1322. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1323. SWAPOUT;
  1324. goto do_schedule; /* Will be rescheduled for exit */
  1325. }
  1326. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  1327. msgp = PEEK_MESSAGE(c_p);
  1328. if (msgp)
  1329. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1330. else {
  1331. #endif
  1332. SET_I((Eterm *) Arg(0));
  1333. Goto(*I); /* Jump to a wait or wait_timeout instruction */
  1334. #ifdef ERTS_SMP
  1335. }
  1336. #endif
  1337. }
  1338. MV_MSG_MBUF_INTO_PROC(msgp);
  1339. PreFetch(1, next);
  1340. r(0) = ERL_MESSAGE_TERM(msgp);
  1341. NextPF(1, next);
  1342. }
  1343. /*
  1344. * Remove a (matched) message from the message queue.
  1345. */
  1346. OpCase(remove_message): {
  1347. Eterm* next;
  1348. ErlMessage* msgp;
  1349. PROCESS_MAIN_CHK_LOCKS(c_p);
  1350. PreFetch(0, next);
  1351. msgp = PEEK_MESSAGE(c_p);
  1352. if (c_p->ct != NULL) {
  1353. save_calls(c_p, &exp_receive);
  1354. }
  1355. if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
  1356. SEQ_TRACE_TOKEN(c_p) = NIL;
  1357. } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
  1358. Eterm msg;
  1359. SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
  1360. ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
  1361. ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
  1362. ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
  1363. ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
  1364. ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
  1365. ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
  1366. c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1367. if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
  1368. c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1369. }
  1370. msg = ERL_MESSAGE_TERM(msgp);
  1371. seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
  1372. c_p->id, c_p);
  1373. }
  1374. UNLINK_MESSAGE(c_p, msgp);
  1375. JOIN_MESSAGE(c_p);
  1376. CANCEL_TIMER(c_p);
  1377. free_message(msgp);
  1378. PROCESS_MAIN_CHK_LOCKS(c_p);
  1379. NextPF(0, next);
  1380. }
  1381. /*
  1382. * Advance the save pointer to the next message (the current
  1383. * message didn't match), then jump to the loop_rec instruction.
  1384. */
  1385. OpCase(loop_rec_end_f): {
  1386. SET_I((Eterm *) Arg(0));
  1387. SAVE_MESSAGE(c_p);
  1388. goto loop_rec__;
  1389. }
  1390. /*
  1391. * Prepare to wait for a message or a timeout, whichever occurs first.
  1392. *
  1393. * Note: In order to keep compatibility between the 32-bit and 64-bit
  1394. * emulators, only timeout values that can be represented in 32 bits
  1395. * (unsigned) or less are allowed.
  1396. */
  1397. OpCase(i_wait_timeout_fs): {
  1398. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1399. /* Fall through */
  1400. }
  1401. OpCase(i_wait_timeout_locked_fs): {
  1402. Eterm timeout_value;
  1403. /*
  1404. * If we have already set the timer, we must NOT set it again. Therefore,
  1405. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1406. */
  1407. if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) {
  1408. goto wait2;
  1409. }
  1410. GetArg1(1, timeout_value);
  1411. if (timeout_value != make_small(0)) {
  1412. #if !defined(ARCH_64)
  1413. Uint time_val;
  1414. #endif
  1415. if (is_small(timeout_value) && signed_val(timeout_value) > 0 &&
  1416. #if defined(ARCH_64)
  1417. ((unsigned_val(timeout_value) >> 32) == 0)
  1418. #else
  1419. 1
  1420. #endif
  1421. ) {
  1422. /*
  1423. * The timer routine will set c_p->i to the value in
  1424. * c_p->def_arg_reg[0]. Note that it is safe to use this
  1425. * location because there are no living x registers in
  1426. * a receive statement.
  1427. */
  1428. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1429. set_timer(c_p, unsigned_val(timeout_value));
  1430. } else if (timeout_value == am_infinity) {
  1431. c_p->flags |= F_TIMO;
  1432. #if !defined(ARCH_64)
  1433. } else if (term_to_Uint(timeout_value, &time_val)) {
  1434. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1435. set_timer(c_p, time_val);
  1436. #endif
  1437. } else { /* Wrong time */
  1438. OpCase(i_wait_error_locked): {
  1439. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1440. /* Fall through */
  1441. }
  1442. OpCase(i_wait_error): {
  1443. c_p->freason = EXC_TIMEOUT_VALUE;
  1444. goto find_func_info;
  1445. }
  1446. }
  1447. /*
  1448. * Prepare to wait indefinitely for a new message to arrive
  1449. * (or the time set above if falling through from above).
  1450. *
  1451. * When a new message arrives, control will be transferred to
  1452. * the loop_rec instruction (at label L1). In case
  1453. * of timeout, control will be transferred to the timeout
  1454. * instruction following the wait_timeout instruction.
  1455. */
  1456. OpCase(wait_locked_f):
  1457. OpCase(wait_f):
  1458. wait2: {
  1459. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1460. c_p->i = (Eterm *) Arg(0); /* L1 */
  1461. SWAPOUT;
  1462. c_p->arity = 0;
  1463. c_p->status = P_WAITING;
  1464. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1465. c_p->current = NULL;
  1466. goto do_schedule;
  1467. }
  1468. OpCase(wait_unlocked_f): {
  1469. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1470. goto wait2;
  1471. }
  1472. }
  1473. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1474. Next(2);
  1475. }
  1476. OpCase(i_wait_timeout_fI): {
  1477. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1478. }
  1479. OpCase(i_wait_timeout_locked_fI):
  1480. {
  1481. /*
  1482. * If we have already set the timer, we must NOT set it again. Therefore,
  1483. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1484. */
  1485. if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
  1486. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1487. set_timer(c_p, Arg(1));
  1488. }
  1489. goto wait2;
  1490. }
  1491. /*
  1492. * A timeout has occurred. Reset the save pointer so that the next
  1493. * receive statement will examine the first message first.
  1494. */
  1495. OpCase(timeout_locked): {
  1496. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1497. }
  1498. OpCase(timeout): {
  1499. Eterm* next;
  1500. PreFetch(0, next);
  1501. if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
  1502. trace_receive(c_p, am_timeout);
  1503. }
  1504. if (c_p->ct != NULL) {
  1505. save_calls(c_p, &exp_timeout);
  1506. }
  1507. c_p->flags &= ~F_TIMO;
  1508. JOIN_MESSAGE(c_p);
  1509. NextPF(0, next);
  1510. }
  1511. OpCase(i_select_val_sfI):
  1512. GetArg1(0, tmp_arg1);
  1513. do_binary_search:
  1514. {
  1515. struct Pairs {
  1516. Eterm val;
  1517. Eterm* addr;
  1518. };
  1519. struct Pairs* low;
  1520. struct Pairs* high;
  1521. struct Pairs* mid;
  1522. int bdiff; /* int not long because the arrays aren't that large */
  1523. low = (struct Pairs *) &Arg(3);
  1524. high = low + Arg(2);
  1525. /* The pointer subtraction (high-low) below must produce
  1526. * a signed result, because high could be < low. That
  1527. * requires the compiler to insert quite a bit of code.
  1528. *
  1529. * However, high will be > low so the result will be
  1530. * positive. We can use that knowledge to optimise the
  1531. * entire sequence, from the initial com…
