
/erts/emulator/beam/beam_emu.c

https://github.com/mortior/otp
C | 6388 lines | 4735 code | 676 blank | 977 comment | 908 complexity | 4997f2060c17a19d8c40913d9c3c4f1f MD5
Possible License(s): MPL-2.0-no-copyleft-exception, BSD-2-Clause

Large files are truncated; this listing ends partway through the file.

  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2010. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include <stddef.h> /* offsetof() */
  23. #include "sys.h"
  24. #include "erl_vm.h"
  25. #include "global.h"
  26. #include "erl_process.h"
  27. #include "erl_nmgc.h"
  28. #include "error.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "beam_load.h"
  32. #include "erl_binary.h"
  33. #include "erl_bits.h"
  34. #include "dist.h"
  35. #include "beam_bp.h"
  36. #include "beam_catches.h"
  37. #ifdef HIPE
  38. #include "hipe_mode_switch.h"
  39. #include "hipe_bif1.h"
  40. #endif
  41. /* #define HARDDEBUG 1 */
  42. #if defined(NO_JUMP_TABLE)
  43. # define OpCase(OpCode) case op_##OpCode: lb_##OpCode
  44. # define CountCase(OpCode) case op_count_##OpCode
  45. # define OpCode(OpCode) ((Uint*)op_##OpCode)
  46. # define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
  47. # define LabelAddr(Addr) &&##Addr
  48. #else
  49. # define OpCase(OpCode) lb_##OpCode
  50. # define CountCase(OpCode) lb_count_##OpCode
  51. # define Goto(Rel) goto *(Rel)
  52. # define LabelAddr(Label) &&Label
  53. # define OpCode(OpCode) (&&lb_##OpCode)
  54. #endif
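/* Illustrative aside (not part of beam_emu.c): the jump-table build relies
 * on GCC's labels-as-values extension, so each loaded instruction word is
 * the address of the label implementing it and dispatch is one indirect
 * goto. A minimal standalone sketch of that technique, with invented
 * opcodes, follows; it can be compiled separately with GCC or Clang. */
#if 0
#include <stdio.h>

int main(void)
{
    void *ops[] = { &&op_inc, &&op_print, &&op_halt };
    void *prog[] = { ops[0], ops[0], ops[1], ops[2] }; /* inc,inc,print,halt */
    int acc = 0, pc = 0;

    goto *prog[pc];                /* compare Goto(Rel) above */
op_inc:
    acc++;
    goto *prog[++pc];
op_print:
    printf("acc = %d\n", acc);
    goto *prog[++pc];
op_halt:
    return 0;
}
#endif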
  55. #ifdef ERTS_ENABLE_LOCK_CHECK
  56. # ifdef ERTS_SMP
  57. # define PROCESS_MAIN_CHK_LOCKS(P) \
  58. do { \
  59. if ((P)) { \
  60. erts_pix_lock_t *pix_lock__ = ERTS_PIX2PIXLOCK(internal_pid_index((P)->id));\
  61. erts_proc_lc_chk_only_proc_main((P)); \
  62. erts_pix_lock(pix_lock__); \
  63. ASSERT(0 < (P)->lock.refc && (P)->lock.refc < erts_no_schedulers*5);\
  64. erts_pix_unlock(pix_lock__); \
  65. } \
  66. else \
  67. erts_lc_check_exact(NULL, 0); \
  68. ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
  69. } while (0)
  70. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
  71. if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
  72. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
  73. if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
  74. # else
  75. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  76. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  77. # define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
  78. # endif
  79. #else
  80. # define PROCESS_MAIN_CHK_LOCKS(P)
  81. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  82. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  83. #endif
  84. /*
  85. * Define macros for deep checking of terms.
  86. */
  87. #if defined(HARDDEBUG)
  88. # define CHECK_TERM(T) size_object(T)
  89. # define CHECK_ARGS(PC) \
  90. do { \
  91. int i_; \
  92. int Arity_ = PC[-1]; \
  93. if (Arity_ > 0) { \
  94. CHECK_TERM(r(0)); \
  95. } \
  96. for (i_ = 1; i_ < Arity_; i_++) { \
  97. CHECK_TERM(x(i_)); \
  98. } \
  99. } while (0)
  100. #else
  101. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  102. # define CHECK_ARGS(T)
  103. #endif
  104. #ifndef MAX
  105. #define MAX(x, y) (((x) > (y)) ? (x) : (y))
  106. #endif
  107. #define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
  108. #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
  109. /*
  110. * We reuse some of the fields in the save area in the process structure.
  111. * This is safe to do, since this space is only actively used when
  112. * the process is switched out.
  113. */
  114. #define REDS_IN(p) ((p)->def_arg_reg[5])
  115. /*
  116. * Add a byte offset to a pointer to Eterm. This is useful when the
  117. * loader has precalculated a byte offset.
  118. */
  119. #define ADD_BYTE_OFFSET(ptr, offset) \
  120. ((Eterm *) (((unsigned char *)ptr) + (offset)))
  121. /* We don't check the range if an ordinary switch is used */
  122. #ifdef NO_JUMP_TABLE
  123. #define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
  124. #else
  125. #define VALID_INSTR(IP) \
  126. ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
  127. (SWord)(IP) < (SWord)LabelAddr(end_emulator_loop))
  128. #endif /* NO_JUMP_TABLE */
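/* Illustrative aside (not part of beam_emu.c): in the threaded build,
 * VALID_INSTR can only check that a word lies between the first and last
 * labels of the emulator loop, i.e. that it is at least plausibly a label
 * address. A standalone sketch of that kind of range check (names invented;
 * label ordering in memory is compiler-dependent): */
#if 0
#include <assert.h>

static const char *loop_start, *loop_end;   /* set from boundary labels */

static void assert_plausible_target(const void *ip)
{
    assert(loop_start <= (const char *)ip && (const char *)ip < loop_end);
}
#endif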
  129. #define SET_CP(p, ip) \
  130. ASSERT(VALID_INSTR(*(ip))); \
  131. (p)->cp = (ip)
  132. #define SET_I(ip) \
  133. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  134. I = (ip)
  135. #define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
  136. /*
  137. * Store a result into a register given a destination descriptor.
  138. */
  139. #define StoreResult(Result, DestDesc) \
  140. do { \
  141. Eterm stb_reg; \
  142. stb_reg = (DestDesc); \
  143. CHECK_TERM(Result); \
  144. switch (beam_reg_tag(stb_reg)) { \
  145. case R_REG_DEF: \
  146. r(0) = (Result); break; \
  147. case X_REG_DEF: \
  148. xb(x_reg_offset(stb_reg)) = (Result); break; \
  149. default: \
  150. yb(y_reg_offset(stb_reg)) = (Result); break; \
  151. } \
  152. } while (0)
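/* Illustrative aside (not part of beam_emu.c): a destination descriptor is
 * a small tagged word saying whether a result goes to r(0), an x register,
 * or a y (stack) slot, exactly the three-way switch above. A standalone
 * sketch of such a tag-plus-offset encoding, with invented tag values: */
#if 0
#include <stdio.h>

enum { R_REG = 0, X_REG = 1, Y_REG = 2 };        /* invented tags */
#define MAKE_DESC(tag, n) (((n) << 2) | (tag))   /* offset in high bits */
#define DESC_TAG(d)       ((d) & 3)
#define DESC_OFF(d)       ((d) >> 2)

int main(void)
{
    long r0 = 0, x[8] = {0}, y[8] = {0};
    unsigned desc = MAKE_DESC(X_REG, 3);         /* "store into x(3)" */
    long result = 42;

    switch (DESC_TAG(desc)) {
    case R_REG: r0 = result; break;
    case X_REG: x[DESC_OFF(desc)] = result; break;
    default:    y[DESC_OFF(desc)] = result; break;
    }
    printf("x(3) = %ld r0 = %ld\n", x[3], r0);
    return 0;
}
#endif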
  153. #define StoreSimpleDest(Src, Dest) Dest = (Src)
  154. /*
  155. * Store a result into a register and execute the next instruction.
  156. * Dst points to the word with a destination descriptor, which MUST
  157. * be just before the next instruction.
  158. */
  159. #define StoreBifResult(Dst, Result) \
  160. do { \
  161. BeamInstr* stb_next; \
  162. Eterm stb_reg; \
  163. stb_reg = Arg(Dst); \
  164. I += (Dst) + 2; \
  165. stb_next = (BeamInstr *) *I; \
  166. CHECK_TERM(Result); \
  167. switch (beam_reg_tag(stb_reg)) { \
  168. case R_REG_DEF: \
  169. r(0) = (Result); Goto(stb_next); \
  170. case X_REG_DEF: \
  171. xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  172. default: \
  173. yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  174. } \
  175. } while (0)
  176. #define ClauseFail() goto lb_jump_f
  177. #define SAVE_CP(X) \
  178. do { \
  179. *(X) = make_cp(c_p->cp); \
  180. c_p->cp = 0; \
  181. } while(0)
  182. #define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X)))
  183. #define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
  184. /*
  185. * Special Beam instructions.
  186. */
  187. BeamInstr beam_apply[2];
  188. BeamInstr beam_exit[1];
  189. BeamInstr beam_continue_exit[1];
  190. BeamInstr* em_call_error_handler;
  191. BeamInstr* em_apply_bif;
  192. BeamInstr* em_call_traced_function;
  193. /* NOTE These should be the only variables containing trace instructions.
  194. ** Sometimes tests are for the instruction value, and sometimes
  195. ** for the referring variable (one of these), and rogue references
  196. ** will most likely cause chaos.
  197. */
  198. BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  199. BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
  200. BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  201. BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
  202. /*
  203. * All Beam instructions in numerical order.
  204. */
  205. #ifndef NO_JUMP_TABLE
  206. void** beam_ops;
  207. #endif
  208. #ifndef ERTS_SMP /* Not supported with smp emulator */
  209. extern int count_instructions;
  210. #endif
  211. #if defined(HYBRID)
  212. #define SWAPIN \
  213. g_htop = global_htop; \
  214. g_hend = global_hend; \
  215. HTOP = HEAP_TOP(c_p); \
  216. E = c_p->stop
  217. #define SWAPOUT \
  218. global_htop = g_htop; \
  219. global_hend = g_hend; \
  220. HEAP_TOP(c_p) = HTOP; \
  221. c_p->stop = E
  222. #else
  223. #define SWAPIN \
  224. HTOP = HEAP_TOP(c_p); \
  225. E = c_p->stop
  226. #define SWAPOUT \
  227. HEAP_TOP(c_p) = HTOP; \
  228. c_p->stop = E
  229. /*
  230. * Use LIGHT_SWAPOUT when the called function
  231. * will call HeapOnlyAlloc() (and never HAlloc()).
  232. */
  233. #ifdef DEBUG
  234. # /* The stack pointer is used in an assertion. */
  235. # define LIGHT_SWAPOUT SWAPOUT
  236. #else
  237. # define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
  238. #endif
  239. /*
  240. * Use LIGHT_SWAPIN when we know that c_p->stop cannot
  241. * have been updated (i.e. if there cannot have been
  242. * a garbage-collection).
  243. */
  244. #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
  245. #endif
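/* Illustrative aside (not part of beam_emu.c): SWAPIN/SWAPOUT exist because
 * the hot loop keeps the heap top and stack pointer in locals (ideally in
 * machine registers), syncing with the process structure only around calls
 * that may move them. A standalone sketch of the pattern, with an invented
 * process struct: */
#if 0
typedef struct { long *htop; long *stop; } Proc;

static void maybe_gc(Proc *p) { (void)p; /* may move htop/stop */ }

static void run(Proc *p)
{
    long *HTOP = p->htop;          /* SWAPIN: cache into locals */
    long *E    = p->stop;

    *HTOP++ = 123;                 /* fast path touches locals only */

    p->htop = HTOP; p->stop = E;   /* SWAPOUT before a GC-capable call */
    maybe_gc(p);
    HTOP = p->htop; E = p->stop;   /* SWAPIN afterwards */
    (void)E;
}
#endif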
  246. #ifdef FORCE_HEAP_FRAGS
  247. # define HEAP_SPACE_VERIFIED(Words) do { \
  248. c_p->space_verified = (Words); \
  249. c_p->space_verified_from = HTOP; \
  250. }while(0)
  251. #else
  252. # define HEAP_SPACE_VERIFIED(Words) ((void)0)
  253. #endif
  254. #define PRE_BIF_SWAPOUT(P) \
  255. HEAP_TOP((P)) = HTOP; \
  256. (P)->stop = E; \
  257. PROCESS_MAIN_CHK_LOCKS((P)); \
  258. ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
  259. #if defined(HYBRID)
  260. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  261. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  262. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  263. } \
  264. SWAPIN
  265. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  266. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  267. _regs[0] = r(0); \
  268. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  269. r(0) = _regs[0]; \
  270. } \
  271. SWAPIN
  272. #else
  273. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  274. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  275. PROCESS_MAIN_CHK_LOCKS((_p)); \
  276. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  277. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  278. E = (_p)->stop; \
  279. } \
  280. HTOP = HEAP_TOP((_p))
  281. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  282. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  283. PROCESS_MAIN_CHK_LOCKS((_p)); \
  284. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  285. _regs[0] = r(0); \
  286. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  287. r(0) = _regs[0]; \
  288. E = (_p)->stop; \
  289. } \
  290. HTOP = HEAP_TOP((_p))
  291. #endif
  292. #define db(N) (N)
  293. #define tb(N) (N)
  294. #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
  295. #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
  296. #define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  297. #define x(N) reg[N]
  298. #define y(N) E[N]
  299. #define r(N) x##N
  300. /*
  301. * Makes sure that there are StackNeed + HeapNeed + 1 words available
  302. * on the combined heap/stack segment, then allocates StackNeed + 1
  303. * words on the stack and saves CP.
  304. *
  305. * M is the number of live registers to preserve during garbage collection.
  306. */
  307. #define AH(StackNeed, HeapNeed, M) \
  308. do { \
  309. int needed; \
  310. needed = (StackNeed) + 1; \
  311. if (E - HTOP < (needed + (HeapNeed))) { \
  312. SWAPOUT; \
  313. reg[0] = r(0); \
  314. PROCESS_MAIN_CHK_LOCKS(c_p); \
  315. FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
  316. PROCESS_MAIN_CHK_LOCKS(c_p); \
  317. r(0) = reg[0]; \
  318. SWAPIN; \
  319. } \
  320. E -= needed; \
  321. SAVE_CP(E); \
  322. } while (0)
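/* Illustrative aside (not part of beam_emu.c): the stack grows downward
 * from the top of the same block the heap grows upward in, so the free
 * space is E - HTOP and a single comparison covers both the stack and the
 * heap need. A standalone sketch of that check (simplified; the real macro
 * also saves CP and garbage-collects on failure): */
#if 0
#include <stdio.h>

int main(void)
{
    long block[64];
    long *HTOP = block;                 /* heap top, grows up */
    long *E    = block + 64;            /* stack pointer, grows down */
    int stack_need = 3, heap_need = 5;

    if (E - HTOP < stack_need + heap_need) {
        /* would garbage-collect here */
    }
    E -= stack_need;                    /* allocate stack slots */
    HTOP += heap_need;                  /* allocate heap words */
    printf("free words: %ld\n", (long)(E - HTOP));
    return 0;
}
#endif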
  323. #define Allocate(Ns, Live) AH(Ns, 0, Live)
  324. #define AllocateZero(Ns, Live) \
  325. do { Eterm* ptr; \
  326. int i = (Ns); \
  327. AH(i, 0, Live); \
  328. for (ptr = E + i; ptr > E; ptr--) { \
  329. make_blank(*ptr); \
  330. } \
  331. } while (0)
  332. #define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
  333. #define AllocateHeapZero(Ns, Nh, Live) \
  334. do { Eterm* ptr; \
  335. int i = (Ns); \
  336. AH(i, Nh, Live); \
  337. for (ptr = E + i; ptr > E; ptr--) { \
  338. make_blank(*ptr); \
  339. } \
  340. } while (0)
  341. #define AllocateInit(Ns, Live, Y) \
  342. do { AH(Ns, 0, Live); make_blank(Y); } while (0)
  343. /*
  344. * Like the AH macro, but allocates no additional heap space.
  345. */
  346. #define A(StackNeed, M) AH(StackNeed, 0, M)
  347. #define D(N) \
  348. RESTORE_CP(E); \
  349. E += (N) + 1;
  350. #define TestBinVHeap(VNh, Nh, Live) \
  351. do { \
  352. unsigned need = (Nh); \
  353. if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
  354. SWAPOUT; \
  355. reg[0] = r(0); \
  356. PROCESS_MAIN_CHK_LOCKS(c_p); \
  357. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  358. PROCESS_MAIN_CHK_LOCKS(c_p); \
  359. r(0) = reg[0]; \
  360. SWAPIN; \
  361. } \
  362. HEAP_SPACE_VERIFIED(need); \
  363. } while (0)
  364. /*
  365. * Check if Nh words of heap are available; if not, do a garbage collection.
  366. * Live is the number of active argument registers to be preserved.
  367. */
  368. #define TestHeap(Nh, Live) \
  369. do { \
  370. unsigned need = (Nh); \
  371. if (E - HTOP < need) { \
  372. SWAPOUT; \
  373. reg[0] = r(0); \
  374. PROCESS_MAIN_CHK_LOCKS(c_p); \
  375. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  376. PROCESS_MAIN_CHK_LOCKS(c_p); \
  377. r(0) = reg[0]; \
  378. SWAPIN; \
  379. } \
  380. HEAP_SPACE_VERIFIED(need); \
  381. } while (0)
  382. /*
  383. * Check if Nh words of heap are available; if not, do a garbage collection.
  384. * Live is the number of active argument registers to be preserved.
  385. * Takes special care to preserve Extra if a garbage collection occurs.
  386. */
  387. #define TestHeapPreserve(Nh, Live, Extra) \
  388. do { \
  389. unsigned need = (Nh); \
  390. if (E - HTOP < need) { \
  391. SWAPOUT; \
  392. reg[0] = r(0); \
  393. reg[Live] = Extra; \
  394. PROCESS_MAIN_CHK_LOCKS(c_p); \
  395. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
  396. PROCESS_MAIN_CHK_LOCKS(c_p); \
  397. if (Live > 0) { \
  398. r(0) = reg[0]; \
  399. } \
  400. Extra = reg[Live]; \
  401. SWAPIN; \
  402. } \
  403. HEAP_SPACE_VERIFIED(need); \
  404. } while (0)
  405. #ifdef HYBRID
  406. #ifdef INCREMENTAL
  407. #define TestGlobalHeap(Nh, Live, hp) \
  408. do { \
  409. unsigned need = (Nh); \
  410. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  411. SWAPOUT; \
  412. reg[0] = r(0); \
  413. FCALLS -= need; \
  414. (hp) = IncAlloc(c_p,need,reg,(Live)); \
  415. r(0) = reg[0]; \
  416. SWAPIN; \
  417. } while (0)
  418. #else
  419. #define TestGlobalHeap(Nh, Live, hp) \
  420. do { \
  421. unsigned need = (Nh); \
  422. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  423. if (g_hend - g_htop < need) { \
  424. SWAPOUT; \
  425. reg[0] = r(0); \
  426. FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
  427. r(0) = reg[0]; \
  428. SWAPIN; \
  429. } \
  430. (hp) = global_htop; \
  431. } while (0)
  432. #endif
  433. #endif /* HYBRID */
  434. #define Init(N) make_blank(yb(N))
  435. #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
  436. #define Init3(Y1, Y2, Y3) \
  437. do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
  438. #define MakeFun(FunP, NumFree) \
  439. do { \
  440. SWAPOUT; \
  441. reg[0] = r(0); \
  442. r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
  443. SWAPIN; \
  444. } while (0)
  445. /*
  446. * Check that we haven't used up our reductions, and jump to the function
  447. * pointed to by the I register. If we are out of reductions, do a context switch.
  448. */
  449. #define DispatchMacro() \
  450. do { \
  451. BeamInstr* dis_next; \
  452. dis_next = (BeamInstr *) *I; \
  453. CHECK_ARGS(I); \
  454. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  455. FCALLS--; \
  456. Goto(dis_next); \
  457. } else { \
  458. goto context_switch; \
  459. } \
  460. } while (0)
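/* Illustrative aside (not part of beam_emu.c): every dispatch costs one
 * reduction, and when the budget runs out the process is switched out,
 * which is what stops one process from monopolizing a scheduler. A
 * standalone sketch of that accounting (simplified; the real test also
 * folds in neg_o_reds for the call-saving case): */
#if 0
#include <stdio.h>

int main(void)
{
    int FCALLS = 2000;                 /* reduction budget for this slice */
    long executed = 0;

    for (;;) {
        if (FCALLS <= 0) {
            printf("after %ld instructions: context switch\n", executed);
            break;                     /* cf. goto context_switch */
        }
        FCALLS--;
        executed++;                    /* "execute" one instruction */
    }
    return 0;
}
#endif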
  461. #define DispatchMacroFun() \
  462. do { \
  463. BeamInstr* dis_next; \
  464. dis_next = (BeamInstr *) *I; \
  465. CHECK_ARGS(I); \
  466. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  467. FCALLS--; \
  468. Goto(dis_next); \
  469. } else { \
  470. goto context_switch_fun; \
  471. } \
  472. } while (0)
  473. #define DispatchMacrox() \
  474. do { \
  475. if (FCALLS > 0) { \
  476. Eterm* dis_next; \
  477. SET_I(((Export *) Arg(0))->address); \
  478. dis_next = (Eterm *) *I; \
  479. FCALLS--; \
  480. CHECK_ARGS(I); \
  481. Goto(dis_next); \
  482. } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
  483. && FCALLS > neg_o_reds) { \
  484. goto save_calls1; \
  485. } else { \
  486. SET_I(((Export *) Arg(0))->address); \
  487. CHECK_ARGS(I); \
  488. goto context_switch; \
  489. } \
  490. } while (0)
  491. #ifdef DEBUG
  492. /*
  493. * To simplify breakpoint setting, put the code in one place only and jump to it.
  494. */
  495. # define Dispatch() goto do_dispatch
  496. # define Dispatchx() goto do_dispatchx
  497. # define Dispatchfun() goto do_dispatchfun
  498. #else
  499. /*
  500. * Inline for speed.
  501. */
  502. # define Dispatch() DispatchMacro()
  503. # define Dispatchx() DispatchMacrox()
  504. # define Dispatchfun() DispatchMacroFun()
  505. #endif
  506. #define Self(R) R = c_p->id
  507. #define Node(R) R = erts_this_node->sysname
  508. #define Arg(N) I[(N)+1]
  509. #define Next(N) \
  510. I += (N) + 1; \
  511. ASSERT(VALID_INSTR(*I)); \
  512. Goto(*I)
  513. #define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0)
  514. #define NextPF(N, Dst) \
  515. I += N + 1; \
  516. ASSERT(VALID_INSTR(Dst)); \
  517. Goto(Dst)
  518. #define GetR(pos, tr) \
  519. do { \
  520. tr = Arg(pos); \
  521. switch (beam_reg_tag(tr)) { \
  522. case R_REG_DEF: tr = r(0); break; \
  523. case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
  524. case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
  525. } \
  526. CHECK_TERM(tr); \
  527. } while (0)
  528. #define GetArg1(N, Dst) GetR((N), Dst)
  529. #define GetArg2(N, Dst1, Dst2) \
  530. do { \
  531. GetR(N, Dst1); \
  532. GetR((N)+1, Dst2); \
  533. } while (0)
  534. #define PutList(H, T, Dst, Store) \
  535. do { \
  536. HTOP[0] = (H); HTOP[1] = (T); \
  537. Store(make_list(HTOP), Dst); \
  538. HTOP += 2; \
  539. } while (0)
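/* Illustrative aside (not part of beam_emu.c): PutList builds a cons cell
 * in place at the heap top: head and tail words, then a tagged pointer to
 * them. A standalone sketch with an invented one-bit list tag: */
#if 0
#include <stdio.h>

#define LIST_TAG 1UL
#define make_list(p) ((unsigned long)(p) | LIST_TAG)
#define list_ptr(t)  ((unsigned long *)((t) & ~LIST_TAG))

int main(void)
{
    unsigned long heap[8], *HTOP = heap;
    unsigned long nil = 0, cell;

    HTOP[0] = 42;                      /* head */
    HTOP[1] = nil;                     /* tail */
    cell = make_list(HTOP);
    HTOP += 2;

    printf("head = %lu\n", list_ptr(cell)[0]);
    return 0;
}
#endif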
  540. #define Move(Src, Dst, Store) \
  541. do { \
  542. Eterm term = (Src); \
  543. Store(term, Dst); \
  544. } while (0)
  545. #define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
  546. #define MoveGenDest(src, dstp) \
  547. if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
  548. #define MoveReturn(Src, Dest) \
  549. (Dest) = (Src); \
  550. I = c_p->cp; \
  551. ASSERT(VALID_INSTR(*c_p->cp)); \
  552. c_p->cp = 0; \
  553. CHECK_TERM(r(0)); \
  554. Goto(*I)
  555. #define DeallocateReturn(Deallocate) \
  556. do { \
  557. int words_to_pop = (Deallocate); \
  558. SET_I((BeamInstr *) cp_val(*E)); \
  559. E = ADD_BYTE_OFFSET(E, words_to_pop); \
  560. CHECK_TERM(r(0)); \
  561. Goto(*I); \
  562. } while (0)
  563. #define MoveDeallocateReturn(Src, Dest, Deallocate) \
  564. (Dest) = (Src); \
  565. DeallocateReturn(Deallocate)
  566. #define MoveCall(Src, Dest, CallDest, Size) \
  567. (Dest) = (Src); \
  568. SET_CP(c_p, I+Size+1); \
  569. SET_I((BeamInstr *) CallDest); \
  570. Dispatch();
  571. #define MoveCallLast(Src, Dest, CallDest, Deallocate) \
  572. (Dest) = (Src); \
  573. RESTORE_CP(E); \
  574. E = ADD_BYTE_OFFSET(E, (Deallocate)); \
  575. SET_I((BeamInstr *) CallDest); \
  576. Dispatch();
  577. #define MoveCallOnly(Src, Dest, CallDest) \
  578. (Dest) = (Src); \
  579. SET_I((BeamInstr *) CallDest); \
  580. Dispatch();
  581. #define GetList(Src, H, T) do { \
  582. Eterm* tmp_ptr = list_val(Src); \
  583. H = CAR(tmp_ptr); \
  584. T = CDR(tmp_ptr); } while (0)
  585. #define GetTupleElement(Src, Element, Dest) \
  586. do { \
  587. tmp_arg1 = (Eterm) COMPRESS_POINTER(((unsigned char *) tuple_val(Src)) + \
  588. (Element)); \
  589. (Dest) = (*(Eterm *) EXPAND_POINTER(tmp_arg1)); \
  590. } while (0)
  591. #define ExtractNextElement(Dest) \
  592. tmp_arg1 += sizeof(Eterm); \
  593. (Dest) = (* (Eterm *) (((unsigned char *) EXPAND_POINTER(tmp_arg1))))
  594. #define ExtractNextElement2(Dest) \
  595. do { \
  596. Eterm* ene_dstp = &(Dest); \
  597. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  598. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  599. tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
  600. } while (0)
  601. #define ExtractNextElement3(Dest) \
  602. do { \
  603. Eterm* ene_dstp = &(Dest); \
  604. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  605. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  606. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  607. tmp_arg1 += 3*sizeof(Eterm); \
  608. } while (0)
  609. #define ExtractNextElement4(Dest) \
  610. do { \
  611. Eterm* ene_dstp = &(Dest); \
  612. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  613. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  614. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  615. ene_dstp[3] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[4]; \
  616. tmp_arg1 += 4*sizeof(Eterm); \
  617. } while (0)
  618. #define ExtractElement(Element, Dest) \
  619. do { \
  620. tmp_arg1 += (Element); \
  621. (Dest) = (* (Eterm *) EXPAND_POINTER(tmp_arg1)); \
  622. } while (0)
  623. #define PutTuple(Arity, Src, Dest) \
  624. ASSERT(is_arity_value(Arity)); \
  625. Dest = make_tuple(HTOP); \
  626. HTOP[0] = (Arity); \
  627. HTOP[1] = (Src); \
  628. HTOP += 2
  629. #define Put(Word) *HTOP++ = (Word)
  630. #define EqualImmed(X, Y, Action) if (X != Y) { Action; }
  631. #define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
  632. #define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
  633. #define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
  634. #define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
  635. #define IsIntegerAllocate(Src, Need, Alive, Fail) \
  636. if (is_not_integer(Src)) { Fail; } \
  637. A(Need, Alive)
  638. #define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
  639. #define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
  640. #define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
  641. #define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
  642. if (is_not_list(Src)) { Fail; } \
  643. A(Need, Alive)
  644. #define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
  645. if (is_not_list(Src)) { Fail; } \
  646. TestHeap(Need, Alive)
  647. #define IsTuple(X, Action) if (is_not_tuple(X)) Action
  648. #define IsArity(Pointer, Arity, Fail) \
  649. if (*(Eterm *) \
  650. EXPAND_POINTER(tmp_arg1 = (Eterm) \
  651. COMPRESS_POINTER(tuple_val(Pointer))) != (Arity)) \
  652. { \
  653. Fail; \
  654. }
  655. #define IsFunction(X, Action) \
  656. do { \
  657. if ( !(is_any_fun(X)) ) { \
  658. Action; \
  659. } \
  660. } while (0)
  661. #define IsFunction2(F, A, Action) \
  662. do { \
  663. if (is_function_2(c_p, F, A) != am_true ) {\
  664. Action; \
  665. } \
  666. } while (0)
  667. #define IsTupleOfArity(Src, Arity, Fail) \
  668. do { \
  669. if (is_not_tuple(Src) || \
  670. *(Eterm *) \
  671. EXPAND_POINTER(tmp_arg1 = \
  672. (Eterm) COMPRESS_POINTER(tuple_val(Src))) != Arity) { \
  673. Fail; \
  674. } \
  675. } while (0)
  676. #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
  677. #define IsBinary(Src, Fail) \
  678. if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
  679. #define IsBitstring(Src, Fail) \
  680. if (is_not_binary(Src)) { Fail; }
  681. #if defined(ARCH_64) && !HALFWORD_HEAP
  682. #define BsSafeMul(A, B, Fail, Target) \
  683. do { Uint64 _res = (A) * (B); \
  684. if (_res / B != A) { Fail; } \
  685. Target = _res; \
  686. } while (0)
  687. #else
  688. #define BsSafeMul(A, B, Fail, Target) \
  689. do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
  690. if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
  691. Target = _res; \
  692. } while (0)
  693. #endif
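/* Illustrative aside (not part of beam_emu.c): both variants above guard
 * the bit-size multiplication against overflow, either by dividing the
 * product back (64-bit) or by checking the high half of a wider product
 * (32-bit). A standalone sketch of the divide-back check: */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Returns 1 and stores a*b in *out if it did not overflow, else 0. */
static int safe_mul(uint64_t a, uint64_t b, uint64_t *out)
{
    uint64_t r = a * b;
    if (b != 0 && r / b != a)
        return 0;                      /* overflow: r/b no longer gives a */
    *out = r;
    return 1;
}

int main(void)
{
    uint64_t r;
    printf("%d\n", safe_mul(UINT64_MAX, 2, &r)); /* 0: overflows */
    printf("%d\n", safe_mul(123, 456, &r));      /* 1: fits */
    return 0;
}
#endif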
  694. #define BsGetFieldSize(Bits, Unit, Fail, Target) \
  695. do { \
  696. Sint _signed_size; Uint _uint_size; \
  697. if (is_small(Bits)) { \
  698. _signed_size = signed_val(Bits); \
  699. if (_signed_size < 0) { Fail; } \
  700. _uint_size = (Uint) _signed_size; \
  701. } else { \
  702. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  703. _uint_size = temp_bits; \
  704. } \
  705. BsSafeMul(_uint_size, Unit, Fail, Target); \
  706. } while (0)
  707. #define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
  708. do { \
  709. Sint _signed_size; Uint _uint_size; \
  710. if (is_small(Bits)) { \
  711. _signed_size = signed_val(Bits); \
  712. if (_signed_size < 0) { Fail; } \
  713. _uint_size = (Uint) _signed_size; \
  714. } else { \
  715. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  716. _uint_size = (Uint) temp_bits; \
  717. } \
  718. Target = _uint_size * Unit; \
  719. } while (0)
  720. #define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  721. do { \
  722. ErlBinMatchBuffer *_mb; \
  723. Eterm _result; Sint _size; \
  724. if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
  725. _size *= ((Flags) >> 3); \
  726. TestHeap(FLOAT_SIZE_OBJECT, Live); \
  727. _mb = ms_matchbuffer(Ms); \
  728. LIGHT_SWAPOUT; \
  729. _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
  730. LIGHT_SWAPIN; \
  731. HEAP_SPACE_VERIFIED(0); \
  732. if (is_non_value(_result)) { Fail; } \
  733. else { Store(_result, Dst); } \
  734. } while (0)
  735. #define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  736. do { \
  737. ErlBinMatchBuffer *_mb; \
  738. Eterm _result; \
  739. TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
  740. _mb = ms_matchbuffer(Ms); \
  741. LIGHT_SWAPOUT; \
  742. _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
  743. LIGHT_SWAPIN; \
  744. HEAP_SPACE_VERIFIED(0); \
  745. if (is_non_value(_result)) { Fail; } \
  746. else { Store(_result, Dst); } \
  747. } while (0)
  748. #define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  749. do { \
  750. ErlBinMatchBuffer *_mb; \
  751. Eterm _result; Uint _size; \
  752. BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
  753. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  754. _mb = ms_matchbuffer(Ms); \
  755. LIGHT_SWAPOUT; \
  756. _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
  757. LIGHT_SWAPIN; \
  758. HEAP_SPACE_VERIFIED(0); \
  759. if (is_non_value(_result)) { Fail; } \
  760. else { Store(_result, Dst); } \
  761. } while (0)
  762. #define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
  763. do { \
  764. ErlBinMatchBuffer *_mb; \
  765. Eterm _result; \
  766. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  767. _mb = ms_matchbuffer(Ms); \
  768. if (((_mb->size - _mb->offset) % Unit) == 0) { \
  769. LIGHT_SWAPOUT; \
  770. _result = erts_bs_get_binary_all_2(c_p, _mb); \
  771. LIGHT_SWAPIN; \
  772. HEAP_SPACE_VERIFIED(0); \
  773. ASSERT(is_value(_result)); \
  774. Store(_result, Dst); \
  775. } else { \
  776. HEAP_SPACE_VERIFIED(0); \
  777. Fail; } \
  778. } while (0)
  779. #define BsSkipBits2(Ms, Bits, Unit, Fail) \
  780. do { \
  781. ErlBinMatchBuffer *_mb; \
  782. size_t new_offset; \
  783. Uint _size; \
  784. _mb = ms_matchbuffer(Ms); \
  785. BsGetFieldSize(Bits, Unit, Fail, _size); \
  786. new_offset = _mb->offset + _size; \
  787. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  788. else { Fail; } \
  789. } while (0)
  790. #define BsSkipBitsAll2(Ms, Unit, Fail) \
  791. do { \
  792. ErlBinMatchBuffer *_mb; \
  793. _mb = ms_matchbuffer(Ms); \
  794. if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
  795. else { Fail; } \
  796. } while (0)
  797. #define BsSkipBitsImm2(Ms, Bits, Fail) \
  798. do { \
  799. ErlBinMatchBuffer *_mb; \
  800. size_t new_offset; \
  801. _mb = ms_matchbuffer(Ms); \
  802. new_offset = _mb->offset + (Bits); \
  803. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  804. else { Fail; } \
  805. } while (0)
  806. #define NewBsPutIntegerImm(Sz, Flags, Src) \
  807. do { \
  808. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
  809. } while (0)
  810. #define NewBsPutInteger(Sz, Flags, Src) \
  811. do { \
  812. Sint _size; \
  813. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  814. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
  815. { goto badarg; } \
  816. } while (0)
  817. #define NewBsPutFloatImm(Sz, Flags, Src) \
  818. do { \
  819. if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
  820. } while (0)
  821. #define NewBsPutFloat(Sz, Flags, Src) \
  822. do { \
  823. Sint _size; \
  824. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  825. if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
  826. } while (0)
  827. #define NewBsPutBinary(Sz, Flags, Src) \
  828. do { \
  829. Sint _size; \
  830. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  831. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
  832. } while (0)
  833. #define NewBsPutBinaryImm(Sz, Src) \
  834. do { \
  835. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
  836. } while (0)
  837. #define NewBsPutBinaryAll(Src, Unit) \
  838. do { \
  839. if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
  840. } while (0)
  841. #define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
  842. #define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
  843. #define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
  844. static BifFunction translate_gc_bif(void* gcf);
  845. static BeamInstr* handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf);
  846. static BeamInstr* next_catch(Process* c_p, Eterm *reg);
  847. static void terminate_proc(Process* c_p, Eterm Value);
  848. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  849. static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
  850. BifFunction bf, Eterm args);
  851. static struct StackTrace * get_trace_from_exc(Eterm exc);
  852. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  853. static Eterm call_error_handler(Process* p, BeamInstr* ip, Eterm* reg);
  854. static Eterm call_breakpoint_handler(Process* p, BeamInstr* fi, Eterm* reg);
  855. static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity);
  856. static BeamInstr* apply(Process* p, Eterm module, Eterm function,
  857. Eterm args, Eterm* reg);
  858. static int hibernate(Process* c_p, Eterm module, Eterm function,
  859. Eterm args, Eterm* reg);
  860. static BeamInstr* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
  861. static BeamInstr* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
  862. static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free);
  863. #if defined(VXWORKS)
  864. static int init_done;
  865. #endif
  866. void
  867. init_emulator(void)
  868. {
  869. #if defined(VXWORKS)
  870. init_done = 0;
  871. #endif
  872. process_main();
  873. }
  874. /*
  875. * On certain platforms, make sure that the main variables really are placed
  876. * in registers.
  877. */
  878. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  879. # define REG_x0 asm("%l0")
  880. # define REG_xregs asm("%l1")
  881. # define REG_htop asm("%l2")
  882. # define REG_stop asm("%l3")
  883. # define REG_I asm("%l4")
  884. # define REG_fcalls asm("%l5")
  885. # define REG_tmp_arg1 asm("%l6")
  886. # define REG_tmp_arg2 asm("%l7")
  887. #else
  888. # define REG_x0
  889. # define REG_xregs
  890. # define REG_htop
  891. # define REG_stop
  892. # define REG_I
  893. # define REG_fcalls
  894. # define REG_tmp_arg1
  895. # define REG_tmp_arg2
  896. #endif
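/* Illustrative aside (not part of beam_emu.c): the REG_* macros use GCC's
 * explicit register variable extension to pin the hottest VM state into
 * SPARC local registers; elsewhere they expand to nothing. A standalone
 * sketch of the construct (register name is machine-specific): */
#if 0
#if defined(__GNUC__) && defined(__sparc__)
# define PIN_REG asm("%l0")
#else
# define PIN_REG
#endif

static int sum(const int *v, int n)
{
    register int acc PIN_REG = 0;      /* kept in %l0 on SPARC */
    int i;
    for (i = 0; i < n; i++)
        acc += v[i];
    return acc;
}
#endif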
  897. /*
  898. * process_main() is called twice:
  899. * The first call performs some initialisation, including exporting
  900. * the instructions' C labels to the loader.
  901. * The second call starts execution of BEAM code. This call never returns.
  902. */
  903. void process_main(void)
  904. {
  905. #if !defined(VXWORKS)
  906. static int init_done = 0;
  907. #endif
  908. Process* c_p = NULL;
  909. int reds_used;
  910. #ifdef DEBUG
  911. Eterm pid;
  912. #endif
  913. /*
  914. * X register zero; also called r(0)
  915. */
  916. register Eterm x0 REG_x0 = NIL;
  917. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
  918. * in all other cases x0 is used.
  919. */
  920. register Eterm* reg REG_xregs = NULL;
  921. /*
  922. * Top of heap (next free location); grows upwards.
  923. */
  924. register Eterm* HTOP REG_htop = NULL;
  925. #ifdef HYBRID
  926. Eterm *g_htop;
  927. Eterm *g_hend;
  928. #endif
  929. /* Stack pointer. Grows downwards; points
  930. * to last item pushed (normally a saved
  931. * continuation pointer).
  932. */
  933. register Eterm* E REG_stop = NULL;
  934. /*
  935. * Pointer to next threaded instruction.
  936. */
  937. register BeamInstr *I REG_I = NULL;
  938. /* Number of reductions left. This function
  939. * returns to the scheduler when FCALLS reaches zero.
  940. */
  941. register Sint FCALLS REG_fcalls = 0;
  942. /*
  943. * Temporaries used for picking up arguments for instructions.
  944. */
  945. register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
  946. register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
  947. #if HEAP_ON_C_STACK
  948. Eterm tmp_big[2]; /* Temporary buffer for small bignums if HEAP_ON_C_STACK. */
  949. #else
  950. Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
  951. #endif
  952. #ifndef ERTS_SMP
  953. #if !HALFWORD_HEAP
  954. static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
  955. /* X registers -- not used directly, but
  956. * through 'reg', because using it directly
  957. * needs two instructions on a SPARC,
  958. * while using it through reg needs only
  959. * one.
  960. */
  961. #endif
  962. /*
  963. * Floating point registers.
  964. */
  965. static FloatDef freg[MAX_REG];
  966. #else
  967. /* X registers and floating-point registers are located in
  968. * scheduler-specific data.
  969. */
  970. register FloatDef *freg;
  971. #endif
  972. /*
  973. * For keeping the negative old value of 'reds' when call saving is active.
  974. */
  975. int neg_o_reds = 0;
  976. Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
  977. #ifndef NO_JUMP_TABLE
  978. static void* opcodes[] = { DEFINE_OPCODES };
  979. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  980. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  981. #endif
  982. #else
  983. int Go;
  984. #endif
  985. Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
  986. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  987. /*
  988. * Note: In this function, we attempt to place rarely executed code towards
  989. * the end of the function, in the hope that the cache hit rate will be better.
  990. * The initialization code is only run once, so it is at the very end.
  991. *
  992. * Note: c_p->arity must be set to reflect the number of useful terms in
  993. * c_p->arg_reg before calling the scheduler.
  994. */
  995. if (!init_done) {
  996. init_done = 1;
  997. goto init_emulator;
  998. }
  999. #ifndef ERTS_SMP
  1000. #if !HALFWORD_HEAP
  1001. reg = save_reg; /* XXX: probably wastes a register on x86 */
  1002. #else
  1003. /* Registers need to be heap allocated (correct memory range) for tracing to work */
  1004. reg = erts_alloc(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * sizeof(Eterm));
  1005. #endif
  1006. #endif
  1007. c_p = NULL;
  1008. reds_used = 0;
  1009. goto do_schedule1;
  1010. do_schedule:
  1011. reds_used = REDS_IN(c_p) - FCALLS;
  1012. do_schedule1:
  1013. PROCESS_MAIN_CHK_LOCKS(c_p);
  1014. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  1015. c_p = schedule(c_p, reds_used);
  1016. #ifdef DEBUG
  1017. pid = c_p->id;
  1018. #endif
  1019. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  1020. PROCESS_MAIN_CHK_LOCKS(c_p);
  1021. #ifdef ERTS_SMP
  1022. reg = c_p->scheduler_data->save_reg;
  1023. freg = c_p->scheduler_data->freg;
  1024. #endif
  1025. #if !HEAP_ON_C_STACK
  1026. tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
  1027. #endif
  1028. ERL_BITS_RELOAD_STATEP(c_p);
  1029. {
  1030. int reds;
  1031. Eterm* argp;
  1032. BeamInstr *next;
  1033. int i;
  1034. argp = c_p->arg_reg;
  1035. for (i = c_p->arity - 1; i > 0; i--) {
  1036. reg[i] = argp[i];
  1037. CHECK_TERM(reg[i]);
  1038. }
  1039. /*
  1040. * We put the original reduction count in the process structure, to reduce
  1041. * the code size (referencing a field in a struct through a pointer stored
  1042. * in a register gives smaller code than referencing a global variable).
  1043. */
  1044. SET_I(c_p->i);
  1045. reds = c_p->fcalls;
  1046. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
  1047. && (c_p->trace_flags & F_SENSITIVE) == 0) {
  1048. neg_o_reds = -reds;
  1049. FCALLS = REDS_IN(c_p) = 0;
  1050. } else {
  1051. neg_o_reds = 0;
  1052. FCALLS = REDS_IN(c_p) = reds;
  1053. }
  1054. next = (BeamInstr *) *I;
  1055. r(0) = c_p->arg_reg[0];
  1056. #ifdef HARDDEBUG
  1057. if (c_p->arity > 0) {
  1058. CHECK_TERM(r(0));
  1059. }
  1060. #endif
  1061. SWAPIN;
  1062. ASSERT(VALID_INSTR(next));
  1063. Goto(next);
  1064. }
  1065. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  1066. emulator_loop:
  1067. #endif
  1068. #ifdef NO_JUMP_TABLE
  1069. switch (Go) {
  1070. #endif
  1071. #include "beam_hot.h"
  1072. #define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
  1073. #define ARITH_FUNC(name) erts_gc_##name
  1074. OpCase(i_plus_jId):
  1075. {
  1076. Eterm result;
  1077. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1078. Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
  1079. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1080. if (MY_IS_SSMALL(i)) {
  1081. result = make_small(i);
  1082. STORE_ARITH_RESULT(result);
  1083. }
  1084. }
  1085. arith_func = ARITH_FUNC(mixed_plus);
  1086. goto do_big_arith2;
  1087. }
  1088. OpCase(i_minus_jId):
  1089. {
  1090. Eterm result;
  1091. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1092. Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
  1093. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1094. if (MY_IS_SSMALL(i)) {
  1095. result = make_small(i);
  1096. STORE_ARITH_RESULT(result);
  1097. }
  1098. }
  1099. arith_func = ARITH_FUNC(mixed_minus);
  1100. goto do_big_arith2;
  1101. }
  1102. OpCase(i_is_lt_f):
  1103. if (CMP_GE(tmp_arg1, tmp_arg2)) {
  1104. ClauseFail();
  1105. }
  1106. Next(1);
  1107. OpCase(i_is_ge_f):
  1108. if (CMP_LT(tmp_arg1, tmp_arg2)) {
  1109. ClauseFail();
  1110. }
  1111. Next(1);
  1112. OpCase(i_is_eq_f):
  1113. if (CMP_NE(tmp_arg1, tmp_arg2)) {
  1114. ClauseFail();
  1115. }
  1116. Next(1);
  1117. OpCase(i_is_ne_f):
  1118. if (CMP_EQ(tmp_arg1, tmp_arg2)) {
  1119. ClauseFail();
  1120. }
  1121. Next(1);
  1122. OpCase(i_is_eq_exact_f):
  1123. if (!EQ(tmp_arg1, tmp_arg2)) {
  1124. ClauseFail();
  1125. }
  1126. Next(1);
  1127. OpCase(i_move_call_only_fcr): {
  1128. r(0) = Arg(1);
  1129. }
  1130. /* FALL THROUGH */
  1131. OpCase(i_call_only_f): {
  1132. SET_I((BeamInstr *) Arg(0));
  1133. Dispatch();
  1134. }
  1135. OpCase(i_move_call_last_fPcr): {
  1136. r(0) = Arg(2);
  1137. }
  1138. /* FALL THROUGH */
  1139. OpCase(i_call_last_fP): {
  1140. RESTORE_CP(E);
  1141. E = ADD_BYTE_OFFSET(E, Arg(1));
  1142. SET_I((BeamInstr *) Arg(0));
  1143. Dispatch();
  1144. }
  1145. OpCase(i_move_call_crf): {
  1146. r(0) = Arg(0);
  1147. I++;
  1148. }
  1149. /* FALL THROUGH */
  1150. OpCase(i_call_f): {
  1151. SET_CP(c_p, I+2);
  1152. SET_I((BeamInstr *) Arg(0));
  1153. Dispatch();
  1154. }
  1155. OpCase(i_move_call_ext_last_ePcr): {
  1156. r(0) = Arg(2);
  1157. }
  1158. /* FALL THROUGH */
  1159. OpCase(i_call_ext_last_eP):
  1160. RESTORE_CP(E);
  1161. E = ADD_BYTE_OFFSET(E, Arg(1));
  1162. /*
  1163. * Note: The pointer to the export entry is never NULL; if the module
  1164. * is not loaded, it points to code which will invoke the error handler
  1165. * (see lb_call_error_handler below).
  1166. */
  1167. Dispatchx();
  1168. OpCase(i_move_call_ext_cre): {
  1169. r(0) = Arg(0);
  1170. I++;
  1171. }
  1172. /* FALL THROUGH */
  1173. OpCase(i_call_ext_e):
  1174. SET_CP(c_p, I+2);
  1175. Dispatchx();
  1176. OpCase(i_move_call_ext_only_ecr): {
  1177. r(0) = Arg(1);
  1178. }
  1179. /* FALL THROUGH */
  1180. OpCase(i_call_ext_only_e):
  1181. Dispatchx();
  1182. OpCase(init_y): {
  1183. BeamInstr *next;
  1184. PreFetch(1, next);
  1185. make_blank(yb(Arg(0)));
  1186. NextPF(1, next);
  1187. }
  1188. OpCase(i_trim_I): {
  1189. BeamInstr *next;
  1190. Uint words;
  1191. Uint cp;
  1192. words = Arg(0);
  1193. cp = E[0];
  1194. PreFetch(1, next);
  1195. E += words;
  1196. E[0] = cp;
  1197. NextPF(1, next);
  1198. }
  1199. OpCase(return): {
  1200. SET_I(c_p->cp);
  1201. /*
  1202. * We must clear the CP to make sure that a stale value does not
  1203. * create a false module dependency preventing code upgrading.
  1204. * It also means that we can use the CP in stack backtraces.
  1205. */
  1206. c_p->cp = 0;
  1207. CHECK_TERM(r(0));
  1208. HEAP_SPACE_VERIFIED(0);
  1209. Goto(*I);
  1210. }
  1211. OpCase(test_heap_1_put_list_Iy): {
  1212. BeamInstr *next;
  1213. PreFetch(2, next);
  1214. TestHeap(Arg(0), 1);
  1215. PutList(yb(Arg(1)), r(0), r(0), StoreSimpleDest);
  1216. CHECK_TERM(r(0));
  1217. NextPF(2, next);
  1218. }
  1219. /*
  1220. * Send is almost a standard call-BIF with two arguments, except for:
  1221. * 1) It cannot be traced.
  1222. * 2) There is no pointer to the send_2 function stored in
  1223. * the instruction.
  1224. */
  1225. OpCase(send): {
  1226. BeamInstr *next;
  1227. Eterm result;
  1228. PRE_BIF_SWAPOUT(c_p);
  1229. c_p->fcalls = FCALLS - 1;
  1230. result = send_2(c_p, r(0), x(1));
  1231. PreFetch(0, next);
  1232. POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
  1233. FCALLS = c_p->fcalls;
  1234. if (is_value(result)) {
  1235. r(0) = result;
  1236. CHECK_TERM(r(0));
  1237. NextPF(0, next);
  1238. } else if (c_p->freason == TRAP) {
  1239. SET_CP(c_p, I+1);
  1240. SET_I(*((BeamInstr **) (BeamInstr) ((c_p)->def_arg_reg + 3)));
  1241. SWAPIN;
  1242. r(0) = c_p->def_arg_reg[0];
  1243. x(1) = c_p->def_arg_reg[1];
  1244. Dispatch();
  1245. }
  1246. goto find_func_info;
  1247. }
  1248. OpCase(i_element_jssd): {
  1249. Eterm index;
  1250. Eterm tuple;
  1251. /*
  1252. * Inlined version of element/2 for speed.
  1253. */
  1254. GetArg2(1, index, tuple);
  1255. if (is_small(index) && is_tuple(tuple)) {
  1256. Eterm* tp = tuple_val(tuple);
  1257. if ((signed_val(index) >= 1) &&
  1258. (signed_val(index) <= arityval(*tp))) {
  1259. Eterm result = tp[signed_val(index)];
  1260. StoreBifResult(3, result);
  1261. }
  1262. }
  1263. }
  1264. /* Fall through */
  1265. OpCase(badarg_j):
  1266. badarg:
  1267. c_p->freason = BADARG;
  1268. goto lb_Cl_error;
  1269. OpCase(i_fast_element_jIsd): {
  1270. Eterm tuple;
  1271. /*
  1272. * Inlined version of element/2 for even more speed.
  1273. * The first argument is an untagged integer >= 1.
  1274. * The second argument is guaranteed to be a register operand.
  1275. */
  1276. GetArg1(2, tuple);
  1277. if (is_tuple(tuple)) {
  1278. Eterm* tp = tuple_val(tuple);
  1279. tmp_arg2 = Arg(1);
  1280. if (tmp_arg2 <= arityval(*tp)) {
  1281. Eterm result = tp[tmp_arg2];
  1282. StoreBifResult(3, result);
  1283. }
  1284. }
  1285. goto badarg;
  1286. }
  1287. OpCase(catch_yf):
  1288. c_p->catches++;
  1289. yb(Arg(0)) = Arg(1);
  1290. Next(2);
  1291. OpCase(catch_end_y): {
  1292. c_p->catches--;
  1293. make_blank(yb(Arg(0)));
  1294. if (is_non_value(r(0))) {
  1295. if (x(1) == am_throw) {
  1296. r(0) = x(2);
  1297. } else {
  1298. if (x(1) == am_error) {
  1299. SWAPOUT;
  1300. x(2) = add_stacktrace(c_p, x(2), x(3));
  1301. SWAPIN;
  1302. }
  1303. /* only x(2) is included in the rootset here */
  1304. if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
  1305. * created heap fragments */
  1306. SWAPOUT;
  1307. PROCESS_MAIN_CHK_LOCKS(c_p);
  1308. FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
  1309. PROCESS_MAIN_CHK_LOCKS(c_p);
  1310. SWAPIN;
  1311. }
  1312. r(0) = TUPLE2(HTOP, am_EXIT, x(2));
  1313. HTOP += 3;
  1314. }
  1315. }
  1316. CHECK_TERM(r(0));
  1317. Next(1);
  1318. }
  1319. OpCase(try_end_y): {
  1320. c_p->catches--;
  1321. make_blank(yb(Arg(0)));
  1322. if (is_non_value(r(0))) {
  1323. r(0) = x(1);
  1324. x(1) = x(2);
  1325. x(2) = x(3);
  1326. }
  1327. Next(1);
  1328. }
  1329. /*
  1330. * Skeleton for receive statement:
  1331. *
  1332. * recv_mark L1 Optional
  1333. * call make_ref/monitor Optional
  1334. * ...
  1335. * recv_set L1 Optional
  1336. * L1: <-------------------+
  1337. * <-----------+ |
  1338. * | |
  1339. * loop_rec L2 ------+---+ |
  1340. * ... | | |
  1341. * remove_message | | |
  1342. * jump L3 | | |
  1343. * ... | | |
  1344. * loop_rec_end L1 --+ | |
  1345. * L2: <---------------+ |
  1346. * wait L1 -----------------+ or wait_timeout
  1347. * timeout
  1348. *
  1349. * L3: Code after receive...
  1350. *
  1351. *
  1352. */
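/*
 * Illustration (not generated output): a source-level receive such as
 *
 *     receive {ok, V} -> V end
 *
 * compiles to roughly this shape: loop_rec fetches the next message into
 * x(0); the match code either runs remove_message and jumps past the
 * receive on success, or falls into loop_rec_end to advance to the next
 * message; wait/wait_timeout suspends when the queue is exhausted.
 */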
  1353. OpCase(recv_mark_f): {
  1354. /*
  1355. * Save the current position in the message buffer and
  1356. * the label for the loop_rec/2 instruction for
  1357. * the receive statement.
  1358. */
  1359. c_p->msg.mark = (BeamInstr *) Arg(0);
  1360. c_p->msg.saved_last = c_p->msg.last;
  1361. Next(1);
  1362. }
  1363. OpCase(i_recv_set): {
  1364. /*
  1365. * If the mark is valid (points to the loop_rec/2
  1366. * instruction that follows), we know that the saved
  1367. * position points to the first message that could
  1368. * possibly be matched out.
  1369. *
  1370. * If the mark is invalid, we do nothing, meaning that
  1371. * we will look through all messages in the message queue.
  1372. */
  1373. if (c_p->msg.mark == (BeamInstr *) (I+1)) {
  1374. c_p->msg.save = c_p->msg.saved_last;
  1375. }
  1376. I++;
  1377. /* Fall through to the loop_rec/2 instruction */
  1378. }
  1379. /*
  1380. * Pick up the next message and place it in x(0).
  1381. * If no message, jump to a wait or wait_timeout instruction.
  1382. */
  1383. OpCase(i_loop_rec_fr):
  1384. {
  1385. BeamInstr *next;
  1386. ErlMessage* msgp;
  1387. loop_rec__:
  1388. PROCESS_MAIN_CHK_LOCKS(c_p);
  1389. msgp = PEEK_MESSAGE(c_p);
  1390. if (!msgp) {
  1391. #ifdef ERTS_SMP
  1392. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1393. /* Make sure messages won't pass exit signals... */
  1394. if (ERTS_PROC_PENDING_EXIT(c_p)) {
  1395. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1396. SWAPOUT;
  1397. goto do_schedule; /* Will be rescheduled for exit */
  1398. }
  1399. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  1400. msgp = PEEK_MESSAGE(c_p);
  1401. if (msgp)
  1402. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1403. else {
  1404. #endif
  1405. SET_I((BeamInstr *) Arg(0));
  1406. Goto(*I); /* Jump to a wait or wait_timeout instruction */
  1407. #ifdef ERTS_SMP
  1408. }
  1409. #endif
  1410. }
  1411. ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS,
  1412. {
  1413. SWAPOUT;
  1414. reg[0] = r(0);
  1415. PROCESS_MAIN_CHK_LOCKS(c_p);
  1416. },
  1417. {
  1418. PROCESS_MAIN_CHK_LOCKS(c_p);
  1419. r(0) = reg[0];
  1420. SWAPIN;
  1421. });
  1422. if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
  1423. /*
  1424. * A corrupt distribution message that we weren't able to decode;
  1425. * remove it...
  1426. */
  1427. ASSERT(!msgp->data.attached);
  1428. UNLINK_MESSAGE(c_p, msgp);
  1429. free_message(msgp);
  1430. goto loop_rec__;
  1431. }
  1432. PreFetch(1, next);
  1433. r(0) = ERL_MESSAGE_TERM(msgp);
  1434. NextPF(1, next);
  1435. }
  1436. /*
  1437. * Remove a (matched) message from the message queue.
  1438. */
  1439. OpCase(remove_message): {
  1440. BeamInstr *next;
  1441. ErlMessage* msgp;
  1442. PROCESS_MAIN_CHK_LOCKS(c_p);
  1443. PreFetch(0, next);
  1444. msgp = PEEK_MESSAGE(c_p);
  1445. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
  1446. save_calls(c_p, &exp_receive);
  1447. }
  1448. if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
  1449. SEQ_TRACE_TOKEN(c_p) = NIL;
  1450. } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
  1451. Eterm msg;
  1452. SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
  1453. ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
  1454. ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
  1455. ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
  1456. ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
  1457. ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
  1458. ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
  1459. c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1460. if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
  1461. c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1462. }
  1463. msg = ERL_MESSAGE_TERM(msgp);
  1464. seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
  1465. c_p->id, c_p);
  1466. }
  1467. UNLINK_MESSAGE(c_p, msgp);
  1468. JOIN_MESSAGE(c_p);
  1469. CANCEL_TIMER(c_p);
  1470. free_message(msgp);
  1471. PROCESS_MAIN_CHK_LOCKS(c_p);
  1472. NextPF(0, next);
  1473. }
  1474. /*
  1475. * Advance the save pointer to the next message (the current
  1476. * message didn't match), then jump to the loop_rec instruction.
  1477. */
  1478. OpCase(loop_rec_end_f): {
  1479. SET_I((BeamInstr *) Arg(0));
  1480. SAVE_MESSAGE(c_p);
  1481. goto loop_rec__;
  1482. }
  1483. /*
  1484. * Prepare to wait for a message or a timeout, whichever occurs first.
  1485. *
  1486. * Note: In order to keep the compatibility between 32 and 64 bits
  1487. * emulators, only timeout values that can be represented in 32 bits
  1488. * (unsigned) or less…
