PageRenderTime 67ms CodeModel.GetById 21ms RepoModel.GetById 0ms app.codeStats 1ms

/erts/emulator/beam/beam_emu.c

https://github.com/notarf/otp
C | 6367 lines | 4686 code | 718 blank | 963 comment | 866 complexity | 608cec9114c2670930fcf547226e6430 MD5 | raw file
Possible License(s): BSD-2-Clause

Large files are truncated, but you can click here to view the full file

  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2011. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include <stddef.h> /* offsetof() */
  23. #include "sys.h"
  24. #include "erl_vm.h"
  25. #include "global.h"
  26. #include "erl_process.h"
  27. #include "erl_nmgc.h"
  28. #include "error.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "beam_load.h"
  32. #include "erl_binary.h"
  33. #include "erl_bits.h"
  34. #include "dist.h"
  35. #include "beam_bp.h"
  36. #include "beam_catches.h"
  37. #include "erl_thr_progress.h"
  38. #ifdef HIPE
  39. #include "hipe_mode_switch.h"
  40. #include "hipe_bif1.h"
  41. #endif
  42. /* #define HARDDEBUG 1 */
  43. #if defined(NO_JUMP_TABLE)
  44. # define OpCase(OpCode) case op_##OpCode
  45. # define CountCase(OpCode) case op_count_##OpCode
  46. # define OpCode(OpCode) ((Uint*)op_##OpCode)
  47. # define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
  48. # define LabelAddr(Addr) &&##Addr
  49. #else
  50. # define OpCase(OpCode) lb_##OpCode
  51. # define CountCase(OpCode) lb_count_##OpCode
  52. # define Goto(Rel) goto *((void *)Rel)
  53. # define LabelAddr(Label) &&Label
  54. # define OpCode(OpCode) (&&lb_##OpCode)
  55. #endif
  56. #ifdef ERTS_ENABLE_LOCK_CHECK
  57. # ifdef ERTS_SMP
  58. # define PROCESS_MAIN_CHK_LOCKS(P) \
  59. do { \
  60. if ((P)) { \
  61. erts_pix_lock_t *pix_lock__ = ERTS_PIX2PIXLOCK(internal_pid_index((P)->id));\
  62. erts_proc_lc_chk_only_proc_main((P)); \
  63. erts_pix_lock(pix_lock__); \
  64. ASSERT(0 < (P)->lock.refc && (P)->lock.refc < erts_no_schedulers*5);\
  65. erts_pix_unlock(pix_lock__); \
  66. } \
  67. else \
  68. erts_lc_check_exact(NULL, 0); \
  69. ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
  70. } while (0)
  71. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
  72. if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
  73. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
  74. if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
  75. # else
  76. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  77. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  78. # define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
  79. # endif
  80. #else
  81. # define PROCESS_MAIN_CHK_LOCKS(P)
  82. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  83. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  84. #endif
  85. /*
  86. * Define macros for deep checking of terms.
  87. */
  88. #if defined(HARDDEBUG)
  89. # define CHECK_TERM(T) size_object(T)
  90. # define CHECK_ARGS(PC) \
  91. do { \
  92. int i_; \
  93. int Arity_ = PC[-1]; \
  94. if (Arity_ > 0) { \
  95. CHECK_TERM(r(0)); \
  96. } \
  97. for (i_ = 1; i_ < Arity_; i_++) { \
  98. CHECK_TERM(x(i_)); \
  99. } \
  100. } while (0)
  101. #else
  102. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  103. # define CHECK_ARGS(T)
  104. #endif
  105. #ifndef MAX
  106. #define MAX(x, y) (((x) > (y)) ? (x) : (y))
  107. #endif
  108. #define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
  109. #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
  110. /*
  111. * We reuse some of the fields in the save area in the process structure.
  112. * This is safe to do, since this space is only actively used when
  113. * the process is switched out.
  114. */
  115. #define REDS_IN(p) ((p)->def_arg_reg[5])
  116. /*
  117. * Add a byte offset to a pointer to Eterm. This is useful when
  118. * the loader has precalculated a byte offset.
  119. */
  120. #define ADD_BYTE_OFFSET(ptr, offset) \
  121. ((Eterm *) (((unsigned char *)ptr) + (offset)))
  122. /* We don't check the range if an ordinary switch is used */
  123. #ifdef NO_JUMP_TABLE
  124. #define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
  125. #else
  126. #define VALID_INSTR(IP) \
  127. ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
  128. (SWord)(IP) < (SWord)LabelAddr(end_emulator_loop))
  129. #endif /* NO_JUMP_TABLE */
  130. #define SET_CP(p, ip) \
  131. ASSERT(VALID_INSTR(*(ip))); \
  132. (p)->cp = (ip)
  133. #define SET_I(ip) \
  134. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  135. I = (ip)
  136. #define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
  137. /*
  138. * Store a result into a register given a destination descriptor.
  139. */
  140. #define StoreResult(Result, DestDesc) \
  141. do { \
  142. Eterm stb_reg; \
  143. stb_reg = (DestDesc); \
  144. CHECK_TERM(Result); \
  145. switch (beam_reg_tag(stb_reg)) { \
  146. case R_REG_DEF: \
  147. r(0) = (Result); break; \
  148. case X_REG_DEF: \
  149. xb(x_reg_offset(stb_reg)) = (Result); break; \
  150. default: \
  151. yb(y_reg_offset(stb_reg)) = (Result); break; \
  152. } \
  153. } while (0)
  154. #define StoreSimpleDest(Src, Dest) Dest = (Src)
  155. /*
  156. * Store a result into a register and execute the next instruction.
  157. * Dst points to the word with a destination descriptor, which MUST
  158. * be just before the next instruction.
  159. */
  160. #define StoreBifResult(Dst, Result) \
  161. do { \
  162. BeamInstr* stb_next; \
  163. Eterm stb_reg; \
  164. stb_reg = Arg(Dst); \
  165. I += (Dst) + 2; \
  166. stb_next = (BeamInstr *) *I; \
  167. CHECK_TERM(Result); \
  168. switch (beam_reg_tag(stb_reg)) { \
  169. case R_REG_DEF: \
  170. r(0) = (Result); Goto(stb_next); \
  171. case X_REG_DEF: \
  172. xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  173. default: \
  174. yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  175. } \
  176. } while (0)
  177. #define ClauseFail() goto jump_f
  178. #define SAVE_CP(X) \
  179. do { \
  180. *(X) = make_cp(c_p->cp); \
  181. c_p->cp = 0; \
  182. } while(0)
  183. #define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X)))
  184. #define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
  185. /*
  186. * Special Beam instructions.
  187. */
  188. BeamInstr beam_apply[2];
  189. BeamInstr beam_exit[1];
  190. BeamInstr beam_continue_exit[1];
  191. BeamInstr* em_call_error_handler;
  192. BeamInstr* em_apply_bif;
  193. BeamInstr* em_call_traced_function;
  194. /* NOTE These should be the only variables containing trace instructions.
  195. ** Sometimes tests are for the instruction value, and sometimes
  196. ** for the referring variable (one of these), and rogue references
  197. ** will most likely cause chaos.
  198. */
  199. BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  200. BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
  201. BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  202. BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
  203. /*
  204. * We should warn only once for tuple funs.
  205. */
  206. static erts_smp_atomic_t warned_for_tuple_funs;
  207. /*
  208. * All Beam instructions in numerical order.
  209. */
  210. #ifndef NO_JUMP_TABLE
  211. void** beam_ops;
  212. #endif
  213. #ifndef ERTS_SMP /* Not supported with smp emulator */
  214. extern int count_instructions;
  215. #endif
  216. #if defined(HYBRID)
  217. #define SWAPIN \
  218. g_htop = global_htop; \
  219. g_hend = global_hend; \
  220. HTOP = HEAP_TOP(c_p); \
  221. E = c_p->stop
  222. #define SWAPOUT \
  223. global_htop = g_htop; \
  224. global_hend = g_hend; \
  225. HEAP_TOP(c_p) = HTOP; \
  226. c_p->stop = E
  227. #else
  228. #define SWAPIN \
  229. HTOP = HEAP_TOP(c_p); \
  230. E = c_p->stop
  231. #define SWAPOUT \
  232. HEAP_TOP(c_p) = HTOP; \
  233. c_p->stop = E
  234. /*
  235. * Use LIGHT_SWAPOUT when the called function
  236. * will call HeapOnlyAlloc() (and never HAlloc()).
  237. */
  238. #ifdef DEBUG
  239. # /* The stack pointer is used in an assertion. */
  240. # define LIGHT_SWAPOUT SWAPOUT
  241. #else
  242. # define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
  243. #endif
  244. /*
  245. * Use LIGHT_SWAPIN when we know that c_p->stop cannot
  246. * have been updated (i.e. if there cannot have been
  247. * a garbage-collection).
  248. */
  249. #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
  250. #endif
  251. #ifdef FORCE_HEAP_FRAGS
  252. # define HEAP_SPACE_VERIFIED(Words) do { \
  253. c_p->space_verified = (Words); \
  254. c_p->space_verified_from = HTOP; \
  255. }while(0)
  256. #else
  257. # define HEAP_SPACE_VERIFIED(Words) ((void)0)
  258. #endif
  259. #define PRE_BIF_SWAPOUT(P) \
  260. HEAP_TOP((P)) = HTOP; \
  261. (P)->stop = E; \
  262. PROCESS_MAIN_CHK_LOCKS((P)); \
  263. ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
  264. #define db(N) (N)
  265. #define tb(N) (N)
  266. #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
  267. #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
  268. #define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  269. #define Qb(N) (N)
  270. #define Ib(N) (N)
  271. #define x(N) reg[N]
  272. #define y(N) E[N]
  273. #define r(N) x##N
  274. /*
  275. * Makes sure that there are StackNeed + HeapNeed + 1 words available
  276. * on the combined heap/stack segment, then allocates StackNeed + 1
  277. * words on the stack and saves CP.
  278. *
  279. * M is number of live registers to preserve during garbage collection
  280. */
  281. #define AH(StackNeed, HeapNeed, M) \
  282. do { \
  283. int needed; \
  284. needed = (StackNeed) + 1; \
  285. if (E - HTOP < (needed + (HeapNeed))) { \
  286. SWAPOUT; \
  287. reg[0] = r(0); \
  288. PROCESS_MAIN_CHK_LOCKS(c_p); \
  289. FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
  290. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  291. PROCESS_MAIN_CHK_LOCKS(c_p); \
  292. r(0) = reg[0]; \
  293. SWAPIN; \
  294. } \
  295. E -= needed; \
  296. SAVE_CP(E); \
  297. } while (0)
  298. #define Allocate(Ns, Live) AH(Ns, 0, Live)
  299. #define AllocateZero(Ns, Live) \
  300. do { Eterm* ptr; \
  301. int i = (Ns); \
  302. AH(i, 0, Live); \
  303. for (ptr = E + i; ptr > E; ptr--) { \
  304. make_blank(*ptr); \
  305. } \
  306. } while (0)
  307. #define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
  308. #define AllocateHeapZero(Ns, Nh, Live) \
  309. do { Eterm* ptr; \
  310. int i = (Ns); \
  311. AH(i, Nh, Live); \
  312. for (ptr = E + i; ptr > E; ptr--) { \
  313. make_blank(*ptr); \
  314. } \
  315. } while (0)
  316. #define AllocateInit(Ns, Live, Y) \
  317. do { AH(Ns, 0, Live); make_blank(Y); } while (0)
  318. /*
  319. * Like the AH macro, but allocates no additional heap space.
  320. */
  321. #define A(StackNeed, M) AH(StackNeed, 0, M)
  322. #define D(N) \
  323. RESTORE_CP(E); \
  324. E += (N) + 1;
  325. #define TestBinVHeap(VNh, Nh, Live) \
  326. do { \
  327. unsigned need = (Nh); \
  328. if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
  329. SWAPOUT; \
  330. reg[0] = r(0); \
  331. PROCESS_MAIN_CHK_LOCKS(c_p); \
  332. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  333. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  334. PROCESS_MAIN_CHK_LOCKS(c_p); \
  335. r(0) = reg[0]; \
  336. SWAPIN; \
  337. } \
  338. HEAP_SPACE_VERIFIED(need); \
  339. } while (0)
  340. /*
  341. * Check if Nh words of heap are available; if not, do a garbage collection.
  342. * Live is number of active argument registers to be preserved.
  343. */
  344. #define TestHeap(Nh, Live) \
  345. do { \
  346. unsigned need = (Nh); \
  347. if (E - HTOP < need) { \
  348. SWAPOUT; \
  349. reg[0] = r(0); \
  350. PROCESS_MAIN_CHK_LOCKS(c_p); \
  351. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  352. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  353. PROCESS_MAIN_CHK_LOCKS(c_p); \
  354. r(0) = reg[0]; \
  355. SWAPIN; \
  356. } \
  357. HEAP_SPACE_VERIFIED(need); \
  358. } while (0)
  359. /*
  360. * Check if Nh words of heap are available; if not, do a garbage collection.
  361. * Live is number of active argument registers to be preserved.
  362. * Takes special care to preserve Extra if a garbage collection occurs.
  363. */
  364. #define TestHeapPreserve(Nh, Live, Extra) \
  365. do { \
  366. unsigned need = (Nh); \
  367. if (E - HTOP < need) { \
  368. SWAPOUT; \
  369. reg[0] = r(0); \
  370. reg[Live] = Extra; \
  371. PROCESS_MAIN_CHK_LOCKS(c_p); \
  372. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
  373. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  374. PROCESS_MAIN_CHK_LOCKS(c_p); \
  375. if (Live > 0) { \
  376. r(0) = reg[0]; \
  377. } \
  378. Extra = reg[Live]; \
  379. SWAPIN; \
  380. } \
  381. HEAP_SPACE_VERIFIED(need); \
  382. } while (0)
  383. #define TestHeapPutList(Need, Reg) \
  384. do { \
  385. TestHeap((Need), 1); \
  386. PutList(Reg, r(0), r(0), StoreSimpleDest); \
  387. CHECK_TERM(r(0)); \
  388. } while (0)
  389. #ifdef HYBRID
  390. #ifdef INCREMENTAL
  391. #define TestGlobalHeap(Nh, Live, hp) \
  392. do { \
  393. unsigned need = (Nh); \
  394. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  395. SWAPOUT; \
  396. reg[0] = r(0); \
  397. FCALLS -= need; \
  398. (hp) = IncAlloc(c_p,need,reg,(Live)); \
  399. r(0) = reg[0]; \
  400. SWAPIN; \
  401. } while (0)
  402. #else
  403. #define TestGlobalHeap(Nh, Live, hp) \
  404. do { \
  405. unsigned need = (Nh); \
  406. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  407. if (g_hend - g_htop < need) { \
  408. SWAPOUT; \
  409. reg[0] = r(0); \
  410. FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
  411. r(0) = reg[0]; \
  412. SWAPIN; \
  413. } \
  414. (hp) = global_htop; \
  415. } while (0)
  416. #endif
  417. #endif /* HYBRID */
  418. #define Init(N) make_blank(yb(N))
  419. #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
  420. #define Init3(Y1, Y2, Y3) \
  421. do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
  422. #define MakeFun(FunP, NumFree) \
  423. do { \
  424. SWAPOUT; \
  425. reg[0] = r(0); \
  426. r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
  427. SWAPIN; \
  428. } while (0)
  429. #define PutTuple(Dst, Arity) \
  430. do { \
  431. Dst = make_tuple(HTOP); \
  432. pt_arity = (Arity); \
  433. } while (0)
  434. /*
  435. * Check that we haven't used the reductions and jump to function pointed to by
  436. * the I register. If we are out of reductions, do a context switch.
  437. */
  438. #define DispatchMacro() \
  439. do { \
  440. BeamInstr* dis_next; \
  441. dis_next = (BeamInstr *) *I; \
  442. CHECK_ARGS(I); \
  443. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  444. FCALLS--; \
  445. Goto(dis_next); \
  446. } else { \
  447. goto context_switch; \
  448. } \
  449. } while (0)
  450. #define DispatchMacroFun() \
  451. do { \
  452. BeamInstr* dis_next; \
  453. dis_next = (BeamInstr *) *I; \
  454. CHECK_ARGS(I); \
  455. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  456. FCALLS--; \
  457. Goto(dis_next); \
  458. } else { \
  459. goto context_switch_fun; \
  460. } \
  461. } while (0)
  462. #define DispatchMacrox() \
  463. do { \
  464. if (FCALLS > 0) { \
  465. Eterm* dis_next; \
  466. SET_I(((Export *) Arg(0))->address); \
  467. dis_next = (Eterm *) *I; \
  468. FCALLS--; \
  469. CHECK_ARGS(I); \
  470. Goto(dis_next); \
  471. } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
  472. && FCALLS > neg_o_reds) { \
  473. goto save_calls1; \
  474. } else { \
  475. SET_I(((Export *) Arg(0))->address); \
  476. CHECK_ARGS(I); \
  477. goto context_switch; \
  478. } \
  479. } while (0)
  480. #ifdef DEBUG
  481. /*
  482. * To simplify breakpoint setting, put the code in one place only and jump to it.
  483. */
  484. # define Dispatch() goto do_dispatch
  485. # define Dispatchx() goto do_dispatchx
  486. # define Dispatchfun() goto do_dispatchfun
  487. #else
  488. /*
  489. * Inline for speed.
  490. */
  491. # define Dispatch() DispatchMacro()
  492. # define Dispatchx() DispatchMacrox()
  493. # define Dispatchfun() DispatchMacroFun()
  494. #endif
  495. #define Self(R) R = c_p->id
  496. #define Node(R) R = erts_this_node->sysname
  497. #define Arg(N) I[(N)+1]
  498. #define Next(N) \
  499. I += (N) + 1; \
  500. ASSERT(VALID_INSTR(*I)); \
  501. Goto(*I)
  502. #define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0)
  503. #define NextPF(N, Dst) \
  504. I += N + 1; \
  505. ASSERT(VALID_INSTR(Dst)); \
  506. Goto(Dst)
  507. #define GetR(pos, tr) \
  508. do { \
  509. tr = Arg(pos); \
  510. switch (beam_reg_tag(tr)) { \
  511. case R_REG_DEF: tr = r(0); break; \
  512. case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
  513. case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
  514. } \
  515. CHECK_TERM(tr); \
  516. } while (0)
  517. #define GetArg1(N, Dst) GetR((N), Dst)
  518. #define GetArg2(N, Dst1, Dst2) \
  519. do { \
  520. GetR(N, Dst1); \
  521. GetR((N)+1, Dst2); \
  522. } while (0)
  523. #define PutList(H, T, Dst, Store) \
  524. do { \
  525. HTOP[0] = (H); HTOP[1] = (T); \
  526. Store(make_list(HTOP), Dst); \
  527. HTOP += 2; \
  528. } while (0)
  529. #define Move(Src, Dst, Store) \
  530. do { \
  531. Eterm term = (Src); \
  532. Store(term, Dst); \
  533. } while (0)
  534. #define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
  535. #define MoveGenDest(src, dstp) \
  536. if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
  537. #define MoveReturn(Src, Dest) \
  538. (Dest) = (Src); \
  539. I = c_p->cp; \
  540. ASSERT(VALID_INSTR(*c_p->cp)); \
  541. c_p->cp = 0; \
  542. CHECK_TERM(r(0)); \
  543. Goto(*I)
  544. #define DeallocateReturn(Deallocate) \
  545. do { \
  546. int words_to_pop = (Deallocate); \
  547. SET_I((BeamInstr *) cp_val(*E)); \
  548. E = ADD_BYTE_OFFSET(E, words_to_pop); \
  549. CHECK_TERM(r(0)); \
  550. Goto(*I); \
  551. } while (0)
  552. #define MoveDeallocateReturn(Src, Dest, Deallocate) \
  553. (Dest) = (Src); \
  554. DeallocateReturn(Deallocate)
  555. #define MoveCall(Src, Dest, CallDest, Size) \
  556. (Dest) = (Src); \
  557. SET_CP(c_p, I+Size+1); \
  558. SET_I((BeamInstr *) CallDest); \
  559. Dispatch();
  560. #define MoveCallLast(Src, Dest, CallDest, Deallocate) \
  561. (Dest) = (Src); \
  562. RESTORE_CP(E); \
  563. E = ADD_BYTE_OFFSET(E, (Deallocate)); \
  564. SET_I((BeamInstr *) CallDest); \
  565. Dispatch();
  566. #define MoveCallOnly(Src, Dest, CallDest) \
  567. (Dest) = (Src); \
  568. SET_I((BeamInstr *) CallDest); \
  569. Dispatch();
  570. #define MoveJump(Src) \
  571. r(0) = (Src); \
  572. SET_I((BeamInstr *) Arg(0)); \
  573. Goto(*I);
  574. #define GetList(Src, H, T) do { \
  575. Eterm* tmp_ptr = list_val(Src); \
  576. H = CAR(tmp_ptr); \
  577. T = CDR(tmp_ptr); } while (0)
  578. #define GetTupleElement(Src, Element, Dest) \
  579. do { \
  580. tmp_arg1 = (Eterm) COMPRESS_POINTER(((unsigned char *) tuple_val(Src)) + \
  581. (Element)); \
  582. (Dest) = (*(Eterm *) EXPAND_POINTER(tmp_arg1)); \
  583. } while (0)
  584. #define ExtractNextElement(Dest) \
  585. tmp_arg1 += sizeof(Eterm); \
  586. (Dest) = (* (Eterm *) (((unsigned char *) EXPAND_POINTER(tmp_arg1))))
  587. #define ExtractNextElement2(Dest) \
  588. do { \
  589. Eterm* ene_dstp = &(Dest); \
  590. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  591. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  592. tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
  593. } while (0)
  594. #define ExtractNextElement3(Dest) \
  595. do { \
  596. Eterm* ene_dstp = &(Dest); \
  597. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  598. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  599. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  600. tmp_arg1 += 3*sizeof(Eterm); \
  601. } while (0)
  602. #define ExtractNextElement4(Dest) \
  603. do { \
  604. Eterm* ene_dstp = &(Dest); \
  605. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  606. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  607. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  608. ene_dstp[3] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[4]; \
  609. tmp_arg1 += 4*sizeof(Eterm); \
  610. } while (0)
  611. #define ExtractElement(Element, Dest) \
  612. do { \
  613. tmp_arg1 += (Element); \
  614. (Dest) = (* (Eterm *) EXPAND_POINTER(tmp_arg1)); \
  615. } while (0)
  616. #define EqualImmed(X, Y, Action) if (X != Y) { Action; }
  617. #define NotEqualImmed(X, Y, Action) if (X == Y) { Action; }
  618. #define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
  619. #define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
  620. #define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
  621. #define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
  622. #define IsIntegerAllocate(Src, Need, Alive, Fail) \
  623. if (is_not_integer(Src)) { Fail; } \
  624. A(Need, Alive)
  625. #define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
  626. #define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
  627. #define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
  628. #define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
  629. if (is_not_list(Src)) { Fail; } \
  630. A(Need, Alive)
  631. #define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
  632. if (is_not_list(Src)) { Fail; } \
  633. TestHeap(Need, Alive)
  634. #define IsTuple(X, Action) if (is_not_tuple(X)) Action
  635. #define IsArity(Pointer, Arity, Fail) \
  636. if (*(Eterm *) \
  637. EXPAND_POINTER(tmp_arg1 = (Eterm) \
  638. COMPRESS_POINTER(tuple_val(Pointer))) != (Arity)) \
  639. { \
  640. Fail; \
  641. }
  642. #define IsFunction(X, Action) \
  643. do { \
  644. if ( !(is_any_fun(X)) ) { \
  645. Action; \
  646. } \
  647. } while (0)
  648. #define IsFunction2(F, A, Action) \
  649. do { \
  650. if (erl_is_function(c_p, F, A) != am_true ) { \
  651. Action; \
  652. } \
  653. } while (0)
  654. #define IsTupleOfArity(Src, Arity, Fail) \
  655. do { \
  656. if (is_not_tuple(Src) || \
  657. *(Eterm *) \
  658. EXPAND_POINTER(tmp_arg1 = \
  659. (Eterm) COMPRESS_POINTER(tuple_val(Src))) != Arity) { \
  660. Fail; \
  661. } \
  662. } while (0)
  663. #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
  664. #define IsBinary(Src, Fail) \
  665. if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
  666. #define IsBitstring(Src, Fail) \
  667. if (is_not_binary(Src)) { Fail; }
  668. #if defined(ARCH_64) && !HALFWORD_HEAP
  669. #define BsSafeMul(A, B, Fail, Target) \
  670. do { Uint64 _res = (A) * (B); \
  671. if (_res / B != A) { Fail; } \
  672. Target = _res; \
  673. } while (0)
  674. #else
  675. #define BsSafeMul(A, B, Fail, Target) \
  676. do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
  677. if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
  678. Target = _res; \
  679. } while (0)
  680. #endif
  681. #define BsGetFieldSize(Bits, Unit, Fail, Target) \
  682. do { \
  683. Sint _signed_size; Uint _uint_size; \
  684. if (is_small(Bits)) { \
  685. _signed_size = signed_val(Bits); \
  686. if (_signed_size < 0) { Fail; } \
  687. _uint_size = (Uint) _signed_size; \
  688. } else { \
  689. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  690. _uint_size = temp_bits; \
  691. } \
  692. BsSafeMul(_uint_size, Unit, Fail, Target); \
  693. } while (0)
  694. #define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
  695. do { \
  696. Sint _signed_size; Uint _uint_size; \
  697. if (is_small(Bits)) { \
  698. _signed_size = signed_val(Bits); \
  699. if (_signed_size < 0) { Fail; } \
  700. _uint_size = (Uint) _signed_size; \
  701. } else { \
  702. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  703. _uint_size = (Uint) temp_bits; \
  704. } \
  705. Target = _uint_size * Unit; \
  706. } while (0)
  707. #define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  708. do { \
  709. ErlBinMatchBuffer *_mb; \
  710. Eterm _result; Sint _size; \
  711. if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
  712. _size *= ((Flags) >> 3); \
  713. TestHeap(FLOAT_SIZE_OBJECT, Live); \
  714. _mb = ms_matchbuffer(Ms); \
  715. LIGHT_SWAPOUT; \
  716. _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
  717. LIGHT_SWAPIN; \
  718. HEAP_SPACE_VERIFIED(0); \
  719. if (is_non_value(_result)) { Fail; } \
  720. else { Store(_result, Dst); } \
  721. } while (0)
  722. #define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  723. do { \
  724. ErlBinMatchBuffer *_mb; \
  725. Eterm _result; \
  726. TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
  727. _mb = ms_matchbuffer(Ms); \
  728. LIGHT_SWAPOUT; \
  729. _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
  730. LIGHT_SWAPIN; \
  731. HEAP_SPACE_VERIFIED(0); \
  732. if (is_non_value(_result)) { Fail; } \
  733. else { Store(_result, Dst); } \
  734. } while (0)
  735. #define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  736. do { \
  737. ErlBinMatchBuffer *_mb; \
  738. Eterm _result; Uint _size; \
  739. BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
  740. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  741. _mb = ms_matchbuffer(Ms); \
  742. LIGHT_SWAPOUT; \
  743. _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
  744. LIGHT_SWAPIN; \
  745. HEAP_SPACE_VERIFIED(0); \
  746. if (is_non_value(_result)) { Fail; } \
  747. else { Store(_result, Dst); } \
  748. } while (0)
  749. #define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
  750. do { \
  751. ErlBinMatchBuffer *_mb; \
  752. Eterm _result; \
  753. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  754. _mb = ms_matchbuffer(Ms); \
  755. if (((_mb->size - _mb->offset) % Unit) == 0) { \
  756. LIGHT_SWAPOUT; \
  757. _result = erts_bs_get_binary_all_2(c_p, _mb); \
  758. LIGHT_SWAPIN; \
  759. HEAP_SPACE_VERIFIED(0); \
  760. ASSERT(is_value(_result)); \
  761. Store(_result, Dst); \
  762. } else { \
  763. HEAP_SPACE_VERIFIED(0); \
  764. Fail; } \
  765. } while (0)
  766. #define BsSkipBits2(Ms, Bits, Unit, Fail) \
  767. do { \
  768. ErlBinMatchBuffer *_mb; \
  769. size_t new_offset; \
  770. Uint _size; \
  771. _mb = ms_matchbuffer(Ms); \
  772. BsGetFieldSize(Bits, Unit, Fail, _size); \
  773. new_offset = _mb->offset + _size; \
  774. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  775. else { Fail; } \
  776. } while (0)
  777. #define BsSkipBitsAll2(Ms, Unit, Fail) \
  778. do { \
  779. ErlBinMatchBuffer *_mb; \
  780. _mb = ms_matchbuffer(Ms); \
  781. if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
  782. else { Fail; } \
  783. } while (0)
  784. #define BsSkipBitsImm2(Ms, Bits, Fail) \
  785. do { \
  786. ErlBinMatchBuffer *_mb; \
  787. size_t new_offset; \
  788. _mb = ms_matchbuffer(Ms); \
  789. new_offset = _mb->offset + (Bits); \
  790. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  791. else { Fail; } \
  792. } while (0)
  793. #define NewBsPutIntegerImm(Sz, Flags, Src) \
  794. do { \
  795. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
  796. } while (0)
  797. #define NewBsPutInteger(Sz, Flags, Src) \
  798. do { \
  799. Sint _size; \
  800. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  801. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
  802. { goto badarg; } \
  803. } while (0)
  804. #define NewBsPutFloatImm(Sz, Flags, Src) \
  805. do { \
  806. if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
  807. } while (0)
  808. #define NewBsPutFloat(Sz, Flags, Src) \
  809. do { \
  810. Sint _size; \
  811. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  812. if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
  813. } while (0)
  814. #define NewBsPutBinary(Sz, Flags, Src) \
  815. do { \
  816. Sint _size; \
  817. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  818. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
  819. } while (0)
  820. #define NewBsPutBinaryImm(Sz, Src) \
  821. do { \
  822. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
  823. } while (0)
  824. #define NewBsPutBinaryAll(Src, Unit) \
  825. do { \
  826. if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
  827. } while (0)
  828. #define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
  829. #define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
  830. #define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
  831. /*
  832. * process_main() is already huge, so we want to avoid inlining
  833. * into it. Especially functions that are seldom used.
  834. */
  835. #ifdef __GNUC__
  836. # define NOINLINE __attribute__((__noinline__))
  837. #else
  838. # define NOINLINE
  839. #endif
  840. /*
  841. * The following functions are called directly by process_main().
  842. * Don't inline them.
  843. */
  844. static BifFunction translate_gc_bif(void* gcf) NOINLINE;
  845. static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
  846. Eterm* reg, BifFunction bf) NOINLINE;
  847. static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
  848. Eterm* reg, Eterm func) NOINLINE;
  849. static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
  850. static BeamInstr* apply(Process* p, Eterm module, Eterm function,
  851. Eterm args, Eterm* reg) NOINLINE;
  852. static BeamInstr* call_fun(Process* p, int arity,
  853. Eterm* reg, Eterm args) NOINLINE;
  854. static BeamInstr* apply_fun(Process* p, Eterm fun,
  855. Eterm args, Eterm* reg) NOINLINE;
  856. static Eterm new_fun(Process* p, Eterm* reg,
  857. ErlFunEntry* fe, int num_free) NOINLINE;
  858. /*
  859. * Functions not directly called by process_main(). OK to inline.
  860. */
  861. static BeamInstr* next_catch(Process* c_p, Eterm *reg);
  862. static void terminate_proc(Process* c_p, Eterm Value);
  863. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  864. static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
  865. BifFunction bf, Eterm args);
  866. static struct StackTrace * get_trace_from_exc(Eterm exc);
  867. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  868. #if defined(VXWORKS)
  869. static int init_done;
  870. #endif
/*
 * One-time emulator start-up hook.
 *
 * Resets the VxWorks-specific init_done flag (on other platforms the
 * equivalent flag is a function-local static inside process_main()),
 * initializes the warned_for_tuple_funs atomic to 0 so the tuple-fun
 * warning is emitted at most once, and then calls process_main().
 * Per the comment above process_main(), its first invocation only
 * performs initialization (exporting the instructions' C labels to the
 * loader) and returns; it does not start executing BEAM code here.
 */
  871. void
  872. init_emulator(void)
  873. {
  874. #if defined(VXWORKS)
  875. init_done = 0;
  876. #endif
  877. erts_smp_atomic_init_nob(&warned_for_tuple_funs, (erts_aint_t) 0);
  878. process_main();
  879. }
/*
 * On certain platforms, make sure that the main variables really are placed
 * in registers.
 */
#if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
/* GCC on SPARC: pin each core interpreter variable of process_main()
 * to a specific SPARC local register (%l0 .. %l7) using GCC's
 * explicit-register-variable extension.  Skipped when DEBUG is
 * defined — presumably so debuggers can still inspect the variables;
 * confirm against build docs. */
# define REG_x0 asm("%l0")
# define REG_xregs asm("%l1")
# define REG_htop asm("%l2")
# define REG_stop asm("%l3")
# define REG_I asm("%l4")
# define REG_fcalls asm("%l5")
# define REG_tmp_arg1 asm("%l6")
# define REG_tmp_arg2 asm("%l7")
#else
/* All other platforms: the macros expand to nothing and register
 * allocation is left entirely to the compiler. */
# define REG_x0
# define REG_xregs
# define REG_htop
# define REG_stop
# define REG_I
# define REG_fcalls
# define REG_tmp_arg1
# define REG_tmp_arg2
#endif
  903. /*
  904. * process_main() is called twice:
  905. * The first call performs some initialisation, including exporting
  906. * the instructions' C labels to the loader.
  907. * The second call starts execution of BEAM code. This call never returns.
  908. */
  909. void process_main(void)
  910. {
  911. #if !defined(VXWORKS)
  912. static int init_done = 0;
  913. #endif
  914. Process* c_p = NULL;
  915. int reds_used;
  916. #ifdef DEBUG
  917. ERTS_DECLARE_DUMMY(Eterm pid);
  918. #endif
  919. /*
  920. * X register zero; also called r(0)
  921. */
  922. register Eterm x0 REG_x0 = NIL;
  923. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
  924. * in all other cases x0 is used.
  925. */
  926. register Eterm* reg REG_xregs = NULL;
  927. /*
  928. * Top of heap (next free location); grows upwards.
  929. */
  930. register Eterm* HTOP REG_htop = NULL;
  931. #ifdef HYBRID
  932. Eterm *g_htop;
  933. Eterm *g_hend;
  934. #endif
  935. /* Stack pointer. Grows downwards; points
  936. * to last item pushed (normally a saved
  937. * continuation pointer).
  938. */
  939. register Eterm* E REG_stop = NULL;
  940. /*
  941. * Pointer to next threaded instruction.
  942. */
  943. register BeamInstr *I REG_I = NULL;
  944. /* Number of reductions left. This function
  945. * returns to the scheduler when FCALLS reaches zero.
  946. */
  947. register Sint FCALLS REG_fcalls = 0;
  948. /*
  949. * Temporaries used for picking up arguments for instructions.
  950. */
  951. register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
  952. register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
  953. #if HEAP_ON_C_STACK
  954. Eterm tmp_big[2]; /* Temporary buffer for small bignums if HEAP_ON_C_STACK. */
  955. #else
  956. Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
  957. #endif
  958. /*
  959. * X registers and floating point registers are located in
  960. * scheduler specific data.
  961. */
  962. register FloatDef *freg;
  963. /*
  964. * For keeping the negative old value of 'reds' when call saving is active.
  965. */
  966. int neg_o_reds = 0;
  967. Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
  968. #ifndef NO_JUMP_TABLE
  969. static void* opcodes[] = { DEFINE_OPCODES };
  970. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  971. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  972. #endif
  973. #else
  974. int Go;
  975. #endif
  976. Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
  977. Eterm pt_arity; /* Used by do_put_tuple */
  978. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  979. /*
  980. * Note: In this function, we attempt to place rarely executed code towards
  981. * the end of the function, in the hope that the cache hit rate will be better.
  982. * The initialization code is only run once, so it is at the very end.
  983. *
  984. * Note: c_p->arity must be set to reflect the number of useful terms in
  985. * c_p->arg_reg before calling the scheduler.
  986. */
  987. if (!init_done) {
  988. init_done = 1;
  989. goto init_emulator;
  990. }
  991. c_p = NULL;
  992. reds_used = 0;
  993. goto do_schedule1;
  994. do_schedule:
  995. reds_used = REDS_IN(c_p) - FCALLS;
  996. do_schedule1:
  997. PROCESS_MAIN_CHK_LOCKS(c_p);
  998. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  999. #if HALFWORD_HEAP
  1000. ASSERT(erts_get_scheduler_data()->num_tmp_heap_used == 0);
  1001. #endif
  1002. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1003. c_p = schedule(c_p, reds_used);
  1004. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1005. #ifdef DEBUG
  1006. pid = c_p->id; /* Save for debugging purpouses */
  1007. #endif
  1008. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  1009. PROCESS_MAIN_CHK_LOCKS(c_p);
  1010. reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
  1011. freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
  1012. #if !HEAP_ON_C_STACK
  1013. tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
  1014. #endif
  1015. ERL_BITS_RELOAD_STATEP(c_p);
  1016. {
  1017. int reds;
  1018. Eterm* argp;
  1019. BeamInstr *next;
  1020. int i;
  1021. argp = c_p->arg_reg;
  1022. for (i = c_p->arity - 1; i > 0; i--) {
  1023. reg[i] = argp[i];
  1024. CHECK_TERM(reg[i]);
  1025. }
  1026. /*
  1027. * We put the original reduction count in the process structure, to reduce
  1028. * the code size (referencing a field in a struct through a pointer stored
  1029. * in a register gives smaller code than referencing a global variable).
  1030. */
  1031. SET_I(c_p->i);
  1032. reds = c_p->fcalls;
  1033. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
  1034. && (c_p->trace_flags & F_SENSITIVE) == 0) {
  1035. neg_o_reds = -reds;
  1036. FCALLS = REDS_IN(c_p) = 0;
  1037. } else {
  1038. neg_o_reds = 0;
  1039. FCALLS = REDS_IN(c_p) = reds;
  1040. }
  1041. next = (BeamInstr *) *I;
  1042. r(0) = c_p->arg_reg[0];
  1043. #ifdef HARDDEBUG
  1044. if (c_p->arity > 0) {
  1045. CHECK_TERM(r(0));
  1046. }
  1047. #endif
  1048. SWAPIN;
  1049. ASSERT(VALID_INSTR(next));
  1050. Goto(next);
  1051. }
  1052. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  1053. emulator_loop:
  1054. #endif
  1055. #ifdef NO_JUMP_TABLE
  1056. switch (Go) {
  1057. #endif
  1058. #include "beam_hot.h"
  1059. #define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
  1060. #define ARITH_FUNC(name) erts_gc_##name
  1061. {
  1062. Eterm increment_reg_val;
  1063. Eterm increment_val;
  1064. Uint live;
  1065. Eterm result;
  1066. OpCase(i_increment_yIId):
  1067. increment_reg_val = yb(Arg(0));
  1068. goto do_increment;
  1069. OpCase(i_increment_xIId):
  1070. increment_reg_val = xb(Arg(0));
  1071. goto do_increment;
  1072. OpCase(i_increment_rIId):
  1073. increment_reg_val = r(0);
  1074. I--;
  1075. do_increment:
  1076. increment_val = Arg(1);
  1077. if (is_small(increment_reg_val)) {
  1078. Sint i = signed_val(increment_reg_val) + increment_val;
  1079. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1080. if (MY_IS_SSMALL(i)) {
  1081. result = make_small(i);
  1082. store_result:
  1083. StoreBifResult(3, result);
  1084. }
  1085. }
  1086. live = Arg(2);
  1087. SWAPOUT;
  1088. reg[0] = r(0);
  1089. reg[live] = increment_reg_val;
  1090. reg[live+1] = make_small(increment_val);
  1091. result = erts_gc_mixed_plus(c_p, reg, live);
  1092. r(0) = reg[0];
  1093. SWAPIN;
  1094. ERTS_HOLE_CHECK(c_p);
  1095. if (is_value(result)) {
  1096. goto store_result;
  1097. }
  1098. ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
  1099. goto find_func_info;
  1100. }
  1101. OpCase(i_plus_jId):
  1102. {
  1103. Eterm result;
  1104. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1105. Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
  1106. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1107. if (MY_IS_SSMALL(i)) {
  1108. result = make_small(i);
  1109. STORE_ARITH_RESULT(result);
  1110. }
  1111. }
  1112. arith_func = ARITH_FUNC(mixed_plus);
  1113. goto do_big_arith2;
  1114. }
  1115. OpCase(i_minus_jId):
  1116. {
  1117. Eterm result;
  1118. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1119. Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
  1120. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1121. if (MY_IS_SSMALL(i)) {
  1122. result = make_small(i);
  1123. STORE_ARITH_RESULT(result);
  1124. }
  1125. }
  1126. arith_func = ARITH_FUNC(mixed_minus);
  1127. goto do_big_arith2;
  1128. }
  1129. OpCase(i_is_lt_f):
  1130. if (CMP_GE(tmp_arg1, tmp_arg2)) {
  1131. ClauseFail();
  1132. }
  1133. Next(1);
  1134. OpCase(i_is_ge_f):
  1135. if (CMP_LT(tmp_arg1, tmp_arg2)) {
  1136. ClauseFail();
  1137. }
  1138. Next(1);
  1139. OpCase(i_is_eq_f):
  1140. if (CMP_NE(tmp_arg1, tmp_arg2)) {
  1141. ClauseFail();
  1142. }
  1143. Next(1);
  1144. OpCase(i_is_ne_f):
  1145. if (CMP_EQ(tmp_arg1, tmp_arg2)) {
  1146. ClauseFail();
  1147. }
  1148. Next(1);
  1149. OpCase(i_is_eq_exact_f):
  1150. if (!EQ(tmp_arg1, tmp_arg2)) {
  1151. ClauseFail();
  1152. }
  1153. Next(1);
  1154. {
  1155. Eterm is_eq_exact_lit_val;
  1156. OpCase(i_is_eq_exact_literal_xfc):
  1157. is_eq_exact_lit_val = xb(Arg(0));
  1158. I++;
  1159. goto do_is_eq_exact_literal;
  1160. OpCase(i_is_eq_exact_literal_yfc):
  1161. is_eq_exact_lit_val = yb(Arg(0));
  1162. I++;
  1163. goto do_is_eq_exact_literal;
  1164. OpCase(i_is_eq_exact_literal_rfc):
  1165. is_eq_exact_lit_val = r(0);
  1166. do_is_eq_exact_literal:
  1167. if (!eq(Arg(1), is_eq_exact_lit_val)) {
  1168. ClauseFail();
  1169. }
  1170. Next(2);
  1171. }
  1172. {
  1173. Eterm is_ne_exact_lit_val;
  1174. OpCase(i_is_ne_exact_literal_xfc):
  1175. is_ne_exact_lit_val = xb(Arg(0));
  1176. I++;
  1177. goto do_is_ne_exact_literal;
  1178. OpCase(i_is_ne_exact_literal_yfc):
  1179. is_ne_exact_lit_val = yb(Arg(0));
  1180. I++;
  1181. goto do_is_ne_exact_literal;
  1182. OpCase(i_is_ne_exact_literal_rfc):
  1183. is_ne_exact_lit_val = r(0);
  1184. do_is_ne_exact_literal:
  1185. if (eq(Arg(1), is_ne_exact_lit_val)) {
  1186. ClauseFail();
  1187. }
  1188. Next(2);
  1189. }
  1190. OpCase(i_move_call_only_fcr): {
  1191. r(0) = Arg(1);
  1192. }
  1193. /* FALL THROUGH */
  1194. OpCase(i_call_only_f): {
  1195. SET_I((BeamInstr *) Arg(0));
  1196. Dispatch();
  1197. }
  1198. OpCase(i_move_call_last_fPcr): {
  1199. r(0) = Arg(2);
  1200. }
  1201. /* FALL THROUGH */
  1202. OpCase(i_call_last_fP): {
  1203. RESTORE_CP(E);
  1204. E = ADD_BYTE_OFFSET(E, Arg(1));
  1205. SET_I((BeamInstr *) Arg(0));
  1206. Dispatch();
  1207. }
  1208. OpCase(i_move_call_crf): {
  1209. r(0) = Arg(0);
  1210. I++;
  1211. }
  1212. /* FALL THROUGH */
  1213. OpCase(i_call_f): {
  1214. SET_CP(c_p, I+2);
  1215. SET_I((BeamInstr *) Arg(0));
  1216. Dispatch();
  1217. }
  1218. OpCase(i_move_call_ext_last_ePcr): {
  1219. r(0) = Arg(2);
  1220. }
  1221. /* FALL THROUGH */
  1222. OpCase(i_call_ext_last_eP):
  1223. RESTORE_CP(E);
  1224. E = ADD_BYTE_OFFSET(E, Arg(1));
  1225. /*
  1226. * Note: The pointer to the export entry is never NULL; if the module
  1227. * is not loaded, it points to code which will invoke the error handler
  1228. * (see lb_call_error_handler below).
  1229. */
  1230. Dispatchx();
  1231. OpCase(i_move_call_ext_cre): {
  1232. r(0) = Arg(0);
  1233. I++;
  1234. }
  1235. /* FALL THROUGH */
  1236. OpCase(i_call_ext_e):
  1237. SET_CP(c_p, I+2);
  1238. Dispatchx();
  1239. OpCase(i_move_call_ext_only_ecr): {
  1240. r(0) = Arg(1);
  1241. }
  1242. /* FALL THROUGH */
  1243. OpCase(i_call_ext_only_e):
  1244. Dispatchx();
  1245. OpCase(init_y): {
  1246. BeamInstr *next;
  1247. PreFetch(1, next);
  1248. make_blank(yb(Arg(0)));
  1249. NextPF(1, next);
  1250. }
  1251. OpCase(i_trim_I): {
  1252. BeamInstr *next;
  1253. Uint words;
  1254. Uint cp;
  1255. words = Arg(0);
  1256. cp = E[0];
  1257. PreFetch(1, next);
  1258. E += words;
  1259. E[0] = cp;
  1260. NextPF(1, next);
  1261. }
  1262. OpCase(move_x1_c): {
  1263. x(1) = Arg(0);
  1264. Next(1);
  1265. }
  1266. OpCase(move_x2_c): {
  1267. x(2) = Arg(0);
  1268. Next(1);
  1269. }
  1270. OpCase(return): {
  1271. SET_I(c_p->cp);
  1272. /*
  1273. * We must clear the CP to make sure that a stale value do not
  1274. * create a false module dependcy preventing code upgrading.
  1275. * It also means that we can use the CP in stack backtraces.
  1276. */
  1277. c_p->cp = 0;
  1278. CHECK_TERM(r(0));
  1279. HEAP_SPACE_VERIFIED(0);
  1280. Goto(*I);
  1281. }
  1282. /*
  1283. * Send is almost a standard call-BIF with two arguments, except for:
  1284. * 1) It cannot be traced.
  1285. * 2) There is no pointer to the send_2 function stored in
  1286. * the instruction.
  1287. */
  1288. OpCase(send): {
  1289. BeamInstr *next;
  1290. Eterm result;
  1291. PRE_BIF_SWAPOUT(c_p);
  1292. c_p->fcalls = FCALLS - 1;
  1293. reg[0] = r(0);
  1294. result = erl_send(c_p, r(0), x(1));
  1295. PreFetch(0, next);
  1296. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  1297. PROCESS_MAIN_CHK_LOCKS(c_p);
  1298. if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
  1299. result = erts_gc_after_bif_call(c_p, result, reg, 2);
  1300. r(0) = reg[0];
  1301. E = c_p->stop;
  1302. }
  1303. HTOP = HEAP_TOP(c_p);
  1304. FCALLS = c_p->fcalls;
  1305. if (is_value(result)) {
  1306. r(0) = result;
  1307. CHECK_TERM(r(0));
  1308. NextPF(0, next);
  1309. } else if (c_p->freason == TRAP) {
  1310. SET_CP(c_p, I+1);
  1311. SET_I(c_p->i);
  1312. SWAPIN;
  1313. r(0) = reg[0];
  1314. Dispatch();
  1315. }
  1316. goto find_func_info;
  1317. }
  1318. {
  1319. Eterm element_index;
  1320. Eterm element_tuple;
  1321. OpCase(i_element_xjsd):
  1322. element_tuple = xb(Arg(0));
  1323. I++;
  1324. goto do_element;
  1325. OpCase(i_element_yjsd):
  1326. element_tuple = yb(Arg(0));
  1327. I++;
  1328. goto do_element;
  1329. OpCase(i_element_rjsd):
  1330. element_tuple = r(0);
  1331. /* Fall through */
  1332. do_element:
  1333. GetArg1(1, element_index);
  1334. if (is_small(element_index) && is_tuple(element_tuple)) {
  1335. Eterm* tp = tuple_val(element_tuple);
  1336. if ((signed_val(element_index) >= 1) &&
  1337. (signed_val(element_index) <= arityval(*tp))) {
  1338. Eterm result = tp[signed_val(element_index)];
  1339. StoreBifResult(2, result);
  1340. }
  1341. }
  1342. }
  1343. /* Fall through */
  1344. OpCase(badarg_j):
  1345. badarg:
  1346. c_p->freason = BADARG;
  1347. goto lb_Cl_error;
  1348. {
  1349. Eterm fast_element_tuple;
  1350. OpCase(i_fast_element_rjId):
  1351. fast_element_tuple = r(0);
  1352. do_fast_element:
  1353. if (is_tuple(fast_element_tuple)) {
  1354. Eterm* tp = tuple_val(fast_element_tuple);
  1355. Eterm pos = Arg(1); /* Untagged integer >= 1 */
  1356. if (pos <= arityval(*tp)) {
  1357. Eterm result = tp[pos];
  1358. StoreBifResult(2, result);
  1359. }
  1360. }
  1361. goto badarg;
  1362. OpCase(i_fast_element_xjId):
  1363. fast_element_tuple = xb(Arg(0));
  1364. I++;
  1365. goto do_fast_element;
  1366. OpCase(i_fast_element_yjId):
  1367. fast_element_tuple = yb(Arg(0));
  1368. I++;
  1369. goto do_fast_element;
  1370. }
  1371. OpCase(catch_yf):
  1372. c_p->catches++;
  1373. yb(Arg(0)) = Arg(1);
  1374. Next(2);
  1375. OpCase(catch_end_y): {
  1376. c_p->catches--;
  1377. make_blank(yb(Arg(0)));
  1378. if (is_non_value(r(0))) {
  1379. if (x(1) == am_throw) {
  1380. r(0) = x(2);
  1381. } else {
  1382. if (x(1) == am_error) {
  1383. SWAPOUT;
  1384. x(2) = add_stacktrace(c_p, x(2), x(3));
  1385. SWAPIN;
  1386. }
  1387. /* only x(2) is included in the rootset here */
  1388. if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
  1389. * created heap fragments */
  1390. SWAPOUT;
  1391. PROCESS_MAIN_CHK_LOCKS(c_p);
  1392. FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
  1393. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1394. PROCESS_MAIN_CHK_LOCKS(c_p);
  1395. SWAPIN;
  1396. }
  1397. r(0) = TUPLE2(HTOP, am_EXIT, x(2));
  1398. HTOP += 3;
  1399. }
  1400. }
  1401. CHECK_TERM(r(0));
  1402. Next(1);
  1403. }
  1404. OpCase(try_end_y): {
  1405. c_p->catches--;
  1406. make_blank(yb(Arg(0)));
  1407. if (is_non_value(r(0))) {
  1408. r(0) = x(1);
  1409. x(1) = x(2);
  1410. x(2) = x(3);
  1411. }
  1412. Next(1);
  1413. }
  1414. /*
  1415. * Skeleton for receive statement:
  1416. *
  1417. * recv_mark L1 Optional
  1418. * call make_ref/monitor Optional
  1419. * ...
  1420. * recv_set L1 Optional
  1421. * L1: <-------------------+
  1422. * <-----------+ |
  1423. * | |
  1424. * loop_rec L2 ------+---+ |
  1425. * ... | | |
  1426. * remove_message | | |
  1427. * jump L3 | | |
  1428. * ... | | |
  1429. * loop_rec_end L1 --+ | |
  1430. * L2: <---------------+ |
  1431. * wait L1 -----------------+ or wait_timeout
  1432. * timeout
  1433. *
  1434. * L3: Code after receive...
  1435. *
  1436. *
  1437. */
  1438. OpCase(recv_mark_f): {
  1439. /*
  1440. * Save the current position in message buffer and the
  1441. * the label for the loop_rec/2 instruction for the
  1442. * the receive statement.
  1443. */
  1444. c_p->msg.mark = (BeamInstr *) Arg(0);
  1445. c_p->msg.saved_last = c_p->msg.last;
  1446. Next(1);
  1447. }
  1448. OpCase(i_recv_set): {
  1449. /*
  1450. * If the mark is valid (points to the loop_rec/2
  1451. * instruction that follows), we know that the saved
  1452. * position points to the first message that could
  1453. * possibly be matched out.
  1454. *
  1455. * If the mark is invalid, we do nothing, meaning that
  1456. * we will look through all messages in the message queue.
  1457. */
  1458. if (c_p->msg.mark == (BeamInstr *) (I+1)) {
  1459. c_p->msg.save = c_p->msg.saved_last;
  1460. }
  1461. I++;
  1462. /* Fall through to the loop_rec/2 instruction */
  1463. }
  1464. /*
  1465. * Pick up the next message and place it in x(0).
  1466. * If no message, jump to a wait or wait_timeout instruction.
  1467. */
  1468. OpCase(i_loop_rec_fr):
  1469. {
  1470. BeamInstr *next;
  1471. ErlMessage* msgp;
  1472. loop_rec__:
  1473. PROCESS_MAIN_CHK_LOCKS(c_p);
  1474. msgp = PEEK_MESSAGE(c_p);
  1475. if (!msgp) {
  1476. #ifdef ERTS_SMP
  1477. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1478. /* Make sure messages wont pass exit signals... */
  1479. if (ERTS_PROC_PENDING_EXIT(c_p)) {
  1480. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1481. SWAPOUT;
  1482. goto do_schedule; /* Will be rescheduled for exit */
  1483. }
  1484. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  1485. msgp = PEEK_MESSAGE(c_p);
  1486. if (msgp)
  1487. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1488. else {
  1489. #endif
  1490. SET_I((BeamInstr *) Arg(0));
  1491. Goto(*I); /* Jump to a wait or wait_timeout instruction */
  1492. #ifdef ERTS_SMP
  1493. }
  1494. #endif
  1495. }
  1496. ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS,
  1497. {
  1498. SWAPOUT;
  1499. reg[0] = r(0);
  1500. PROCESS_MAIN_CHK_LOCKS(c_p);
  1501. },
  1502. {
  1503. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1504. PROCESS_MAIN_CHK_LOCKS(c_p);
  1505. r(0) = reg[0];
  1506. SWAPIN;
  1507. });
  1508. if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
  1509. /*
  1510. * A corrupt distribution message that we weren't able to decode;
  1511. * remove it...
  1512. */
  1513. ASSERT(!msgp->data.attached);
  1514. UNLINK_MESSAGE(c_p, msgp);
  1515. free_message(msgp);
  1516. goto loop_rec__;
  1517. }
  1518. PreFetch(1, next);
  1519. r(0) = ERL_MESSAGE_TERM(msgp);
  1520. NextPF(1, next);
  1521. }
  1522. /*
  1523. * Remove a (matched) message from the message queue.
  1524. */
  1525. OpCase(remove_message): {
  1526. BeamInstr *next;
  1527. ErlMessage* msgp;
  1528. PROCESS

Large files are truncated, but you can click here to view the full file