
/erts/emulator/beam/beam_emu.c

https://github.com/alricb/Erlang-OTP
C | 6512 lines | 4822 code | 725 blank | 965 comment | 897 complexity | 3402c782823432675709c3c74182efd8 MD5

Note: large files are truncated; this listing ends mid-file.

  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2011. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include <stddef.h> /* offsetof() */
  23. #include "sys.h"
  24. #include "erl_vm.h"
  25. #include "global.h"
  26. #include "erl_process.h"
  27. #include "erl_nmgc.h"
  28. #include "error.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "beam_load.h"
  32. #include "erl_binary.h"
  33. #include "erl_bits.h"
  34. #include "dist.h"
  35. #include "beam_bp.h"
  36. #include "beam_catches.h"
  37. #ifdef HIPE
  38. #include "hipe_mode_switch.h"
  39. #include "hipe_bif1.h"
  40. #endif
  41. /* #define HARDDEBUG 1 */
  42. #if defined(NO_JUMP_TABLE)
  43. # define OpCase(OpCode) case op_##OpCode: lb_##OpCode
  44. # define CountCase(OpCode) case op_count_##OpCode
  45. # define OpCode(OpCode) ((Uint*)op_##OpCode)
  46. # define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
  47. # define LabelAddr(Addr) &&##Addr
  48. #else
  49. # define OpCase(OpCode) lb_##OpCode
  50. # define CountCase(OpCode) lb_count_##OpCode
  51. # define Goto(Rel) goto *(Rel)
  52. # define LabelAddr(Label) &&Label
  53. # define OpCode(OpCode) (&&lb_##OpCode)
  54. #endif
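/*
 * A minimal standalone sketch (not from beam_emu.c; all names here are
 * illustrative) of the directly-threaded dispatch the macros above
 * implement when a jump table is available. With GCC's labels-as-values
 * extension, each loaded instruction word holds the address of its
 * handler, so Goto(*I) jumps straight to the next handler with no
 * central switch and no range check.
 */
static long run_demo(void)
{
    void *prog[4];
    long acc = 0;

    /* "Load time": translate opcodes into handler label addresses. */
    prog[0] = &&op_inc;
    prog[1] = &&op_inc;
    prog[2] = &&op_double;
    prog[3] = &&op_halt;

    void **ip = prog;
    goto **ip++;                      /* like Goto(*I)                     */

op_inc:    acc += 1; goto **ip++;     /* every handler dispatches the next */
op_double: acc *= 2; goto **ip++;
op_halt:   return acc;                /* returns 4 for this program        */
}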
  55. #ifdef ERTS_ENABLE_LOCK_CHECK
  56. # ifdef ERTS_SMP
  57. # define PROCESS_MAIN_CHK_LOCKS(P) \
  58. do { \
  59. if ((P)) { \
  60. erts_pix_lock_t *pix_lock__ = ERTS_PIX2PIXLOCK(internal_pid_index((P)->id));\
  61. erts_proc_lc_chk_only_proc_main((P)); \
  62. erts_pix_lock(pix_lock__); \
  63. ASSERT(0 < (P)->lock.refc && (P)->lock.refc < erts_no_schedulers*5);\
  64. erts_pix_unlock(pix_lock__); \
  65. } \
  66. else \
  67. erts_lc_check_exact(NULL, 0); \
  68. ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
  69. } while (0)
  70. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
  71. if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
  72. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
  73. if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
  74. # else
  75. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  76. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  77. # define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
  78. # endif
  79. #else
  80. # define PROCESS_MAIN_CHK_LOCKS(P)
  81. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  82. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  83. #endif
  84. /*
  85. * Define macros for deep checking of terms.
  86. */
  87. #if defined(HARDDEBUG)
  88. # define CHECK_TERM(T) size_object(T)
  89. # define CHECK_ARGS(PC) \
  90. do { \
  91. int i_; \
  92. int Arity_ = PC[-1]; \
  93. if (Arity_ > 0) { \
  94. CHECK_TERM(r(0)); \
  95. } \
  96. for (i_ = 1; i_ < Arity_; i_++) { \
  97. CHECK_TERM(x(i_)); \
  98. } \
  99. } while (0)
  100. #else
  101. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  102. # define CHECK_ARGS(T)
  103. #endif
  104. #ifndef MAX
  105. #define MAX(x, y) (((x) > (y)) ? (x) : (y))
  106. #endif
  107. #define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
  108. #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
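/*
 * Worked example (illustrative): TermWords rounds a count of Eterms up to
 * whole BeamInstr words. When a BeamInstr holds two Eterms (the halfword
 * layout), TermWords(3) == 3/2 + !!(3 % 2) == 1 + 1 == 2; when the two
 * sizes are equal, TermWords(t) == t.
 */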
  109. /*
  110. * We reuse some of the fields in the save area in the process structure.
  111. * This is safe to do, since this space is only actively used when
  112. * the process is switched out.
  113. */
  114. #define REDS_IN(p) ((p)->def_arg_reg[5])
  115. /*
  116. * Add a byte offset to a pointer to Eterm. This is useful when
  117. * the loader has precalculated a byte offset.
  118. */
  119. #define ADD_BYTE_OFFSET(ptr, offset) \
  120. ((Eterm *) (((unsigned char *)ptr) + (offset)))
  121. /* We don't check the range if an ordinary switch is used */
  122. #ifdef NO_JUMP_TABLE
  123. #define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
  124. #else
  125. #define VALID_INSTR(IP) \
  126. ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
  127. (SWord)(IP) < (SWord)LabelAddr(end_emulator_loop))
  128. #endif /* NO_JUMP_TABLE */
  129. #define SET_CP(p, ip) \
  130. ASSERT(VALID_INSTR(*(ip))); \
  131. (p)->cp = (ip)
  132. #define SET_I(ip) \
  133. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  134. I = (ip)
  135. #define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
  136. /*
  137. * Store a result into a register given a destination descriptor.
  138. */
  139. #define StoreResult(Result, DestDesc) \
  140. do { \
  141. Eterm stb_reg; \
  142. stb_reg = (DestDesc); \
  143. CHECK_TERM(Result); \
  144. switch (beam_reg_tag(stb_reg)) { \
  145. case R_REG_DEF: \
  146. r(0) = (Result); break; \
  147. case X_REG_DEF: \
  148. xb(x_reg_offset(stb_reg)) = (Result); break; \
  149. default: \
  150. yb(y_reg_offset(stb_reg)) = (Result); break; \
  151. } \
  152. } while (0)
  153. #define StoreSimpleDest(Src, Dest) Dest = (Src)
  154. /*
  155. * Store a result into a register and execute the next instruction.
  156. * Dst points to the word with a destination descriptor, which MUST
  157. * be just before the next instruction.
  158. */
  159. #define StoreBifResult(Dst, Result) \
  160. do { \
  161. BeamInstr* stb_next; \
  162. Eterm stb_reg; \
  163. stb_reg = Arg(Dst); \
  164. I += (Dst) + 2; \
  165. stb_next = (BeamInstr *) *I; \
  166. CHECK_TERM(Result); \
  167. switch (beam_reg_tag(stb_reg)) { \
  168. case R_REG_DEF: \
  169. r(0) = (Result); Goto(stb_next); \
  170. case X_REG_DEF: \
  171. xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  172. default: \
  173. yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  174. } \
  175. } while (0)
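/*
 * Illustration (inferred from the macro arithmetic and the comment above,
 * not code from this file): for StoreBifResult(Dst, Result) the loader is
 * expected to emit
 *
 *     I[0] ... I[Dst+1] = destination descriptor, I[Dst+2] = next opcode
 *
 * so that I += (Dst) + 2 leaves I at the next instruction and *I is its
 * threaded handler address.
 */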
  176. #define ClauseFail() goto lb_jump_f
  177. #define SAVE_CP(X) \
  178. do { \
  179. *(X) = make_cp(c_p->cp); \
  180. c_p->cp = 0; \
  181. } while(0)
  182. #define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X)))
  183. #define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
  184. /*
  185. * Special Beam instructions.
  186. */
  187. BeamInstr beam_apply[2];
  188. BeamInstr beam_exit[1];
  189. BeamInstr beam_continue_exit[1];
  190. BeamInstr* em_call_error_handler;
  191. BeamInstr* em_apply_bif;
  192. BeamInstr* em_call_traced_function;
  193. /* NOTE These should be the only variables containing trace instructions.
  194. ** Sometimes tests are for the instruction value, and sometimes
  195. ** for the referring variable (one of these), and rogue references
  196. ** will most likely cause chaos.
  197. */
  198. BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  199. BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
  200. BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  201. BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
  202. /*
  203. * All Beam instructions in numerical order.
  204. */
  205. #ifndef NO_JUMP_TABLE
  206. void** beam_ops;
  207. #endif
  208. #ifndef ERTS_SMP /* Not supported with smp emulator */
  209. extern int count_instructions;
  210. #endif
  211. #if defined(HYBRID)
  212. #define SWAPIN \
  213. g_htop = global_htop; \
  214. g_hend = global_hend; \
  215. HTOP = HEAP_TOP(c_p); \
  216. E = c_p->stop
  217. #define SWAPOUT \
  218. global_htop = g_htop; \
  219. global_hend = g_hend; \
  220. HEAP_TOP(c_p) = HTOP; \
  221. c_p->stop = E
  222. #else
  223. #define SWAPIN \
  224. HTOP = HEAP_TOP(c_p); \
  225. E = c_p->stop
  226. #define SWAPOUT \
  227. HEAP_TOP(c_p) = HTOP; \
  228. c_p->stop = E
  229. /*
  230. * Use LIGHT_SWAPOUT when the called function
  231. * will call HeapOnlyAlloc() (and never HAlloc()).
  232. */
  233. #ifdef DEBUG
  234. # /* The stack pointer is used in an assertion. */
  235. # define LIGHT_SWAPOUT SWAPOUT
  236. #else
  237. # define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
  238. #endif
  239. /*
  240. * Use LIGHT_SWAPIN when we know that c_p->stop cannot
  241. * have been updated (i.e. if there cannot have been
  242. * a garbage-collection).
  243. */
  244. #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
  245. #endif
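/*
 * A standalone sketch (not from beam_emu.c; types and names illustrative)
 * of the caching pattern behind SWAPIN/SWAPOUT: hot per-process fields
 * are kept in locals on the fast path, written back before any call that
 * may garbage-collect, and reloaded afterwards.
 */
struct demo_proc { unsigned long *heap_top, *stack_top; };

static void demo_gc(struct demo_proc *p)   /* stands in for erts_garbage_collect */
{
    (void)p;                                /* may move both heap and stack */
}

static void demo_step(struct demo_proc *p)
{
    unsigned long *htop = p->heap_top;      /* SWAPIN                      */
    unsigned long *e    = p->stack_top;

    /* ... fast path touches only htop and e ... */

    p->heap_top  = htop;                    /* SWAPOUT before a call that  */
    p->stack_top = e;                       /* may collect garbage         */
    demo_gc(p);
    htop = p->heap_top;                     /* SWAPIN again: both fields   */
    e    = p->stack_top;                    /* may have changed            */

    (void)htop; (void)e;
}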
  246. #ifdef FORCE_HEAP_FRAGS
  247. # define HEAP_SPACE_VERIFIED(Words) do { \
  248. c_p->space_verified = (Words); \
  249. c_p->space_verified_from = HTOP; \
  250. }while(0)
  251. #else
  252. # define HEAP_SPACE_VERIFIED(Words) ((void)0)
  253. #endif
  254. #define PRE_BIF_SWAPOUT(P) \
  255. HEAP_TOP((P)) = HTOP; \
  256. (P)->stop = E; \
  257. PROCESS_MAIN_CHK_LOCKS((P)); \
  258. ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
  259. #if defined(HYBRID)
  260. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  261. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  262. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  263. } \
  264. SWAPIN
  265. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  266. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  267. _regs[0] = r(0); \
  268. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  269. r(0) = _regs[0]; \
  270. } \
  271. SWAPIN
  272. #else
  273. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  274. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  275. PROCESS_MAIN_CHK_LOCKS((_p)); \
  276. ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
  277. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  278. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  279. E = (_p)->stop; \
  280. } \
  281. HTOP = HEAP_TOP((_p))
  282. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  283. ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
  284. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  285. PROCESS_MAIN_CHK_LOCKS((_p)); \
  286. if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
  287. _regs[0] = r(0); \
  288. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  289. r(0) = _regs[0]; \
  290. E = (_p)->stop; \
  291. } \
  292. HTOP = HEAP_TOP((_p))
  293. #endif
  294. #define db(N) (N)
  295. #define tb(N) (N)
  296. #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
  297. #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
  298. #define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  299. #define Qb(N) (N)
  300. #define Ib(N) (N)
  301. #define x(N) reg[N]
  302. #define y(N) E[N]
  303. #define r(N) x##N
  304. /*
  305. * Makes sure that there are StackNeed + HeapNeed + 1 words available
  306. * on the combined heap/stack segment, then allocates StackNeed + 1
  307. * words on the stack and saves CP.
  308. *
  309. * M is the number of live registers to preserve during garbage collection
  310. */
  311. #define AH(StackNeed, HeapNeed, M) \
  312. do { \
  313. int needed; \
  314. needed = (StackNeed) + 1; \
  315. if (E - HTOP < (needed + (HeapNeed))) { \
  316. SWAPOUT; \
  317. reg[0] = r(0); \
  318. PROCESS_MAIN_CHK_LOCKS(c_p); \
  319. FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
  320. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  321. PROCESS_MAIN_CHK_LOCKS(c_p); \
  322. r(0) = reg[0]; \
  323. SWAPIN; \
  324. } \
  325. E -= needed; \
  326. SAVE_CP(E); \
  327. } while (0)
  328. #define Allocate(Ns, Live) AH(Ns, 0, Live)
  329. #define AllocateZero(Ns, Live) \
  330. do { Eterm* ptr; \
  331. int i = (Ns); \
  332. AH(i, 0, Live); \
  333. for (ptr = E + i; ptr > E; ptr--) { \
  334. make_blank(*ptr); \
  335. } \
  336. } while (0)
  337. #define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
  338. #define AllocateHeapZero(Ns, Nh, Live) \
  339. do { Eterm* ptr; \
  340. int i = (Ns); \
  341. AH(i, Nh, Live); \
  342. for (ptr = E + i; ptr > E; ptr--) { \
  343. make_blank(*ptr); \
  344. } \
  345. } while (0)
  346. #define AllocateInit(Ns, Live, Y) \
  347. do { AH(Ns, 0, Live); make_blank(Y); } while (0)
  348. /*
  349. * Like the AH macro, but allocates no additional heap space.
  350. */
  351. #define A(StackNeed, M) AH(StackNeed, 0, M)
  352. #define D(N) \
  353. RESTORE_CP(E); \
  354. E += (N) + 1;
  355. #define TestBinVHeap(VNh, Nh, Live) \
  356. do { \
  357. unsigned need = (Nh); \
  358. if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
  359. SWAPOUT; \
  360. reg[0] = r(0); \
  361. PROCESS_MAIN_CHK_LOCKS(c_p); \
  362. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  363. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  364. PROCESS_MAIN_CHK_LOCKS(c_p); \
  365. r(0) = reg[0]; \
  366. SWAPIN; \
  367. } \
  368. HEAP_SPACE_VERIFIED(need); \
  369. } while (0)
  370. /*
  371. * Check if Nh words of heap are available; if not, do a garbage collection.
  372. * Live is the number of active argument registers to be preserved.
  373. */
  374. #define TestHeap(Nh, Live) \
  375. do { \
  376. unsigned need = (Nh); \
  377. if (E - HTOP < need) { \
  378. SWAPOUT; \
  379. reg[0] = r(0); \
  380. PROCESS_MAIN_CHK_LOCKS(c_p); \
  381. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  382. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  383. PROCESS_MAIN_CHK_LOCKS(c_p); \
  384. r(0) = reg[0]; \
  385. SWAPIN; \
  386. } \
  387. HEAP_SPACE_VERIFIED(need); \
  388. } while (0)
  389. /*
  390. * Check if Nh words of heap are available; if not, do a garbage collection.
  391. * Live is the number of active argument registers to be preserved.
  392. * Takes special care to preserve Extra if a garbage collection occurs.
  393. */
  394. #define TestHeapPreserve(Nh, Live, Extra) \
  395. do { \
  396. unsigned need = (Nh); \
  397. if (E - HTOP < need) { \
  398. SWAPOUT; \
  399. reg[0] = r(0); \
  400. reg[Live] = Extra; \
  401. PROCESS_MAIN_CHK_LOCKS(c_p); \
  402. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
  403. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  404. PROCESS_MAIN_CHK_LOCKS(c_p); \
  405. if (Live > 0) { \
  406. r(0) = reg[0]; \
  407. } \
  408. Extra = reg[Live]; \
  409. SWAPIN; \
  410. } \
  411. HEAP_SPACE_VERIFIED(need); \
  412. } while (0)
  413. #define TestHeapPutList(Need, Reg) \
  414. do { \
  415. TestHeap((Need), 1); \
  416. PutList(Reg, r(0), r(0), StoreSimpleDest); \
  417. CHECK_TERM(r(0)); \
  418. } while (0)
  419. #ifdef HYBRID
  420. #ifdef INCREMENTAL
  421. #define TestGlobalHeap(Nh, Live, hp) \
  422. do { \
  423. unsigned need = (Nh); \
  424. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  425. SWAPOUT; \
  426. reg[0] = r(0); \
  427. FCALLS -= need; \
  428. (hp) = IncAlloc(c_p,need,reg,(Live)); \
  429. r(0) = reg[0]; \
  430. SWAPIN; \
  431. } while (0)
  432. #else
  433. #define TestGlobalHeap(Nh, Live, hp) \
  434. do { \
  435. unsigned need = (Nh); \
  436. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  437. if (g_hend - g_htop < need) { \
  438. SWAPOUT; \
  439. reg[0] = r(0); \
  440. FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
  441. r(0) = reg[0]; \
  442. SWAPIN; \
  443. } \
  444. (hp) = global_htop; \
  445. } while (0)
  446. #endif
  447. #endif /* HYBRID */
  448. #define Init(N) make_blank(yb(N))
  449. #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
  450. #define Init3(Y1, Y2, Y3) \
  451. do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
  452. #define MakeFun(FunP, NumFree) \
  453. do { \
  454. SWAPOUT; \
  455. reg[0] = r(0); \
  456. r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
  457. SWAPIN; \
  458. } while (0)
  459. #define PutTuple(Dst, Arity) \
  460. do { \
  461. Dst = make_tuple(HTOP); \
  462. pt_arity = (Arity); \
  463. } while (0)
  464. /*
  465. * Check that we have not used up all reductions, then jump to the function
  466. * pointed to by the I register. If we are out of reductions, do a context switch.
  467. */
  468. #define DispatchMacro() \
  469. do { \
  470. BeamInstr* dis_next; \
  471. dis_next = (BeamInstr *) *I; \
  472. CHECK_ARGS(I); \
  473. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  474. FCALLS--; \
  475. Goto(dis_next); \
  476. } else { \
  477. goto context_switch; \
  478. } \
  479. } while (0)
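/*
 * Note on the test above (sketch, not code from this file): FCALLS covers
 * two regimes. Normally FCALLS holds the reductions left and 0 is the
 * floor. When call saving is active, FCALLS starts at 0 and counts down
 * towards the negative bound neg_o_reds (see process_main below), so the
 * slice is only exhausted when both comparisons fail:
 */
static int demo_reds_left(long fcalls, long neg_o_reds)
{
    return fcalls > 0               /* normal regime                     */
        || fcalls > neg_o_reds;     /* call saving: 0 >= fcalls > bound  */
}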
  480. #define DispatchMacroFun() \
  481. do { \
  482. BeamInstr* dis_next; \
  483. dis_next = (BeamInstr *) *I; \
  484. CHECK_ARGS(I); \
  485. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  486. FCALLS--; \
  487. Goto(dis_next); \
  488. } else { \
  489. goto context_switch_fun; \
  490. } \
  491. } while (0)
  492. #define DispatchMacrox() \
  493. do { \
  494. if (FCALLS > 0) { \
  495. Eterm* dis_next; \
  496. SET_I(((Export *) Arg(0))->address); \
  497. dis_next = (Eterm *) *I; \
  498. FCALLS--; \
  499. CHECK_ARGS(I); \
  500. Goto(dis_next); \
  501. } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
  502. && FCALLS > neg_o_reds) { \
  503. goto save_calls1; \
  504. } else { \
  505. SET_I(((Export *) Arg(0))->address); \
  506. CHECK_ARGS(I); \
  507. goto context_switch; \
  508. } \
  509. } while (0)
  510. #ifdef DEBUG
  511. /*
  512. * To simplify breakpoint setting, put the code in one place only and jump to it.
  513. */
  514. # define Dispatch() goto do_dispatch
  515. # define Dispatchx() goto do_dispatchx
  516. # define Dispatchfun() goto do_dispatchfun
  517. #else
  518. /*
  519. * Inline for speed.
  520. */
  521. # define Dispatch() DispatchMacro()
  522. # define Dispatchx() DispatchMacrox()
  523. # define Dispatchfun() DispatchMacroFun()
  524. #endif
  525. #define Self(R) R = c_p->id
  526. #define Node(R) R = erts_this_node->sysname
  527. #define Arg(N) I[(N)+1]
  528. #define Next(N) \
  529. I += (N) + 1; \
  530. ASSERT(VALID_INSTR(*I)); \
  531. Goto(*I)
  532. #define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0)
  533. #define NextPF(N, Dst) \
  534. I += N + 1; \
  535. ASSERT(VALID_INSTR(Dst)); \
  536. Goto(Dst)
  537. #define GetR(pos, tr) \
  538. do { \
  539. tr = Arg(pos); \
  540. switch (beam_reg_tag(tr)) { \
  541. case R_REG_DEF: tr = r(0); break; \
  542. case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
  543. case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
  544. } \
  545. CHECK_TERM(tr); \
  546. } while (0)
  547. #define GetArg1(N, Dst) GetR((N), Dst)
  548. #define GetArg2(N, Dst1, Dst2) \
  549. do { \
  550. GetR(N, Dst1); \
  551. GetR((N)+1, Dst2); \
  552. } while (0)
  553. #define PutList(H, T, Dst, Store) \
  554. do { \
  555. HTOP[0] = (H); HTOP[1] = (T); \
  556. Store(make_list(HTOP), Dst); \
  557. HTOP += 2; \
  558. } while (0)
  559. #define Move(Src, Dst, Store) \
  560. do { \
  561. Eterm term = (Src); \
  562. Store(term, Dst); \
  563. } while (0)
  564. #define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
  565. #define MoveGenDest(src, dstp) \
  566. if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
  567. #define MoveReturn(Src, Dest) \
  568. (Dest) = (Src); \
  569. I = c_p->cp; \
  570. ASSERT(VALID_INSTR(*c_p->cp)); \
  571. c_p->cp = 0; \
  572. CHECK_TERM(r(0)); \
  573. Goto(*I)
  574. #define DeallocateReturn(Deallocate) \
  575. do { \
  576. int words_to_pop = (Deallocate); \
  577. SET_I((BeamInstr *) cp_val(*E)); \
  578. E = ADD_BYTE_OFFSET(E, words_to_pop); \
  579. CHECK_TERM(r(0)); \
  580. Goto(*I); \
  581. } while (0)
  582. #define MoveDeallocateReturn(Src, Dest, Deallocate) \
  583. (Dest) = (Src); \
  584. DeallocateReturn(Deallocate)
  585. #define MoveCall(Src, Dest, CallDest, Size) \
  586. (Dest) = (Src); \
  587. SET_CP(c_p, I+Size+1); \
  588. SET_I((BeamInstr *) CallDest); \
  589. Dispatch();
  590. #define MoveCallLast(Src, Dest, CallDest, Deallocate) \
  591. (Dest) = (Src); \
  592. RESTORE_CP(E); \
  593. E = ADD_BYTE_OFFSET(E, (Deallocate)); \
  594. SET_I((BeamInstr *) CallDest); \
  595. Dispatch();
  596. #define MoveCallOnly(Src, Dest, CallDest) \
  597. (Dest) = (Src); \
  598. SET_I((BeamInstr *) CallDest); \
  599. Dispatch();
  600. #define MoveJump(Src) \
  601. r(0) = (Src); \
  602. SET_I((BeamInstr *) Arg(0)); \
  603. Goto(*I);
  604. #define GetList(Src, H, T) do { \
  605. Eterm* tmp_ptr = list_val(Src); \
  606. H = CAR(tmp_ptr); \
  607. T = CDR(tmp_ptr); } while (0)
  608. #define GetTupleElement(Src, Element, Dest) \
  609. do { \
  610. tmp_arg1 = (Eterm) COMPRESS_POINTER(((unsigned char *) tuple_val(Src)) + \
  611. (Element)); \
  612. (Dest) = (*(Eterm *) EXPAND_POINTER(tmp_arg1)); \
  613. } while (0)
  614. #define ExtractNextElement(Dest) \
  615. tmp_arg1 += sizeof(Eterm); \
  616. (Dest) = (* (Eterm *) (((unsigned char *) EXPAND_POINTER(tmp_arg1))))
  617. #define ExtractNextElement2(Dest) \
  618. do { \
  619. Eterm* ene_dstp = &(Dest); \
  620. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  621. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  622. tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
  623. } while (0)
  624. #define ExtractNextElement3(Dest) \
  625. do { \
  626. Eterm* ene_dstp = &(Dest); \
  627. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  628. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  629. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  630. tmp_arg1 += 3*sizeof(Eterm); \
  631. } while (0)
  632. #define ExtractNextElement4(Dest) \
  633. do { \
  634. Eterm* ene_dstp = &(Dest); \
  635. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  636. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  637. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  638. ene_dstp[3] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[4]; \
  639. tmp_arg1 += 4*sizeof(Eterm); \
  640. } while (0)
  641. #define ExtractElement(Element, Dest) \
  642. do { \
  643. tmp_arg1 += (Element); \
  644. (Dest) = (* (Eterm *) EXPAND_POINTER(tmp_arg1)); \
  645. } while (0)
  646. #define EqualImmed(X, Y, Action) if (X != Y) { Action; }
  647. #define NotEqualImmed(X, Y, Action) if (X == Y) { Action; }
  648. #define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
  649. #define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
  650. #define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
  651. #define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
  652. #define IsIntegerAllocate(Src, Need, Alive, Fail) \
  653. if (is_not_integer(Src)) { Fail; } \
  654. A(Need, Alive)
  655. #define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
  656. #define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
  657. #define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
  658. #define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
  659. if (is_not_list(Src)) { Fail; } \
  660. A(Need, Alive)
  661. #define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
  662. if (is_not_list(Src)) { Fail; } \
  663. TestHeap(Need, Alive)
  664. #define IsTuple(X, Action) if (is_not_tuple(X)) Action
  665. #define IsArity(Pointer, Arity, Fail) \
  666. if (*(Eterm *) \
  667. EXPAND_POINTER(tmp_arg1 = (Eterm) \
  668. COMPRESS_POINTER(tuple_val(Pointer))) != (Arity)) \
  669. { \
  670. Fail; \
  671. }
  672. #define IsFunction(X, Action) \
  673. do { \
  674. if ( !(is_any_fun(X)) ) { \
  675. Action; \
  676. } \
  677. } while (0)
  678. #define IsFunction2(F, A, Action) \
  679. do { \
  680. if (is_function_2(c_p, F, A) != am_true ) {\
  681. Action; \
  682. } \
  683. } while (0)
  684. #define IsTupleOfArity(Src, Arity, Fail) \
  685. do { \
  686. if (is_not_tuple(Src) || \
  687. *(Eterm *) \
  688. EXPAND_POINTER(tmp_arg1 = \
  689. (Eterm) COMPRESS_POINTER(tuple_val(Src))) != Arity) { \
  690. Fail; \
  691. } \
  692. } while (0)
  693. #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
  694. #define IsBinary(Src, Fail) \
  695. if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
  696. #define IsBitstring(Src, Fail) \
  697. if (is_not_binary(Src)) { Fail; }
  698. #if defined(ARCH_64) && !HALFWORD_HEAP
  699. #define BsSafeMul(A, B, Fail, Target) \
  700. do { Uint64 _res = (A) * (B); \
  701. if (_res / B != A) { Fail; } \
  702. Target = _res; \
  703. } while (0)
  704. #else
  705. #define BsSafeMul(A, B, Fail, Target) \
  706. do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
  707. if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
  708. Target = _res; \
  709. } while (0)
  710. #endif
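/*
 * Standalone sketch of the overflow check BsSafeMul uses on 64-bit builds
 * (illustrative names; the b != 0 guard is added here, while the macro can
 * rely on its Unit operand being at least 1): unsigned multiplication
 * wraps modulo 2^64, so after a wrap res / b no longer equals a.
 */
#include <stdint.h>

static int demo_safe_mul(uint64_t a, uint64_t b, uint64_t *out)
{
    uint64_t res = a * b;           /* may wrap on overflow              */
    if (b != 0 && res / b != a)
        return 0;                   /* overflow: caller takes Fail path  */
    *out = res;
    return 1;
}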
  711. #define BsGetFieldSize(Bits, Unit, Fail, Target) \
  712. do { \
  713. Sint _signed_size; Uint _uint_size; \
  714. if (is_small(Bits)) { \
  715. _signed_size = signed_val(Bits); \
  716. if (_signed_size < 0) { Fail; } \
  717. _uint_size = (Uint) _signed_size; \
  718. } else { \
  719. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  720. _uint_size = temp_bits; \
  721. } \
  722. BsSafeMul(_uint_size, Unit, Fail, Target); \
  723. } while (0)
  724. #define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
  725. do { \
  726. Sint _signed_size; Uint _uint_size; \
  727. if (is_small(Bits)) { \
  728. _signed_size = signed_val(Bits); \
  729. if (_signed_size < 0) { Fail; } \
  730. _uint_size = (Uint) _signed_size; \
  731. } else { \
  732. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  733. _uint_size = (Uint) temp_bits; \
  734. } \
  735. Target = _uint_size * Unit; \
  736. } while (0)
  737. #define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  738. do { \
  739. ErlBinMatchBuffer *_mb; \
  740. Eterm _result; Sint _size; \
  741. if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
  742. _size *= ((Flags) >> 3); \
  743. TestHeap(FLOAT_SIZE_OBJECT, Live); \
  744. _mb = ms_matchbuffer(Ms); \
  745. LIGHT_SWAPOUT; \
  746. _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
  747. LIGHT_SWAPIN; \
  748. HEAP_SPACE_VERIFIED(0); \
  749. if (is_non_value(_result)) { Fail; } \
  750. else { Store(_result, Dst); } \
  751. } while (0)
  752. #define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  753. do { \
  754. ErlBinMatchBuffer *_mb; \
  755. Eterm _result; \
  756. TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
  757. _mb = ms_matchbuffer(Ms); \
  758. LIGHT_SWAPOUT; \
  759. _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
  760. LIGHT_SWAPIN; \
  761. HEAP_SPACE_VERIFIED(0); \
  762. if (is_non_value(_result)) { Fail; } \
  763. else { Store(_result, Dst); } \
  764. } while (0)
  765. #define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  766. do { \
  767. ErlBinMatchBuffer *_mb; \
  768. Eterm _result; Uint _size; \
  769. BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
  770. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  771. _mb = ms_matchbuffer(Ms); \
  772. LIGHT_SWAPOUT; \
  773. _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
  774. LIGHT_SWAPIN; \
  775. HEAP_SPACE_VERIFIED(0); \
  776. if (is_non_value(_result)) { Fail; } \
  777. else { Store(_result, Dst); } \
  778. } while (0)
  779. #define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
  780. do { \
  781. ErlBinMatchBuffer *_mb; \
  782. Eterm _result; \
  783. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  784. _mb = ms_matchbuffer(Ms); \
  785. if (((_mb->size - _mb->offset) % Unit) == 0) { \
  786. LIGHT_SWAPOUT; \
  787. _result = erts_bs_get_binary_all_2(c_p, _mb); \
  788. LIGHT_SWAPIN; \
  789. HEAP_SPACE_VERIFIED(0); \
  790. ASSERT(is_value(_result)); \
  791. Store(_result, Dst); \
  792. } else { \
  793. HEAP_SPACE_VERIFIED(0); \
  794. Fail; } \
  795. } while (0)
  796. #define BsSkipBits2(Ms, Bits, Unit, Fail) \
  797. do { \
  798. ErlBinMatchBuffer *_mb; \
  799. size_t new_offset; \
  800. Uint _size; \
  801. _mb = ms_matchbuffer(Ms); \
  802. BsGetFieldSize(Bits, Unit, Fail, _size); \
  803. new_offset = _mb->offset + _size; \
  804. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  805. else { Fail; } \
  806. } while (0)
  807. #define BsSkipBitsAll2(Ms, Unit, Fail) \
  808. do { \
  809. ErlBinMatchBuffer *_mb; \
  810. _mb = ms_matchbuffer(Ms); \
  811. if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
  812. else { Fail; } \
  813. } while (0)
  814. #define BsSkipBitsImm2(Ms, Bits, Fail) \
  815. do { \
  816. ErlBinMatchBuffer *_mb; \
  817. size_t new_offset; \
  818. _mb = ms_matchbuffer(Ms); \
  819. new_offset = _mb->offset + (Bits); \
  820. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  821. else { Fail; } \
  822. } while (0)
  823. #define NewBsPutIntegerImm(Sz, Flags, Src) \
  824. do { \
  825. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
  826. } while (0)
  827. #define NewBsPutInteger(Sz, Flags, Src) \
  828. do { \
  829. Sint _size; \
  830. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  831. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
  832. { goto badarg; } \
  833. } while (0)
  834. #define NewBsPutFloatImm(Sz, Flags, Src) \
  835. do { \
  836. if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
  837. } while (0)
  838. #define NewBsPutFloat(Sz, Flags, Src) \
  839. do { \
  840. Sint _size; \
  841. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  842. if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
  843. } while (0)
  844. #define NewBsPutBinary(Sz, Flags, Src) \
  845. do { \
  846. Sint _size; \
  847. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  848. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
  849. } while (0)
  850. #define NewBsPutBinaryImm(Sz, Src) \
  851. do { \
  852. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
  853. } while (0)
  854. #define NewBsPutBinaryAll(Src, Unit) \
  855. do { \
  856. if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
  857. } while (0)
  858. #define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
  859. #define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
  860. #define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
  861. /*
  862. * process_main() is already huge, so we want to avoid inlining functions
  863. * into it, especially functions that are seldom used.
  864. */
  865. #ifdef __GNUC__
  866. # define NOINLINE __attribute__((__noinline__))
  867. #else
  868. # define NOINLINE
  869. #endif
  870. /*
  871. * The following functions are called directly by process_main().
  872. * Don't inline them.
  873. */
  874. static BifFunction translate_gc_bif(void* gcf) NOINLINE;
  875. static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
  876. Eterm* reg, BifFunction bf) NOINLINE;
  877. static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
  878. Eterm* reg, Eterm func) NOINLINE;
  879. static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
  880. static BeamInstr* apply(Process* p, Eterm module, Eterm function,
  881. Eterm args, Eterm* reg) NOINLINE;
  882. static BeamInstr* call_fun(Process* p, int arity,
  883. Eterm* reg, Eterm args) NOINLINE;
  884. static BeamInstr* apply_fun(Process* p, Eterm fun,
  885. Eterm args, Eterm* reg) NOINLINE;
  886. static Eterm new_fun(Process* p, Eterm* reg,
  887. ErlFunEntry* fe, int num_free) NOINLINE;
  888. /*
  889. * Functions not directly called by process_main(). OK to inline.
  890. */
  891. static BeamInstr* next_catch(Process* c_p, Eterm *reg);
  892. static void terminate_proc(Process* c_p, Eterm Value);
  893. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  894. static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
  895. BifFunction bf, Eterm args);
  896. static struct StackTrace * get_trace_from_exc(Eterm exc);
  897. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  898. #if defined(VXWORKS)
  899. static int init_done;
  900. #endif
  901. void
  902. init_emulator(void)
  903. {
  904. #if defined(VXWORKS)
  905. init_done = 0;
  906. #endif
  907. process_main();
  908. }
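/*
 * A minimal sketch (not from beam_emu.c; names illustrative) of the
 * call-twice pattern used by init_emulator()/process_main(): the first
 * call only publishes the handler label addresses, so the loader can
 * thread code, and returns; the second call starts executing.
 */
static void **demo_ops;                 /* plays the role of beam_ops    */

static void demo_main(void)
{
    static int  done = 0;
    static void *labels[] = { &&lb_stop };
    if (!done) {
        done = 1;
        demo_ops = labels;              /* export labels, then return    */
        return;
    }
    goto *demo_ops[0];                  /* second call: begin execution  */
lb_stop:
    return;
}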
  909. /*
  910. * On certain platforms, make sure that the main variables really are placed
  911. * in registers.
  912. */
  913. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  914. # define REG_x0 asm("%l0")
  915. # define REG_xregs asm("%l1")
  916. # define REG_htop asm("%l2")
  917. # define REG_stop asm("%l3")
  918. # define REG_I asm("%l4")
  919. # define REG_fcalls asm("%l5")
  920. # define REG_tmp_arg1 asm("%l6")
  921. # define REG_tmp_arg2 asm("%l7")
  922. #else
  923. # define REG_x0
  924. # define REG_xregs
  925. # define REG_htop
  926. # define REG_stop
  927. # define REG_I
  928. # define REG_fcalls
  929. # define REG_tmp_arg1
  930. # define REG_tmp_arg2
  931. #endif
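/*
 * The REG_* macros above use GCC's explicit register variables to pin
 * the interpreter's hottest variables to SPARC local registers %l0-%l7.
 * Minimal illustration (target-specific, illustrative only):
 */
#if defined(__GNUC__) && defined(sparc)
static long demo_pinned(long n)
{
    register long acc asm("%l0") = 0;   /* acc is kept in register %l0 */
    while (n-- > 0)
        acc += n;
    return acc;
}
#endif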
  932. /*
  933. * process_main() is called twice:
  934. * The first call performs some initialisation, including exporting
  935. * the instructions' C labels to the loader.
  936. * The second call starts execution of BEAM code. This call never returns.
  937. */
  938. void process_main(void)
  939. {
  940. #if !defined(VXWORKS)
  941. static int init_done = 0;
  942. #endif
  943. Process* c_p = NULL;
  944. int reds_used;
  945. #ifdef DEBUG
  946. Eterm pid;
  947. #endif
  948. /*
  949. * X register zero; also called r(0)
  950. */
  951. register Eterm x0 REG_x0 = NIL;
  952. /* Pointer to the X registers: x(1)..x(N); reg[0] is used only when doing GC,
  953. * in all other cases x0 is used.
  954. */
  955. register Eterm* reg REG_xregs = NULL;
  956. /*
  957. * Top of heap (next free location); grows upwards.
  958. */
  959. register Eterm* HTOP REG_htop = NULL;
  960. #ifdef HYBRID
  961. Eterm *g_htop;
  962. Eterm *g_hend;
  963. #endif
  964. /* Stack pointer. Grows downwards; points
  965. * to last item pushed (normally a saved
  966. * continuation pointer).
  967. */
  968. register Eterm* E REG_stop = NULL;
  969. /*
  970. * Pointer to next threaded instruction.
  971. */
  972. register BeamInstr *I REG_I = NULL;
  973. /* Number of reductions left. This function
  974. * returns to the scheduler when FCALLS reaches zero.
  975. */
  976. register Sint FCALLS REG_fcalls = 0;
  977. /*
  978. * Temporaries used for picking up arguments for instructions.
  979. */
  980. register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
  981. register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
  982. #if HEAP_ON_C_STACK
  983. Eterm tmp_big[2]; /* Temporary buffer for small bignums if HEAP_ON_C_STACK. */
  984. #else
  985. Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
  986. #endif
  987. /*
  988. * X registers and floating point registers are located in
  989. * scheduler specific data.
  990. */
  991. register FloatDef *freg;
  992. /*
  993. * For keeping the negative old value of 'reds' when call saving is active.
  994. */
  995. int neg_o_reds = 0;
  996. Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
  997. #ifndef NO_JUMP_TABLE
  998. static void* opcodes[] = { DEFINE_OPCODES };
  999. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  1000. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  1001. #endif
  1002. #else
  1003. int Go;
  1004. #endif
  1005. Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
  1006. Eterm pt_arity; /* Used by do_put_tuple */
  1007. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  1008. /*
  1009. * Note: In this function, we attempt to place rarely executed code towards
  1010. * the end of the function, in the hope that the cache hit rate will be better.
  1011. * The initialization code is only run once, so it is at the very end.
  1012. *
  1013. * Note: c_p->arity must be set to reflect the number of useful terms in
  1014. * c_p->arg_reg before calling the scheduler.
  1015. */
  1016. if (!init_done) {
  1017. init_done = 1;
  1018. goto init_emulator;
  1019. }
  1020. c_p = NULL;
  1021. reds_used = 0;
  1022. goto do_schedule1;
  1023. do_schedule:
  1024. reds_used = REDS_IN(c_p) - FCALLS;
  1025. do_schedule1:
  1026. PROCESS_MAIN_CHK_LOCKS(c_p);
  1027. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  1028. #if HALFWORD_HEAP
  1029. ASSERT(erts_get_scheduler_data()->num_tmp_heap_used == 0);
  1030. #endif
  1031. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1032. c_p = schedule(c_p, reds_used);
  1033. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1034. #ifdef DEBUG
  1035. pid = c_p->id;
  1036. #endif
  1037. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  1038. PROCESS_MAIN_CHK_LOCKS(c_p);
  1039. reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
  1040. freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
  1041. #if !HEAP_ON_C_STACK
  1042. tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
  1043. #endif
  1044. ERL_BITS_RELOAD_STATEP(c_p);
  1045. {
  1046. int reds;
  1047. Eterm* argp;
  1048. BeamInstr *next;
  1049. int i;
  1050. argp = c_p->arg_reg;
  1051. for (i = c_p->arity - 1; i > 0; i--) {
  1052. reg[i] = argp[i];
  1053. CHECK_TERM(reg[i]);
  1054. }
  1055. /*
  1056. * We put the original reduction count in the process structure, to reduce
  1057. * the code size (referencing a field in a struct through a pointer stored
  1058. * in a register gives smaller code than referencing a global variable).
  1059. */
  1060. SET_I(c_p->i);
  1061. reds = c_p->fcalls;
  1062. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
  1063. && (c_p->trace_flags & F_SENSITIVE) == 0) {
  1064. neg_o_reds = -reds;
  1065. FCALLS = REDS_IN(c_p) = 0;
  1066. } else {
  1067. neg_o_reds = 0;
  1068. FCALLS = REDS_IN(c_p) = reds;
  1069. }
  1070. next = (BeamInstr *) *I;
  1071. r(0) = c_p->arg_reg[0];
  1072. #ifdef HARDDEBUG
  1073. if (c_p->arity > 0) {
  1074. CHECK_TERM(r(0));
  1075. }
  1076. #endif
  1077. SWAPIN;
  1078. ASSERT(VALID_INSTR(next));
  1079. Goto(next);
  1080. }
  1081. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  1082. emulator_loop:
  1083. #endif
  1084. #ifdef NO_JUMP_TABLE
  1085. switch (Go) {
  1086. #endif
  1087. #include "beam_hot.h"
  1088. #define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
  1089. #define ARITH_FUNC(name) erts_gc_##name
  1090. {
  1091. Eterm increment_reg_val;
  1092. Eterm increment_val;
  1093. Uint live;
  1094. Eterm result;
  1095. OpCase(i_increment_yIId):
  1096. increment_reg_val = yb(Arg(0));
  1097. goto do_increment;
  1098. OpCase(i_increment_xIId):
  1099. increment_reg_val = xb(Arg(0));
  1100. goto do_increment;
  1101. OpCase(i_increment_rIId):
  1102. increment_reg_val = r(0);
  1103. I--;
  1104. do_increment:
  1105. increment_val = Arg(1);
  1106. if (is_small(increment_reg_val)) {
  1107. Sint i = signed_val(increment_reg_val) + increment_val;
  1108. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1109. if (MY_IS_SSMALL(i)) {
  1110. result = make_small(i);
  1111. store_result:
  1112. StoreBifResult(3, result);
  1113. }
  1114. }
  1115. live = Arg(2);
  1116. SWAPOUT;
  1117. reg[0] = r(0);
  1118. reg[live] = increment_reg_val;
  1119. reg[live+1] = make_small(increment_val);
  1120. result = erts_gc_mixed_plus(c_p, reg, live);
  1121. r(0) = reg[0];
  1122. SWAPIN;
  1123. ERTS_HOLE_CHECK(c_p);
  1124. if (is_value(result)) {
  1125. goto store_result;
  1126. }
  1127. ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
  1128. goto find_func_info;
  1129. }
  1130. OpCase(i_plus_jId):
  1131. {
  1132. Eterm result;
  1133. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1134. Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
  1135. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1136. if (MY_IS_SSMALL(i)) {
  1137. result = make_small(i);
  1138. STORE_ARITH_RESULT(result);
  1139. }
  1140. }
  1141. arith_func = ARITH_FUNC(mixed_plus);
  1142. goto do_big_arith2;
  1143. }
  1144. OpCase(i_minus_jId):
  1145. {
  1146. Eterm result;
  1147. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1148. Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
  1149. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1150. if (MY_IS_SSMALL(i)) {
  1151. result = make_small(i);
  1152. STORE_ARITH_RESULT(result);
  1153. }
  1154. }
  1155. arith_func = ARITH_FUNC(mixed_minus);
  1156. goto do_big_arith2;
  1157. }
  1158. OpCase(i_is_lt_f):
  1159. if (CMP_GE(tmp_arg1, tmp_arg2)) {
  1160. ClauseFail();
  1161. }
  1162. Next(1);
  1163. OpCase(i_is_ge_f):
  1164. if (CMP_LT(tmp_arg1, tmp_arg2)) {
  1165. ClauseFail();
  1166. }
  1167. Next(1);
  1168. OpCase(i_is_eq_f):
  1169. if (CMP_NE(tmp_arg1, tmp_arg2)) {
  1170. ClauseFail();
  1171. }
  1172. Next(1);
  1173. OpCase(i_is_ne_f):
  1174. if (CMP_EQ(tmp_arg1, tmp_arg2)) {
  1175. ClauseFail();
  1176. }
  1177. Next(1);
  1178. OpCase(i_is_eq_exact_f):
  1179. if (!EQ(tmp_arg1, tmp_arg2)) {
  1180. ClauseFail();
  1181. }
  1182. Next(1);
  1183. {
  1184. Eterm is_eq_exact_lit_val;
  1185. OpCase(i_is_eq_exact_literal_xfc):
  1186. is_eq_exact_lit_val = xb(Arg(0));
  1187. I++;
  1188. goto do_is_eq_exact_literal;
  1189. OpCase(i_is_eq_exact_literal_yfc):
  1190. is_eq_exact_lit_val = yb(Arg(0));
  1191. I++;
  1192. goto do_is_eq_exact_literal;
  1193. OpCase(i_is_eq_exact_literal_rfc):
  1194. is_eq_exact_lit_val = r(0);
  1195. do_is_eq_exact_literal:
  1196. if (!eq(Arg(1), is_eq_exact_lit_val)) {
  1197. ClauseFail();
  1198. }
  1199. Next(2);
  1200. }
  1201. {
  1202. Eterm is_ne_exact_lit_val;
  1203. OpCase(i_is_ne_exact_literal_xfc):
  1204. is_ne_exact_lit_val = xb(Arg(0));
  1205. I++;
  1206. goto do_is_ne_exact_literal;
  1207. OpCase(i_is_ne_exact_literal_yfc):
  1208. is_ne_exact_lit_val = yb(Arg(0));
  1209. I++;
  1210. goto do_is_ne_exact_literal;
  1211. OpCase(i_is_ne_exact_literal_rfc):
  1212. is_ne_exact_lit_val = r(0);
  1213. do_is_ne_exact_literal:
  1214. if (eq(Arg(1), is_ne_exact_lit_val)) {
  1215. ClauseFail();
  1216. }
  1217. Next(2);
  1218. }
  1219. OpCase(i_move_call_only_fcr): {
  1220. r(0) = Arg(1);
  1221. }
  1222. /* FALL THROUGH */
  1223. OpCase(i_call_only_f): {
  1224. SET_I((BeamInstr *) Arg(0));
  1225. Dispatch();
  1226. }
  1227. OpCase(i_move_call_last_fPcr): {
  1228. r(0) = Arg(2);
  1229. }
  1230. /* FALL THROUGH */
  1231. OpCase(i_call_last_fP): {
  1232. RESTORE_CP(E);
  1233. E = ADD_BYTE_OFFSET(E, Arg(1));
  1234. SET_I((BeamInstr *) Arg(0));
  1235. Dispatch();
  1236. }
  1237. OpCase(i_move_call_crf): {
  1238. r(0) = Arg(0);
  1239. I++;
  1240. }
  1241. /* FALL THROUGH */
  1242. OpCase(i_call_f): {
  1243. SET_CP(c_p, I+2);
  1244. SET_I((BeamInstr *) Arg(0));
  1245. Dispatch();
  1246. }
  1247. OpCase(i_move_call_ext_last_ePcr): {
  1248. r(0) = Arg(2);
  1249. }
  1250. /* FALL THROUGH */
  1251. OpCase(i_call_ext_last_eP):
  1252. RESTORE_CP(E);
  1253. E = ADD_BYTE_OFFSET(E, Arg(1));
  1254. /*
  1255. * Note: The pointer to the export entry is never NULL; if the module
  1256. * is not loaded, it points to code which will invoke the error handler
  1257. * (see lb_call_error_handler below).
  1258. */
  1259. Dispatchx();
  1260. OpCase(i_move_call_ext_cre): {
  1261. r(0) = Arg(0);
  1262. I++;
  1263. }
  1264. /* FALL THROUGH */
  1265. OpCase(i_call_ext_e):
  1266. SET_CP(c_p, I+2);
  1267. Dispatchx();
  1268. OpCase(i_move_call_ext_only_ecr): {
  1269. r(0) = Arg(1);
  1270. }
  1271. /* FALL THROUGH */
  1272. OpCase(i_call_ext_only_e):
  1273. Dispatchx();
  1274. OpCase(init_y): {
  1275. BeamInstr *next;
  1276. PreFetch(1, next);
  1277. make_blank(yb(Arg(0)));
  1278. NextPF(1, next);
  1279. }
  1280. OpCase(i_trim_I): {
  1281. BeamInstr *next;
  1282. Uint words;
  1283. Uint cp;
  1284. words = Arg(0);
  1285. cp = E[0];
  1286. PreFetch(1, next);
  1287. E += words;
  1288. E[0] = cp;
  1289. NextPF(1, next);
  1290. }
  1291. OpCase(move_x1_c): {
  1292. x(1) = Arg(0);
  1293. Next(1);
  1294. }
  1295. OpCase(move_x2_c): {
  1296. x(2) = Arg(0);
  1297. Next(1);
  1298. }
  1299. OpCase(return): {
  1300. SET_I(c_p->cp);
  1301. /*
  1302. * We must clear the CP to make sure that a stale value does not
  1303. * create a false module dependency preventing code upgrading.
  1304. * It also means that we can use the CP in stack backtraces.
  1305. */
  1306. c_p->cp = 0;
  1307. CHECK_TERM(r(0));
  1308. HEAP_SPACE_VERIFIED(0);
  1309. Goto(*I);
  1310. }
  1311. /*
  1312. * Send is almost a standard call-BIF with two arguments, except for:
  1313. * 1) It cannot be traced.
  1314. * 2) There is no pointer to the send_2 function stored in
  1315. * the instruction.
  1316. */
  1317. OpCase(send): {
  1318. BeamInstr *next;
  1319. Eterm result;
  1320. PRE_BIF_SWAPOUT(c_p);
  1321. c_p->fcalls = FCALLS - 1;
  1322. result = send_2(c_p, r(0), x(1));
  1323. PreFetch(0, next);
  1324. POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
  1325. FCALLS = c_p->fcalls;
  1326. if (is_value(result)) {
  1327. r(0) = result;
  1328. CHECK_TERM(r(0));
  1329. NextPF(0, next);
  1330. } else if (c_p->freason == TRAP) {
  1331. SET_CP(c_p, I+1);
  1332. SET_I(*((BeamInstr **) (BeamInstr) ((c_p)->def_arg_reg + 3)));
  1333. SWAPIN;
  1334. r(0) = c_p->def_arg_reg[0];
  1335. x(1) = c_p->def_arg_reg[1];
  1336. Dispatch();
  1337. }
  1338. goto find_func_info;
  1339. }
  1340. {
  1341. Eterm element_index;
  1342. Eterm element_tuple;
  1343. OpCase(i_element_xjsd):
  1344. element_tuple = xb(Arg(0));
  1345. I++;
  1346. goto do_element;
  1347. OpCase(i_element_yjsd):
  1348. element_tuple = yb(Arg(0));
  1349. I++;
  1350. goto do_element;
  1351. OpCase(i_element_rjsd):
  1352. element_tuple = r(0);
  1353. /* Fall through */
  1354. do_element:
  1355. GetArg1(1, element_index);
  1356. if (is_small(element_index) && is_tuple(element_tuple)) {
  1357. Eterm* tp = tuple_val(element_tuple);
  1358. if ((signed_val(element_index) >= 1) &&
  1359. (signed_val(element_index) <= arityval(*tp))) {
  1360. Eterm result = tp[signed_val(element_index)];
  1361. StoreBifResult(2, result);
  1362. }
  1363. }
  1364. }
  1365. /* Fall through */
  1366. OpCase(badarg_j):
  1367. badarg:
  1368. c_p->freason = BADARG;
  1369. goto lb_Cl_error;
  1370. {
  1371. Eterm fast_element_tuple;
  1372. OpCase(i_fast_element_rjId):
  1373. fast_element_tuple = r(0);
  1374. do_fast_element:
  1375. if (is_tuple(fast_element_tuple)) {
  1376. Eterm* tp = tuple_val(fast_element_tuple);
  1377. Eterm pos = Arg(1); /* Untagged integer >= 1 */
  1378. if (pos <= arityval(*tp)) {
  1379. Eterm result = tp[pos];
  1380. StoreBifResult(2, result);
  1381. }
  1382. }
  1383. goto badarg;
  1384. OpCase(i_fast_element_xjId):
  1385. fast_element_tuple = xb(Arg(0));
  1386. I++;
  1387. goto do_fast_element;
  1388. OpCase(i_fast_element_yjId):
  1389. fast_element_tuple = yb(Arg(0));
  1390. I++;
  1391. goto do_fast_element;
  1392. }
  1393. OpCase(catch_yf):
  1394. c_p->catches++;
  1395. yb(Arg(0)) = Arg(1);
  1396. Next(2);
  1397. OpCase(catch_end_y): {
  1398. c_p->catches--;
  1399. make_blank(yb(Arg(0)));
  1400. if (is_non_value(r(0))) {
  1401. if (x(1) == am_throw) {
  1402. r(0) = x(2);
  1403. } else {
  1404. if (x(1) == am_error) {
  1405. SWAPOUT;
  1406. x(2) = add_stacktrace(c_p, x(2), x(3));
  1407. SWAPIN;
  1408. }
  1409. /* only x(2) is included in the rootset here */
  1410. if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
  1411. * created heap fragments */
  1412. SWAPOUT;
  1413. PROCESS_MAIN_CHK_LOCKS(c_p);
  1414. FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
  1415. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1416. PROCESS_MAIN_CHK_LOCKS(c_p);
  1417. SWAPIN;
  1418. }
  1419. r(0) = TUPLE2(HTOP, am_EXIT, x(2));
  1420. HTOP += 3;
  1421. }
  1422. }
  1423. CHECK_TERM(r(0));
  1424. Next(1);
  1425. }
  1426. OpCase(try_end_y): {
  1427. c_p->catches--;
  1428. make_blank(yb(Arg(0)));
  1429. if (is_non_value(r(0))) {
  1430. r(0) = x(1);
  1431. x(1) = x(2);
  1432. x(2) = x(3);
  1433. }
  1434. Next(1);
  1435. }
  1436. /*
  1437. * Skeleton for receive statement:
  1438. *
  1439. * recv_mark L1 Optional
  1440. * call make_ref/monitor Optional
  1441. * ...
  1442. * recv_set L1 Optional
  1443. * L1: <-------------------+
  1444. * <-----------+ |
  1445. * | |
  1446. * loop_rec L2 ------+---+ |
  1447. * ... | | |
  1448. * remove_message | | |
  1449. * jump L3 | | |
  1450. * ... | | |
  1451. * loop_rec_end L1 --+ | |
  1452. * L2: <---------------+ |
  1453. * wait L1 -----------------+ or wait_timeout
  1454. * timeout
  1455. *
  1456. * L3: Code after receive...
  1457. *
  1458. *
  1459. */
  1460. OpCase(recv_mark_f): {
  1461. /*
  1462. * Save the current position in the message buffer and
  1463. * the label for the loop_rec/2 instruction for
  1464. * the receive statement.
  1465. */
  1466. c_p->msg.mark = (BeamInstr *) Arg(0);
  1467. c_p->msg.saved_last = c_p->msg.last;
  1468. Next(1);
  1469. }
  1470. OpCase(i_recv_set): {
  1471. /*
  1472. * If the mark is valid (points to the loop_rec/2
  1473. * instruction that follows), we know that the saved
  1474. * position points to the first message that could
  1475. * possibly be matched out.
  1476. *
  1477. * If the mark is invalid, we do nothing, meaning that
  1478. * we will look through all messages in the message queue.
  1479. */
  1480. if (c_p->msg.mark == (BeamInstr *) (I+1)) {
  1481. c_p->msg.save = c_p->msg.saved_last;
  1482. }
  1483. I++;
  1484. /* Fall through to the loop_rec/2 instruction */
  1485. }
  1486. /*
  1487. * Pick up the next message and place it in x(0).
  1488. * If no message, jump to a wait or wait_timeout instruction.
  1489. */
  1490. OpCase(i_loop_rec_fr):
  1491. {
  1492. BeamInstr *next;
  1493. ErlMessage* msgp;
  1494. loop_rec__:
  1495. PROCESS_MAIN_CHK_LOCKS(c_p);
  1496. msgp = PEEK_MESSAGE(c_p);
  1497. if (!msgp) {
  1498. #ifdef ERTS_SMP
  1499. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1500. /* Make sure messages won't pass exit signals... */
  1501. if (ERTS_PROC_PENDING_EXIT(c_p)) {
  1502. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1503. SWAPOUT;
  1504. goto do_schedule; /* Will be rescheduled for exit */
  1505. }
  1506. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  1507. msgp = PEEK_MESSAGE(c_p);
  1508. if (msgp)
  1509. erts_smp_proc_unlock(c_p, ERTS_P

[Listing truncated here.]