PageRenderTime 71ms CodeModel.GetById 31ms RepoModel.GetById 0ms app.codeStats 1ms

/erts/emulator/beam/beam_emu.c

http://github.com/mfoemmel/erlang-otp
C | 6118 lines | 4531 code | 655 blank | 932 comment | 860 complexity | 73fedbd6b1989946491589a2370cb943 MD5 | raw file
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception

Large files are truncated, but you can click here to view the full file

  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2009. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include <stddef.h> /* offsetof() */
  23. #include "sys.h"
  24. #include "erl_vm.h"
  25. #include "global.h"
  26. #include "erl_process.h"
  27. #include "erl_nmgc.h"
  28. #include "error.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "beam_load.h"
  32. #include "erl_binary.h"
  33. #include "erl_bits.h"
  34. #include "dist.h"
  35. #include "beam_bp.h"
  36. #include "beam_catches.h"
  37. #ifdef HIPE
  38. #include "hipe_mode_switch.h"
  39. #include "hipe_bif1.h"
  40. #endif
  41. /* #define HARDDEBUG 1 */
  42. #if defined(NO_JUMP_TABLE)
  43. # define OpCase(OpCode) case op_##OpCode: lb_##OpCode
  44. # define CountCase(OpCode) case op_count_##OpCode
  45. # define OpCode(OpCode) ((Uint*)op_##OpCode)
  46. # define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
  47. # define LabelAddr(Addr) &&##Addr
  48. #else
  49. # define OpCase(OpCode) lb_##OpCode
  50. # define CountCase(OpCode) lb_count_##OpCode
  51. # define Goto(Rel) goto *(Rel)
  52. # define LabelAddr(Label) &&Label
  53. # define OpCode(OpCode) (&&lb_##OpCode)
  54. #endif
  55. #ifdef ERTS_ENABLE_LOCK_CHECK
  56. # ifdef ERTS_SMP
  57. # define PROCESS_MAIN_CHK_LOCKS(P) \
  58. do { \
  59. if ((P)) { \
  60. erts_pix_lock_t *pix_lock__ = ERTS_PIX2PIXLOCK(internal_pid_index((P)->id));\
  61. erts_proc_lc_chk_only_proc_main((P)); \
  62. erts_pix_lock(pix_lock__); \
  63. ASSERT(0 < (P)->lock.refc && (P)->lock.refc < erts_no_schedulers*5);\
  64. erts_pix_unlock(pix_lock__); \
  65. } \
  66. else \
  67. erts_lc_check_exact(NULL, 0); \
  68. ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
  69. } while (0)
  70. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
  71. if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
  72. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
  73. if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
  74. # else
  75. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  76. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  77. # define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
  78. # endif
  79. #else
  80. # define PROCESS_MAIN_CHK_LOCKS(P)
  81. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  82. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  83. #endif
  84. /*
  85. * Define macros for deep checking of terms.
  86. */
  87. #if defined(HARDDEBUG)
  88. # define CHECK_TERM(T) size_object(T)
  89. # define CHECK_ARGS(PC) \
  90. do { \
  91. int i_; \
  92. int Arity_ = PC[-1]; \
  93. if (Arity_ > 0) { \
  94. CHECK_TERM(r(0)); \
  95. } \
  96. for (i_ = 1; i_ < Arity_; i_++) { \
  97. CHECK_TERM(x(i_)); \
  98. } \
  99. } while (0)
  100. #else
  101. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  102. # define CHECK_ARGS(T)
  103. #endif
  104. #ifndef MAX
  105. #define MAX(x, y) (((x) > (y)) ? (x) : (y))
  106. #endif
  107. #define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
  108. /*
  109. * We reuse some of the fields in the save area in the process structure.
  110. * This is safe to do, since this space is only actively used when
  111. * the process is switched out.
  112. */
  113. #define REDS_IN(p) ((p)->def_arg_reg[5])
  114. /*
  115. * Add a byte offset to a pointer to Eterm. This is useful when
  116. * the loader has precalculated a byte offset.
  117. */
  118. #define ADD_BYTE_OFFSET(ptr, offset) \
  119. ((Eterm *) (((unsigned char *)ptr) + (offset)))
  120. /* We don't check the range if an ordinary switch is used */
  121. #ifdef NO_JUMP_TABLE
  122. #define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
  123. #else
  124. #define VALID_INSTR(IP) \
  125. ((Sint)LabelAddr(emulator_loop) <= (Sint)(IP) && \
  126. (Sint)(IP) < (Sint)LabelAddr(end_emulator_loop))
  127. #endif /* NO_JUMP_TABLE */
  128. #define SET_CP(p, ip) \
  129. ASSERT(VALID_INSTR(*(ip))); \
  130. (p)->cp = (ip)
  131. #define SET_I(ip) \
  132. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  133. I = (ip)
  134. #define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
  135. /*
  136. * Store a result into a register given a destination descriptor.
  137. */
  138. #define StoreResult(Result, DestDesc) \
  139. do { \
  140. Eterm stb_reg; \
  141. stb_reg = (DestDesc); \
  142. CHECK_TERM(Result); \
  143. switch (beam_reg_tag(stb_reg)) { \
  144. case R_REG_DEF: \
  145. r(0) = (Result); break; \
  146. case X_REG_DEF: \
  147. xb(x_reg_offset(stb_reg)) = (Result); break; \
  148. default: \
  149. yb(y_reg_offset(stb_reg)) = (Result); break; \
  150. } \
  151. } while (0)
  152. #define StoreSimpleDest(Src, Dest) Dest = (Src)
  153. /*
  154. * Store a result into a register and execute the next instruction.
  155. * Dst points to the word with a destination descriptor, which MUST
  156. * be just before the next instruction.
  157. */
  158. #define StoreBifResult(Dst, Result) \
  159. do { \
  160. Eterm* stb_next; \
  161. Eterm stb_reg; \
  162. stb_reg = Arg(Dst); \
  163. I += (Dst) + 2; \
  164. stb_next = (Eterm *) *I; \
  165. CHECK_TERM(Result); \
  166. switch (beam_reg_tag(stb_reg)) { \
  167. case R_REG_DEF: \
  168. r(0) = (Result); Goto(stb_next); \
  169. case X_REG_DEF: \
  170. xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  171. default: \
  172. yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  173. } \
  174. } while (0)
  175. #define ClauseFail() goto lb_jump_f
  176. #define SAVE_CP(X) \
  177. do { \
  178. *(X) = make_cp(c_p->cp); \
  179. c_p->cp = 0; \
  180. } while(0)
  181. #define RESTORE_CP(X) SET_CP(c_p, cp_val(*(X)))
  182. #define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
  183. /*
  184. * Special Beam instructions.
  185. */
  186. Eterm beam_apply[2];
  187. Eterm beam_exit[1];
  188. Eterm beam_continue_exit[1];
  189. Eterm* em_call_error_handler;
  190. Eterm* em_apply_bif;
  191. Eterm* em_call_traced_function;
  192. /* NOTE These should be the only variables containing trace instructions.
  193. ** Sometimes tests are for the instruction value, and sometimes
  194. ** for the referring variable (one of these), and rogue references
  195. ** will most likely cause chaos.
  196. */
  197. Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  198. Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
  199. Eterm beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  200. /*
  201. * All Beam instructions in numerical order.
  202. */
  203. #ifndef NO_JUMP_TABLE
  204. void** beam_ops;
  205. #endif
  206. #ifndef ERTS_SMP /* Not supported with smp emulator */
  207. extern int count_instructions;
  208. #endif
  209. #if defined(HYBRID)
  210. #define SWAPIN \
  211. g_htop = global_htop; \
  212. g_hend = global_hend; \
  213. HTOP = HEAP_TOP(c_p); \
  214. E = c_p->stop
  215. #define SWAPOUT \
  216. global_htop = g_htop; \
  217. global_hend = g_hend; \
  218. HEAP_TOP(c_p) = HTOP; \
  219. c_p->stop = E
  220. #else
  221. #define SWAPIN \
  222. HTOP = HEAP_TOP(c_p); \
  223. E = c_p->stop
  224. #define SWAPOUT \
  225. HEAP_TOP(c_p) = HTOP; \
  226. c_p->stop = E
  227. /*
  228. * Use LIGHT_SWAPOUT when the called function
  229. * will call HeapOnlyAlloc() (and never HAlloc()).
  230. */
  231. #ifdef DEBUG
  232. # /* The stack pointer is used in an assertion. */
  233. # define LIGHT_SWAPOUT SWAPOUT
  234. #else
  235. # define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
  236. #endif
  237. /*
  238. * Use LIGHT_SWAPIN when we know that c_p->stop cannot
  239. * have been updated (i.e. if there cannot have been
  240. * a garbage-collection).
  241. */
  242. #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
  243. #endif
  244. #define PRE_BIF_SWAPOUT(P) \
  245. HEAP_TOP((P)) = HTOP; \
  246. (P)->stop = E; \
  247. PROCESS_MAIN_CHK_LOCKS((P)); \
  248. ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
  249. #if defined(HYBRID)
  250. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  251. if ((_p)->mbuf) { \
  252. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  253. } \
  254. SWAPIN
  255. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  256. if ((_p)->mbuf) { \
  257. _regs[0] = r(0); \
  258. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  259. r(0) = _regs[0]; \
  260. } \
  261. SWAPIN
  262. #else
  263. # define POST_BIF_GC_SWAPIN_0(_p, _res) \
  264. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  265. PROCESS_MAIN_CHK_LOCKS((_p)); \
  266. if ((_p)->mbuf) { \
  267. _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
  268. E = (_p)->stop; \
  269. } \
  270. HTOP = HEAP_TOP((_p))
  271. # define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
  272. ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
  273. PROCESS_MAIN_CHK_LOCKS((_p)); \
  274. if ((_p)->mbuf) { \
  275. _regs[0] = r(0); \
  276. _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
  277. r(0) = _regs[0]; \
  278. E = (_p)->stop; \
  279. } \
  280. HTOP = HEAP_TOP((_p))
  281. #endif
  282. #define db(N) (N)
  283. #define tb(N) (N)
  284. #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
  285. #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
  286. #define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  287. #define x(N) reg[N]
  288. #define y(N) E[N]
  289. #define r(N) x##N
  290. /*
  291. * Makes sure that there are StackNeed + HeapNeed + 1 words available
  292. * on the combined heap/stack segment, then allocates StackNeed + 1
  293. * words on the stack and saves CP.
  294. *
  295. * M is number of live registers to preserve during garbage collection
  296. */
  297. #define AH(StackNeed, HeapNeed, M) \
  298. do { \
  299. int needed; \
  300. needed = (StackNeed) + 1; \
  301. if (E - HTOP < (needed + (HeapNeed))) { \
  302. SWAPOUT; \
  303. reg[0] = r(0); \
  304. PROCESS_MAIN_CHK_LOCKS(c_p); \
  305. FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
  306. PROCESS_MAIN_CHK_LOCKS(c_p); \
  307. r(0) = reg[0]; \
  308. SWAPIN; \
  309. } \
  310. E -= needed; \
  311. SAVE_CP(E); \
  312. } while (0)
  313. #define Allocate(Ns, Live) AH(Ns, 0, Live)
  314. #define AllocateZero(Ns, Live) \
  315. do { Eterm* ptr; \
  316. int i = (Ns); \
  317. AH(i, 0, Live); \
  318. for (ptr = E + i; ptr > E; ptr--) { \
  319. make_blank(*ptr); \
  320. } \
  321. } while (0)
  322. #define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
  323. #define AllocateHeapZero(Ns, Nh, Live) \
  324. do { Eterm* ptr; \
  325. int i = (Ns); \
  326. AH(i, Nh, Live); \
  327. for (ptr = E + i; ptr > E; ptr--) { \
  328. make_blank(*ptr); \
  329. } \
  330. } while (0)
  331. #define AllocateInit(Ns, Live, Y) \
  332. do { AH(Ns, 0, Live); make_blank(Y); } while (0)
  333. /*
  334. * Like the AH macro, but allocates no additional heap space.
  335. */
  336. #define A(StackNeed, M) AH(StackNeed, 0, M)
  337. #define D(N) \
  338. RESTORE_CP(E); \
  339. E += (N) + 1;
  340. /*
  341. * Check if Nh words of heap are available; if not, do a garbage collection.
  342. * Live is number of active argument registers to be preserved.
  343. */
  344. #define TestHeap(Nh, Live) \
  345. do { \
  346. unsigned need = (Nh); \
  347. if (E - HTOP < need) { \
  348. SWAPOUT; \
  349. reg[0] = r(0); \
  350. PROCESS_MAIN_CHK_LOCKS(c_p); \
  351. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  352. PROCESS_MAIN_CHK_LOCKS(c_p); \
  353. r(0) = reg[0]; \
  354. SWAPIN; \
  355. } \
  356. } while (0)
  357. /*
  358. * Check if Nh words of heap are available; if not, do a garbage collection.
  359. * Live is number of active argument registers to be preserved.
  360. * Takes special care to preserve Extra if a garbage collection occurs.
  361. */
  362. #define TestHeapPreserve(Nh, Live, Extra) \
  363. do { \
  364. unsigned need = (Nh); \
  365. if (E - HTOP < need) { \
  366. SWAPOUT; \
  367. reg[0] = r(0); \
  368. reg[Live] = Extra; \
  369. PROCESS_MAIN_CHK_LOCKS(c_p); \
  370. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
  371. PROCESS_MAIN_CHK_LOCKS(c_p); \
  372. if (Live > 0) { \
  373. r(0) = reg[0]; \
  374. } \
  375. Extra = reg[Live]; \
  376. SWAPIN; \
  377. } \
  378. } while (0)
  379. #ifdef HYBRID
  380. #ifdef INCREMENTAL
  381. #define TestGlobalHeap(Nh, Live, hp) \
  382. do { \
  383. unsigned need = (Nh); \
  384. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  385. SWAPOUT; \
  386. reg[0] = r(0); \
  387. FCALLS -= need; \
  388. (hp) = IncAlloc(c_p,need,reg,(Live)); \
  389. r(0) = reg[0]; \
  390. SWAPIN; \
  391. } while (0)
  392. #else
  393. #define TestGlobalHeap(Nh, Live, hp) \
  394. do { \
  395. unsigned need = (Nh); \
  396. ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
  397. if (g_hend - g_htop < need) { \
  398. SWAPOUT; \
  399. reg[0] = r(0); \
  400. FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
  401. r(0) = reg[0]; \
  402. SWAPIN; \
  403. } \
  404. (hp) = global_htop; \
  405. } while (0)
  406. #endif
  407. #endif /* HYBRID */
  408. #define Init(N) make_blank(yb(N))
  409. #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
  410. #define Init3(Y1, Y2, Y3) \
  411. do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
  412. #define MakeFun(FunP, NumFree) \
  413. do { \
  414. SWAPOUT; \
  415. reg[0] = r(0); \
  416. r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
  417. SWAPIN; \
  418. } while (0)
  419. /*
  420. * Check that we haven't used the reductions and jump to function pointed to by
  421. * the I register. If we are out of reductions, do a context switch.
  422. */
  423. #define DispatchMacro() \
  424. do { \
  425. Eterm* dis_next; \
  426. dis_next = (Eterm *) *I; \
  427. CHECK_ARGS(I); \
  428. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  429. FCALLS--; \
  430. Goto(dis_next); \
  431. } else { \
  432. goto context_switch; \
  433. } \
  434. } while (0)
  435. #define DispatchMacroFun() \
  436. do { \
  437. Eterm* dis_next; \
  438. dis_next = (Eterm *) *I; \
  439. CHECK_ARGS(I); \
  440. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  441. FCALLS--; \
  442. Goto(dis_next); \
  443. } else { \
  444. goto context_switch_fun; \
  445. } \
  446. } while (0)
  447. #define DispatchMacrox() \
  448. do { \
  449. if (FCALLS > 0) { \
  450. Eterm* dis_next; \
  451. SET_I(((Export *) Arg(0))->address); \
  452. dis_next = (Eterm *) *I; \
  453. FCALLS--; \
  454. CHECK_ARGS(I); \
  455. Goto(dis_next); \
  456. } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
  457. && FCALLS > neg_o_reds) { \
  458. goto save_calls1; \
  459. } else { \
  460. SET_I(((Export *) Arg(0))->address); \
  461. CHECK_ARGS(I); \
  462. goto context_switch; \
  463. } \
  464. } while (0)
  465. #ifdef DEBUG
  466. /*
  467. * To simplify breakpoint setting, put the code in one place only and jump to it.
  468. */
  469. # define Dispatch() goto do_dispatch
  470. # define Dispatchx() goto do_dispatchx
  471. # define Dispatchfun() goto do_dispatchfun
  472. #else
  473. /*
  474. * Inline for speed.
  475. */
  476. # define Dispatch() DispatchMacro()
  477. # define Dispatchx() DispatchMacrox()
  478. # define Dispatchfun() DispatchMacroFun()
  479. #endif
  480. #define Self(R) R = c_p->id
  481. #define Node(R) R = erts_this_node->sysname
  482. #define Arg(N) I[(N)+1]
  483. #define Next(N) \
  484. I += (N) + 1; \
  485. ASSERT(VALID_INSTR(*I)); \
  486. Goto(*I)
  487. #define PreFetch(N, Dst) do { Dst = (Eterm *) *(I + N + 1); } while (0)
  488. #define NextPF(N, Dst) \
  489. I += N + 1; \
  490. ASSERT(VALID_INSTR(Dst)); \
  491. Goto(Dst)
  492. #define GetR(pos, tr) \
  493. do { \
  494. tr = Arg(pos); \
  495. switch (beam_reg_tag(tr)) { \
  496. case R_REG_DEF: tr = r(0); break; \
  497. case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
  498. case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
  499. } \
  500. CHECK_TERM(tr); \
  501. } while (0)
  502. #define GetArg1(N, Dst) GetR((N), Dst)
  503. #define GetArg2(N, Dst1, Dst2) \
  504. do { \
  505. GetR(N, Dst1); \
  506. GetR((N)+1, Dst2); \
  507. } while (0)
  508. #define PutList(H, T, Dst, Store) \
  509. do { \
  510. HTOP[0] = (H); HTOP[1] = (T); \
  511. Store(make_list(HTOP), Dst); \
  512. HTOP += 2; \
  513. } while (0)
  514. #define Move(Src, Dst, Store) \
  515. do { \
  516. Eterm term = (Src); \
  517. Store(term, Dst); \
  518. } while (0)
  519. #define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
  520. #define MoveGenDest(src, dstp) \
  521. if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
  522. #define MoveReturn(Src, Dest) \
  523. (Dest) = (Src); \
  524. I = c_p->cp; \
  525. ASSERT(VALID_INSTR(*c_p->cp)); \
  526. c_p->cp = 0; \
  527. CHECK_TERM(r(0)); \
  528. Goto(*I)
  529. #define DeallocateReturn(Deallocate) \
  530. do { \
  531. int words_to_pop = (Deallocate); \
  532. SET_I(cp_val(*E)); \
  533. E = ADD_BYTE_OFFSET(E, words_to_pop); \
  534. CHECK_TERM(r(0)); \
  535. Goto(*I); \
  536. } while (0)
  537. #define MoveDeallocateReturn(Src, Dest, Deallocate) \
  538. (Dest) = (Src); \
  539. DeallocateReturn(Deallocate)
  540. #define MoveCall(Src, Dest, CallDest, Size) \
  541. (Dest) = (Src); \
  542. SET_CP(c_p, I+Size+1); \
  543. SET_I((Eterm *) CallDest); \
  544. Dispatch();
  545. #define MoveCallLast(Src, Dest, CallDest, Deallocate) \
  546. (Dest) = (Src); \
  547. RESTORE_CP(E); \
  548. E = ADD_BYTE_OFFSET(E, (Deallocate)); \
  549. SET_I((Eterm *) CallDest); \
  550. Dispatch();
  551. #define MoveCallOnly(Src, Dest, CallDest) \
  552. (Dest) = (Src); \
  553. SET_I((Eterm *) CallDest); \
  554. Dispatch();
  555. #define GetList(Src, H, T) do { \
  556. Eterm* tmp_ptr = list_val(Src); \
  557. H = CAR(tmp_ptr); \
  558. T = CDR(tmp_ptr); } while (0)
  559. #define GetTupleElement(Src, Element, Dest) \
  560. do { \
  561. tmp_arg1 = (Eterm) (((unsigned char *) tuple_val(Src)) + (Element)); \
  562. (Dest) = (*(Eterm *)tmp_arg1); \
  563. } while (0)
  564. #define ExtractNextElement(Dest) \
  565. tmp_arg1 += sizeof(Eterm); \
  566. (Dest) = (* (Eterm *) (((unsigned char *) tmp_arg1)))
  567. #define ExtractNextElement2(Dest) \
  568. do { \
  569. Eterm* ene_dstp = &(Dest); \
  570. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  571. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  572. tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
  573. } while (0)
  574. #define ExtractNextElement3(Dest) \
  575. do { \
  576. Eterm* ene_dstp = &(Dest); \
  577. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  578. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  579. ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
  580. tmp_arg1 += 3*sizeof(Eterm); \
  581. } while (0)
  582. #define ExtractNextElement4(Dest) \
  583. do { \
  584. Eterm* ene_dstp = &(Dest); \
  585. ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
  586. ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
  587. ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
  588. ene_dstp[3] = ((Eterm *) tmp_arg1)[4]; \
  589. tmp_arg1 += 4*sizeof(Eterm); \
  590. } while (0)
  591. #define ExtractElement(Element, Dest) \
  592. do { \
  593. tmp_arg1 += (Element); \
  594. (Dest) = (* (Eterm *) tmp_arg1); \
  595. } while (0)
  596. #define PutTuple(Arity, Src, Dest) \
  597. ASSERT(is_arity_value(Arity)); \
  598. Dest = make_tuple(HTOP); \
  599. HTOP[0] = (Arity); \
  600. HTOP[1] = (Src); \
  601. HTOP += 2
  602. #define Put(Word) *HTOP++ = (Word)
  603. #define EqualImmed(X, Y, Action) if (X != Y) { Action; }
  604. #define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
  605. #define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
  606. #define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
  607. #define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
  608. #define IsIntegerAllocate(Src, Need, Alive, Fail) \
  609. if (is_not_integer(Src)) { Fail; } \
  610. A(Need, Alive)
  611. #define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
  612. #define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
  613. #define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
  614. #define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
  615. if (is_not_list(Src)) { Fail; } \
  616. A(Need, Alive)
  617. #define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
  618. if (is_not_list(Src)) { Fail; } \
  619. TestHeap(Need, Alive)
  620. #define IsTuple(X, Action) if (is_not_tuple(X)) Action
  621. #define IsArity(Pointer, Arity, Fail) \
  622. if (*(Eterm *)(tmp_arg1 = (Eterm)tuple_val(Pointer)) != (Arity)) { Fail; }
  623. #define IsFunction(X, Action) \
  624. do { \
  625. if ( !(is_any_fun(X)) ) { \
  626. Action; \
  627. } \
  628. } while (0)
  629. #define IsFunction2(F, A, Action) \
  630. do { \
  631. if (is_function_2(c_p, F, A) != am_true ) {\
  632. Action; \
  633. } \
  634. } while (0)
  635. #define IsTupleOfArity(Src, Arity, Fail) \
  636. do { \
  637. if (is_not_tuple(Src) || *(Eterm *)(tmp_arg1 = (Eterm) tuple_val(Src)) != Arity) { \
  638. Fail; \
  639. } \
  640. } while (0)
  641. #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
  642. #define IsBinary(Src, Fail) \
  643. if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
  644. #define IsBitstring(Src, Fail) \
  645. if (is_not_binary(Src)) { Fail; }
  646. #ifdef ARCH_64
  647. #define BsSafeMul(A, B, Fail, Target) \
  648. do { Uint64 _res = (A) * (B); \
  649. if (_res / B != A) { Fail; } \
  650. Target = _res; \
  651. } while (0)
  652. #else
  653. #define BsSafeMul(A, B, Fail, Target) \
  654. do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
  655. if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
  656. Target = _res; \
  657. } while (0)
  658. #endif
  659. #define BsGetFieldSize(Bits, Unit, Fail, Target) \
  660. do { \
  661. Sint _signed_size; Uint _uint_size; \
  662. if (is_small(Bits)) { \
  663. _signed_size = signed_val(Bits); \
  664. if (_signed_size < 0) { Fail; } \
  665. _uint_size = (Uint) _signed_size; \
  666. } else { \
  667. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  668. _uint_size = temp_bits; \
  669. } \
  670. BsSafeMul(_uint_size, Unit, Fail, Target); \
  671. } while (0)
  672. #define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
  673. do { \
  674. Sint _signed_size; Uint _uint_size; \
  675. if (is_small(Bits)) { \
  676. _signed_size = signed_val(Bits); \
  677. if (_signed_size < 0) { Fail; } \
  678. _uint_size = (Uint) _signed_size; \
  679. } else { \
  680. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  681. _uint_size = (Uint) temp_bits; \
  682. } \
  683. Target = _uint_size * Unit; \
  684. } while (0)
  685. #define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  686. do { \
  687. ErlBinMatchBuffer *_mb; \
  688. Eterm _result; Sint _size; \
  689. if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
  690. _size *= ((Flags) >> 3); \
  691. TestHeap(FLOAT_SIZE_OBJECT, Live); \
  692. _mb = ms_matchbuffer(Ms); \
  693. LIGHT_SWAPOUT; \
  694. _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
  695. LIGHT_SWAPIN; \
  696. if (is_non_value(_result)) { Fail; } \
  697. else { Store(_result, Dst); } \
  698. } while (0)
  699. #define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  700. do { \
  701. ErlBinMatchBuffer *_mb; \
  702. Eterm _result; \
  703. TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
  704. _mb = ms_matchbuffer(Ms); \
  705. LIGHT_SWAPOUT; \
  706. _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
  707. LIGHT_SWAPIN; \
  708. if (is_non_value(_result)) { Fail; } \
  709. else { Store(_result, Dst); } \
  710. } while (0)
  711. #define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  712. do { \
  713. ErlBinMatchBuffer *_mb; \
  714. Eterm _result; Uint _size; \
  715. BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
  716. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  717. _mb = ms_matchbuffer(Ms); \
  718. LIGHT_SWAPOUT; \
  719. _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
  720. LIGHT_SWAPIN; \
  721. if (is_non_value(_result)) { Fail; } \
  722. else { Store(_result, Dst); } \
  723. } while (0)
  724. #define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
  725. do { \
  726. ErlBinMatchBuffer *_mb; \
  727. Eterm _result; \
  728. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  729. _mb = ms_matchbuffer(Ms); \
  730. if (((_mb->size - _mb->offset) % Unit) == 0) { \
  731. LIGHT_SWAPOUT; \
  732. _result = erts_bs_get_binary_all_2(c_p, _mb); \
  733. LIGHT_SWAPIN; \
  734. ASSERT(is_value(_result)); \
  735. Store(_result, Dst); \
  736. } else { Fail; } \
  737. } while (0)
  738. #define BsSkipBits2(Ms, Bits, Unit, Fail) \
  739. do { \
  740. ErlBinMatchBuffer *_mb; \
  741. size_t new_offset; \
  742. Uint _size; \
  743. _mb = ms_matchbuffer(Ms); \
  744. BsGetFieldSize(Bits, Unit, Fail, _size); \
  745. new_offset = _mb->offset + _size; \
  746. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  747. else { Fail; } \
  748. } while (0)
  749. #define BsSkipBitsAll2(Ms, Unit, Fail) \
  750. do { \
  751. ErlBinMatchBuffer *_mb; \
  752. _mb = ms_matchbuffer(Ms); \
  753. if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
  754. else { Fail; } \
  755. } while (0)
  756. #define BsSkipBitsImm2(Ms, Bits, Fail) \
  757. do { \
  758. ErlBinMatchBuffer *_mb; \
  759. size_t new_offset; \
  760. _mb = ms_matchbuffer(Ms); \
  761. new_offset = _mb->offset + (Bits); \
  762. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  763. else { Fail; } \
  764. } while (0)
  765. #define NewBsPutIntegerImm(Sz, Flags, Src) \
  766. do { \
  767. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
  768. } while (0)
  769. #define NewBsPutInteger(Sz, Flags, Src) \
  770. do { \
  771. Sint _size; \
  772. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  773. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
  774. { goto badarg; } \
  775. } while (0)
  776. #define NewBsPutFloatImm(Sz, Flags, Src) \
  777. do { \
  778. if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
  779. } while (0)
  780. #define NewBsPutFloat(Sz, Flags, Src) \
  781. do { \
  782. Sint _size; \
  783. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  784. if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
  785. } while (0)
  786. #define NewBsPutBinary(Sz, Flags, Src) \
  787. do { \
  788. Sint _size; \
  789. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  790. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
  791. } while (0)
  792. #define NewBsPutBinaryImm(Sz, Src) \
  793. do { \
  794. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
  795. } while (0)
  796. #define NewBsPutBinaryAll(Src, Unit) \
  797. do { \
  798. if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
  799. } while (0)
  800. #define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
  801. #define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
  802. #define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
  803. static BifFunction translate_gc_bif(void* gcf);
  804. static Eterm* handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf);
  805. static Eterm* next_catch(Process* c_p, Eterm *reg);
  806. static void terminate_proc(Process* c_p, Eterm Value);
  807. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  808. static void save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg,
  809. BifFunction bf, Eterm args);
  810. static struct StackTrace * get_trace_from_exc(Eterm exc);
  811. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  812. static Eterm call_error_handler(Process* p, Eterm* ip, Eterm* reg);
  813. static Eterm call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg);
  814. static Uint* fixed_apply(Process* p, Eterm* reg, Uint arity);
  815. static Eterm* apply(Process* p, Eterm module, Eterm function,
  816. Eterm args, Eterm* reg);
  817. static int hibernate(Process* c_p, Eterm module, Eterm function,
  818. Eterm args, Eterm* reg);
  819. static Eterm* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
  820. static Eterm* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
  821. static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free);
  822. #if defined(_OSE_) || defined(VXWORKS)
  823. static int init_done;
  824. #endif
  825. void
  826. init_emulator(void)
  827. {
  828. #if defined(_OSE_) || defined(VXWORKS)
  829. init_done = 0;
  830. #endif
  831. process_main();
  832. }
  833. /*
  834. * On certain platforms, make sure that the main variables really are placed
  835. * in registers.
  836. */
  837. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  838. # define REG_x0 asm("%l0")
  839. # define REG_xregs asm("%l1")
  840. # define REG_htop asm("%l2")
  841. # define REG_stop asm("%l3")
  842. # define REG_I asm("%l4")
  843. # define REG_fcalls asm("%l5")
  844. # define REG_tmp_arg1 asm("%l6")
  845. # define REG_tmp_arg2 asm("%l7")
  846. #else
  847. # define REG_x0
  848. # define REG_xregs
  849. # define REG_htop
  850. # define REG_stop
  851. # define REG_I
  852. # define REG_fcalls
  853. # define REG_tmp_arg1
  854. # define REG_tmp_arg2
  855. #endif
  856. /*
  857. * process_main() is called twice:
  858. * The first call performs some initialisation, including exporting
  859. * the instructions' C labels to the loader.
  860. * The second call starts execution of BEAM code. This call never returns.
  861. */
  862. void process_main(void)
  863. {
  864. #if !defined(_OSE_) && !defined(VXWORKS)
  865. static int init_done = 0;
  866. #endif
  867. Process* c_p = NULL;
  868. int reds_used;
  869. #ifdef DEBUG
  870. Eterm pid;
  871. #endif
  872. /*
  873. * X register zero; also called r(0)
  874. */
  875. register Eterm x0 REG_x0 = NIL;
  876. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
  877. * in all other cases x0 is used.
  878. */
  879. register Eterm* reg REG_xregs = NULL;
  880. /*
  881. * Top of heap (next free location); grows upwards.
  882. */
  883. register Eterm* HTOP REG_htop = NULL;
  884. #ifdef HYBRID
  885. Eterm *g_htop;
  886. Eterm *g_hend;
  887. #endif
  888. /* Stack pointer. Grows downwards; points
  889. * to last item pushed (normally a saved
  890. * continuation pointer).
  891. */
  892. register Eterm* E REG_stop = NULL;
  893. /*
  894. * Pointer to next threaded instruction.
  895. */
  896. register Eterm *I REG_I = NULL;
  897. /* Number of reductions left. This function
  898. * returns to the scheduler when FCALLS reaches zero.
  899. */
  900. register Sint FCALLS REG_fcalls = 0;
  901. /*
  902. * Temporaries used for picking up arguments for instructions.
  903. */
  904. register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
  905. register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
  906. Eterm tmp_big[2]; /* Temporary buffer for small bignums. */
  907. #ifndef ERTS_SMP
  908. static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
  909. /* X registers -- not used directly, but
  910. * through 'reg', because using it directly
  911. * needs two instructions on a SPARC,
  912. * while using it through reg needs only
  913. * one.
  914. */
  915. /*
  916. * Floating point registers.
  917. */
  918. static FloatDef freg[MAX_REG];
  919. #else
920. /* X registers and floating point registers are located in
  921. * scheduler specific data.
  922. */
  923. register FloatDef *freg;
  924. #endif
  925. /*
  926. * For keeping the negative old value of 'reds' when call saving is active.
  927. */
  928. int neg_o_reds = 0;
  929. Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
  930. #ifndef NO_JUMP_TABLE
  931. static void* opcodes[] = { DEFINE_OPCODES };
  932. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  933. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  934. #endif
  935. #else
  936. int Go;
  937. #endif
  938. Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
  939. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  940. /*
  941. * Note: In this function, we attempt to place rarely executed code towards
  942. * the end of the function, in the hope that the cache hit rate will be better.
  943. * The initialization code is only run once, so it is at the very end.
  944. *
  945. * Note: c_p->arity must be set to reflect the number of useful terms in
  946. * c_p->arg_reg before calling the scheduler.
  947. */
  948. if (!init_done) {
  949. init_done = 1;
  950. goto init_emulator;
  951. }
  952. #ifndef ERTS_SMP
  953. reg = save_reg; /* XXX: probably wastes a register on x86 */
  954. #endif
  955. c_p = NULL;
  956. reds_used = 0;
  957. goto do_schedule1;
  958. do_schedule:
  959. reds_used = REDS_IN(c_p) - FCALLS;
  960. do_schedule1:
  961. PROCESS_MAIN_CHK_LOCKS(c_p);
  962. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  963. c_p = schedule(c_p, reds_used);
  964. #ifdef DEBUG
  965. pid = c_p->id;
  966. #endif
  967. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  968. PROCESS_MAIN_CHK_LOCKS(c_p);
  969. #ifdef ERTS_SMP
  970. reg = c_p->scheduler_data->save_reg;
  971. freg = c_p->scheduler_data->freg;
  972. #endif
  973. ERL_BITS_RELOAD_STATEP(c_p);
  974. {
  975. int reds;
  976. Eterm* argp;
  977. Eterm* next;
  978. int i;
  979. argp = c_p->arg_reg;
  980. for (i = c_p->arity - 1; i > 0; i--) {
  981. reg[i] = argp[i];
  982. CHECK_TERM(reg[i]);
  983. }
  984. /*
  985. * We put the original reduction count in the process structure, to reduce
  986. * the code size (referencing a field in a struct through a pointer stored
  987. * in a register gives smaller code than referencing a global variable).
  988. */
  989. SET_I(c_p->i);
  990. reds = c_p->fcalls;
  991. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
  992. && (c_p->trace_flags & F_SENSITIVE) == 0) {
  993. neg_o_reds = -reds;
  994. FCALLS = REDS_IN(c_p) = 0;
  995. } else {
  996. neg_o_reds = 0;
  997. FCALLS = REDS_IN(c_p) = reds;
  998. }
  999. next = (Eterm *) *I;
  1000. r(0) = c_p->arg_reg[0];
  1001. #ifdef HARDDEBUG
  1002. if (c_p->arity > 0) {
  1003. CHECK_TERM(r(0));
  1004. }
  1005. #endif
  1006. SWAPIN;
  1007. ASSERT(VALID_INSTR(next));
  1008. Goto(next);
  1009. }
  1010. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  1011. emulator_loop:
  1012. #endif
  1013. #ifdef NO_JUMP_TABLE
  1014. switch (Go) {
  1015. #endif
  1016. #include "beam_hot.h"
  1017. #define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
  1018. #define ARITH_FUNC(name) erts_gc_##name
  1019. OpCase(i_plus_jId):
  1020. {
  1021. Eterm result;
  1022. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1023. Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
  1024. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1025. if (MY_IS_SSMALL(i)) {
  1026. result = make_small(i);
  1027. STORE_ARITH_RESULT(result);
  1028. }
  1029. }
  1030. arith_func = ARITH_FUNC(mixed_plus);
  1031. goto do_big_arith2;
  1032. }
  1033. OpCase(i_minus_jId):
  1034. {
  1035. Eterm result;
  1036. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1037. Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
  1038. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1039. if (MY_IS_SSMALL(i)) {
  1040. result = make_small(i);
  1041. STORE_ARITH_RESULT(result);
  1042. }
  1043. }
  1044. arith_func = ARITH_FUNC(mixed_minus);
  1045. goto do_big_arith2;
  1046. }
  1047. OpCase(i_is_lt_f):
  1048. if (CMP_GE(tmp_arg1, tmp_arg2)) {
  1049. ClauseFail();
  1050. }
  1051. Next(1);
  1052. OpCase(i_is_ge_f):
  1053. if (CMP_LT(tmp_arg1, tmp_arg2)) {
  1054. ClauseFail();
  1055. }
  1056. Next(1);
  1057. OpCase(i_is_eq_f):
  1058. if (CMP_NE(tmp_arg1, tmp_arg2)) {
  1059. ClauseFail();
  1060. }
  1061. Next(1);
  1062. OpCase(i_is_ne_f):
  1063. if (CMP_EQ(tmp_arg1, tmp_arg2)) {
  1064. ClauseFail();
  1065. }
  1066. Next(1);
  1067. OpCase(i_is_eq_exact_f):
  1068. if (!EQ(tmp_arg1, tmp_arg2)) {
  1069. ClauseFail();
  1070. }
  1071. Next(1);
  1072. OpCase(i_move_call_only_fcr): {
  1073. r(0) = Arg(1);
  1074. }
  1075. /* FALL THROUGH */
  1076. OpCase(i_call_only_f): {
  1077. SET_I((Eterm *) Arg(0));
  1078. Dispatch();
  1079. }
  1080. OpCase(i_move_call_last_fPcr): {
  1081. r(0) = Arg(2);
  1082. }
  1083. /* FALL THROUGH */
  1084. OpCase(i_call_last_fP): {
  1085. RESTORE_CP(E);
  1086. E = ADD_BYTE_OFFSET(E, Arg(1));
  1087. SET_I((Eterm *) Arg(0));
  1088. Dispatch();
  1089. }
  1090. OpCase(i_move_call_crf): {
  1091. r(0) = Arg(0);
  1092. I++;
  1093. }
  1094. /* FALL THROUGH */
  1095. OpCase(i_call_f): {
  1096. SET_CP(c_p, I+2);
  1097. SET_I((Eterm *) Arg(0));
  1098. Dispatch();
  1099. }
  1100. OpCase(i_move_call_ext_last_ePcr): {
  1101. r(0) = Arg(2);
  1102. }
  1103. /* FALL THROUGH */
  1104. OpCase(i_call_ext_last_eP):
  1105. RESTORE_CP(E);
  1106. E = ADD_BYTE_OFFSET(E, Arg(1));
  1107. /*
  1108. * Note: The pointer to the export entry is never NULL; if the module
  1109. * is not loaded, it points to code which will invoke the error handler
  1110. * (see lb_call_error_handler below).
  1111. */
  1112. Dispatchx();
  1113. OpCase(i_move_call_ext_cre): {
  1114. r(0) = Arg(0);
  1115. I++;
  1116. }
  1117. /* FALL THROUGH */
  1118. OpCase(i_call_ext_e):
  1119. SET_CP(c_p, I+2);
  1120. Dispatchx();
  1121. OpCase(i_move_call_ext_only_ecr): {
  1122. r(0) = Arg(1);
  1123. }
  1124. /* FALL THROUGH */
  1125. OpCase(i_call_ext_only_e):
  1126. Dispatchx();
  1127. OpCase(init_y): {
  1128. Eterm* next;
  1129. PreFetch(1, next);
  1130. make_blank(yb(Arg(0)));
  1131. NextPF(1, next);
  1132. }
  1133. OpCase(i_trim_I): {
  1134. Eterm* next;
  1135. Uint words;
  1136. Uint cp;
  1137. words = Arg(0);
  1138. cp = E[0];
  1139. PreFetch(1, next);
  1140. E += words;
  1141. E[0] = cp;
  1142. NextPF(1, next);
  1143. }
  1144. OpCase(return): {
  1145. SET_I(c_p->cp);
  1146. /*
1147. * We must clear the CP to make sure that a stale value does not
1148. * create a false module dependency preventing code upgrading.
  1149. * It also means that we can use the CP in stack backtraces.
  1150. */
  1151. c_p->cp = 0;
  1152. CHECK_TERM(r(0));
  1153. Goto(*I);
  1154. }
  1155. OpCase(test_heap_1_put_list_Iy): {
  1156. Eterm* next;
  1157. PreFetch(2, next);
  1158. TestHeap(Arg(0), 1);
  1159. PutList(yb(Arg(1)), r(0), r(0), StoreSimpleDest);
  1160. CHECK_TERM(r(0));
  1161. NextPF(2, next);
  1162. }
  1163. OpCase(put_string_IId):
  1164. {
  1165. unsigned char* s;
  1166. int len;
  1167. Eterm result;
  1168. len = Arg(0); /* Length. */
  1169. result = NIL;
  1170. for (s = (unsigned char *) Arg(1); len > 0; s--, len--) {
  1171. PutList(make_small(*s), result, result, StoreSimpleDest);
  1172. }
  1173. StoreBifResult(2, result);
  1174. }
  1175. /*
  1176. * Send is almost a standard call-BIF with two arguments, except for:
  1177. * 1) It cannot be traced.
  1178. * 2) There is no pointer to the send_2 function stored in
  1179. * the instruction.
  1180. */
  1181. OpCase(send): {
  1182. Eterm* next;
  1183. Eterm result;
  1184. PRE_BIF_SWAPOUT(c_p);
  1185. c_p->fcalls = FCALLS - 1;
  1186. result = send_2(c_p, r(0), x(1));
  1187. PreFetch(0, next);
  1188. POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
  1189. FCALLS = c_p->fcalls;
  1190. if (is_value(result)) {
  1191. r(0) = result;
  1192. CHECK_TERM(r(0));
  1193. NextPF(0, next);
  1194. } else if (c_p->freason == TRAP) {
  1195. SET_CP(c_p, I+1);
  1196. SET_I(((Export *)(c_p->def_arg_reg[3]))->address);
  1197. SWAPIN;
  1198. r(0) = c_p->def_arg_reg[0];
  1199. x(1) = c_p->def_arg_reg[1];
  1200. Dispatch();
  1201. }
  1202. goto find_func_info;
  1203. }
  1204. OpCase(i_element_jssd): {
  1205. Eterm index;
  1206. Eterm tuple;
  1207. /*
  1208. * Inlined version of element/2 for speed.
  1209. */
  1210. GetArg2(1, index, tuple);
  1211. if (is_small(index) && is_tuple(tuple)) {
  1212. Eterm* tp = tuple_val(tuple);
  1213. if ((signed_val(index) >= 1) &&
  1214. (signed_val(index) <= arityval(*tp))) {
  1215. Eterm result = tp[signed_val(index)];
  1216. StoreBifResult(3, result);
  1217. }
  1218. }
  1219. }
  1220. /* Fall through */
  1221. OpCase(badarg_j):
  1222. badarg:
  1223. c_p->freason = BADARG;
  1224. goto lb_Cl_error;
  1225. OpCase(i_fast_element_jIsd): {
  1226. Eterm tuple;
  1227. /*
  1228. * Inlined version of element/2 for even more speed.
  1229. * The first argument is an untagged integer >= 1.
  1230. * The second argument is guaranteed to be a register operand.
  1231. */
  1232. GetArg1(2, tuple);
  1233. if (is_tuple(tuple)) {
  1234. Eterm* tp = tuple_val(tuple);
  1235. tmp_arg2 = Arg(1);
  1236. if (tmp_arg2 <= arityval(*tp)) {
  1237. Eterm result = tp[tmp_arg2];
  1238. StoreBifResult(3, result);
  1239. }
  1240. }
  1241. goto badarg;
  1242. }
  1243. OpCase(catch_yf):
  1244. c_p->catches++;
  1245. yb(Arg(0)) = Arg(1);
  1246. Next(2);
  1247. OpCase(catch_end_y): {
  1248. c_p->catches--;
  1249. make_blank(yb(Arg(0)));
  1250. if (is_non_value(r(0))) {
  1251. if (x(1) == am_throw) {
  1252. r(0) = x(2);
  1253. } else {
  1254. if (x(1) == am_error) {
  1255. SWAPOUT;
  1256. x(2) = add_stacktrace(c_p, x(2), x(3));
  1257. SWAPIN;
  1258. }
  1259. /* only x(2) is included in the rootset here */
  1260. if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
  1261. * created heap fragments */
  1262. SWAPOUT;
  1263. PROCESS_MAIN_CHK_LOCKS(c_p);
  1264. FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
  1265. PROCESS_MAIN_CHK_LOCKS(c_p);
  1266. SWAPIN;
  1267. }
  1268. r(0) = TUPLE2(HTOP, am_EXIT, x(2));
  1269. HTOP += 3;
  1270. }
  1271. }
  1272. CHECK_TERM(r(0));
  1273. Next(1);
  1274. }
  1275. OpCase(try_end_y): {
  1276. c_p->catches--;
  1277. make_blank(yb(Arg(0)));
  1278. if (is_non_value(r(0))) {
  1279. r(0) = x(1);
  1280. x(1) = x(2);
  1281. x(2) = x(3);
  1282. }
  1283. Next(1);
  1284. }
  1285. /*
  1286. * Skeleton for receive statement:
  1287. *
  1288. * L1: <-------------------+
  1289. * <-----------+ |
  1290. * | |
  1291. * loop_rec L2 ------+---+ |
  1292. * ... | | |
  1293. * remove_message | | |
  1294. * jump L3 | | |
  1295. * ... | | |
  1296. * loop_rec_end L1 --+ | |
  1297. * L2: <---------------+ |
  1298. * wait L1 -----------------+ or wait_timeout
  1299. * timeout
  1300. *
  1301. * L3: Code after receive...
  1302. *
  1303. *
  1304. */
  1305. /*
  1306. * Pick up the next message and place it in x(0).
  1307. * If no message, jump to a wait or wait_timeout instruction.
  1308. */
  1309. OpCase(i_loop_rec_fr):
  1310. {
  1311. Eterm* next;
  1312. ErlMessage* msgp;
  1313. loop_rec__:
  1314. PROCESS_MAIN_CHK_LOCKS(c_p);
  1315. msgp = PEEK_MESSAGE(c_p);
  1316. if (!msgp) {
  1317. #ifdef ERTS_SMP
  1318. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
1319. /* Make sure messages won't pass exit signals... */
  1320. if (ERTS_PROC_PENDING_EXIT(c_p)) {
  1321. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1322. SWAPOUT;
  1323. goto do_schedule; /* Will be rescheduled for exit */
  1324. }
  1325. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  1326. msgp = PEEK_MESSAGE(c_p);
  1327. if (msgp)
  1328. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1329. else {
  1330. #endif
  1331. SET_I((Eterm *) Arg(0));
  1332. Goto(*I); /* Jump to a wait or wait_timeout instruction */
  1333. #ifdef ERTS_SMP
  1334. }
  1335. #endif
  1336. }
  1337. ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS,
  1338. {
  1339. SWAPOUT;
  1340. reg[0] = r(0);
  1341. PROCESS_MAIN_CHK_LOCKS(c_p);
  1342. },
  1343. {
  1344. PROCESS_MAIN_CHK_LOCKS(c_p);
  1345. r(0) = reg[0];
  1346. SWAPIN;
  1347. });
  1348. if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
  1349. /*
  1350. * A corrupt distribution message that we weren't able to decode;
  1351. * remove it...
  1352. */
  1353. ASSERT(!msgp->data.attached);
  1354. UNLINK_MESSAGE(c_p, msgp);
  1355. free_message(msgp);
  1356. goto loop_rec__;
  1357. }
  1358. PreFetch(1, next);
  1359. r(0) = ERL_MESSAGE_TERM(msgp);
  1360. NextPF(1, next);
  1361. }
  1362. /*
  1363. * Remove a (matched) message from the message queue.
  1364. */
  1365. OpCase(remove_message): {
  1366. Eterm* next;
  1367. ErlMessage* msgp;
  1368. PROCESS_MAIN_CHK_LOCKS(c_p);
  1369. PreFetch(0, next);
  1370. msgp = PEEK_MESSAGE(c_p);
  1371. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
  1372. save_calls(c_p, &exp_receive);
  1373. }
  1374. if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
  1375. SEQ_TRACE_TOKEN(c_p) = NIL;
  1376. } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
  1377. Eterm msg;
  1378. SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
  1379. ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
  1380. ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
  1381. ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
  1382. ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
  1383. ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
  1384. ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
  1385. c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1386. if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
  1387. c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1388. }
  1389. msg = ERL_MESSAGE_TERM(msgp);
  1390. seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
  1391. c_p->id, c_p);
  1392. }
  1393. UNLINK_MESSAGE(c_p, msgp);
  1394. JOIN_MESSAGE(c_p);
  1395. CANCEL_TIMER(c_p);
  1396. free_message(msgp);
  1397. PROCESS_MAIN_CHK_LOCKS(c_p);
  1398. NextPF(0, next);
  1399. }
  1400. /*
  1401. * Advance the save pointer to the next message (the current
  1402. * message didn't match), then jump to the loop_rec instruction.
  1403. */
  1404. OpCase(loop_rec_end_f): {
  1405. SET_I((Eterm *) Arg(0));
  1406. SAVE_MESSAGE(c_p);
  1407. goto loop_rec__;
  1408. }
  1409. /*
  1410. * Prepare to wait for a message or a timeout, whichever occurs first.
  1411. *
  1412. * Note: In order to keep the compatibility between 32 and 64 bits
  1413. * emulators, only timeout values that can be represented in 32 bits
  1414. * (unsigned) or less are allowed.
  1415. */
  1416. OpCase(i_wait_timeout_fs): {
  1417. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1418. /* Fall through */
  1419. }
  1420. OpCase(i_wait_timeout_locked_fs): {
  1421. Eterm timeout_value;
  1422. /*
  1423. * If we have already set the timer, we must NOT set it again. Therefore,
  1424. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1425. */
  1426. if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) {
  1427. goto wait2;
  1428. }
  1429. GetArg1(1, timeout_value);
  1430. if (timeout_value != make_small(0)) {
  1431. #if !defined(ARCH_64)
  1432. Uint time_val;
  1433. #endif
  1434. if (is_small(timeout_value) && signed_val(timeout_value) > 0 &&
  1435. #if defined(ARCH_64)
  1436. ((unsigned_val(timeout_value) >> 32) == 0)
  1437. #else
  1438. 1
  1439. #endif
  1440. ) {
  1441. /*
1442. * The timer routine will set c_p->i to the value in
  1443. * c_p->def_arg_reg[0]. Note that it is safe to use this
  1444. * location because there are no living x registers in
  1445. * a receive statement.
  1446. */
  1447. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1448. set_timer(c_p, unsigned_val(timeout_value));
  1449. } else if (timeout_value == am_infinity) {
  1450. c_p->flags |= F_TIMO;
  1451. #if !defined(ARCH_64)
  1452. } else if (term_to_Uint(timeout_value, &time_val)) {
  1453. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1454. set_timer(c_p, time_val);
  1455. #endif
  1456. } else { /* Wrong time */
  1457. OpCase(i_wait_error_locked): {
  1458. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1459. /* Fall through */
  1460. }
  1461. OpCase(i_wait_error): {
  1462. c_p->freason = EXC_TIMEOUT_VALUE;
  1463. goto find_func_info;
  1464. }
  1465. }
  1466. /*
  1467. * Prepare to wait indefinitely for a new message to arrive
  1468. * (or the time set above if falling through from above).
  1469. *
1470. * When a new message arrives, control will be transferred to
1471. * the loop_rec instruction (at label L1). In case
1472. * of timeout, control will be transferred to the timeout
  1473. * instruction following the wait_timeout instruction.
  1474. */
  1475. OpCase(wait_locked_f):
  1476. OpCase(wait_f):
  1477. wait2: {
  1478. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1479. c_p->i = (Eterm *) Arg(0); /* L1 */
  1480. SWAPOUT;
  1481. c_p->arity = 0;
  1482. c_p->status = P_WAITING;
  1483. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1484. c_p->current = NULL;
  1485. goto do_schedule;
  1486. }
  1487. OpCase(wait_unlocked_f): {
  1488. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1489. goto wait2;
  1490. }
  1491. }
  1492. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1493. Next(2);
  1494. }
  1495. OpCase(i_wait_timeout_fI): {
  1496. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1497. }
  1498. OpCase(i_wait_timeout_locked_fI):
  1499. {
  1500. /*
  1501. * If we have already set the timer, we must NOT set it again. Therefore,
  1502. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1503. */
  1504. if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
  1505. c_p->def_arg_reg[0] = (Eterm) (I+3);
  1506. set_timer(c_p, Arg(1));
  1507. }
  1508. goto wait2;
  1509. }
  1510. /*
  1511. * A timeout has occurred. Reset the save pointer so that the next
  1512. * receive statement will examine the first message first.
  1513. */
  1514. OpCase(timeout_locked): {
  1515. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1516. }
  1517. OpCase(timeout): {
  1518. Eterm* next;
  1519. PreFetch(0, next);
  1520. if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
  1521. trace_receive(c_p, am_timeout);
  1522. }
  1523. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
  1524. save_calls(c_p, &exp_timeout);
  1525. }
  1526. c_p->flags &= ~F_TIMO;
  1527. JOIN_MESSAGE(c_p);
  1528. NextPF(0, next);
  1529. }
  1530. OpCase(i_select_val_sfI):
  1531. GetArg1(0, tmp_arg1);
  1532. do_binary_search:
  1533. {
  1534. struct Pairs {
  1535. Eterm val;
  1536. Eterm* addr;
  1537. };
  1538. struct Pairs* low;
  1539. struct Pairs* high;
  1540. struct Pairs* mid;
  1541. int bdiff; /* int not long because the arrays aren't that large */
  1542. low = (struct Pairs *) &Arg(3);
  1543. high = low + Arg(2);
  1544. /* The pointer subtraction (high-low) below must produc…

Large files files are truncated, but you can click here to view the full file