/*
 * erts/emulator/beam/beam_emu.c — BEAM interpreter main loop.
 * Source: Erlang/OTP (mirror: https://github.com/bsmr-erlang/otp).
 * NOTE: this is a truncated extract of a ~3286-line file.
 */
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2018. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. #ifdef HAVE_CONFIG_H
  21. # include "config.h"
  22. #endif
  23. #include <stddef.h> /* offsetof() */
  24. #include "sys.h"
  25. #include "erl_vm.h"
  26. #include "global.h"
  27. #include "erl_process.h"
  28. #include "error.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "beam_load.h"
  32. #include "erl_binary.h"
  33. #include "erl_map.h"
  34. #include "erl_bits.h"
  35. #include "dist.h"
  36. #include "beam_bp.h"
  37. #include "beam_catches.h"
  38. #include "erl_thr_progress.h"
  39. #include "erl_nfunc_sched.h"
  40. #ifdef HIPE
  41. #include "hipe_mode_switch.h"
  42. #include "hipe_bif1.h"
  43. #endif
  44. #include "dtrace-wrapper.h"
  45. #include "erl_proc_sig_queue.h"
  46. /* #define HARDDEBUG 1 */
  47. #if defined(NO_JUMP_TABLE)
  48. # define OpCase(OpCode) case op_##OpCode
  49. # define CountCase(OpCode) case op_count_##OpCode
  50. # define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)op_##OpCode)
  51. # define Goto(Rel) {Go = BeamCodeAddr(Rel); goto emulator_loop;}
  52. # define GotoPF(Rel) Goto(Rel)
  53. #else
  54. # define OpCase(OpCode) lb_##OpCode
  55. # define CountCase(OpCode) lb_count_##OpCode
  56. # define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)&&lb_##OpCode)
  57. # define Goto(Rel) goto *((void *)BeamCodeAddr(Rel))
  58. # define GotoPF(Rel) goto *((void *)Rel)
  59. # define LabelAddr(Label) &&Label
  60. #endif
  61. #ifdef ERTS_ENABLE_LOCK_CHECK
  62. # define PROCESS_MAIN_CHK_LOCKS(P) \
  63. do { \
  64. if ((P)) \
  65. erts_proc_lc_chk_only_proc_main((P)); \
  66. ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \
  67. } while (0)
  68. # define ERTS_REQ_PROC_MAIN_LOCK(P) \
  69. do { \
  70. if ((P)) \
  71. erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
  72. __FILE__, __LINE__); \
  73. } while (0)
  74. # define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
  75. do { \
  76. if ((P)) \
  77. erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
  78. } while (0)
  79. #else
  80. # define PROCESS_MAIN_CHK_LOCKS(P)
  81. # define ERTS_REQ_PROC_MAIN_LOCK(P)
  82. # define ERTS_UNREQ_PROC_MAIN_LOCK(P)
  83. #endif
  84. /*
  85. * Define macros for deep checking of terms.
  86. */
  87. #if defined(HARDDEBUG)
  88. # define CHECK_TERM(T) size_object(T)
  89. # define CHECK_ARGS(PC) \
  90. do { \
  91. int i_; \
  92. int Arity_ = PC[-1]; \
  93. for (i_ = 0; i_ < Arity_; i_++) { \
  94. CHECK_TERM(x(i_)); \
  95. } \
  96. } while (0)
  97. #else
  98. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  99. # define CHECK_ARGS(T)
  100. #endif
  101. #define CHECK_ALIGNED(Dst) ASSERT((((Uint)&Dst) & (sizeof(Uint)-1)) == 0)
  102. #define GET_BIF_MODULE(p) (p->info.mfa.module)
  103. #define GET_BIF_FUNCTION(p) (p->info.mfa.function)
  104. #define GET_BIF_ARITY(p) (p->info.mfa.arity)
  105. #define GET_BIF_ADDRESS(p) ((BifFunction) (p->beam[1]))
  106. #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
  107. /*
  108. * We reuse some of fields in the save area in the process structure.
  109. * This is safe to do, since this space is only actively used when
  110. * the process is switched out.
  111. */
  112. #define REDS_IN(p) ((p)->def_arg_reg[5])
  113. /*
  114. * Add a byte offset to a pointer to Eterm. This is useful when the
  115. * the loader has precalculated a byte offset.
  116. */
  117. #define ADD_BYTE_OFFSET(ptr, offset) \
  118. ((Eterm *) (((unsigned char *)ptr) + (offset)))
  119. /* We don't check the range if an ordinary switch is used */
  120. #ifdef NO_JUMP_TABLE
  121. # define VALID_INSTR(IP) (BeamCodeAddr(IP) < (NUMBER_OF_OPCODES*2+10))
  122. #else
  123. # define VALID_INSTR(IP) \
  124. ((BeamInstr)LabelAddr(emulator_loop) <= BeamCodeAddr(IP) && \
  125. BeamCodeAddr(IP) < (BeamInstr)LabelAddr(end_emulator_loop))
  126. #endif /* NO_JUMP_TABLE */
  127. #define SET_CP(p, ip) \
  128. ASSERT(VALID_INSTR(*(ip))); \
  129. (p)->cp = (ip)
  130. #define SET_I(ip) \
  131. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  132. I = (ip)
  133. /*
  134. * Register target (X or Y register).
  135. */
  136. #define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target))
  137. /*
  138. * Special Beam instructions.
  139. */
  140. BeamInstr beam_apply[2];
  141. BeamInstr beam_exit[1];
  142. BeamInstr beam_continue_exit[1];
/* NOTE These should be the only variables containing trace instructions.
**      Sometimes tests are for the instruction value, and sometimes
**      for the referring variable (one of these), and rogue references
**      will most likely cause chaos.
*/
  148. BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  149. BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
  150. BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  151. BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
  152. /*
  153. * All Beam instructions in numerical order.
  154. */
  155. #ifndef NO_JUMP_TABLE
  156. void** beam_ops;
  157. #endif
  158. #define SWAPIN \
  159. HTOP = HEAP_TOP(c_p); \
  160. E = c_p->stop
  161. #define SWAPOUT \
  162. HEAP_TOP(c_p) = HTOP; \
  163. c_p->stop = E
  164. #define HEAVY_SWAPIN \
  165. SWAPIN; \
  166. FCALLS = c_p->fcalls
  167. #define HEAVY_SWAPOUT \
  168. SWAPOUT; \
  169. c_p->fcalls = FCALLS
  170. /*
  171. * Use LIGHT_SWAPOUT when the called function
  172. * will call HeapOnlyAlloc() (and never HAlloc()).
  173. */
  174. #ifdef DEBUG
  175. # /* The stack pointer is used in an assertion. */
  176. # define LIGHT_SWAPOUT SWAPOUT
  177. # define DEBUG_SWAPOUT SWAPOUT
  178. # define DEBUG_SWAPIN SWAPIN
  179. #else
  180. # define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
  181. # define DEBUG_SWAPOUT
  182. # define DEBUG_SWAPIN
  183. #endif
  184. /*
  185. * Use LIGHT_SWAPIN when we know that c_p->stop cannot
  186. * have been updated (i.e. if there cannot have been
  187. * a garbage-collection).
  188. */
  189. #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
  190. #ifdef FORCE_HEAP_FRAGS
  191. # define HEAP_SPACE_VERIFIED(Words) do { \
  192. c_p->space_verified = (Words); \
  193. c_p->space_verified_from = HTOP; \
  194. }while(0)
  195. #else
  196. # define HEAP_SPACE_VERIFIED(Words) ((void)0)
  197. #endif
  198. #define PRE_BIF_SWAPOUT(P) \
  199. HEAP_TOP((P)) = HTOP; \
  200. (P)->stop = E; \
  201. PROCESS_MAIN_CHK_LOCKS((P)); \
  202. ERTS_UNREQ_PROC_MAIN_LOCK((P))
  203. #define db(N) (N)
  204. #define fb(N) ((Sint)(Sint32)(N))
  205. #define jb(N) ((Sint)(Sint32)(N))
  206. #define tb(N) (N)
  207. #define xb(N) (*ADD_BYTE_OFFSET(reg, N))
  208. #define yb(N) (*ADD_BYTE_OFFSET(E, N))
  209. #define Sb(N) (*REG_TARGET_PTR(N))
  210. #define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  211. #define Qb(N) (N)
  212. #define Ib(N) (N)
  213. #define x(N) reg[N]
  214. #define y(N) E[N]
  215. #define r(N) x(N)
  216. #define Q(N) (N*sizeof(Eterm *))
  217. #define l(N) (freg[N].fd)
  218. /*
  219. * Check that we haven't used the reductions and jump to function pointed to by
  220. * the I register. If we are out of reductions, do a context switch.
  221. */
  222. #define DispatchMacro() \
  223. do { \
  224. BeamInstr dis_next; \
  225. dis_next = *I; \
  226. CHECK_ARGS(I); \
  227. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  228. FCALLS--; \
  229. Goto(dis_next); \
  230. } else { \
  231. goto context_switch; \
  232. } \
  233. } while (0) \
  234. #define DispatchMacroFun() \
  235. do { \
  236. BeamInstr dis_next; \
  237. dis_next = *I; \
  238. CHECK_ARGS(I); \
  239. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  240. FCALLS--; \
  241. Goto(dis_next); \
  242. } else { \
  243. goto context_switch_fun; \
  244. } \
  245. } while (0)
  246. #define DispatchMacrox() \
  247. do { \
  248. if (FCALLS > 0) { \
  249. BeamInstr dis_next; \
  250. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
  251. dis_next = *I; \
  252. FCALLS--; \
  253. CHECK_ARGS(I); \
  254. Goto(dis_next); \
  255. } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
  256. && FCALLS > neg_o_reds) { \
  257. goto save_calls1; \
  258. } else { \
  259. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
  260. CHECK_ARGS(I); \
  261. goto context_switch; \
  262. } \
  263. } while (0)
  264. #ifdef DEBUG
  265. /*
  266. * To simplify breakpoint setting, put the code in one place only and jump to it.
  267. */
  268. # define Dispatch() goto do_dispatch
  269. # define Dispatchx() goto do_dispatchx
  270. # define Dispatchfun() goto do_dispatchfun
  271. #else
  272. /*
  273. * Inline for speed.
  274. */
  275. # define Dispatch() DispatchMacro()
  276. # define Dispatchx() DispatchMacrox()
  277. # define Dispatchfun() DispatchMacroFun()
  278. #endif
  279. #define Arg(N) I[(N)+1]
  280. #define GetSource(raw, dst) \
  281. do { \
  282. dst = raw; \
  283. switch (loader_tag(dst)) { \
  284. case LOADER_X_REG: \
  285. dst = x(loader_x_reg_index(dst)); \
  286. break; \
  287. case LOADER_Y_REG: \
  288. ASSERT(loader_y_reg_index(dst) >= 1); \
  289. dst = y(loader_y_reg_index(dst)); \
  290. break; \
  291. } \
  292. CHECK_TERM(dst); \
  293. } while (0)
  294. #define PUT_TERM_REG(term, desc) \
  295. do { \
  296. switch (loader_tag(desc)) { \
  297. case LOADER_X_REG: \
  298. x(loader_x_reg_index(desc)) = (term); \
  299. break; \
  300. case LOADER_Y_REG: \
  301. y(loader_y_reg_index(desc)) = (term); \
  302. break; \
  303. default: \
  304. ASSERT(0); \
  305. break; \
  306. } \
  307. } while(0)
  308. #define DispatchReturn \
  309. do { \
  310. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  311. FCALLS--; \
  312. Goto(*I); \
  313. } \
  314. else { \
  315. c_p->current = NULL; \
  316. c_p->arity = 1; \
  317. goto context_switch3; \
  318. } \
  319. } while (0)
  320. #ifdef DEBUG
  321. /* Better static type testing by the C compiler */
  322. # define BEAM_IS_TUPLE(Src) is_tuple(Src)
  323. #else
  324. /* Better performance */
  325. # define BEAM_IS_TUPLE(Src) is_boxed(Src)
  326. #endif
  327. /*
  328. * process_main() is already huge, so we want to avoid inlining
  329. * seldom used functions into it.
  330. */
  331. static void init_emulator_finish(void) ERTS_NOINLINE;
  332. static ErtsCodeMFA *ubif2mfa(void* uf) ERTS_NOINLINE;
  333. static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
  334. Eterm* reg, ErtsCodeMFA* bif_mfa) ERTS_NOINLINE;
  335. static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
  336. Eterm* reg, Eterm func) ERTS_NOINLINE;
  337. static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
  338. BeamInstr *I, Uint offs) ERTS_NOINLINE;
  339. static BeamInstr* apply(Process* p, Eterm* reg,
  340. BeamInstr *I, Uint offs) ERTS_NOINLINE;
  341. static BeamInstr* call_fun(Process* p, int arity,
  342. Eterm* reg, Eterm args) ERTS_NOINLINE;
  343. static BeamInstr* apply_fun(Process* p, Eterm fun,
  344. Eterm args, Eterm* reg) ERTS_NOINLINE;
  345. static Eterm new_fun(Process* p, Eterm* reg,
  346. ErlFunEntry* fe, int num_free) ERTS_NOINLINE;
  347. static int is_function2(Eterm Term, Uint arity);
  348. static Eterm erts_gc_new_map(Process* p, Eterm* reg, Uint live,
  349. Uint n, BeamInstr* ptr) ERTS_NOINLINE;
  350. static Eterm erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
  351. Uint live, BeamInstr* ptr) ERTS_NOINLINE;
  352. static Eterm erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
  353. Uint n, BeamInstr* new_p) ERTS_NOINLINE;
  354. static Eterm erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live,
  355. Uint n, Eterm* new_p) ERTS_NOINLINE;
  356. static Eterm get_map_element(Eterm map, Eterm key);
  357. static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);
  358. /*
  359. * Functions not directly called by process_main(). OK to inline.
  360. */
  361. static BeamInstr* next_catch(Process* c_p, Eterm *reg);
  362. static void terminate_proc(Process* c_p, Eterm Value);
  363. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  364. static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
  365. ErtsCodeMFA *bif_mfa, Eterm args);
  366. static struct StackTrace * get_trace_from_exc(Eterm exc);
  367. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  368. void
  369. init_emulator(void)
  370. {
  371. process_main(0, 0);
  372. }
  373. /*
  374. * On certain platforms, make sure that the main variables really are placed
  375. * in registers.
  376. */
  377. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  378. # define REG_xregs asm("%l1")
  379. # define REG_htop asm("%l2")
  380. # define REG_stop asm("%l3")
  381. # define REG_I asm("%l4")
  382. # define REG_fcalls asm("%l5")
  383. #elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG)
  384. # define REG_xregs asm("%r12")
  385. # define REG_htop
  386. # define REG_stop asm("%r13")
  387. # define REG_I asm("%rbx")
  388. # define REG_fcalls asm("%r14")
  389. #else
  390. # define REG_xregs
  391. # define REG_htop
  392. # define REG_stop
  393. # define REG_I
  394. # define REG_fcalls
  395. #endif
  396. #ifdef USE_VM_PROBES
  397. # define USE_VM_CALL_PROBES
  398. #endif
  399. #ifdef USE_VM_CALL_PROBES
  400. #define DTRACE_LOCAL_CALL(p, mfa) \
  401. if (DTRACE_ENABLED(local_function_entry)) { \
  402. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  403. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  404. int depth = STACK_START(p) - STACK_TOP(p); \
  405. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  406. DTRACE3(local_function_entry, process_name, mfa_buf, depth); \
  407. }
  408. #define DTRACE_GLOBAL_CALL(p, mfa) \
  409. if (DTRACE_ENABLED(global_function_entry)) { \
  410. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  411. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  412. int depth = STACK_START(p) - STACK_TOP(p); \
  413. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  414. DTRACE3(global_function_entry, process_name, mfa_buf, depth); \
  415. }
  416. #define DTRACE_RETURN(p, mfa) \
  417. if (DTRACE_ENABLED(function_return)) { \
  418. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  419. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  420. int depth = STACK_START(p) - STACK_TOP(p); \
  421. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  422. DTRACE3(function_return, process_name, mfa_buf, depth); \
  423. }
  424. #define DTRACE_BIF_ENTRY(p, mfa) \
  425. if (DTRACE_ENABLED(bif_entry)) { \
  426. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  427. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  428. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  429. DTRACE2(bif_entry, process_name, mfa_buf); \
  430. }
  431. #define DTRACE_BIF_RETURN(p, mfa) \
  432. if (DTRACE_ENABLED(bif_return)) { \
  433. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  434. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  435. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  436. DTRACE2(bif_return, process_name, mfa_buf); \
  437. }
  438. #define DTRACE_NIF_ENTRY(p, mfa) \
  439. if (DTRACE_ENABLED(nif_entry)) { \
  440. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  441. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  442. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  443. DTRACE2(nif_entry, process_name, mfa_buf); \
  444. }
  445. #define DTRACE_NIF_RETURN(p, mfa) \
  446. if (DTRACE_ENABLED(nif_return)) { \
  447. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  448. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  449. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  450. DTRACE2(nif_return, process_name, mfa_buf); \
  451. }
  452. #define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e) \
  453. do { \
  454. if (DTRACE_ENABLED(global_function_entry)) { \
  455. BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
  456. DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp)); \
  457. } \
  458. } while(0)
  459. #define DTRACE_RETURN_FROM_PC(p) \
  460. do { \
  461. ErtsCodeMFA* cmfa; \
  462. if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc((p)->cp))) { \
  463. DTRACE_RETURN((p), cmfa); \
  464. } \
  465. } while(0)
  466. #else /* USE_VM_PROBES */
  467. #define DTRACE_LOCAL_CALL(p, mfa) do {} while (0)
  468. #define DTRACE_GLOBAL_CALL(p, mfa) do {} while (0)
  469. #define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
  470. #define DTRACE_RETURN(p, mfa) do {} while (0)
  471. #define DTRACE_RETURN_FROM_PC(p) do {} while (0)
  472. #define DTRACE_BIF_ENTRY(p, mfa) do {} while (0)
  473. #define DTRACE_BIF_RETURN(p, mfa) do {} while (0)
  474. #define DTRACE_NIF_ENTRY(p, mfa) do {} while (0)
  475. #define DTRACE_NIF_RETURN(p, mfa) do {} while (0)
  476. #endif /* USE_VM_PROBES */
  477. #ifdef DEBUG
  478. #define ERTS_DBG_CHK_REDS(P, FC) \
  479. do { \
  480. if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
  481. ASSERT(FC <= 0); \
  482. ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
  483. <= 0 - (FC)); \
  484. } \
  485. else { \
  486. ASSERT(FC <= CONTEXT_REDS); \
  487. ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
  488. <= CONTEXT_REDS - (FC)); \
  489. } \
  490. } while (0)
  491. #else
  492. #define ERTS_DBG_CHK_REDS(P, FC)
  493. #endif
  494. #ifdef NO_FPE_SIGNALS
  495. # define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
  496. # define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
  497. #else
  498. # define ERTS_NO_FPE_CHECK_INIT(p)
  499. # define ERTS_NO_FPE_ERROR(p, a, b)
  500. #endif
  501. /*
  502. * process_main() is called twice:
  503. * The first call performs some initialisation, including exporting
  504. * the instructions' C labels to the loader.
  505. * The second call starts execution of BEAM code. This call never returns.
  506. */
  507. ERTS_NO_RETPOLINE
  508. void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
  509. {
  510. static int init_done = 0;
  511. Process* c_p = NULL;
  512. int reds_used;
  513. #ifdef DEBUG
  514. ERTS_DECLARE_DUMMY(Eterm pid);
  515. #endif
  516. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
  517. * in all other cases x0 is used.
  518. */
  519. register Eterm* reg REG_xregs = x_reg_array;
  520. /*
  521. * Top of heap (next free location); grows upwards.
  522. */
  523. register Eterm* HTOP REG_htop = NULL;
  524. /* Stack pointer. Grows downwards; points
  525. * to last item pushed (normally a saved
  526. * continuation pointer).
  527. */
  528. register Eterm* E REG_stop = NULL;
  529. /*
  530. * Pointer to next threaded instruction.
  531. */
  532. register BeamInstr *I REG_I = NULL;
  533. /* Number of reductions left. This function
  534. * returns to the scheduler when FCALLS reaches zero.
  535. */
  536. register Sint FCALLS REG_fcalls = 0;
  537. /*
  538. * X registers and floating point registers are located in
  539. * scheduler specific data.
  540. */
  541. register FloatDef *freg = f_reg_array;
  542. /*
  543. * For keeping the negative old value of 'reds' when call saving is active.
  544. */
  545. int neg_o_reds = 0;
  546. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  547. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  548. #else
  549. #ifndef NO_JUMP_TABLE
  550. static void* opcodes[] = { DEFINE_OPCODES };
  551. #else
  552. register BeamInstr Go;
  553. #endif
  554. #endif
  555. Uint64 start_time = 0; /* Monitor long schedule */
  556. BeamInstr* start_time_i = NULL;
  557. ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
  558. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  559. /*
  560. * Note: In this function, we attempt to place rarely executed code towards
  561. * the end of the function, in the hope that the cache hit rate will be better.
  562. * The initialization code is only run once, so it is at the very end.
  563. *
  564. * Note: c_p->arity must be set to reflect the number of useful terms in
  565. * c_p->arg_reg before calling the scheduler.
  566. */
  567. if (ERTS_UNLIKELY(!init_done)) {
  568. /* This should only be reached during the init phase when only the main
  569. * process is running. I.e. there is no race for init_done.
  570. */
  571. init_done = 1;
  572. goto init_emulator;
  573. }
  574. c_p = NULL;
  575. reds_used = 0;
  576. goto do_schedule1;
  577. do_schedule:
  578. ASSERT(c_p->arity < 6);
  579. ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
  580. if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
  581. reds_used = REDS_IN(c_p) - FCALLS;
  582. else
  583. reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
  584. ASSERT(reds_used >= 0);
  585. do_schedule1:
  586. if (start_time != 0) {
  587. Sint64 diff = erts_timestamp_millis() - start_time;
  588. if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
  589. ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
  590. ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
  591. monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
  592. }
  593. }
  594. PROCESS_MAIN_CHK_LOCKS(c_p);
  595. ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
  596. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  597. c_p = erts_schedule(NULL, c_p, reds_used);
  598. ASSERT(!(c_p->flags & F_HIPE_MODE));
  599. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  600. start_time = 0;
  601. #ifdef DEBUG
  602. pid = c_p->common.id; /* Save for debugging purposes */
  603. #endif
  604. ERTS_REQ_PROC_MAIN_LOCK(c_p);
  605. PROCESS_MAIN_CHK_LOCKS(c_p);
  606. ERTS_MSACC_UPDATE_CACHE_X();
  607. if (erts_system_monitor_long_schedule != 0) {
  608. start_time = erts_timestamp_millis();
  609. start_time_i = c_p->i;
  610. }
  611. ERL_BITS_RELOAD_STATEP(c_p);
  612. {
  613. int reds;
  614. Eterm* argp;
  615. BeamInstr next;
  616. int i;
  617. argp = c_p->arg_reg;
  618. for (i = c_p->arity - 1; i >= 0; i--) {
  619. reg[i] = argp[i];
  620. CHECK_TERM(reg[i]);
  621. }
  622. /*
  623. * We put the original reduction count in the process structure, to reduce
  624. * the code size (referencing a field in a struct through a pointer stored
  625. * in a register gives smaller code than referencing a global variable).
  626. */
  627. SET_I(c_p->i);
  628. REDS_IN(c_p) = reds = c_p->fcalls;
  629. #ifdef DEBUG
  630. c_p->debug_reds_in = reds;
  631. #endif
  632. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
  633. neg_o_reds = -CONTEXT_REDS;
  634. FCALLS = neg_o_reds + reds;
  635. } else {
  636. neg_o_reds = 0;
  637. FCALLS = reds;
  638. }
  639. ERTS_DBG_CHK_REDS(c_p, FCALLS);
  640. next = *I;
  641. SWAPIN;
  642. ASSERT(VALID_INSTR(next));
  643. #ifdef USE_VM_PROBES
  644. if (DTRACE_ENABLED(process_scheduled)) {
  645. DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
  646. DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
  647. dtrace_proc_str(c_p, process_buf);
  648. if (ERTS_PROC_IS_EXITING(c_p)) {
  649. sys_strcpy(fun_buf, "<exiting>");
  650. } else {
  651. ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
  652. if (cmfa) {
  653. dtrace_fun_decode(c_p, cmfa,
  654. NULL, fun_buf);
  655. } else {
  656. erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
  657. "<unknown/%p>", next);
  658. }
  659. }
  660. DTRACE2(process_scheduled, process_buf, fun_buf);
  661. }
  662. #endif
  663. Goto(next);
  664. }
  665. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  666. emulator_loop:
  667. #endif
  668. #ifdef NO_JUMP_TABLE
  669. switch (Go) {
  670. #endif
  671. #include "beam_hot.h"
  672. #ifdef DEBUG
  673. /*
  674. * Set a breakpoint here to get control just after a call instruction.
  675. * I points to the first instruction in the called function.
  676. *
  677. * In gdb, use 'call dis(I-5, 1)' to show the name of the function.
  678. */
  679. do_dispatch:
  680. DispatchMacro();
  681. do_dispatchx:
  682. DispatchMacrox();
  683. do_dispatchfun:
  684. DispatchMacroFun();
  685. #endif
  686. /*
  687. * Jumped to from the Dispatch() macro when the reductions are used up.
  688. *
  689. * Since the I register points just beyond the FuncBegin instruction, we
  690. * can get the module, function, and arity for the function being
  691. * called from I[-3], I[-2], and I[-1] respectively.
  692. */
  693. context_switch_fun:
  694. /* Add one for the environment of the fun */
  695. c_p->arity = erts_code_to_codemfa(I)->arity + 1;
  696. goto context_switch2;
  697. context_switch:
  698. c_p->arity = erts_code_to_codemfa(I)->arity;
  699. context_switch2: /* Entry for fun calls. */
  700. c_p->current = erts_code_to_codemfa(I);
  701. context_switch3:
  702. {
  703. Eterm* argp;
  704. int i;
  705. if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
  706. c_p->i = beam_exit;
  707. c_p->arity = 0;
  708. c_p->current = NULL;
  709. goto do_schedule;
  710. }
  711. /*
  712. * Make sure that there is enough room for the argument registers to be saved.
  713. */
  714. if (c_p->arity > c_p->max_arg_reg) {
  715. /*
  716. * Yes, this is an expensive operation, but you only pay it the first
  717. * time you call a function with more than 6 arguments which is
  718. * scheduled out. This is better than paying for 26 words of wasted
  719. * space for most processes which never call functions with more than
  720. * 6 arguments.
  721. */
  722. Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
  723. if (c_p->arg_reg != c_p->def_arg_reg) {
  724. c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
  725. (void *) c_p->arg_reg,
  726. size);
  727. } else {
  728. c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
  729. }
  730. c_p->max_arg_reg = c_p->arity;
  731. }
  732. /*
  733. * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it
  734. * now before saving registers.
  735. *
  736. * The '+ 1' compensates for the last increment which was not done
  737. * (beacuse the code for the Dispatch() macro becomes shorter that way).
  738. */
  739. ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
  740. if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
  741. reds_used = REDS_IN(c_p) - FCALLS;
  742. else
  743. reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
  744. ASSERT(reds_used >= 0);
  745. /*
  746. * Save the argument registers and everything else.
  747. */
  748. argp = c_p->arg_reg;
  749. for (i = c_p->arity - 1; i >= 0; i--) {
  750. argp[i] = reg[i];
  751. }
  752. SWAPOUT;
  753. c_p->i = I;
  754. goto do_schedule1;
  755. }
  756. #include "beam_warm.h"
  757. OpCase(normal_exit): {
  758. HEAVY_SWAPOUT;
  759. c_p->freason = EXC_NORMAL;
  760. c_p->arity = 0; /* In case this process will ever be garbed again. */
  761. ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
  762. erts_do_exit_process(c_p, am_normal);
  763. ERTS_REQ_PROC_MAIN_LOCK(c_p);
  764. HEAVY_SWAPIN;
  765. goto do_schedule;
  766. }
  767. OpCase(continue_exit): {
  768. HEAVY_SWAPOUT;
  769. ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
  770. erts_continue_exit_process(c_p);
  771. ERTS_REQ_PROC_MAIN_LOCK(c_p);
  772. HEAVY_SWAPIN;
  773. goto do_schedule;
  774. }
  775. find_func_info: {
  776. SWAPOUT;
  777. I = handle_error(c_p, I, reg, NULL);
  778. goto post_error_handling;
  779. }
  780. OpCase(call_error_handler):
  781. /*
  782. * At this point, I points to the code[3] in the export entry for
  783. * a function which is not loaded.
  784. *
  785. * code[0]: Module
  786. * code[1]: Function
  787. * code[2]: Arity
  788. * code[3]: &&call_error_handler
  789. * code[4]: Not used
  790. */
  791. HEAVY_SWAPOUT;
  792. I = call_error_handler(c_p, erts_code_to_codemfa(I),
  793. reg, am_undefined_function);
  794. HEAVY_SWAPIN;
  795. if (I) {
  796. Goto(*I);
  797. }
  798. /* Fall through */
  799. OpCase(error_action_code): {
  800. handle_error:
  801. SWAPOUT;
  802. I = handle_error(c_p, NULL, reg, NULL);
  803. post_error_handling:
  804. if (I == 0) {
  805. goto do_schedule;
  806. } else {
  807. ASSERT(!is_value(r(0)));
  808. SWAPIN;
  809. Goto(*I);
  810. }
  811. }
  812. OpCase(i_func_info_IaaI): {
  813. ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
  814. c_p->freason = EXC_FUNCTION_CLAUSE;
  815. c_p->current = &ci->mfa;
  816. goto handle_error;
  817. }
  818. #include "beam_cold.h"
  819. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  820. DEFINE_COUNTING_LABELS;
  821. #endif
  822. #ifndef NO_JUMP_TABLE
  823. #ifdef DEBUG
  824. end_emulator_loop:
  825. #endif
  826. #endif
  827. OpCase(int_code_end):
  828. OpCase(label_L):
  829. OpCase(on_load):
  830. OpCase(line_I):
  831. erts_exit(ERTS_ERROR_EXIT, "meta op\n");
  832. /*
  833. * One-time initialization of Beam emulator.
  834. */
  835. init_emulator:
  836. {
  837. #ifndef NO_JUMP_TABLE
  838. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  839. #ifdef DEBUG
  840. counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
  841. #endif
  842. counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
  843. beam_ops = counting_opcodes;
  844. #else /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
  845. beam_ops = opcodes;
  846. #endif /* ERTS_OPCODE_COUNTER_SUPPORT */
  847. #endif /* NO_JUMP_TABLE */
  848. init_emulator_finish();
  849. return;
  850. }
  851. #ifdef NO_JUMP_TABLE
  852. default:
  853. erts_exit(ERTS_ERROR_EXIT, "unexpected op code %d\n",Go);
  854. }
  855. #endif
  856. return; /* Never executed */
  857. save_calls1:
  858. {
  859. BeamInstr dis_next;
  860. save_calls(c_p, (Export *) Arg(0));
  861. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);
  862. dis_next = *I;
  863. FCALLS--;
  864. Goto(dis_next);
  865. }
  866. }
  867. /*
  868. * One-time initialization of emulator. Does not need to be
  869. * in process_main().
  870. */
  871. static void
  872. init_emulator_finish(void)
  873. {
  874. int i;
  875. Export* ep;
  876. #if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
  877. for (i = 0; i < NUMBER_OF_OPCODES; i++) {
  878. BeamInstr instr = BeamOpCodeAddr(i);
  879. if (instr >= (1ull << 32)) {
  880. erts_exit(ERTS_ERROR_EXIT,
  881. "This run-time was supposed be compiled with all code below 2Gb,\n"
  882. "but the instruction '%s' is located at %016lx.\n",
  883. opc[i].name, instr);
  884. }
  885. }
  886. #endif
  887. beam_apply[0] = BeamOpCodeAddr(op_i_apply);
  888. beam_apply[1] = BeamOpCodeAddr(op_normal_exit);
  889. beam_exit[0] = BeamOpCodeAddr(op_error_action_code);
  890. beam_continue_exit[0] = BeamOpCodeAddr(op_continue_exit);
  891. beam_return_to_trace[0] = BeamOpCodeAddr(op_i_return_to_trace);
  892. beam_return_trace[0] = BeamOpCodeAddr(op_return_trace);
  893. beam_exception_trace[0] = BeamOpCodeAddr(op_return_trace); /* UGLY */
  894. beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace);
  895. /*
  896. * Enter all BIFs into the export table.
  897. */
  898. for (i = 0; i < BIF_SIZE; i++) {
  899. ep = erts_export_put(bif_table[i].module,
  900. bif_table[i].name,
  901. bif_table[i].arity);
  902. bif_export[i] = ep;
  903. ep->beam[0] = BeamOpCodeAddr(op_apply_bif);
  904. ep->beam[1] = (BeamInstr) bif_table[i].f;
  905. /* XXX: set func info for bifs */
  906. ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
  907. }
  908. }
  909. /*
  910. * erts_dirty_process_main() is what dirty schedulers execute. Since they handle
  911. * only NIF calls they do not need to be able to execute all BEAM
  912. * instructions.
  913. */
  914. void erts_dirty_process_main(ErtsSchedulerData *esdp)
  915. {
  916. Process* c_p = NULL;
  917. ErtsMonotonicTime start_time;
  918. #ifdef DEBUG
  919. ERTS_DECLARE_DUMMY(Eterm pid);
  920. #endif
  921. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
  922. * in all other cases x0 is used.
  923. */
  924. register Eterm* reg REG_xregs = NULL;
  925. /*
  926. * Top of heap (next free location); grows upwards.
  927. */
  928. register Eterm* HTOP REG_htop = NULL;
  929. /* Stack pointer. Grows downwards; points
  930. * to last item pushed (normally a saved
  931. * continuation pointer).
  932. */
  933. register Eterm* E REG_stop = NULL;
  934. /*
  935. * Pointer to next threaded instruction.
  936. */
  937. register BeamInstr *I REG_I = NULL;
  938. ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
  939. /*
  940. * start_time always positive for dirty CPU schedulers,
  941. * and negative for dirty I/O schedulers.
  942. */
  943. if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp)) {
  944. start_time = erts_get_monotonic_time(NULL);
  945. ASSERT(start_time >= 0);
  946. }
  947. else {
  948. start_time = ERTS_SINT64_MIN;
  949. ASSERT(start_time < 0);
  950. }
  951. goto do_dirty_schedule;
  952. context_switch:
  953. c_p->current = erts_code_to_codemfa(I); /* Pointer to Mod, Func, Arity */
  954. c_p->arity = c_p->current->arity;
  955. {
  956. int reds_used;
  957. Eterm* argp;
  958. int i;
  959. /*
  960. * Make sure that there is enough room for the argument registers to be saved.
  961. */
  962. if (c_p->arity > c_p->max_arg_reg) {
  963. /*
  964. * Yes, this is an expensive operation, but you only pay it the first
  965. * time you call a function with more than 6 arguments which is
  966. * scheduled out. This is better than paying for 26 words of wasted
  967. * space for most processes which never call functions with more than
  968. * 6 arguments.
  969. */
  970. Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
  971. if (c_p->arg_reg != c_p->def_arg_reg) {
  972. c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
  973. (void *) c_p->arg_reg,
  974. size);
  975. } else {
  976. c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
  977. }
  978. c_p->max_arg_reg = c_p->arity;
  979. }
  980. /*
  981. * Save the argument registers and everything else.
  982. */
  983. argp = c_p->arg_reg;
  984. for (i = c_p->arity - 1; i >= 0; i--) {
  985. argp[i] = reg[i];
  986. }
  987. SWAPOUT;
  988. c_p->i = I;
  989. do_dirty_schedule:
  990. if (start_time < 0) {
  991. /*
  992. * Dirty I/O scheduler:
  993. * One reduction consumed regardless of
  994. * time spent in the dirty NIF.
  995. */
  996. reds_used = esdp->virtual_reds + 1;
  997. }
  998. else {
  999. /*
  1000. * Dirty CPU scheduler:
  1001. * Reductions based on time consumed by
  1002. * the dirty NIF.
  1003. */
  1004. Sint64 treds;
  1005. treds = erts_time2reds(start_time,
  1006. erts_get_monotonic_time(esdp));
  1007. treds += esdp->virtual_reds;
  1008. reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
  1009. }
  1010. if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
  1011. erts_proc_sig_handle_pending_suspend(c_p);
  1012. PROCESS_MAIN_CHK_LOCKS(c_p);
  1013. ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
  1014. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1015. c_p = erts_schedule(esdp, c_p, reds_used);
  1016. if (start_time >= 0) {
  1017. start_time = erts_get_monotonic_time(esdp);
  1018. ASSERT(start_time >= 0);
  1019. }
  1020. }
  1021. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1022. #ifdef DEBUG
  1023. pid = c_p->common.id; /* Save for debugging purposes */
  1024. #endif
  1025. ERTS_REQ_PROC_MAIN_LOCK(c_p);
  1026. PROCESS_MAIN_CHK_LOCKS(c_p);
  1027. ASSERT(!(c_p->flags & F_HIPE_MODE));
  1028. ERTS_MSACC_UPDATE_CACHE_X();
  1029. /*
  1030. * Set fcalls even though we ignore it, so we don't
  1031. * confuse code accessing it...
  1032. */
  1033. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
  1034. c_p->fcalls = 0;
  1035. else
  1036. c_p->fcalls = CONTEXT_REDS;
  1037. if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
  1038. erts_execute_dirty_system_task(c_p);
  1039. goto do_dirty_schedule;
  1040. }
  1041. else {
  1042. ErtsCodeMFA *codemfa;
  1043. Eterm* argp;
  1044. int i, exiting;
  1045. reg = esdp->x_reg_array;
  1046. argp = c_p->arg_reg;
  1047. for (i = c_p->arity - 1; i >= 0; i--) {
  1048. reg[i] = argp[i];
  1049. CHECK_TERM(reg[i]);
  1050. }
  1051. /*
  1052. * We put the original reduction count in the process structure, to reduce
  1053. * the code size (referencing a field in a struct through a pointer stored
  1054. * in a register gives smaller code than referencing a global variable).
  1055. */
  1056. I = c_p->i;
  1057. SWAPIN;
  1058. #ifdef USE_VM_PROBES
  1059. if (DTRACE_ENABLED(process_scheduled)) {
  1060. DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
  1061. DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
  1062. dtrace_proc_str(c_p, process_buf);
  1063. if (ERTS_PROC_IS_EXITING(c_p)) {
  1064. sys_strcpy(fun_buf, "<exiting>");
  1065. } else {
  1066. ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
  1067. if (cmfa) {
  1068. dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
  1069. } else {
  1070. erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
  1071. "<unknown/%p>", *I);
  1072. }
  1073. }
  1074. DTRACE2(process_scheduled, process_buf, fun_buf);
  1075. }
  1076. #endif
  1077. /*
  1078. * call_nif is always first instruction in function:
  1079. *
  1080. * I[-3]: Module
  1081. * I[-2]: Function
  1082. * I[-1]: Arity
  1083. * I[0]: &&call_nif
  1084. * I[1]: Function pointer to NIF function
  1085. * I[2]: Pointer to erl_module_nif
  1086. * I[3]: Function pointer to dirty NIF
  1087. *
  1088. * This layout is determined by the NifExport struct
  1089. */
  1090. ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
  1091. codemfa = erts_code_to_codemfa(I);
  1092. DTRACE_NIF_ENTRY(c_p, codemfa);
  1093. c_p->current = codemfa;
  1094. SWAPOUT;
  1095. PROCESS_MAIN_CHK_LOCKS(c_p);
  1096. ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
  1097. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1098. if (BeamIsOpCode(*I, op_apply_bif)) {
  1099. exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
  1100. }
  1101. else {
  1102. ASSERT(BeamIsOpCode(*I, op_call_nif));
  1103. exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
  1104. }
  1105. ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
  1106. PROCESS_MAIN_CHK_LOCKS(c_p);
  1107. ERTS_REQ_PROC_MAIN_LOCK(c_p);
  1108. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1109. ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
  1110. if (exiting)
  1111. goto do_dirty_schedule;
  1112. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1113. DTRACE_NIF_RETURN(c_p, codemfa);
  1114. ERTS_HOLE_CHECK(c_p);
  1115. SWAPIN;
  1116. I = c_p->i;
  1117. goto context_switch;
  1118. }
  1119. }
  1120. static ErtsCodeMFA *
  1121. ubif2mfa(void* uf)
  1122. {
  1123. int i;
  1124. for (i = 0; erts_u_bifs[i].bif; i++) {
  1125. if (erts_u_bifs[i].bif == uf)
  1126. return &bif_export[erts_u_bifs[i].exp_ix]->info.mfa;
  1127. }
  1128. erts_exit(ERTS_ERROR_EXIT, "bad u bif: %p\n", uf);
  1129. return NULL;
  1130. }
  1131. /*
  1132. * Mapping from the error code 'class tag' to atoms.
  1133. */
  1134. Eterm exception_tag[NUMBER_EXC_TAGS] = {
  1135. am_error, /* 0 */
  1136. am_exit, /* 1 */
  1137. am_throw, /* 2 */
  1138. };
  1139. /*
  1140. * Mapping from error code 'index' to atoms.
  1141. */
  1142. Eterm error_atom[NUMBER_EXIT_CODES] = {
  1143. am_internal_error, /* 0 */
  1144. am_normal, /* 1 */
  1145. am_internal_error, /* 2 */
  1146. am_badarg, /* 3 */
  1147. am_badarith, /* 4 */
  1148. am_badmatch, /* 5 */
  1149. am_function_clause, /* 6 */
  1150. am_case_clause, /* 7 */
  1151. am_if_clause, /* 8 */
  1152. am_undef, /* 9 */
  1153. am_badfun, /* 10 */
  1154. am_badarity, /* 11 */
  1155. am_timeout_value, /* 12 */
  1156. am_noproc, /* 13 */
  1157. am_notalive, /* 14 */
  1158. am_system_limit, /* 15 */
  1159. am_try_clause, /* 16 */
  1160. am_notsup, /* 17 */
  1161. am_badmap, /* 18 */
  1162. am_badkey, /* 19 */
  1163. };
  1164. /*
  1165. * To fully understand the error handling, one must keep in mind that
  1166. * when an exception is thrown, the search for a handler can jump back
  1167. * and forth between Beam and native code. Upon each mode switch, a
  1168. * dummy handler is inserted so that if an exception reaches that point,
  1169. * the handler is invoked (like any handler) and transfers control so
  1170. * that the search for a real handler is continued in the other mode.
  1171. * Therefore, c_p->freason and c_p->fvalue must still hold the exception
  1172. * info when the handler is executed, but normalized so that creation of
  1173. * error terms and saving of the stack trace is only done once, even if
  1174. * we pass through the error handling code several times.
  1175. *
  1176. * When a new exception is raised, the current stack trace information
  1177. * is quick-saved in a small structure allocated on the heap. Depending
  1178. * on how the exception is eventually caught (perhaps by causing the
  1179. * current process to terminate), the saved information may be used to
  1180. * create a symbolic (human-readable) representation of the stack trace
  1181. * at the point of the original exception.
  1182. */
  1183. static BeamInstr*
  1184. handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
  1185. {
  1186. Eterm* hp;
  1187. Eterm Value = c_p->fvalue;
  1188. Eterm Args = am_true;
  1189. ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
  1190. if (c_p->freason & EXF_RESTORE_NIF)
  1191. erts_nif_export_restore_error(c_p, &pc, reg, &bif_mfa);
  1192. #ifdef DEBUG
  1193. if (bif_mfa) {
  1194. /* Verify that bif_mfa does not point into our nif export */
  1195. NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
  1196. ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(NifExport)));
  1197. }
  1198. #endif
  1199. c_p->i = pc; /* In case we call erts_exit(). */
  1200. /*
  1201. * Check if we have an arglist for the top level call. If so, this
  1202. * is encoded in Value, so we have to dig out the real Value as well
  1203. * as the Arglist.
  1204. */
  1205. if (c_p->freason & EXF_ARGLIST) {
  1206. Eterm* tp;
  1207. ASSERT(is_tuple(Value));
  1208. tp = tuple_val(Value);
  1209. Value = tp[1];
  1210. Args = tp[2];
  1211. }
  1212. /*
  1213. * Save the stack trace info if the EXF_SAVETRACE flag is set. The
  1214. * main reason for doing this separately is to allow throws to later
  1215. * become promoted to errors without losing the original stack
  1216. * trace, even if they have passed through one or more catch and
  1217. * rethrow. It also makes the creation of symbolic stack traces much
  1218. * more modular.
  1219. */
  1220. if (c_p->freason & EXF_SAVETRACE) {
  1221. save_stacktrace(c_p, pc, reg, bif_mfa, Args);
  1222. }
  1223. /*
  1224. * Throws that are not caught are turned into 'nocatch' errors
  1225. */
  1226. if ((c_p->freason & EXF_THROWN) && (c_p->catches <= 0) ) {
  1227. hp = HAlloc(c_p, 3);
  1228. Value = TUPLE2(hp, am_nocatch, Value);
  1229. c_p->freason = EXC_ERROR;
  1230. }
  1231. /* Get the fully expanded error term */
  1232. Value = expand_error_value(c_p, c_p->freason, Value);
  1233. /* Save final error term and stabilize the exception flags so no
  1234. further expansion is done. */
  1235. c_p->fvalue = Value;
  1236. c_p->freason = PRIMARY_EXCEPTION(c_p->freason);
  1237. /* Find a handler or die */
  1238. if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
  1239. && !(c_p->freason & EXF_PANIC)) {
  1240. BeamInstr *new_pc;
  1241. /* The Beam handler code (catch_end or try_end) checks reg[0]
  1242. for THE_NON_VALUE to see if the previous code finished
  1243. abnormally. If so, reg[1], reg[2] and reg[3] should hold the
  1244. exception class, term and trace, respectively. (If the
  1245. handler is just a trap to native code, these registers will
  1246. be ignored.) */
  1247. reg[0] = THE_NON_VALUE;
  1248. reg[1] = exception_tag[GET_EXC_CLASS(c_p->freason)];
  1249. reg[2] = Value;
  1250. reg[3] = c_p->ftrace;
  1251. if ((new_pc = next_catch(c_p, reg))) {
  1252. c_p->cp = 0; /* To avoid keeping stale references. */
  1253. ERTS_RECV_MARK_CLEAR(c_p); /* No longer safe to use this position */
  1254. return new_pc;
  1255. }
  1256. if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found");
  1257. }
  1258. ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
  1259. terminate_proc(c_p, Value);
  1260. ERTS_REQ_PROC_MAIN_LOCK(c_p);
  1261. return NULL;
  1262. }
  1263. /*
  1264. * Find the nearest catch handler
  1265. */
  1266. static BeamInstr*
  1267. next_catch(Process* c_p, Eterm *reg) {
  1268. int active_catches = c_p->catches > 0;
  1269. int have_return_to_trace = 0;
  1270. Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
  1271. BeamInstr i_return_trace = beam_return_trace[0];
  1272. BeamInstr i_return_to_trace = beam_return_to_trace[0];
  1273. BeamInstr i_return_time_trace = beam_return_time_trace[0];
  1274. ptr = prev = c_p->stop;
  1275. ASSERT(ptr <= STACK_START(c_p));
  1276. /* This function is only called if we have active catch tags or have
  1277. * previously called a function that was exception-traced. As the exception
  1278. * trace flag isn't cleared after the traced function returns (and the
  1279. * catch tag inserted by it is gone), it's possible to land here with an
  1280. * empty stack, and the process should simply die when that happens. */
  1281. if (ptr == STACK_START(c_p)) {
  1282. ASSERT(!active_catches && IS_TRACED_FL(c_p, F_EXCEPTION_TRACE));
  1283. return NULL;
  1284. }
  1285. /*
  1286. * Better safe than sorry here. In debug builds, produce a core
  1287. * dump if the top of the stack doesn't point to a continuation
  1288. * pointer. In other builds, ignore a non-CP at the top of stack.
  1289. */
  1290. ASSERT(is_CP(*ptr));
  1291. if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
  1292. *cp_val(*ptr) != i_return_to_trace &&
  1293. *cp_val(*ptr) != i_return_time_trace ))
  1294. && c_p->cp) {
  1295. /* Can not follow cp here - code may be unloaded */
  1296. BeamInstr *cpp = c_p->cp;
  1297. if (cpp == beam_exception_trace) {
  1298. ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
  1299. erts_trace_exception(c_p, mfa,
  1300. reg[1], reg[2],
  1301. ERTS_TRACER_FROM_ETERM(ptr+1));
  1302. /* Skip return_trace parameters */
  1303. ptr += 2;
  1304. } else if (cpp == beam_return_trace) {
  1305. /* Skip return_trace parameters */
  1306. ptr += 2;
  1307. } else if (cpp == beam_return_time_trace) {
  1308. /* Skip return_trace parameters */
  1309. ptr += 1;
  1310. } else if (cpp == beam_return_to_trace) {
  1311. have_return_to_trace = !0; /* Record next cp */
  1312. }
  1313. }
  1314. while (ptr < STACK_START(c_p)) {
  1315. if (is_catch(*ptr)) {
  1316. if (active_catches) goto found_catch;
  1317. ptr++;
  1318. }
  1319. else if (is_CP(*ptr)) {
  1320. prev = ptr;
  1321. if (*cp_val(*prev) == i_return_trace) {
  1322. /* Skip stack frame variables */
  1323. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  1324. if (is_catch(*ptr) && active_catches) goto found_catch;
  1325. }
  1326. if (cp_val(*prev) == beam_exception_trace) {
  1327. ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
  1328. erts_trace_exception(c_p, mfa,
  1329. reg[1], reg[2],
  1330. ERTS_TRACER_FROM_ETERM(ptr+1));
  1331. }
  1332. /* Skip return_trace parameters */
  1333. ptr += 2;
  1334. } else if (*cp_val(*prev) == i_return_to_trace) {
  1335. /* Skip stack frame variables */
  1336. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  1337. if (is_catch(*ptr) && active_catches) goto found_catch;
  1338. }
  1339. have_return_to_trace = !0; /* Record next cp */
  1340. return_to_trace_ptr = NULL;
  1341. } else if (*cp_val(*prev) == i_return_time_trace) {
  1342. /* Skip stack frame variables */
  1343. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  1344. if (is_catch(*ptr) && active_catches) goto found_catch;
  1345. }
  1346. /* Skip return_trace parameters */
  1347. ptr += 1;
  1348. } else {
  1349. if (have_return_to_trace) {
  1350. /* Record this cp as possible return_to trace cp */
  1351. have_return_to_trace = 0;
  1352. return_to_trace_ptr = ptr;
  1353. } else return_to_trace_ptr = NULL;
  1354. ptr++;
  1355. }
  1356. } else ptr++;
  1357. }
  1358. return NULL;
  1359. found_catch:
  1360. ASSERT(ptr < STACK_START(c_p));
  1361. c_p->stop = prev;
  1362. if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO) && return_to_trace_ptr) {
  1363. /* The stackframe closest to the catch contained an
  1364. * return_to_trace entry, so since the execution now
  1365. * continues after the catch, a return_to trace message
  1366. * would be appropriate.
  1367. */
  1368. erts_trace_return_to(c_p, cp_val(*return_to_trace_ptr));
  1369. }
  1370. return catch_pc(*ptr);
  1371. }
  1372. /*
  1373. * Terminating the process when an exception is not caught
  1374. */
  1375. static void
  1376. terminate_proc(Process* c_p, Eterm Value)
  1377. {
  1378. Eterm *hp;
  1379. Eterm Args = NIL;
  1380. /* Add a stacktrace if this is an error. */
  1381. if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
  1382. Value = add_stacktrace(c_p, Value, c_p->ftrace);
  1383. }
  1384. /* EXF_LOG is a primary exception flag */
  1385. if (c_p->freason & EXF_LOG) {
  1386. int alive = erts_is_alive;
  1387. erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
  1388. /* Build the format message */
  1389. erts_dsprintf(dsbufp, "Error in process ~p ");
  1390. if (alive)
  1391. erts_dsprintf(dsbufp, "on node ~p ");
  1392. erts_dsprintf(dsbufp, "with exit value:~n~p~n");
  1393. /* Build the args in reverse order */
  1394. hp = HAlloc(c_p, 2);
  1395. Args = CONS(hp, Value, Args);
  1396. if (alive) {
  1397. hp = HAlloc(c_p, 2);
  1398. Args = CONS(hp, erts_this_node->sysname, Args);
  1399. }
  1400. hp = HAlloc(c_p, 2);
  1401. Args = CONS(hp, c_p->common.id, Args);
  1402. erts_send_error_term_to_logger(c_p->group_leader, dsbufp, Args);
  1403. }
  1404. /*
  1405. * If we use a shared heap, the process will be garbage-collected.
  1406. * Must zero c_p->arity to indicate that there are no live registers.
  1407. */
  1408. c_p->arity = 0;
  1409. erts_do_exit_process(c_p, Value);
  1410. }
  1411. /*
  1412. * Build and add a symbolic stack trace to the error value.
  1413. */
  1414. static Eterm
  1415. add_stacktrace(Process* c_p, Eterm Value, Eterm exc) {
  1416. Eterm Where = build_stacktrace(c_p, exc);
  1417. Eterm* hp = HAlloc(c_p, 3);
  1418. return TUPLE2(hp, Value, Where);
  1419. }
  1420. /*
  1421. * Forming the correct error value from the internal error code.
  1422. * This does not update c_p->fvalue or c_p->freason.
  1423. */
  1424. Eterm
  1425. expand_error_value(Process* c_p, Uint freason, Eterm Value) {
  1426. Eterm* hp;
  1427. Uint r;
  1428. r = GET_EXC_INDEX(freason);
  1429. ASSERT(r < NUMBER_EXIT_CODES); /* range check */
  1430. ASSERT(is_value(Value));
  1431. switch (r) {
  1432. case (GET_EXC_INDEX(EXC_PRIMARY)):
  1433. /* Primary exceptions use fvalue as it is */
  1434. break;
  1435. case (GET_EXC_INDEX(EXC_BADMATCH)):
  1436. case (GET_EXC_INDEX(EXC_CASE_CLAUSE)):
  1437. case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
  1438. case (GET_EXC_INDEX(EXC_BADFUN)):
  1439. case (GET_EXC_INDEX(EXC_BADARITY)):
  1440. case (GET_EXC_INDEX(EXC_BADMAP)):
  1441. case (GET_EXC_INDEX(EXC_BADKEY)):
  1442. /* Some comm…