PageRenderTime 119ms CodeModel.GetById 30ms RepoModel.GetById 0ms app.codeStats 1ms

/erts/emulator/beam/beam_emu.c

https://github.com/bsmr-erlang/otp
C | 3286 lines | 2213 code | 381 blank | 692 comment | 488 complexity | 9f0915967c123074b12c48ba79d366f4 MD5 | raw file
Possible License(s): BSD-3-Clause, LGPL-2.1, MPL-2.0-no-copyleft-exception, Apache-2.0
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2018. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. #ifdef HAVE_CONFIG_H
  21. # include "config.h"
  22. #endif
  23. #include <stddef.h> /* offsetof() */
  24. #include "sys.h"
  25. #include "erl_vm.h"
  26. #include "global.h"
  27. #include "erl_process.h"
  28. #include "error.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "beam_load.h"
  32. #include "erl_binary.h"
  33. #include "erl_map.h"
  34. #include "erl_bits.h"
  35. #include "dist.h"
  36. #include "beam_bp.h"
  37. #include "beam_catches.h"
  38. #include "erl_thr_progress.h"
  39. #include "erl_nfunc_sched.h"
  40. #ifdef HIPE
  41. #include "hipe_mode_switch.h"
  42. #include "hipe_bif1.h"
  43. #endif
  44. #include "dtrace-wrapper.h"
  45. #include "erl_proc_sig_queue.h"
  46. /* #define HARDDEBUG 1 */
  47. #if defined(NO_JUMP_TABLE)
  48. # define OpCase(OpCode) case op_##OpCode
  49. # define CountCase(OpCode) case op_count_##OpCode
  50. # define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)op_##OpCode)
  51. # define Goto(Rel) {Go = BeamCodeAddr(Rel); goto emulator_loop;}
  52. # define GotoPF(Rel) Goto(Rel)
  53. #else
  54. # define OpCase(OpCode) lb_##OpCode
  55. # define CountCase(OpCode) lb_count_##OpCode
  56. # define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)&&lb_##OpCode)
  57. # define Goto(Rel) goto *((void *)BeamCodeAddr(Rel))
  58. # define GotoPF(Rel) goto *((void *)Rel)
  59. # define LabelAddr(Label) &&Label
  60. #endif
  61. #ifdef ERTS_ENABLE_LOCK_CHECK
  62. # define PROCESS_MAIN_CHK_LOCKS(P) \
  63. do { \
  64. if ((P)) \
  65. erts_proc_lc_chk_only_proc_main((P)); \
  66. ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \
  67. } while (0)
  68. # define ERTS_REQ_PROC_MAIN_LOCK(P) \
  69. do { \
  70. if ((P)) \
  71. erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
  72. __FILE__, __LINE__); \
  73. } while (0)
  74. # define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
  75. do { \
  76. if ((P)) \
  77. erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
  78. } while (0)
  79. #else
  80. # define PROCESS_MAIN_CHK_LOCKS(P)
  81. # define ERTS_REQ_PROC_MAIN_LOCK(P)
  82. # define ERTS_UNREQ_PROC_MAIN_LOCK(P)
  83. #endif
  84. /*
  85. * Define macros for deep checking of terms.
  86. */
  87. #if defined(HARDDEBUG)
  88. # define CHECK_TERM(T) size_object(T)
  89. # define CHECK_ARGS(PC) \
  90. do { \
  91. int i_; \
  92. int Arity_ = PC[-1]; \
  93. for (i_ = 0; i_ < Arity_; i_++) { \
  94. CHECK_TERM(x(i_)); \
  95. } \
  96. } while (0)
  97. #else
  98. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  99. # define CHECK_ARGS(T)
  100. #endif
  101. #define CHECK_ALIGNED(Dst) ASSERT((((Uint)&Dst) & (sizeof(Uint)-1)) == 0)
  102. #define GET_BIF_MODULE(p) (p->info.mfa.module)
  103. #define GET_BIF_FUNCTION(p) (p->info.mfa.function)
  104. #define GET_BIF_ARITY(p) (p->info.mfa.arity)
  105. #define GET_BIF_ADDRESS(p) ((BifFunction) (p->beam[1]))
  106. #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
  107. /*
  108. * We reuse some of the fields in the save area in the process structure.
  109. * This is safe to do, since this space is only actively used when
  110. * the process is switched out.
  111. */
  112. #define REDS_IN(p) ((p)->def_arg_reg[5])
  113. /*
  114. * Add a byte offset to a pointer to Eterm. This is useful when
  115. * the loader has precalculated a byte offset.
  116. */
  117. #define ADD_BYTE_OFFSET(ptr, offset) \
  118. ((Eterm *) (((unsigned char *)ptr) + (offset)))
  119. /* We don't check the range if an ordinary switch is used */
  120. #ifdef NO_JUMP_TABLE
  121. # define VALID_INSTR(IP) (BeamCodeAddr(IP) < (NUMBER_OF_OPCODES*2+10))
  122. #else
  123. # define VALID_INSTR(IP) \
  124. ((BeamInstr)LabelAddr(emulator_loop) <= BeamCodeAddr(IP) && \
  125. BeamCodeAddr(IP) < (BeamInstr)LabelAddr(end_emulator_loop))
  126. #endif /* NO_JUMP_TABLE */
  127. #define SET_CP(p, ip) \
  128. ASSERT(VALID_INSTR(*(ip))); \
  129. (p)->cp = (ip)
  130. #define SET_I(ip) \
  131. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  132. I = (ip)
  133. /*
  134. * Register target (X or Y register).
  135. */
  136. #define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target))
  137. /*
  138. * Special Beam instructions.
  139. */
  140. BeamInstr beam_apply[2];
  141. BeamInstr beam_exit[1];
  142. BeamInstr beam_continue_exit[1];
  143. /* NOTE These should be the only variables containing trace instructions.
  144. ** Sometimes tests are for the instruction value, and sometimes
  145. ** for the referring variable (one of these), and rogue references
  146. ** will most likely cause chaos.
  147. */
  148. BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  149. BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
  150. BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  151. BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
  152. /*
  153. * All Beam instructions in numerical order.
  154. */
  155. #ifndef NO_JUMP_TABLE
  156. void** beam_ops;
  157. #endif
  158. #define SWAPIN \
  159. HTOP = HEAP_TOP(c_p); \
  160. E = c_p->stop
  161. #define SWAPOUT \
  162. HEAP_TOP(c_p) = HTOP; \
  163. c_p->stop = E
  164. #define HEAVY_SWAPIN \
  165. SWAPIN; \
  166. FCALLS = c_p->fcalls
  167. #define HEAVY_SWAPOUT \
  168. SWAPOUT; \
  169. c_p->fcalls = FCALLS
  170. /*
  171. * Use LIGHT_SWAPOUT when the called function
  172. * will call HeapOnlyAlloc() (and never HAlloc()).
  173. */
  174. #ifdef DEBUG
  175. # /* The stack pointer is used in an assertion. */
  176. # define LIGHT_SWAPOUT SWAPOUT
  177. # define DEBUG_SWAPOUT SWAPOUT
  178. # define DEBUG_SWAPIN SWAPIN
  179. #else
  180. # define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
  181. # define DEBUG_SWAPOUT
  182. # define DEBUG_SWAPIN
  183. #endif
  184. /*
  185. * Use LIGHT_SWAPIN when we know that c_p->stop cannot
  186. * have been updated (i.e. if there cannot have been
  187. * a garbage-collection).
  188. */
  189. #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
  190. #ifdef FORCE_HEAP_FRAGS
  191. # define HEAP_SPACE_VERIFIED(Words) do { \
  192. c_p->space_verified = (Words); \
  193. c_p->space_verified_from = HTOP; \
  194. }while(0)
  195. #else
  196. # define HEAP_SPACE_VERIFIED(Words) ((void)0)
  197. #endif
  198. #define PRE_BIF_SWAPOUT(P) \
  199. HEAP_TOP((P)) = HTOP; \
  200. (P)->stop = E; \
  201. PROCESS_MAIN_CHK_LOCKS((P)); \
  202. ERTS_UNREQ_PROC_MAIN_LOCK((P))
  203. #define db(N) (N)
  204. #define fb(N) ((Sint)(Sint32)(N))
  205. #define jb(N) ((Sint)(Sint32)(N))
  206. #define tb(N) (N)
  207. #define xb(N) (*ADD_BYTE_OFFSET(reg, N))
  208. #define yb(N) (*ADD_BYTE_OFFSET(E, N))
  209. #define Sb(N) (*REG_TARGET_PTR(N))
  210. #define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  211. #define Qb(N) (N)
  212. #define Ib(N) (N)
  213. #define x(N) reg[N]
  214. #define y(N) E[N]
  215. #define r(N) x(N)
  216. #define Q(N) (N*sizeof(Eterm *))
  217. #define l(N) (freg[N].fd)
  218. /*
  219. * Check that we haven't used the reductions and jump to function pointed to by
  220. * the I register. If we are out of reductions, do a context switch.
  221. */
  222. #define DispatchMacro() \
  223. do { \
  224. BeamInstr dis_next; \
  225. dis_next = *I; \
  226. CHECK_ARGS(I); \
  227. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  228. FCALLS--; \
  229. Goto(dis_next); \
  230. } else { \
  231. goto context_switch; \
  232. } \
  233. } while (0) \
  234. #define DispatchMacroFun() \
  235. do { \
  236. BeamInstr dis_next; \
  237. dis_next = *I; \
  238. CHECK_ARGS(I); \
  239. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  240. FCALLS--; \
  241. Goto(dis_next); \
  242. } else { \
  243. goto context_switch_fun; \
  244. } \
  245. } while (0)
  246. #define DispatchMacrox() \
  247. do { \
  248. if (FCALLS > 0) { \
  249. BeamInstr dis_next; \
  250. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
  251. dis_next = *I; \
  252. FCALLS--; \
  253. CHECK_ARGS(I); \
  254. Goto(dis_next); \
  255. } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
  256. && FCALLS > neg_o_reds) { \
  257. goto save_calls1; \
  258. } else { \
  259. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
  260. CHECK_ARGS(I); \
  261. goto context_switch; \
  262. } \
  263. } while (0)
  264. #ifdef DEBUG
  265. /*
  266. * To simplify breakpoint setting, put the code in one place only and jump to it.
  267. */
  268. # define Dispatch() goto do_dispatch
  269. # define Dispatchx() goto do_dispatchx
  270. # define Dispatchfun() goto do_dispatchfun
  271. #else
  272. /*
  273. * Inline for speed.
  274. */
  275. # define Dispatch() DispatchMacro()
  276. # define Dispatchx() DispatchMacrox()
  277. # define Dispatchfun() DispatchMacroFun()
  278. #endif
  279. #define Arg(N) I[(N)+1]
  280. #define GetSource(raw, dst) \
  281. do { \
  282. dst = raw; \
  283. switch (loader_tag(dst)) { \
  284. case LOADER_X_REG: \
  285. dst = x(loader_x_reg_index(dst)); \
  286. break; \
  287. case LOADER_Y_REG: \
  288. ASSERT(loader_y_reg_index(dst) >= 1); \
  289. dst = y(loader_y_reg_index(dst)); \
  290. break; \
  291. } \
  292. CHECK_TERM(dst); \
  293. } while (0)
  294. #define PUT_TERM_REG(term, desc) \
  295. do { \
  296. switch (loader_tag(desc)) { \
  297. case LOADER_X_REG: \
  298. x(loader_x_reg_index(desc)) = (term); \
  299. break; \
  300. case LOADER_Y_REG: \
  301. y(loader_y_reg_index(desc)) = (term); \
  302. break; \
  303. default: \
  304. ASSERT(0); \
  305. break; \
  306. } \
  307. } while(0)
  308. #define DispatchReturn \
  309. do { \
  310. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  311. FCALLS--; \
  312. Goto(*I); \
  313. } \
  314. else { \
  315. c_p->current = NULL; \
  316. c_p->arity = 1; \
  317. goto context_switch3; \
  318. } \
  319. } while (0)
  320. #ifdef DEBUG
  321. /* Better static type testing by the C compiler */
  322. # define BEAM_IS_TUPLE(Src) is_tuple(Src)
  323. #else
  324. /* Better performance */
  325. # define BEAM_IS_TUPLE(Src) is_boxed(Src)
  326. #endif
  327. /*
  328. * process_main() is already huge, so we want to avoid inlining
  329. * seldom used functions into it.
  330. */
  331. static void init_emulator_finish(void) ERTS_NOINLINE;
  332. static ErtsCodeMFA *ubif2mfa(void* uf) ERTS_NOINLINE;
  333. static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
  334. Eterm* reg, ErtsCodeMFA* bif_mfa) ERTS_NOINLINE;
  335. static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
  336. Eterm* reg, Eterm func) ERTS_NOINLINE;
  337. static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
  338. BeamInstr *I, Uint offs) ERTS_NOINLINE;
  339. static BeamInstr* apply(Process* p, Eterm* reg,
  340. BeamInstr *I, Uint offs) ERTS_NOINLINE;
  341. static BeamInstr* call_fun(Process* p, int arity,
  342. Eterm* reg, Eterm args) ERTS_NOINLINE;
  343. static BeamInstr* apply_fun(Process* p, Eterm fun,
  344. Eterm args, Eterm* reg) ERTS_NOINLINE;
  345. static Eterm new_fun(Process* p, Eterm* reg,
  346. ErlFunEntry* fe, int num_free) ERTS_NOINLINE;
  347. static int is_function2(Eterm Term, Uint arity);
  348. static Eterm erts_gc_new_map(Process* p, Eterm* reg, Uint live,
  349. Uint n, BeamInstr* ptr) ERTS_NOINLINE;
  350. static Eterm erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
  351. Uint live, BeamInstr* ptr) ERTS_NOINLINE;
  352. static Eterm erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
  353. Uint n, BeamInstr* new_p) ERTS_NOINLINE;
  354. static Eterm erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live,
  355. Uint n, Eterm* new_p) ERTS_NOINLINE;
  356. static Eterm get_map_element(Eterm map, Eterm key);
  357. static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);
  358. /*
  359. * Functions not directly called by process_main(). OK to inline.
  360. */
  361. static BeamInstr* next_catch(Process* c_p, Eterm *reg);
  362. static void terminate_proc(Process* c_p, Eterm Value);
  363. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  364. static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
  365. ErtsCodeMFA *bif_mfa, Eterm args);
  366. static struct StackTrace * get_trace_from_exc(Eterm exc);
  367. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
/*
 * VM start-up entry point: invokes process_main() once so that it runs
 * its one-time initialization path (exporting the threaded-code labels
 * to the loader) and returns.  The dummy register-array arguments are
 * never used on this first call.
 */
void
init_emulator(void)
{
    process_main(0, 0);
}
  373. /*
  374. * On certain platforms, make sure that the main variables really are placed
  375. * in registers.
  376. */
  377. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  378. # define REG_xregs asm("%l1")
  379. # define REG_htop asm("%l2")
  380. # define REG_stop asm("%l3")
  381. # define REG_I asm("%l4")
  382. # define REG_fcalls asm("%l5")
  383. #elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG)
  384. # define REG_xregs asm("%r12")
  385. # define REG_htop
  386. # define REG_stop asm("%r13")
  387. # define REG_I asm("%rbx")
  388. # define REG_fcalls asm("%r14")
  389. #else
  390. # define REG_xregs
  391. # define REG_htop
  392. # define REG_stop
  393. # define REG_I
  394. # define REG_fcalls
  395. #endif
  396. #ifdef USE_VM_PROBES
  397. # define USE_VM_CALL_PROBES
  398. #endif
  399. #ifdef USE_VM_CALL_PROBES
  400. #define DTRACE_LOCAL_CALL(p, mfa) \
  401. if (DTRACE_ENABLED(local_function_entry)) { \
  402. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  403. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  404. int depth = STACK_START(p) - STACK_TOP(p); \
  405. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  406. DTRACE3(local_function_entry, process_name, mfa_buf, depth); \
  407. }
  408. #define DTRACE_GLOBAL_CALL(p, mfa) \
  409. if (DTRACE_ENABLED(global_function_entry)) { \
  410. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  411. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  412. int depth = STACK_START(p) - STACK_TOP(p); \
  413. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  414. DTRACE3(global_function_entry, process_name, mfa_buf, depth); \
  415. }
  416. #define DTRACE_RETURN(p, mfa) \
  417. if (DTRACE_ENABLED(function_return)) { \
  418. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  419. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  420. int depth = STACK_START(p) - STACK_TOP(p); \
  421. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  422. DTRACE3(function_return, process_name, mfa_buf, depth); \
  423. }
  424. #define DTRACE_BIF_ENTRY(p, mfa) \
  425. if (DTRACE_ENABLED(bif_entry)) { \
  426. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  427. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  428. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  429. DTRACE2(bif_entry, process_name, mfa_buf); \
  430. }
  431. #define DTRACE_BIF_RETURN(p, mfa) \
  432. if (DTRACE_ENABLED(bif_return)) { \
  433. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  434. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  435. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  436. DTRACE2(bif_return, process_name, mfa_buf); \
  437. }
  438. #define DTRACE_NIF_ENTRY(p, mfa) \
  439. if (DTRACE_ENABLED(nif_entry)) { \
  440. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  441. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  442. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  443. DTRACE2(nif_entry, process_name, mfa_buf); \
  444. }
  445. #define DTRACE_NIF_RETURN(p, mfa) \
  446. if (DTRACE_ENABLED(nif_return)) { \
  447. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  448. DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
  449. dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
  450. DTRACE2(nif_return, process_name, mfa_buf); \
  451. }
  452. #define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e) \
  453. do { \
  454. if (DTRACE_ENABLED(global_function_entry)) { \
  455. BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
  456. DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp)); \
  457. } \
  458. } while(0)
  459. #define DTRACE_RETURN_FROM_PC(p) \
  460. do { \
  461. ErtsCodeMFA* cmfa; \
  462. if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc((p)->cp))) { \
  463. DTRACE_RETURN((p), cmfa); \
  464. } \
  465. } while(0)
  466. #else /* USE_VM_PROBES */
  467. #define DTRACE_LOCAL_CALL(p, mfa) do {} while (0)
  468. #define DTRACE_GLOBAL_CALL(p, mfa) do {} while (0)
  469. #define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
  470. #define DTRACE_RETURN(p, mfa) do {} while (0)
  471. #define DTRACE_RETURN_FROM_PC(p) do {} while (0)
  472. #define DTRACE_BIF_ENTRY(p, mfa) do {} while (0)
  473. #define DTRACE_BIF_RETURN(p, mfa) do {} while (0)
  474. #define DTRACE_NIF_ENTRY(p, mfa) do {} while (0)
  475. #define DTRACE_NIF_RETURN(p, mfa) do {} while (0)
  476. #endif /* USE_VM_PROBES */
  477. #ifdef DEBUG
  478. #define ERTS_DBG_CHK_REDS(P, FC) \
  479. do { \
  480. if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
  481. ASSERT(FC <= 0); \
  482. ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
  483. <= 0 - (FC)); \
  484. } \
  485. else { \
  486. ASSERT(FC <= CONTEXT_REDS); \
  487. ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
  488. <= CONTEXT_REDS - (FC)); \
  489. } \
  490. } while (0)
  491. #else
  492. #define ERTS_DBG_CHK_REDS(P, FC)
  493. #endif
  494. #ifdef NO_FPE_SIGNALS
  495. # define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
  496. # define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
  497. #else
  498. # define ERTS_NO_FPE_CHECK_INIT(p)
  499. # define ERTS_NO_FPE_ERROR(p, a, b)
  500. #endif
/*
 * process_main() is called twice:
 * The first call performs some initialisation, including exporting
 * the instructions' C labels to the loader.
 * The second call starts execution of BEAM code. This call never returns.
 */
ERTS_NO_RETPOLINE
void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
    static int init_done = 0;
    Process* c_p = NULL;
    int reds_used;
#ifdef DEBUG
    ERTS_DECLARE_DUMMY(Eterm pid);
#endif

    /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
     * in all other cases x0 is used.
     */
    register Eterm* reg REG_xregs = x_reg_array;

    /*
     * Top of heap (next free location); grows upwards.
     */
    register Eterm* HTOP REG_htop = NULL;

    /* Stack pointer. Grows downwards; points
     * to last item pushed (normally a saved
     * continuation pointer).
     */
    register Eterm* E REG_stop = NULL;

    /*
     * Pointer to next threaded instruction.
     */
    register BeamInstr *I REG_I = NULL;

    /* Number of reductions left. This function
     * returns to the scheduler when FCALLS reaches zero.
     */
    register Sint FCALLS REG_fcalls = 0;

    /*
     * X registers and floating point registers are located in
     * scheduler specific data.
     */
    register FloatDef *freg = f_reg_array;

    /*
     * For keeping the negative old value of 'reds' when call saving is active.
     */
    int neg_o_reds = 0;

#ifdef ERTS_OPCODE_COUNTER_SUPPORT
    static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
#else
#ifndef NO_JUMP_TABLE
    static void* opcodes[] = { DEFINE_OPCODES };
#else
    register BeamInstr Go;
#endif
#endif

    Uint64 start_time = 0; /* Monitor long schedule */
    BeamInstr* start_time_i = NULL;

    ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */

    ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */

    /*
     * Note: In this function, we attempt to place rarely executed code towards
     * the end of the function, in the hope that the cache hit rate will be better.
     * The initialization code is only run once, so it is at the very end.
     *
     * Note: c_p->arity must be set to reflect the number of useful terms in
     * c_p->arg_reg before calling the scheduler.
     */
    if (ERTS_UNLIKELY(!init_done)) {
        /* This should only be reached during the init phase when only the main
         * process is running. I.e. there is no race for init_done.
         */
        init_done = 1;
        goto init_emulator;
    }

    c_p = NULL;
    reds_used = 0;

    goto do_schedule1;

    /* Rescheduling point: compute the number of reductions consumed by
     * the outgoing process before handing it back to the scheduler.
     * With call saving active, FCALLS runs in the negative range
     * [-CONTEXT_REDS, 0], hence the second formula. */
 do_schedule:
    ASSERT(c_p->arity < 6);
    ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
        reds_used = REDS_IN(c_p) - FCALLS;
    else
        reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
    ASSERT(reds_used >= 0);
 do_schedule1:

    if (start_time != 0) {
        Sint64 diff = erts_timestamp_millis() - start_time;
        if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
            ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
            ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
            monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
        }
    }

    PROCESS_MAIN_CHK_LOCKS(c_p);
    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
    c_p = erts_schedule(NULL, c_p, reds_used);
    ASSERT(!(c_p->flags & F_HIPE_MODE));
    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
    start_time = 0;
#ifdef DEBUG
    pid = c_p->common.id; /* Save for debugging purposes */
#endif
    ERTS_REQ_PROC_MAIN_LOCK(c_p);
    PROCESS_MAIN_CHK_LOCKS(c_p);

    ERTS_MSACC_UPDATE_CACHE_X();

    if (erts_system_monitor_long_schedule != 0) {
        start_time = erts_timestamp_millis();
        start_time_i = c_p->i;
    }

    ERL_BITS_RELOAD_STATEP(c_p);
    {
        int reds;
        Eterm* argp;
        BeamInstr next;
        int i;

        /* Restore the argument registers saved when the process was
         * scheduled out. */
        argp = c_p->arg_reg;
        for (i = c_p->arity - 1; i >= 0; i--) {
            reg[i] = argp[i];
            CHECK_TERM(reg[i]);
        }

        /*
         * We put the original reduction count in the process structure, to reduce
         * the code size (referencing a field in a struct through a pointer stored
         * in a register gives smaller code than referencing a global variable).
         */

        SET_I(c_p->i);
        REDS_IN(c_p) = reds = c_p->fcalls;
#ifdef DEBUG
        c_p->debug_reds_in = reds;
#endif
        if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
            neg_o_reds = -CONTEXT_REDS;
            FCALLS = neg_o_reds + reds;
        } else {
            neg_o_reds = 0;
            FCALLS = reds;
        }

        ERTS_DBG_CHK_REDS(c_p, FCALLS);

        next = *I;
        SWAPIN;
        ASSERT(VALID_INSTR(next));

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(process_scheduled)) {
            DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
            DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
            dtrace_proc_str(c_p, process_buf);

            if (ERTS_PROC_IS_EXITING(c_p)) {
                sys_strcpy(fun_buf, "<exiting>");
            } else {
                ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
                if (cmfa) {
                    dtrace_fun_decode(c_p, cmfa,
                                      NULL, fun_buf);
                } else {
                    erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
                                  "<unknown/%p>", next);
                }
            }

            DTRACE2(process_scheduled, process_buf, fun_buf);
        }
#endif
        /* Enter the instruction stream of the newly scheduled process. */
        Goto(next);
    }

#if defined(DEBUG) || defined(NO_JUMP_TABLE)
 emulator_loop:
#endif

#ifdef NO_JUMP_TABLE
    switch (Go) {
#endif

#include "beam_hot.h"

#ifdef DEBUG
    /*
     * Set a breakpoint here to get control just after a call instruction.
     * I points to the first instruction in the called function.
     *
     * In gdb, use 'call dis(I-5, 1)' to show the name of the function.
     */
 do_dispatch:
    DispatchMacro();

 do_dispatchx:
    DispatchMacrox();

 do_dispatchfun:
    DispatchMacroFun();
#endif

    /*
     * Jumped to from the Dispatch() macro when the reductions are used up.
     *
     * Since the I register points just beyond the FuncBegin instruction, we
     * can get the module, function, and arity for the function being
     * called from I[-3], I[-2], and I[-1] respectively.
     */
 context_switch_fun:
    /* Add one for the environment of the fun */
    c_p->arity = erts_code_to_codemfa(I)->arity + 1;
    goto context_switch2;

 context_switch:
    c_p->arity = erts_code_to_codemfa(I)->arity;

 context_switch2:            /* Entry for fun calls. */
    c_p->current = erts_code_to_codemfa(I);

 context_switch3:

 {
     Eterm* argp;
     int i;

     if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
         c_p->i = beam_exit;
         c_p->arity = 0;
         c_p->current = NULL;
         goto do_schedule;
     }

     /*
      * Make sure that there is enough room for the argument registers to be saved.
      */
     if (c_p->arity > c_p->max_arg_reg) {
         /*
          * Yes, this is an expensive operation, but you only pay it the first
          * time you call a function with more than 6 arguments which is
          * scheduled out. This is better than paying for 26 words of wasted
          * space for most processes which never call functions with more than
          * 6 arguments.
          */
         Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
         if (c_p->arg_reg != c_p->def_arg_reg) {
             c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
                                                   (void *) c_p->arg_reg,
                                                   size);
         } else {
             c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
         }
         c_p->max_arg_reg = c_p->arity;
     }

     /*
      * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it
      * now before saving registers.
      *
      * The '+ 1' compensates for the last increment which was not done
      * (because the code for the Dispatch() macro becomes shorter that way).
      */

     ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
     if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
         reds_used = REDS_IN(c_p) - FCALLS;
     else
         reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
     ASSERT(reds_used >= 0);

     /*
      * Save the argument registers and everything else.
      */

     argp = c_p->arg_reg;
     for (i = c_p->arity - 1; i >= 0; i--) {
         argp[i] = reg[i];
     }
     SWAPOUT;
     c_p->i = I;
     goto do_schedule1;
 }

#include "beam_warm.h"

 OpCase(normal_exit): {
     HEAVY_SWAPOUT;
     c_p->freason = EXC_NORMAL;
     c_p->arity = 0;         /* In case this process will ever be garbed again. */
     ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
     erts_do_exit_process(c_p, am_normal);
     ERTS_REQ_PROC_MAIN_LOCK(c_p);
     HEAVY_SWAPIN;
     goto do_schedule;
 }

 OpCase(continue_exit): {
     HEAVY_SWAPOUT;
     ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
     erts_continue_exit_process(c_p);
     ERTS_REQ_PROC_MAIN_LOCK(c_p);
     HEAVY_SWAPIN;
     goto do_schedule;
 }

 find_func_info: {
     SWAPOUT;
     I = handle_error(c_p, I, reg, NULL);
     goto post_error_handling;
 }

 OpCase(call_error_handler):
    /*
     * At this point, I points to the code[3] in the export entry for
     * a function which is not loaded.
     *
     * code[0]: Module
     * code[1]: Function
     * code[2]: Arity
     * code[3]: &&call_error_handler
     * code[4]: Not used
     */
    HEAVY_SWAPOUT;
    I = call_error_handler(c_p, erts_code_to_codemfa(I),
                           reg, am_undefined_function);
    HEAVY_SWAPIN;
    if (I) {
        Goto(*I);
    }

 /* Fall through */
 OpCase(error_action_code): {
 handle_error:
     SWAPOUT;
     I = handle_error(c_p, NULL, reg, NULL);
 post_error_handling:
     /* handle_error() returns NULL when the exception could not be
      * caught in this process; the process is then scheduled out. */
     if (I == 0) {
         goto do_schedule;
     } else {
         ASSERT(!is_value(r(0)));
         SWAPIN;
         Goto(*I);
     }
 }

 OpCase(i_func_info_IaaI): {
     ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
     c_p->freason = EXC_FUNCTION_CLAUSE;
     c_p->current = &ci->mfa;
     goto handle_error;
 }

#include "beam_cold.h"

#ifdef ERTS_OPCODE_COUNTER_SUPPORT
    DEFINE_COUNTING_LABELS;
#endif

#ifndef NO_JUMP_TABLE
#ifdef DEBUG
 end_emulator_loop:
#endif
#endif

 OpCase(int_code_end):
 OpCase(label_L):
 OpCase(on_load):
 OpCase(line_I):
    erts_exit(ERTS_ERROR_EXIT, "meta op\n");

    /*
     * One-time initialization of Beam emulator.
     */

 init_emulator:
 {
#ifndef NO_JUMP_TABLE
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
#ifdef DEBUG
     counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
#endif
     counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
     beam_ops = counting_opcodes;
#else /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
     beam_ops = opcodes;
#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
#endif /* NO_JUMP_TABLE */

     init_emulator_finish();
     return;
 }
#ifdef NO_JUMP_TABLE
 default:
    erts_exit(ERTS_ERROR_EXIT, "unexpected op code %d\n",Go);
 }
#endif
    return;                 /* Never executed */

    /* Jumped to from DispatchMacrox() when call saving is active:
     * record the call in the saved-calls buffer before dispatching
     * to the export entry. */
 save_calls1:
    {
        BeamInstr dis_next;

        save_calls(c_p, (Export *) Arg(0));

        SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);

        dis_next = *I;
        FCALLS--;
        Goto(dis_next);
    }
}
  867. /*
  868. * One-time initialization of emulator. Does not need to be
  869. * in process_main().
  870. */
  871. static void
  872. init_emulator_finish(void)
  873. {
  874. int i;
  875. Export* ep;
  876. #if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
  877. for (i = 0; i < NUMBER_OF_OPCODES; i++) {
  878. BeamInstr instr = BeamOpCodeAddr(i);
  879. if (instr >= (1ull << 32)) {
  880. erts_exit(ERTS_ERROR_EXIT,
  881. "This run-time was supposed be compiled with all code below 2Gb,\n"
  882. "but the instruction '%s' is located at %016lx.\n",
  883. opc[i].name, instr);
  884. }
  885. }
  886. #endif
  887. beam_apply[0] = BeamOpCodeAddr(op_i_apply);
  888. beam_apply[1] = BeamOpCodeAddr(op_normal_exit);
  889. beam_exit[0] = BeamOpCodeAddr(op_error_action_code);
  890. beam_continue_exit[0] = BeamOpCodeAddr(op_continue_exit);
  891. beam_return_to_trace[0] = BeamOpCodeAddr(op_i_return_to_trace);
  892. beam_return_trace[0] = BeamOpCodeAddr(op_return_trace);
  893. beam_exception_trace[0] = BeamOpCodeAddr(op_return_trace); /* UGLY */
  894. beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace);
  895. /*
  896. * Enter all BIFs into the export table.
  897. */
  898. for (i = 0; i < BIF_SIZE; i++) {
  899. ep = erts_export_put(bif_table[i].module,
  900. bif_table[i].name,
  901. bif_table[i].arity);
  902. bif_export[i] = ep;
  903. ep->beam[0] = BeamOpCodeAddr(op_apply_bif);
  904. ep->beam[1] = (BeamInstr) bif_table[i].f;
  905. /* XXX: set func info for bifs */
  906. ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
  907. }
  908. }
  909. /*
  910. * erts_dirty_process_main() is what dirty schedulers execute. Since they handle
  911. * only NIF calls they do not need to be able to execute all BEAM
  912. * instructions.
  913. */
void erts_dirty_process_main(ErtsSchedulerData *esdp)
{
    Process* c_p = NULL;
    ErtsMonotonicTime start_time;
#ifdef DEBUG
    ERTS_DECLARE_DUMMY(Eterm pid);
#endif

    /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
     * in all other cases x0 is used.
     */
    register Eterm* reg REG_xregs = NULL;

    /*
     * Top of heap (next free location); grows upwards.
     */
    register Eterm* HTOP REG_htop = NULL;

    /* Stack pointer. Grows downwards; points
     * to last item pushed (normally a saved
     * continuation pointer).
     */
    register Eterm* E REG_stop = NULL;

    /*
     * Pointer to next threaded instruction.
     */
    register BeamInstr *I REG_I = NULL;

    ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */

    /*
     * start_time always positive for dirty CPU schedulers,
     * and negative for dirty I/O schedulers.
     */
    if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp)) {
        start_time = erts_get_monotonic_time(NULL);
        ASSERT(start_time >= 0);
    }
    else {
        start_time = ERTS_SINT64_MIN;
        ASSERT(start_time < 0);
    }

    goto do_dirty_schedule;

context_switch:
    /* The process is being scheduled out; save what the next schedule-in
     * needs: current MFA, arity, argument registers, and heap pointers. */
    c_p->current = erts_code_to_codemfa(I); /* Pointer to Mod, Func, Arity */
    c_p->arity = c_p->current->arity;

    {
        int reds_used;
        Eterm* argp;
        int i;

        /*
         * Make sure that there is enough room for the argument registers to be saved.
         */
        if (c_p->arity > c_p->max_arg_reg) {
            /*
             * Yes, this is an expensive operation, but you only pay it the first
             * time you call a function with more than 6 arguments which is
             * scheduled out. This is better than paying for 26 words of wasted
             * space for most processes which never call functions with more than
             * 6 arguments.
             */
            Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
            if (c_p->arg_reg != c_p->def_arg_reg) {
                c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
                                                      (void *) c_p->arg_reg,
                                                      size);
            } else {
                c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
            }
            c_p->max_arg_reg = c_p->arity;
        }

        /*
         * Save the argument registers and everything else.
         */
        argp = c_p->arg_reg;
        for (i = c_p->arity - 1; i >= 0; i--) {
            argp[i] = reg[i];
        }
        SWAPOUT;
        c_p->i = I;

    do_dirty_schedule:

        if (start_time < 0) {
            /*
             * Dirty I/O scheduler:
             * One reduction consumed regardless of
             * time spent in the dirty NIF.
             */
            reds_used = esdp->virtual_reds + 1;
        }
        else {
            /*
             * Dirty CPU scheduler:
             * Reductions based on time consumed by
             * the dirty NIF.
             */
            Sint64 treds;
            treds = erts_time2reds(start_time,
                                   erts_get_monotonic_time(esdp));
            treds += esdp->virtual_reds;
            reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
        }

        if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
            erts_proc_sig_handle_pending_suspend(c_p);

        PROCESS_MAIN_CHK_LOCKS(c_p);
        ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
        /* Hand the old process back and pick up the next one to run.
         * c_p is NULL on the very first iteration. */
        c_p = erts_schedule(esdp, c_p, reds_used);

        if (start_time >= 0) {
            /* Dirty CPU scheduler: restart the clock for the new job. */
            start_time = erts_get_monotonic_time(esdp);
            ASSERT(start_time >= 0);
        }
    }

    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
#ifdef DEBUG
    pid = c_p->common.id; /* Save for debugging purposes */
#endif
    ERTS_REQ_PROC_MAIN_LOCK(c_p);
    PROCESS_MAIN_CHK_LOCKS(c_p);

    ASSERT(!(c_p->flags & F_HIPE_MODE));
    ERTS_MSACC_UPDATE_CACHE_X();

    /*
     * Set fcalls even though we ignore it, so we don't
     * confuse code accessing it...
     */
    if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
        c_p->fcalls = 0;
    else
        c_p->fcalls = CONTEXT_REDS;

    if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
        /* A dirty system task (e.g. GC) was requested; run it and
         * reschedule instead of executing the process' own code. */
        erts_execute_dirty_system_task(c_p);
        goto do_dirty_schedule;
    }
    else {
        ErtsCodeMFA *codemfa;
        Eterm* argp;
        int i, exiting;

        /* Restore the argument registers saved at schedule-out time. */
        reg = esdp->x_reg_array;

        argp = c_p->arg_reg;
        for (i = c_p->arity - 1; i >= 0; i--) {
            reg[i] = argp[i];
            CHECK_TERM(reg[i]);
        }

        /*
         * We put the original reduction count in the process structure, to reduce
         * the code size (referencing a field in a struct through a pointer stored
         * in a register gives smaller code than referencing a global variable).
         */

        I = c_p->i;

        SWAPIN;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(process_scheduled)) {
            DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
            DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
            dtrace_proc_str(c_p, process_buf);

            if (ERTS_PROC_IS_EXITING(c_p)) {
                sys_strcpy(fun_buf, "<exiting>");
            } else {
                ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
                if (cmfa) {
                    dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
                } else {
                    erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
                                  "<unknown/%p>", *I);
                }
            }

            DTRACE2(process_scheduled, process_buf, fun_buf);
        }
#endif

        /*
         * call_nif is always first instruction in function:
         *
         * I[-3]: Module
         * I[-2]: Function
         * I[-1]: Arity
         * I[0]: &&call_nif
         * I[1]: Function pointer to NIF function
         * I[2]: Pointer to erl_module_nif
         * I[3]: Function pointer to dirty NIF
         *
         * This layout is determined by the NifExport struct
         */
        ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);

        codemfa = erts_code_to_codemfa(I);

        DTRACE_NIF_ENTRY(c_p, codemfa);
        c_p->current = codemfa;
        SWAPOUT;
        PROCESS_MAIN_CHK_LOCKS(c_p);
        ERTS_UNREQ_PROC_MAIN_LOCK(c_p);

        /* Dispatch on the opcode: dirty BIF or dirty NIF. The main lock is
         * released while the (potentially long-running) call executes. */
        ASSERT(!ERTS_PROC_IS_EXITING(c_p));
        if (BeamIsOpCode(*I, op_apply_bif)) {
            exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
        }
        else {
            ASSERT(BeamIsOpCode(*I, op_call_nif));
            exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
        }

        ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));

        PROCESS_MAIN_CHK_LOCKS(c_p);
        ERTS_REQ_PROC_MAIN_LOCK(c_p);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
        ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
        if (exiting)
            goto do_dirty_schedule;
        ASSERT(!ERTS_PROC_IS_EXITING(c_p));

        DTRACE_NIF_RETURN(c_p, codemfa);
        ERTS_HOLE_CHECK(c_p);
        SWAPIN;
        I = c_p->i;
        goto context_switch;
    }
}
  1120. static ErtsCodeMFA *
  1121. ubif2mfa(void* uf)
  1122. {
  1123. int i;
  1124. for (i = 0; erts_u_bifs[i].bif; i++) {
  1125. if (erts_u_bifs[i].bif == uf)
  1126. return &bif_export[erts_u_bifs[i].exp_ix]->info.mfa;
  1127. }
  1128. erts_exit(ERTS_ERROR_EXIT, "bad u bif: %p\n", uf);
  1129. return NULL;
  1130. }
  1131. /*
  1132. * Mapping from the error code 'class tag' to atoms.
  1133. */
  1134. Eterm exception_tag[NUMBER_EXC_TAGS] = {
  1135. am_error, /* 0 */
  1136. am_exit, /* 1 */
  1137. am_throw, /* 2 */
  1138. };
  1139. /*
  1140. * Mapping from error code 'index' to atoms.
  1141. */
  1142. Eterm error_atom[NUMBER_EXIT_CODES] = {
  1143. am_internal_error, /* 0 */
  1144. am_normal, /* 1 */
  1145. am_internal_error, /* 2 */
  1146. am_badarg, /* 3 */
  1147. am_badarith, /* 4 */
  1148. am_badmatch, /* 5 */
  1149. am_function_clause, /* 6 */
  1150. am_case_clause, /* 7 */
  1151. am_if_clause, /* 8 */
  1152. am_undef, /* 9 */
  1153. am_badfun, /* 10 */
  1154. am_badarity, /* 11 */
  1155. am_timeout_value, /* 12 */
  1156. am_noproc, /* 13 */
  1157. am_notalive, /* 14 */
  1158. am_system_limit, /* 15 */
  1159. am_try_clause, /* 16 */
  1160. am_notsup, /* 17 */
  1161. am_badmap, /* 18 */
  1162. am_badkey, /* 19 */
  1163. };
  1164. /*
  1165. * To fully understand the error handling, one must keep in mind that
  1166. * when an exception is thrown, the search for a handler can jump back
  1167. * and forth between Beam and native code. Upon each mode switch, a
  1168. * dummy handler is inserted so that if an exception reaches that point,
  1169. * the handler is invoked (like any handler) and transfers control so
  1170. * that the search for a real handler is continued in the other mode.
  1171. * Therefore, c_p->freason and c_p->fvalue must still hold the exception
  1172. * info when the handler is executed, but normalized so that creation of
  1173. * error terms and saving of the stack trace is only done once, even if
  1174. * we pass through the error handling code several times.
  1175. *
  1176. * When a new exception is raised, the current stack trace information
  1177. * is quick-saved in a small structure allocated on the heap. Depending
  1178. * on how the exception is eventually caught (perhaps by causing the
  1179. * current process to terminate), the saved information may be used to
  1180. * create a symbolic (human-readable) representation of the stack trace
  1181. * at the point of the original exception.
  1182. */
/*
 * Central exception handler: normalize the exception described by
 * c_p->freason/c_p->fvalue, save the stack trace if requested, and
 * either return the address of the nearest catch/try handler or
 * terminate the process (returning NULL).
 *
 * pc      - address where the exception occurred.
 * reg     - X register array; on success reg[0..3] are loaded for the
 *           handler (non-value, class, value, trace).
 * bif_mfa - MFA of the failing BIF, or NULL if not in a BIF.
 */
static BeamInstr*
handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
{
    Eterm* hp;
    Eterm Value = c_p->fvalue;
    Eterm Args = am_true;

    ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */

    /* If the exception was raised from a scheduled NIF call, restore the
     * original pc/registers/MFA before unwinding. */
    if (c_p->freason & EXF_RESTORE_NIF)
        erts_nif_export_restore_error(c_p, &pc, reg, &bif_mfa);

#ifdef DEBUG
    if (bif_mfa) {
        /* Verify that bif_mfa does not point into our nif export */
        NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
        ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(NifExport)));
    }
#endif

    c_p->i = pc;    /* In case we call erts_exit(). */

    /*
     * Check if we have an arglist for the top level call. If so, this
     * is encoded in Value, so we have to dig out the real Value as well
     * as the Arglist.
     */
    if (c_p->freason & EXF_ARGLIST) {
        Eterm* tp;
        ASSERT(is_tuple(Value));
        tp = tuple_val(Value);
        Value = tp[1];
        Args = tp[2];
    }

    /*
     * Save the stack trace info if the EXF_SAVETRACE flag is set. The
     * main reason for doing this separately is to allow throws to later
     * become promoted to errors without losing the original stack
     * trace, even if they have passed through one or more catch and
     * rethrow. It also makes the creation of symbolic stack traces much
     * more modular.
     */
    if (c_p->freason & EXF_SAVETRACE) {
        save_stacktrace(c_p, pc, reg, bif_mfa, Args);
    }

    /*
     * Throws that are not caught are turned into 'nocatch' errors
     */
    if ((c_p->freason & EXF_THROWN) && (c_p->catches <= 0) ) {
        hp = HAlloc(c_p, 3);
        Value = TUPLE2(hp, am_nocatch, Value);
        c_p->freason = EXC_ERROR;
    }

    /* Get the fully expanded error term */
    Value = expand_error_value(c_p, c_p->freason, Value);

    /* Save final error term and stabilize the exception flags so no
       further expansion is done. */
    c_p->fvalue = Value;
    c_p->freason = PRIMARY_EXCEPTION(c_p->freason);

    /* Find a handler or die */
    if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
        && !(c_p->freason & EXF_PANIC)) {
        BeamInstr *new_pc;
        /* The Beam handler code (catch_end or try_end) checks reg[0]
           for THE_NON_VALUE to see if the previous code finished
           abnormally. If so, reg[1], reg[2] and reg[3] should hold the
           exception class, term and trace, respectively. (If the
           handler is just a trap to native code, these registers will
           be ignored.) */
        reg[0] = THE_NON_VALUE;
        reg[1] = exception_tag[GET_EXC_CLASS(c_p->freason)];
        reg[2] = Value;
        reg[3] = c_p->ftrace;
        if ((new_pc = next_catch(c_p, reg))) {
            c_p->cp = 0;    /* To avoid keeping stale references. */
            ERTS_RECV_MARK_CLEAR(c_p); /* No longer safe to use this position */
            return new_pc;
        }
        if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found");
    }
    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
    terminate_proc(c_p, Value);
    ERTS_REQ_PROC_MAIN_LOCK(c_p);
    return NULL;
}
  1263. /*
  1264. * Find the nearest catch handler
  1265. */
/*
 * Find the nearest catch handler by walking the stack from c_p->stop
 * towards STACK_START, skipping return/exception/time-trace stack
 * frames (and emitting trace messages for exception-traced frames on
 * the way). On success, pops the stack down to the frame holding the
 * catch tag and returns the handler address; returns NULL if no
 * handler is found.
 */
static BeamInstr*
next_catch(Process* c_p, Eterm *reg) {
    int active_catches = c_p->catches > 0;
    int have_return_to_trace = 0;
    Eterm *ptr, *prev, *return_to_trace_ptr = NULL;

    /* Snapshot the stub instruction words so the loop below can compare
     * stack continuation pointers against them cheaply. */
    BeamInstr i_return_trace      = beam_return_trace[0];
    BeamInstr i_return_to_trace   = beam_return_to_trace[0];
    BeamInstr i_return_time_trace = beam_return_time_trace[0];

    ptr = prev = c_p->stop;
    ASSERT(ptr <= STACK_START(c_p));

    /* This function is only called if we have active catch tags or have
     * previously called a function that was exception-traced. As the exception
     * trace flag isn't cleared after the traced function returns (and the
     * catch tag inserted by it is gone), it's possible to land here with an
     * empty stack, and the process should simply die when that happens. */
    if (ptr == STACK_START(c_p)) {
        ASSERT(!active_catches && IS_TRACED_FL(c_p, F_EXCEPTION_TRACE));
        return NULL;
    }

    /*
     * Better safe than sorry here. In debug builds, produce a core
     * dump if the top of the stack doesn't point to a continuation
     * pointer. In other builds, ignore a non-CP at the top of stack.
     */
    ASSERT(is_CP(*ptr));
    if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
                             *cp_val(*ptr) != i_return_to_trace &&
                             *cp_val(*ptr) != i_return_time_trace ))
        && c_p->cp) {
        /* Can not follow cp here - code may be unloaded */
        BeamInstr *cpp = c_p->cp;
        if (cpp == beam_exception_trace) {
            /* ptr[0] holds the traced function's MFA; ptr[1] the tracer. */
            ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
            erts_trace_exception(c_p, mfa,
                                 reg[1], reg[2],
                                 ERTS_TRACER_FROM_ETERM(ptr+1));
            /* Skip return_trace parameters */
            ptr += 2;
        } else if (cpp == beam_return_trace) {
            /* Skip return_trace parameters */
            ptr += 2;
        } else if (cpp == beam_return_time_trace) {
            /* Skip return_trace parameters */
            ptr += 1;
        } else if (cpp == beam_return_to_trace) {
            have_return_to_trace = !0; /* Record next cp */
        }
    }
    while (ptr < STACK_START(c_p)) {
        if (is_catch(*ptr)) {
            if (active_catches) goto found_catch;
            ptr++;
        }
        else if (is_CP(*ptr)) {
            prev = ptr;
            if (*cp_val(*prev) == i_return_trace) {
                /* Skip stack frame variables */
                while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
                    if (is_catch(*ptr) && active_catches) goto found_catch;
                }
                /* beam_exception_trace and beam_return_trace share the same
                 * instruction word; disambiguate by the stub address. */
                if (cp_val(*prev) == beam_exception_trace) {
                    ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
                    erts_trace_exception(c_p, mfa,
                                         reg[1], reg[2],
                                         ERTS_TRACER_FROM_ETERM(ptr+1));
                }
                /* Skip return_trace parameters */
                ptr += 2;
            } else if (*cp_val(*prev) == i_return_to_trace) {
                /* Skip stack frame variables */
                while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
                    if (is_catch(*ptr) && active_catches) goto found_catch;
                }
                have_return_to_trace = !0; /* Record next cp */
                return_to_trace_ptr = NULL;
            } else if (*cp_val(*prev) == i_return_time_trace) {
                /* Skip stack frame variables */
                while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
                    if (is_catch(*ptr) && active_catches) goto found_catch;
                }
                /* Skip return_trace parameters */
                ptr += 1;
            } else {
                if (have_return_to_trace) {
                    /* Record this cp as possible return_to trace cp */
                    have_return_to_trace = 0;
                    return_to_trace_ptr = ptr;
                } else return_to_trace_ptr = NULL;
                ptr++;
            }
        } else ptr++;
    }
    return NULL;

 found_catch:
    ASSERT(ptr < STACK_START(c_p));
    c_p->stop = prev;
    if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO) && return_to_trace_ptr) {
        /* The stackframe closest to the catch contained an
         * return_to_trace entry, so since the execution now
         * continues after the catch, a return_to trace message
         * would be appropriate.
         */
        erts_trace_return_to(c_p, cp_val(*return_to_trace_ptr));
    }
    return catch_pc(*ptr);
}
  1372. /*
  1373. * Terminating the process when an exception is not caught
  1374. */
/*
 * Terminate the process when an exception is not caught: optionally
 * attach the stack trace to the exit value, log the crash (for
 * exceptions with the EXF_LOG flag), and run the exit machinery.
 */
static void
terminate_proc(Process* c_p, Eterm Value)
{
    Eterm *hp;
    Eterm Args = NIL;

    /* Add a stacktrace if this is an error. */
    if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
        Value = add_stacktrace(c_p, Value, c_p->ftrace);
    }
    /* EXF_LOG is a primary exception flag */
    if (c_p->freason & EXF_LOG) {
        int alive = erts_is_alive;
        erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();

        /* Build the format message */
        erts_dsprintf(dsbufp, "Error in process ~p ");
        if (alive)
            erts_dsprintf(dsbufp, "on node ~p ");
        erts_dsprintf(dsbufp, "with exit value:~n~p~n");

        /* Build the args in reverse order.
         * NOTE(review): Args is re-consed across successive HAlloc calls;
         * presumably HAlloc here cannot trigger a moving GC between the
         * allocations (heap fragments) — confirm before restructuring. */
        hp = HAlloc(c_p, 2);
        Args = CONS(hp, Value, Args);
        if (alive) {
            hp = HAlloc(c_p, 2);
            Args = CONS(hp, erts_this_node->sysname, Args);
        }
        hp = HAlloc(c_p, 2);
        Args = CONS(hp, c_p->common.id, Args);

        erts_send_error_term_to_logger(c_p->group_leader, dsbufp, Args);
    }
    /*
     * If we use a shared heap, the process will be garbage-collected.
     * Must zero c_p->arity to indicate that there are no live registers.
     */
    c_p->arity = 0;
    erts_do_exit_process(c_p, Value);
}
  1411. /*
  1412. * Build and add a symbolic stack trace to the error value.
  1413. */
  1414. static Eterm
  1415. add_stacktrace(Process* c_p, Eterm Value, Eterm exc) {
  1416. Eterm Where = build_stacktrace(c_p, exc);
  1417. Eterm* hp = HAlloc(c_p, 3);
  1418. return TUPLE2(hp, Value, Where);
  1419. }
  1420. /*
  1421. * Forming the correct error value from the internal error code.
  1422. * This does not update c_p->fvalue or c_p->freason.
  1423. */
  1424. Eterm
  1425. expand_error_value(Process* c_p, Uint freason, Eterm Value) {
  1426. Eterm* hp;
  1427. Uint r;
  1428. r = GET_EXC_INDEX(freason);
  1429. ASSERT(r < NUMBER_EXIT_CODES); /* range check */
  1430. ASSERT(is_value(Value));
  1431. switch (r) {
  1432. case (GET_EXC_INDEX(EXC_PRIMARY)):
  1433. /* Primary exceptions use fvalue as it is */
  1434. break;
  1435. case (GET_EXC_INDEX(EXC_BADMATCH)):
  1436. case (GET_EXC_INDEX(EXC_CASE_CLAUSE)):
  1437. case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
  1438. case (GET_EXC_INDEX(EXC_BADFUN)):
  1439. case (GET_EXC_INDEX(EXC_BADARITY)):
  1440. case (GET_EXC_INDEX(EXC_BADMAP)):
  1441. case (GET_EXC_INDEX(EXC_BADKEY)):
  1442. /* Some common exceptions: value -> {atom, value} */
  1443. ASSERT(is_value(Value));
  1444. hp = HAlloc(c_p, 3);
  1445. Value = TUPLE2(hp, error_atom[r], Value);
  1446. break;
  1447. default:
  1448. /* Other exceptions just use an atom as descriptor */
  1449. Value = error_atom[r];
  1450. break;
  1451. }
  1452. #ifdef DEBUG
  1453. ASSERT(Value != am_internal_error);
  1454. #endif
  1455. return Value;
  1456. }
  1457. /*
  1458. * Quick-saving the stack trace in an internal form on the heap. Note
  1459. * that c_p->ftrace will point to a cons cell which holds the given args
  1460. * and the saved data (encoded as a bignum).
  1461. *
  1462. * There is an issue with line number information. Line number
  1463. * information is associated with the address *before* an operation
  1464. * that may fail or be stored stored on the stack. But continuation
  1465. * pointers point after its call instruction, not before. To avoid
  1466. * finding the wrong line number, we'll need to adjust them so that
  1467. * they point at the beginning of the call instruction or inside the
  1468. * call instruction. Since its impractical to point at the beginning,
  1469. * we'll do the simplest thing and decrement the continuation pointers
  1470. * by one.
  1471. *
  1472. * Here is an example of what can go wrong. Without the adjustment
  1473. * of continuation pointers, the call at line 42 below would seem to
  1474. * be at line 43:
  1475. *
  1476. * line 42
  1477. * call ...
  1478. * line 43
  1479. * gc_bif ...
  1480. *
  1481. * (It would be much better to put the arglist - when it exists - in the
  1482. * error value instead of in the actual trace; e.g. '{badarg, Args}'
  1483. * instead of using 'badarg' with Args in the trace. The arglist may
  1484. * contain very large values, and right now they will be kept alive as
  1485. * long as the stack trace is live. Preferably, the stack trace should
  1486. * always be small, so that it does not matter if it is long-lived.
  1487. * However, it is probably not possible to ever change the format of
  1488. * error terms.)
  1489. */
/*
 * Quick-save the raw stack trace on the heap. The container is a
 * StackTrace struct disguised as a bignum so that the GC treats it as
 * opaque data; c_p->ftrace ends up as a cons cell [args | bignum].
 * See the block comment above for the continuation-pointer adjustment
 * (all saved cp values are decremented by one).
 */
static void
save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
                ErtsCodeMFA *bif_mfa, Eterm args) {
    struct StackTrace* s;
    int sz;
    int depth = erts_backtrace_depth;    /* max depth (never negative) */

    if (depth > 0) {
        /* There will always be a current function */
        depth --;
    }

    /* Create a container for the exception data. Sized in Eterm words,
     * rounded up, with room for 'depth' saved code pointers. */
    sz = (offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth
          + sizeof(Eterm) - 1) / sizeof(Eterm);
    s = (struct StackTrace *) HAlloc(c_p, 1 + sz);
    /* The following fields are inside the bignum */
    s->header = make_pos_bignum_header(sz);
    s->freason = c_p->freason;
    s->depth = 0;

    /*
     * If the failure was in a BIF other than 'error/1', 'error/2',
     * 'exit/1' or 'throw/1', save BIF-MFA and save the argument
     * registers by consing up an arglist.
     */
    if (bif_mfa) {
        if (bif_mfa->module == am_erlang) {
            switch (bif_mfa->function) {
            case am_error:
                if (bif_mfa->arity == 1 || bif_mfa->arity == 2)
                    goto non_bif_stacktrace;
                break;
            case am_exit:
                if (bif_mfa->arity == 1)
                    goto non_bif_stacktrace;
                break;
            case am_throw:
                if (bif_mfa->arity == 1)
                    goto non_bif_stacktrace;
                break;
            default:
                break;
            }
        }
        s->current = bif_mfa;
        /* Save first stack entry */
        ASSERT(pc);
        if (depth > 0) {
            s->trace[s->depth++] = pc;
            depth--;
        }
        /* Save second stack entry if CP is valid and different from pc */
        if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
            s->trace[s->depth++] = c_p->cp - 1;
            depth--;
        }
        s->pc = NULL;
        args = make_arglist(c_p, reg, bif_mfa->arity); /* Overwrite CAR(c_p->ftrace) */
    } else {

    non_bif_stacktrace:

        s->current = c_p->current;
        /*
         * For a function_clause error, the arguments are in the beam
         * registers, c_p->cp is valid, and c_p->current is set.
         */
        if ( (GET_EXC_INDEX(s->freason)) ==
             (GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
            int a;
            ASSERT(s->current);
            a = s->current->arity;
            args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
            /* Save first stack entry */
            ASSERT(c_p->cp);
            if (depth > 0) {
                s->trace[s->depth++] = c_p->cp - 1;
                depth--;
            }
            s->pc = NULL; /* Ignore pc */
        } else {
            if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
                s->trace[s->depth++] = c_p->cp - 1;
                depth--;
            }
            s->pc = pc;
        }
    }

    /* Package args and stack trace */
    {
        Eterm *hp;
        hp = HAlloc(c_p, 2);
        c_p->ftrace = CONS(hp, args, make_big((Eterm *) s));
    }

    /* Save the actual stack trace */
    erts_save_stacktrace(c_p, s, depth);
}
/*
 * Fill in the remaining 'depth' slots of a quick-saved StackTrace by
 * walking the process stack from c_p->stop towards STACK_START,
 * recording unique continuation pointers (each decremented by one; see
 * the line-number comment above save_stacktrace()) and skipping
 * return/return_to trace frames.
 */
void
erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
{
    if (depth > 0) {
        Eterm *ptr;
        /* NOTE(review): entries already in s->trace hold cp-1 values, so
         * this initial 'prev' is compared below against raw cp values —
         * looks like an off-by-one in the duplicate check; confirm. */
        BeamInstr *prev = s->depth ? s->trace[s->depth-1] : NULL;
        BeamInstr i_return_trace = beam_return_trace[0];
        BeamInstr i_return_to_trace = beam_return_to_trace[0];

        /*
         * Traverse the stack backwards and add all unique continuation
         * pointers to the buffer, up to the maximum stack trace size.
         *
         * Skip trace stack frames.
         */
        ptr = p->stop;
        if (ptr < STACK_START(p) &&
            (is_not_CP(*ptr)|| (*cp_val(*ptr) != i_return_trace &&
                                *cp_val(*ptr) != i_return_to_trace)) &&
            p->cp) {
            /* Cannot follow cp here - code may be unloaded */
            BeamInstr *cpp = p->cp;
            int trace_cp;
            if (cpp == beam_exception_trace || cpp == beam_return_trace) {
                /* Skip return_trace parameters */
                ptr += 2;
                trace_cp = 1;
            } else if (cpp == beam_return_to_trace) {
                /* Skip return_to_trace parameters */
                ptr += 1;
                trace_cp = 1;
            }
            else {
                trace_cp = 0;
            }
            if (trace_cp && s->pc == cpp) {
                /*
                 * If process 'cp' points to a return/exception trace
                 * instruction and 'cp' has been saved as 'pc' in
                 * stacktrace, we need to update 'pc' in stacktrace
                 * with the actual 'cp' located on the top of the
                 * stack; otherwise, we will lose the top stackframe
                 * when building the stack trace.
                 */
                ASSERT(is_CP(p->stop[0]));
                s->pc = cp_val(p->stop[0]);
            }
        }
        while (ptr < STACK_START(p) && depth > 0) {
            if (is_CP(*ptr)) {
                if (*cp_val(*ptr) == i_return_trace) {
                    /* Skip stack frame variables */
                    do ++ptr; while (is_not_CP(*ptr));
                    /* Skip return_trace parameters */
                    ptr += 2;
                } else if (*cp_val(*ptr) == i_return_to_trace) {
                    /* Skip stack frame variables */
                    do ++ptr; while (is_not_CP(*ptr));
                } else {
                    BeamInstr *cp = cp_val(*ptr);
                    if (cp != prev) {
                        /* Record non-duplicates only */
                        prev = cp;
                        s->trace[s->depth++] = cp - 1;
                        depth--;
                    }
                    ptr++;
                }
            } else ptr++;
        }
    }
}
  1654. /*
  1655. * Getting the relevant fields from the term pointed to by ftrace
  1656. */
  1657. static struct StackTrace *get_trace_from_exc(Eterm exc) {
  1658. if (exc == NIL) {
  1659. return NULL;
  1660. } else {
  1661. ASSERT(is_list(exc));
  1662. return (struct StackTrace *) big_val(CDR(list_val(exc)));
  1663. }
  1664. }
  1665. static Eterm get_args_from_exc(Eterm exc) {
  1666. if (exc == NIL) {
  1667. return NIL;
  1668. } else {
  1669. ASSERT(is_list(exc));
  1670. return CAR(list_val(exc));
  1671. }
  1672. }
  1673. static int is_raised_exc(Eterm exc) {
  1674. if (exc == NIL) {
  1675. return 0;
  1676. } else {
  1677. ASSERT(is_list(exc));
  1678. return bignum_header_is_neg(*big_val(CDR(list_val(exc))));
  1679. }
  1680. }
  1681. /*
  1682. * Creating a list with the argument registers
  1683. */
  1684. static Eterm
  1685. make_arglist(Process* c_p, Eterm* reg, int a) {
  1686. Eterm args = NIL;
  1687. Eterm* hp = HAlloc(c_p, 2*a);
  1688. while (a > 0) {
  1689. args = CONS(hp, reg[a-1], args);
  1690. hp += 2;
  1691. a--;
  1692. }
  1693. return args;
  1694. }
  1695. /*
  1696. * Building a symbolic representation of a saved stack trace. Note that
  1697. * the exception object 'exc', unless NIL, points to a cons cell which
  1698. * holds the given args and the quick-saved data (encoded as a bignum).
  1699. *
  1700. * If the bignum is negative, the given args is a complete stacktrace.
  1701. */
/*
 * Build the symbolic (Erlang-term) representation of a quick-saved
 * stack trace: a list of MFA items, with the argument list (when
 * saved) attached to the innermost entry. Returns NIL when 'exc'
 * carries no trace, or the previously-built trace for re-raised
 * exceptions.
 */
Eterm
build_stacktrace(Process* c_p, Eterm exc) {
    struct StackTrace* s;
    Eterm args;
    int depth;
    FunctionInfo fi;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Eterm res = NIL;
    Uint heap_size;
    Eterm* hp;
    Eterm mfa;
    int i;

    if (! (s = get_trace_from_exc(exc))) {
        return NIL;
    }
#ifdef HIPE
    if (s->freason & EXF_NATIVE) {
        return hipe_build_stacktrace(c_p, s);
    }
#endif
    if (is_raised_exc(exc)) {
        /* Re-raised: the CAR already holds a complete symbolic trace. */
        return get_args_from_exc(exc);
    }

    /*
     * Find the current function. If the saved s->pc is null, then the
     * saved s->current should already contain the proper value.
     */
    if (s->pc != NULL) {
        erts_lookup_function_info(&fi, s->pc, 1);
    } else if (GET_EXC_INDEX(s->freason) ==
               GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
        erts_lookup_function_info(&fi, erts_codemfa_to_code(s->current), 1);
    } else {
        erts_set_current_function(&fi, s->current);
    }
    depth = s->depth;
    /*
     * If fi.current is still NULL, and we have no
     * stack at all, default to the initial function
     * (e.g. spawn_link(erlang, abs, [1])).
     */
    if (fi.mfa == NULL) {
        if (depth <= 0)
            erts_set_current_function(&fi, &c_p->u.initial);
        args = am_true; /* Just in case */
    } else {
        args = get_args_from_exc(exc);
    }

    /*
     * Look up all saved continuation pointers and calculate
     * needed heap space.
     */
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
                                             depth*sizeof(FunctionInfo));
    heap_size = fi.mfa ? fi.needed + 2 : 0;
    for (i = 0; i < depth; i++) {
        erts_lookup_function_info(stkp, s->trace[i], 1);
        if (stkp->mfa) {
            heap_size += stkp->needed + 2;
            stkp++;
        }
    }

    /*
     * Allocate heap space and build the stacktrace.
     * (Built in reverse so the innermost frame ends up first.)
     */
    hp = HAlloc(c_p, heap_size);
    while (stkp > stk) {
        stkp--;
        hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
        res = CONS(hp, mfa, res);
        hp += 2;
    }
    if (fi.mfa) {
        hp = erts_build_mfa_item(&fi, hp, args, &mfa);
        res = CONS(hp, mfa, res);
    }

    erts_free(ERTS_ALC_T_TMP, (void *) stk);
    return res;
}
/*
 * Redirect a failed call to the process' error handler module
 * (normally error_handler:<func>/3). Loads x(0..2) with Module,
 * Function and an argument list built from the x registers, and
 * returns the handler's address; returns 0 (with freason set to
 * EXC_UNDEF) if no error handler exists.
 */
static BeamInstr*
call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func)
{
    Eterm* hp;
    Export* ep;
    int arity;
    Eterm args;
    Uint sz;
    int i;

    DBG_TRACE_MFA_P(mfa, "call_error_handler");
    /*
     * Search for the error_handler module.
     */
    ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
                            erts_active_code_ix());
    if (ep == NULL) {    /* No error handler */
        p->current = mfa;
        p->freason = EXC_UNDEF;
        return 0;
    }

    /*
     * Create a list with all arguments in the x registers.
     */
    arity = mfa->arity;
    sz = 2 * arity;
    if (HeapWordsLeft(p) < sz) {
        /* GC preserves the first 'arity' x registers as roots. */
        erts_garbage_collect(p, sz, reg, arity);
    }
    hp = HEAP_TOP(p);
    HEAP_TOP(p) += sz;
    args = NIL;
    for (i = arity-1; i >= 0; i--) {
        args = CONS(hp, reg[i], args);
        hp += 2;
    }

    /*
     * Set up registers for call to error_handler:<func>/3.
     */
    reg[0] = mfa->module;
    reg[1] = mfa->function;
    reg[2] = args;
    return ep->addressv[erts_active_code_ix()];
}
/*
 * Set up a call to error_handler:undefined_function/3 on behalf of
 * apply() or fixed_apply() when module:function/arity has no export
 * entry.
 *
 * Returns the export entry for the error handler, or NULL if there
 * is no error handler module.  On success, x(0)-x(2) are loaded with
 * module, function, and a newly built list of the first 'arity'
 * x registers.
 */
static Export*
apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
{
    Export* ep;

    /*
     * Find the export table index for the error handler. Return NULL if
     * there is no error handler module.
     */
    if ((ep = erts_active_export_entry(erts_proc_get_error_handler(p),
                                       am_undefined_function, 3)) == NULL) {
        return NULL;
    } else {
        int i;
        Uint sz = 2*arity;      /* Two heap words per cons cell */
        Eterm* hp;
        Eterm args = NIL;

        /*
         * Always copy args from registers to a new list; this ensures
         * that we have the same behaviour whether or not this was
         * called from apply or fixed_apply (any additional last
         * THIS-argument will be included, assuming that arity has been
         * properly adjusted).
         */
        if (HeapWordsLeft(p) < sz) {
            erts_garbage_collect(p, sz, reg, arity);
        }
        hp = HEAP_TOP(p);
        HEAP_TOP(p) += sz;
        /* Build the list back-to-front so it ends up in argument order. */
        for (i = arity-1; i >= 0; i--) {
            args = CONS(hp, reg[i], args);
            hp += 2;
        }
        reg[0] = module;
        reg[1] = function;
        reg[2] = args;
    }
    return ep;
}
/*
 * Adjust the stack (and possibly 'p->cp') before tail-applying one of
 * the exception-raising BIFs, so that the function performing the
 * apply shows up in the resulting stacktrace.  A no-op unless the
 * apply is a tail call of erlang:error/1,2, erlang:exit/1, or
 * erlang:throw/1.
 */
static ERTS_INLINE void
apply_bif_error_adjustment(Process *p, Export *ep,
                           Eterm *reg, Uint arity,
                           BeamInstr *I, Uint stack_offset)
{
    /*
     * I is only set when the apply is a tail call, i.e.,
     * from the instructions i_apply_only, i_apply_last_P,
     * and apply_last_IP.
     */
    if (I
        && BeamIsOpCode(ep->beam[0], op_apply_bif)
        && (ep == bif_export[BIF_error_1]
            || ep == bif_export[BIF_error_2]
            || ep == bif_export[BIF_exit_1]
            || ep == bif_export[BIF_throw_1])) {
        /*
         * We are about to tail apply one of the BIFs
         * erlang:error/1, erlang:error/2, erlang:exit/1,
         * or erlang:throw/1. Error handling of these BIFs is
         * special!
         *
         * We need 'p->cp' to point into the calling
         * function when handling the error after the BIF has
         * been applied. This in order to get the topmost
         * stackframe correct. Without the following adjustment,
         * 'p->cp' will point into the function that called
         * current function when handling the error. We add a
         * dummy stackframe in order to achieve this.
         *
         * Note that these BIFs unconditionally will cause
         * an exception to be raised. That is, our modifications
         * of 'p->cp' as well as the stack will be corrected by
         * the error handling code.
         *
         * If we find an exception/return-to trace continuation
         * pointer as the topmost continuation pointer, we do not
         * need to do anything since the information already will
         * be available for generation of the stacktrace.
         */
        int apply_only = stack_offset == 0; /* i_apply_only has no frame */
        BeamInstr *cpp;

        /* Locate the topmost continuation pointer. */
        if (apply_only) {
            ASSERT(p->cp != NULL);
            cpp = p->cp;
        }
        else {
            ASSERT(is_CP(p->stop[0]));
            cpp = cp_val(p->stop[0]);
        }

        if (cpp != beam_exception_trace
            && cpp != beam_return_trace
            && cpp != beam_return_to_trace) {
            Uint need = stack_offset /* bytes */ / sizeof(Eterm);
            if (need == 0)
                need = 1; /* i_apply_only */
            /* Make room for the dummy frame; may GC. */
            if (p->stop - p->htop < need)
                erts_garbage_collect(p, (int) need, reg, arity+1);
            p->stop -= need;

            if (apply_only) {
                /*
                 * Called from the i_apply_only instruction.
                 *
                 * 'p->cp' contains continuation pointer pointing
                 * into the function that called current function.
                 * We push that continuation pointer onto the stack,
                 * and set 'p->cp' to point into current function.
                 */
                p->stop[0] = make_cp(p->cp);
                p->cp = I;
            }
            else {
                /*
                 * Called from an i_apply_last_p, or apply_last_IP,
                 * instruction.
                 *
                 * Calling instruction will after we return read
                 * a continuation pointer from the stack and write
                 * it to 'p->cp', and then remove the topmost
                 * stackframe of size 'stack_offset'.
                 *
                 * We have sized the dummy-stackframe so that it
                 * will be removed by the instruction we currently
                 * are executing, and leave the stackframe that
                 * normally would have been removed intact.
                 *
                 */
                p->stop[0] = make_cp(I);
            }
        }
    }
}
/*
 * Implements erlang:apply/3: resolve Module:Function(Args) to a code
 * address, spreading the argument list into the x registers.
 *
 * p            - the current process.
 * reg          - x registers; x(0)-x(2) hold Module, Function, Args
 *                on entry and receive them back (possibly rewritten)
 *                on failure.
 * I            - continuation pointer when this is a tail call, or
 *                NULL (see apply_bif_error_adjustment()).
 * stack_offset - size in bytes of the frame the calling instruction
 *                will deallocate; 0 for i_apply_only.
 *
 * Returns the address to call, or 0 on failure with p->freason set
 * (BADARG for bad arguments, SYSTEM_LIMIT for too many arguments).
 */
static BeamInstr*
apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
{
    int arity;
    Export* ep;
    Eterm tmp;
    Eterm module = reg[0];
    Eterm function = reg[1];
    Eterm args = reg[2];

    /*
     * Check the arguments which should be of the form apply(Module,
     * Function, Arguments) where Function is an atom and
     * Arguments is an arity long list of terms.
     */
    if (is_not_atom(function)) {
        /*
         * No need to test args here -- done below.
         */
    error:
        p->freason = BADARG;

    error2:
        /* Restore x(0)-x(2) so the exception shows the failed call. */
        reg[0] = module;
        reg[1] = function;
        reg[2] = args;
        return 0;
    }

    /* Unwrap nested erlang:apply(erlang, apply, [M,F,A]) chains. */
    while (1) {
        Eterm m, f, a;

        if (is_not_atom(module)) goto error;

        if (module != am_erlang || function != am_apply)
            break;

        /* Adjust for multiple apply of apply/3... */
        a = args;
        if (is_list(a)) {
            Eterm *consp = list_val(a);
            m = CAR(consp);
            a = CDR(consp);
            if (is_list(a)) {
                consp = list_val(a);
                f = CAR(consp);
                a = CDR(consp);
                if (is_list(a)) {
                    consp = list_val(a);
                    a = CAR(consp);
                    if (is_nil(CDR(consp))) {
                        /* erlang:apply/3 */
                        module = m;
                        function = f;
                        args = a;
                        if (is_not_atom(f))
                            goto error;
                        continue;
                    }
                }
            }
        }
        break; /* != erlang:apply/3 */
    }

    /*
     * Walk down the 3rd parameter of apply (the argument list) and copy
     * the parameters to the x registers (reg[]).
     */
    tmp = args;
    arity = 0;
    while (is_list(tmp)) {
        if (arity < (MAX_REG - 1)) {
            reg[arity++] = CAR(list_val(tmp));
            tmp = CDR(list_val(tmp));
        } else {
            p->freason = SYSTEM_LIMIT;
            goto error2;
        }
    }
    if (is_not_nil(tmp)) {      /* Must be well-formed list */
        goto error;
    }

    /*
     * Get the index into the export table, or failing that the export
     * entry for the error handler.
     *
     * Note: All BIFs have export entries; thus, no special case is needed.
     */
    if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
        if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL) goto error;
    } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
        save_calls(p, ep);
    }
    apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
    DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
    return ep->addressv[erts_active_code_ix()];
}
/*
 * Implements apply with a known arity (the apply/A instructions):
 * the arguments are already in x(0)..x(arity-1), with Module and
 * Function in x(arity) and x(arity+1).
 *
 * Returns the address to call, or 0 on failure with p->freason set
 * to BADARG and x(0)-x(2) rewritten to {Module, Function, Args} for
 * the exception.
 */
static BeamInstr*
fixed_apply(Process* p, Eterm* reg, Uint arity,
            BeamInstr *I, Uint stack_offset)
{
    Export* ep;
    Eterm module;
    Eterm function;

    module = reg[arity];        /* The THIS pointer already in place */
    function = reg[arity+1];

    if (is_not_atom(function)) {
        Eterm bad_args;
    error:
        /* Build an argument list for the badarg exception. */
        bad_args = make_arglist(p, reg, arity);

        p->freason = BADARG;
        reg[0] = module;
        reg[1] = function;
        reg[2] = bad_args;

        return 0;
    }

    if (is_not_atom(module)) goto error;

    /* Handle apply of apply/3... */
    if (module == am_erlang && function == am_apply && arity == 3) {
        return apply(p, reg, I, stack_offset);
    }

    /*
     * Get the index into the export table, or failing that the export
     * entry for the error handler module.
     *
     * Note: All BIFs have export entries; thus, no special case is needed.
     */
    if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
        if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
            goto error;
    } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
        save_calls(p, ep);
    }
    apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
    DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
    return ep->addressv[erts_active_code_ix()];
}
/*
 * Implements erlang:hibernate/3: discard the stack, shrink the heap,
 * and arrange for the process to restart in Module:Function(Args)
 * when the next message arrives.
 *
 * x(0)-x(2) in reg hold Module, Function and Args on entry.
 *
 * Returns 1 on success and 0 on failure, in the latter case with
 * p->freason set (BADARG or SYSTEM_LIMIT) and x(0)-x(2) restored
 * for the exception.
 */
int
erts_hibernate(Process* c_p, Eterm* reg)
{
    int arity;
    Eterm tmp;
    Eterm module = reg[0];
    Eterm function = reg[1];
    Eterm args = reg[2];

    if (is_not_atom(module) || is_not_atom(function)) {
        /*
         * No need to test args here -- done below.
         */
    error:
        c_p->freason = BADARG;

    error2:
        reg[0] = module;
        reg[1] = function;
        reg[2] = args;
        return 0;
    }

    /* Only validate the argument list; the elements stay in 'args'. */
    arity = 0;
    tmp = args;
    while (is_list(tmp)) {
        if (arity < MAX_REG) {
            tmp = CDR(list_val(tmp));
            arity++;
        } else {
            c_p->freason = SYSTEM_LIMIT;
            goto error2;
        }
    }
    if (is_not_nil(tmp)) {      /* Must be well-formed list */
        goto error;
    }

    /*
     * At this point, arguments are known to be good.
     */

    if (c_p->arg_reg != c_p->def_arg_reg) {
        /* Save some memory */
        erts_free(ERTS_ALC_T_ARG_REG, c_p->arg_reg);
        c_p->arg_reg = c_p->def_arg_reg;
        c_p->max_arg_reg = sizeof(c_p->def_arg_reg)/sizeof(c_p->def_arg_reg[0]);
    }

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(process_hibernate)) {
        ErtsCodeMFA cmfa = { module, function, arity};
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
        dtrace_fun_decode(c_p, &cmfa, process_name, mfa_buf);
        DTRACE2(process_hibernate, process_name, mfa_buf);
    }
#endif
    /*
     * Arrange for the process to be resumed at the given MFA with
     * the stack cleared.
     */
    c_p->arity = 3;
    c_p->arg_reg[0] = module;
    c_p->arg_reg[1] = function;
    c_p->arg_reg[2] = args;
    c_p->stop = STACK_START(c_p);
    c_p->catches = 0;
    c_p->i = beam_apply;
    c_p->cp = (BeamInstr *) beam_apply+1;

    /*
     * If there are no waiting messages, garbage collect and
     * shrink the heap.
     */
    erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    if (!erts_proc_sig_fetch(c_p)) {
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
        c_p->fvalue = NIL;
        PROCESS_MAIN_CHK_LOCKS(c_p);
        erts_garbage_collect_hibernate(c_p);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
        PROCESS_MAIN_CHK_LOCKS(c_p);
        erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
        /* Re-check: a signal may have arrived during the GC. */
        if (!erts_proc_sig_fetch(c_p))
            erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
        ASSERT(!ERTS_PROC_IS_EXITING(c_p));
    }
    erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    c_p->current = &bif_export[BIF_hibernate_3]->info.mfa;
    c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */
    return 1;
}
  2172. static BeamInstr*
  2173. call_fun(Process* p, /* Current process. */
  2174. int arity, /* Number of arguments for Fun. */
  2175. Eterm* reg, /* Contents of registers. */
  2176. Eterm args) /* THE_NON_VALUE or pre-built list of arguments. */
  2177. {
  2178. Eterm fun = reg[arity];
  2179. Eterm hdr;
  2180. int i;
  2181. Eterm* hp;
  2182. if (!is_boxed(fun)) {
  2183. goto badfun;
  2184. }
  2185. hdr = *boxed_val(fun);
  2186. if (is_fun_header(hdr)) {
  2187. ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
  2188. ErlFunEntry* fe = funp->fe;
  2189. BeamInstr* code_ptr = fe->address;
  2190. Eterm* var_ptr;
  2191. unsigned num_free = funp->num_free;
  2192. ErtsCodeMFA *mfa = erts_code_to_codemfa(code_ptr);
  2193. int actual_arity = mfa->arity;
  2194. if (actual_arity == arity+num_free) {
  2195. DTRACE_LOCAL_CALL(p, mfa);
  2196. if (num_free == 0) {
  2197. return code_ptr;
  2198. } else {
  2199. var_ptr = funp->env;
  2200. reg += arity;
  2201. i = 0;
  2202. do {
  2203. reg[i] = var_ptr[i];
  2204. i++;
  2205. } while (i < num_free);
  2206. reg[i] = fun;
  2207. return code_ptr;
  2208. }
  2209. return code_ptr;
  2210. } else {
  2211. /*
  2212. * Something wrong here. First build a list of the arguments.
  2213. */
  2214. if (is_non_value(args)) {
  2215. Uint sz = 2 * arity;
  2216. args = NIL;
  2217. if (HeapWordsLeft(p) < sz) {
  2218. erts_garbage_collect(p, sz, reg, arity+1);
  2219. fun = reg[arity];
  2220. }
  2221. hp = HEAP_TOP(p);
  2222. HEAP_TOP(p) += sz;
  2223. for (i = arity-1; i >= 0; i--) {
  2224. args = CONS(hp, reg[i], args);
  2225. hp += 2;
  2226. }
  2227. }
  2228. if (actual_arity >= 0) {
  2229. /*
  2230. * There is a fun defined, but the call has the wrong arity.
  2231. */
  2232. hp = HAlloc(p, 3);
  2233. p->freason = EXC_BADARITY;
  2234. p->fvalue = TUPLE2(hp, fun, args);
  2235. return NULL;
  2236. } else {
  2237. Export* ep;
  2238. Module* modp;
  2239. Eterm module;
  2240. ErtsCodeIndex code_ix = erts_active_code_ix();
  2241. /*
  2242. * No arity. There is no module loaded that defines the fun,
  2243. * either because the fun is newly created from the external
  2244. * representation (the module has never been loaded),
  2245. * or the module defining the fun has been unloaded.
  2246. */
  2247. module = fe->module;
  2248. ERTS_THR_READ_MEMORY_BARRIER;
  2249. if (fe->pend_purge_address) {
  2250. /*
  2251. * The system is currently trying to purge the
  2252. * module containing this fun. Suspend the process
  2253. * and let it try again when the purge operation is
  2254. * done (may succeed or not).
  2255. */
  2256. ep = erts_suspend_process_on_pending_purge_lambda(p, fe);
  2257. ASSERT(ep);
  2258. }
  2259. else {
  2260. if ((modp = erts_get_module(module, code_ix)) != NULL
  2261. && modp->curr.code_hdr != NULL) {
  2262. /*
  2263. * There is a module loaded, but obviously the fun is not
  2264. * defined in it. We must not call the error_handler
  2265. * (or we will get into an infinite loop).
  2266. */
  2267. goto badfun;
  2268. }
  2269. /*
  2270. * No current code for this module. Call the error_handler module
  2271. * to attempt loading the module.
  2272. */
  2273. ep = erts_find_function(erts_proc_get_error_handler(p),
  2274. am_undefined_lambda, 3, code_ix);
  2275. if (ep == NULL) { /* No error handler */
  2276. p->current = NULL;
  2277. p->freason = EXC_UNDEF;
  2278. return NULL;
  2279. }
  2280. }
  2281. reg[0] = module;
  2282. reg[1] = fun;
  2283. reg[2] = args;
  2284. reg[3] = NIL;
  2285. return ep->addressv[code_ix];
  2286. }
  2287. }
  2288. } else if (is_export_header(hdr)) {
  2289. Export *ep;
  2290. int actual_arity;
  2291. ep = *((Export **) (export_val(fun) + 1));
  2292. actual_arity = ep->info.mfa.arity;
  2293. if (arity == actual_arity) {
  2294. DTRACE_GLOBAL_CALL(p, &ep->info.mfa);
  2295. return ep->addressv[erts_active_code_ix()];
  2296. } else {
  2297. /*
  2298. * Wrong arity. First build a list of the arguments.
  2299. */
  2300. if (is_non_value(args)) {
  2301. args = NIL;
  2302. hp = HAlloc(p, arity*2);
  2303. for (i = arity-1; i >= 0; i--) {
  2304. args = CONS(hp, reg[i], args);
  2305. hp += 2;
  2306. }
  2307. }
  2308. hp = HAlloc(p, 3);
  2309. p->freason = EXC_BADARITY;
  2310. p->fvalue = TUPLE2(hp, fun, args);
  2311. return NULL;
  2312. }
  2313. } else {
  2314. badfun:
  2315. p->current = NULL;
  2316. p->freason = EXC_BADFUN;
  2317. p->fvalue = fun;
  2318. return NULL;
  2319. }
  2320. }
  2321. static BeamInstr*
  2322. apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
  2323. {
  2324. int arity;
  2325. Eterm tmp;
  2326. /*
  2327. * Walk down the 3rd parameter of apply (the argument list) and copy
  2328. * the parameters to the x registers (reg[]).
  2329. */
  2330. tmp = args;
  2331. arity = 0;
  2332. while (is_list(tmp)) {
  2333. if (arity < MAX_REG-1) {
  2334. reg[arity++] = CAR(list_val(tmp));
  2335. tmp = CDR(list_val(tmp));
  2336. } else {
  2337. p->freason = SYSTEM_LIMIT;
  2338. return NULL;
  2339. }
  2340. }
  2341. if (is_not_nil(tmp)) { /* Must be well-formed list */
  2342. p->freason = EXC_BADARG;
  2343. return NULL;
  2344. }
  2345. reg[arity] = fun;
  2346. return call_fun(p, arity, reg, args);
  2347. }
/*
 * Create a new local fun term on the heap of process p, capturing the
 * first num_free x registers as the fun's free variables (its
 * environment).  May garbage collect.  Returns the tagged fun term.
 */
static Eterm
new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
{
    unsigned needed = ERL_FUN_SIZE + num_free;
    ErlFunThing* funp;
    Eterm* hp;
    int i;

    if (HEAP_LIMIT(p) - HEAP_TOP(p) <= needed) {
        PROCESS_MAIN_CHK_LOCKS(p);
        erts_garbage_collect(p, needed, reg, num_free);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
        PROCESS_MAIN_CHK_LOCKS(p);
    }
    hp = p->htop;
    p->htop = hp + needed;
    funp = (ErlFunThing *) hp;
    hp = funp->env;
    /* The fun thing holds a reference to the fun entry. */
    erts_refc_inc(&fe->refc, 2);
    funp->thing_word = HEADER_FUN;
    /* Link into the off-heap list so the GC can track the fun. */
    funp->next = MSO(p).first;
    MSO(p).first = (struct erl_off_heap_header*) funp;
    funp->fe = fe;
    funp->num_free = num_free;
    funp->creator = p->common.id;
    /*
     * NOTE(review): fe->address[-1] appears to hold the total arity of
     * the fun's code (arguments + free variables) -- confirm against
     * the loader before relying on this in new code.
     */
    funp->arity = (int)fe->address[-1] - num_free;
    for (i = 0; i < num_free; i++) {
        *hp++ = reg[i];
    }
    return make_fun(funp);
}
  2378. static int
  2379. is_function2(Eterm Term, Uint arity)
  2380. {
  2381. if (is_fun(Term)) {
  2382. ErlFunThing* funp = (ErlFunThing *) fun_val(Term);
  2383. return funp->arity == arity;
  2384. } else if (is_export(Term)) {
  2385. Export* exp = (Export *) (export_val(Term)[1]);
  2386. return exp->info.mfa.arity == arity;
  2387. }
  2388. return 0;
  2389. }
  2390. static Eterm get_map_element(Eterm map, Eterm key)
  2391. {
  2392. Uint32 hx;
  2393. const Eterm *vs;
  2394. if (is_flatmap(map)) {
  2395. flatmap_t *mp;
  2396. Eterm *ks;
  2397. Uint i;
  2398. Uint n;
  2399. mp = (flatmap_t *)flatmap_val(map);
  2400. ks = flatmap_get_keys(mp);
  2401. vs = flatmap_get_values(mp);
  2402. n = flatmap_get_size(mp);
  2403. if (is_immed(key)) {
  2404. for (i = 0; i < n; i++) {
  2405. if (ks[i] == key) {
  2406. return vs[i];
  2407. }
  2408. }
  2409. } else {
  2410. for (i = 0; i < n; i++) {
  2411. if (EQ(ks[i], key)) {
  2412. return vs[i];
  2413. }
  2414. }
  2415. }
  2416. return THE_NON_VALUE;
  2417. }
  2418. ASSERT(is_hashmap(map));
  2419. hx = hashmap_make_hash(key);
  2420. vs = erts_hashmap_get(hx,key,map);
  2421. return vs ? *vs : THE_NON_VALUE;
  2422. }
  2423. static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx)
  2424. {
  2425. const Eterm *vs;
  2426. if (is_flatmap(map)) {
  2427. flatmap_t *mp;
  2428. Eterm *ks;
  2429. Uint i;
  2430. Uint n;
  2431. mp = (flatmap_t *)flatmap_val(map);
  2432. ks = flatmap_get_keys(mp);
  2433. vs = flatmap_get_values(mp);
  2434. n = flatmap_get_size(mp);
  2435. if (is_immed(key)) {
  2436. for (i = 0; i < n; i++) {
  2437. if (ks[i] == key) {
  2438. return vs[i];
  2439. }
  2440. }
  2441. } else {
  2442. for (i = 0; i < n; i++) {
  2443. if (EQ(ks[i], key)) {
  2444. return vs[i];
  2445. }
  2446. }
  2447. }
  2448. return THE_NON_VALUE;
  2449. }
  2450. ASSERT(is_hashmap(map));
  2451. ASSERT(hx == hashmap_make_hash(key));
  2452. vs = erts_hashmap_get(hx, key, map);
  2453. return vs ? *vs : THE_NON_VALUE;
  2454. }
/*
 * Fetch a term from a loader-tagged operand: the operand either
 * refers to an x or y register (read through the x()/y() macros)
 * or is the term value itself.
 */
#define GET_TERM(term, dest)                    \
do {                                            \
    Eterm src = (Eterm)(term);                  \
    switch (loader_tag(src)) {                  \
    case LOADER_X_REG:                          \
        dest = x(loader_x_reg_index(src));      \
        break;                                  \
    case LOADER_Y_REG:                          \
        dest = y(loader_y_reg_index(src));      \
        break;                                  \
    default:                                    \
        dest = src;                             \
        break;                                  \
    }                                           \
} while(0)
/*
 * Build a new map from n loader operands (alternating keys and
 * values, fetched with GET_TERM).  Produces a flatmap for small maps
 * and a hashmap when n exceeds 2*MAP_SMALL_MAP_LIMIT.  May garbage
 * collect; 'live' is the number of live x registers to preserve.
 */
static Eterm
erts_gc_new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr)
{
    Uint i;
    Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */;
    Eterm keys;
    Eterm *mhp,*thp;
    Eterm *E;
    flatmap_t *mp;
    ErtsHeapFactory factory;

    if (n > 2*MAP_SMALL_MAP_LIMIT) {
        Eterm res;
        if (HeapWordsLeft(p) < n) {
            erts_garbage_collect(p, n, reg, live);
        }
        mhp = p->htop;
        thp = p->htop;
        /*
         * NOTE(review): E looks unused, but is presumably read by the
         * y() macro expansion inside GET_TERM -- verify before
         * removing.
         */
        E = p->stop;

        /* Copy the key/value operands to a temporary heap array. */
        for (i = 0; i < n/2; i++) {
            GET_TERM(*ptr++, *mhp++);
            GET_TERM(*ptr++, *mhp++);
        }

        p->htop = mhp;

        erts_factory_proc_init(&factory, p);
        res = erts_hashmap_from_array(&factory, thp, n/2, 0);
        erts_factory_close(&factory);
        return res;
    }

    if (HeapWordsLeft(p) < need) {
        erts_garbage_collect(p, need, reg, live);
    }

    /* Key tuple first, then the flatmap with its value array. */
    thp = p->htop;
    mhp = thp + 1 + n/2;
    E = p->stop;
    keys = make_tuple(thp);
    *thp++ = make_arityval(n/2);

    mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = n/2;
    mp->keys = keys;

    for (i = 0; i < n/2; i++) {
        GET_TERM(*ptr++, *thp++);
        GET_TERM(*ptr++, *mhp++);
    }
    p->htop = mhp;
    return make_flatmap(mp);
}
/*
 * Build a new small flatmap whose key tuple is a literal shared with
 * the loaded code; only the values are fetched from the loader
 * operands.  May garbage collect; 'live' is the number of live
 * x registers to preserve.
 */
static Eterm
erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
                          Uint live, BeamInstr* ptr)
{
    Eterm* keys = tuple_val(keys_literal);
    Uint n = arityval(*keys);
    Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */;
    Uint i;
    flatmap_t *mp;
    Eterm *mhp;
    Eterm *E;

    ASSERT(n <= MAP_SMALL_MAP_LIMIT);

    if (HeapWordsLeft(p) < need) {
        erts_garbage_collect(p, need, reg, live);
    }

    mhp = p->htop;
    /*
     * NOTE(review): E looks unused, but is presumably read by the
     * y() macro expansion inside GET_TERM -- verify before removing.
     */
    E = p->stop;

    mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = n;
    mp->keys = keys_literal;    /* Share the literal key tuple. */

    for (i = 0; i < n; i++) {
        GET_TERM(*ptr++, *mhp++);
    }

    p->htop = mhp;

    return make_flatmap(mp);
}
/*
 * Implements the Map#{K => V, ...} update operation: insert or
 * replace n/2 key/value pairs (loader operands, sorted by key for
 * the flatmap case) in the map found in reg[live].  May garbage
 * collect.  Returns the updated map; cannot fail for a valid map.
 */
static Eterm
erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
                         Uint n, BeamInstr* new_p)
{
    Uint num_old;
    Uint num_updates;
    Uint need;
    flatmap_t *old_mp, *mp;
    Eterm res;
    Eterm* hp;
    Eterm* E;
    Eterm* old_keys;
    Eterm* old_vals;
    Eterm new_key;
    Eterm* kp;
    Eterm map;

    num_updates = n / 2;
    map = reg[live];

    if (is_not_flatmap(map)) {
        Uint32 hx;
        Eterm val;

        ASSERT(is_hashmap(map));

        res = map;
        E = p->stop;
        while(num_updates--) {
            /* assoc can't fail */
            GET_TERM(new_p[0], new_key);
            GET_TERM(new_p[1], val);
            hx = hashmap_make_hash(new_key);

            res = erts_hashmap_insert(p, hx, new_key, val, res,  0);

            new_p += 2;
        }
        return res;
    }

    old_mp  = (flatmap_t *) flatmap_val(map);
    num_old = flatmap_get_size(old_mp);

    /*
     * If the old map is empty, create a new map.
     */

    if (num_old == 0) {
        return erts_gc_new_map(p, reg, live, n, new_p);
    }

    /*
     * Allocate heap space for the worst case (i.e. all keys in the
     * update list are new).
     */

    need = 2*(num_old+num_updates) + 1 + MAP_HEADER_FLATMAP_SZ;
    if (HeapWordsLeft(p) < need) {
        erts_garbage_collect(p, need, reg, live+1);
        map      = reg[live];   /* The GC may have moved the map. */
        old_mp   = (flatmap_t *)flatmap_val(map);
    }

    /*
     * Build the skeleton for the map, ready to be filled in.
     *
     * +-----------------------------------+
     * | (Space for arityval for keys)     | <-----------+
     * +-----------------------------------+             |
     * | (Space for key 1)                 |             |    <-- kp
     * +-----------------------------------+             |
     *        .                                          |
     *        .                                          |
     *        .                                          |
     * +-----------------------------------+             |
     * | (Space for last key)              |             |
     * +-----------------------------------+             |
     * | MAP_HEADER                        |             |
     * +-----------------------------------+             |
     * | (Space for number of keys/values) |             |
     * +-----------------------------------+             |
     * | Boxed tuple pointer            >----------------+
     * +-----------------------------------+
     * | (Space for value 1)               |                  <-- hp
     * +-----------------------------------+
     */

    E = p->stop;
    kp = p->htop + 1;           /* Point to first key */
    hp = kp + num_old + num_updates;

    res = make_flatmap(hp);
    mp = (flatmap_t *)hp;
    hp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->keys = make_tuple(kp-1);

    old_vals = flatmap_get_values(old_mp);
    old_keys = flatmap_get_keys(old_mp);

    GET_TERM(*new_p, new_key);
    n = num_updates;

    /*
     * Merge the (sorted) old keys with the (sorted) update list;
     * fill in keys and values, until we run out of either updates
     * or old values and keys.
     */

    for (;;) {
        Eterm key;
        Sint c;

        ASSERT(kp < (Eterm *)mp);
        key = *old_keys;
        if ((c = CMP_TERM(key, new_key)) < 0) {
            /* Copy old key and value */
            *kp++ = key;
            *hp++ = *old_vals;
            old_keys++, old_vals++, num_old--;
        } else {                /* Replace or insert new */
            GET_TERM(new_p[1], *hp++);
            if (c > 0) {        /* If new key */
                *kp++ = new_key;
            } else {            /* If replacement */
                *kp++ = key;
                old_keys++, old_vals++, num_old--;
            }
            n--;
            if (n == 0) {
                break;
            } else {
                new_p += 2;
                GET_TERM(*new_p, new_key);
            }
        }
        if (num_old == 0) {
            break;
        }
    }

    /*
     * At this point, we have run out of either old keys and values,
     * or the update list. In other words, at least one of n and
     * num_old must be zero.
     */

    if (n > 0) {
        /*
         * All old keys and values have been copied, but there
         * are still new keys and values in the update list that
         * must be copied.
         */
        ASSERT(num_old == 0);
        while (n-- > 0) {
            GET_TERM(new_p[0], *kp++);
            GET_TERM(new_p[1], *hp++);
            new_p += 2;
        }
    } else {
        /*
         * All updates are now done. We may still have old
         * keys and values that we must copy.
         */
        ASSERT(n == 0);
        while (num_old-- > 0) {
            ASSERT(kp < (Eterm *)mp);
            *kp++ = *old_keys++;
            *hp++ = *old_vals++;
        }
    }

    /*
     * Calculate how many values that are unused at the end of the
     * key tuple and fill it out with a bignum header.
     */
    if ((n = (Eterm *)mp - kp) > 0) {
        *kp = make_pos_bignum_header(n-1);
    }

    /*
     * Fill in the size of the map in both the key tuple and in the map.
     */

    n = kp - p->htop - 1;       /* Actual number of keys/values */
    *p->htop = make_arityval(n);
    p->htop  = hp;
    mp->size = n;

    /* The expensive case, need to build a hashmap */
    if (n > MAP_SMALL_MAP_LIMIT) {
        ErtsHeapFactory factory;
        erts_factory_proc_init(&factory, p);
        res = erts_hashmap_from_ks_and_vs(&factory,flatmap_get_keys(mp),
                                          flatmap_get_values(mp),n);
        erts_factory_close(&factory);
    }
    return res;
}
  2718. /*
  2719. * Update values for keys that already exist in the map.
  2720. */
  2721. static Eterm
  2722. erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p)
  2723. {
  2724. Uint i;
  2725. Uint num_old;
  2726. Uint need;
  2727. flatmap_t *old_mp, *mp;
  2728. Eterm res;
  2729. Eterm* old_hp;
  2730. Eterm* hp;
  2731. Eterm* E;
  2732. Eterm* old_keys;
  2733. Eterm* old_vals;
  2734. Eterm new_key;
  2735. Eterm map;
  2736. int changed = 0;
  2737. n /= 2; /* Number of values to be updated */
  2738. ASSERT(n > 0);
  2739. map = reg[live];
  2740. if (is_not_flatmap(map)) {
  2741. Uint32 hx;
  2742. Eterm val;
  2743. /* apparently the compiler does not emit is_map instructions,
  2744. * bad compiler */
  2745. if (is_not_hashmap(map)) {
  2746. p->freason = BADMAP;
  2747. p->fvalue = map;
  2748. return THE_NON_VALUE;
  2749. }
  2750. res = map;
  2751. E = p->stop;
  2752. while(n--) {
  2753. GET_TERM(new_p[0], new_key);
  2754. GET_TERM(new_p[1], val);
  2755. hx = hashmap_make_hash(new_key);
  2756. res = erts_hashmap_insert(p, hx, new_key, val, res, 1);
  2757. if (is_non_value(res)) {
  2758. p->fvalue = new_key;
  2759. p->freason = BADKEY;
  2760. return res;
  2761. }
  2762. new_p += 2;
  2763. }
  2764. return res;
  2765. }
  2766. old_mp = (flatmap_t *) flatmap_val(map);
  2767. num_old = flatmap_get_size(old_mp);
  2768. /*
  2769. * If the old map is empty, fail.
  2770. */
  2771. if (num_old == 0) {
  2772. E = p->stop;
  2773. p->freason = BADKEY;
  2774. GET_TERM(new_p[0], p->fvalue);
  2775. return THE_NON_VALUE;
  2776. }
  2777. /*
  2778. * Allocate the exact heap space needed.
  2779. */
  2780. need = num_old + MAP_HEADER_FLATMAP_SZ;
  2781. if (HeapWordsLeft(p) < need) {
  2782. erts_garbage_collect(p, need, reg, live+1);
  2783. map = reg[live];
  2784. old_mp = (flatmap_t *)flatmap_val(map);
  2785. }
  2786. /*
  2787. * Update map, keeping the old key tuple.
  2788. */
  2789. old_hp = p->htop;
  2790. hp = p->htop;
  2791. E = p->stop;
  2792. old_vals = flatmap_get_values(old_mp);
  2793. old_keys = flatmap_get_keys(old_mp);
  2794. res = make_flatmap(hp);
  2795. mp = (flatmap_t *)hp;
  2796. hp += MAP_HEADER_FLATMAP_SZ;
  2797. mp->thing_word = MAP_HEADER_FLATMAP;
  2798. mp->size = num_old;
  2799. mp->keys = old_mp->keys;
  2800. /* Get array of key/value pairs to be updated */
  2801. GET_TERM(*new_p, new_key);
  2802. /* Update all values */
  2803. for (i = 0; i < num_old; i++) {
  2804. if (!EQ(*old_keys, new_key)) {
  2805. /* Not same keys */
  2806. *hp++ = *old_vals;
  2807. } else {
  2808. GET_TERM(new_p[1], *hp);
  2809. if(*hp != *old_vals) changed = 1;
  2810. hp++;
  2811. n--;
  2812. if (n == 0) {
  2813. /*
  2814. * All updates done. Copy remaining values
  2815. * if any changed or return the original one.
  2816. */
  2817. if(changed) {
  2818. for (i++, old_vals++; i < num_old; i++) {
  2819. *hp++ = *old_vals++;
  2820. }
  2821. ASSERT(hp == p->htop + need);
  2822. p->htop = hp;
  2823. return res;
  2824. } else {
  2825. p->htop = old_hp;
  2826. return map;
  2827. }
  2828. } else {
  2829. new_p += 2;
  2830. GET_TERM(*new_p, new_key);
  2831. }
  2832. }
  2833. old_vals++, old_keys++;
  2834. }
2835. /*
2836. * Updates left. That means that at least one of the keys in the
2837. * update list did not previously exist.
2838. */
  2839. ASSERT(hp == p->htop + need);
  2840. p->freason = BADKEY;
  2841. p->fvalue = new_key;
  2842. return THE_NON_VALUE;
  2843. }
  2844. #undef GET_TERM
  2845. int catchlevel(Process *p)
  2846. {
  2847. return p->catches;
  2848. }
  2849. /*
  2850. * Check if the given function is built-in (i.e. a BIF implemented in C).
  2851. *
  2852. * Returns 0 if not built-in, and a non-zero value if built-in.
  2853. */
  2854. int
  2855. erts_is_builtin(Eterm Mod, Eterm Name, int arity)
  2856. {
  2857. Export e;
  2858. Export* ep;
  2859. if (Mod == am_erlang) {
  2860. /*
  2861. * Special case for built-in functions that are implemented
  2862. * as instructions as opposed to SNIFs.
  2863. */
  2864. if (Name == am_apply && (arity == 2 || arity == 3)) {
  2865. return 1;
  2866. } else if (Name == am_yield && arity == 0) {
  2867. return 1;
  2868. }
  2869. }
  2870. e.info.mfa.module = Mod;
  2871. e.info.mfa.function = Name;
  2872. e.info.mfa.arity = arity;
  2873. if ((ep = export_get(&e)) == NULL) {
  2874. return 0;
  2875. }
  2876. return ep->addressv[erts_active_code_ix()] == ep->beam &&
  2877. BeamIsOpCode(ep->beam[0], op_apply_bif);
  2878. }
  2879. /*
  2880. * Return the current number of reductions consumed by the given process.
  2881. * To get the total number of reductions, p->reds must be added.
  2882. */
  2883. Uint
  2884. erts_current_reductions(Process *c_p, Process *p)
  2885. {
  2886. Sint reds_left;
  2887. if (c_p != p || !(erts_atomic32_read_nob(&c_p->state)
  2888. & ERTS_PSFLG_RUNNING)) {
  2889. return 0;
  2890. } else if (c_p->fcalls < 0 && ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
  2891. reds_left = c_p->fcalls + CONTEXT_REDS;
  2892. } else {
  2893. reds_left = c_p->fcalls;
  2894. }
  2895. return REDS_IN(c_p) - reds_left - erts_proc_sched_data(p)->virtual_reds;
  2896. }
  2897. int
  2898. erts_beam_jump_table(void)
  2899. {
  2900. #if defined(NO_JUMP_TABLE)
  2901. return 0;
  2902. #else
  2903. return 1;
  2904. #endif
  2905. }