
/erts/emulator/beam/beam_emu.c

http://github.com/erlang/otp
/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 1996-2020. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <stddef.h> /* offsetof() */
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
#include "error.h"
#include "bif.h"
#include "big.h"
#include "beam_load.h"
#include "erl_binary.h"
#include "erl_map.h"
#include "erl_bits.h"
#include "dist.h"
#include "beam_bp.h"
#include "beam_catches.h"
#include "erl_thr_progress.h"
#include "erl_nfunc_sched.h"
#ifdef HIPE
#include "hipe_mode_switch.h"
#include "hipe_bif1.h"
#endif
#include "dtrace-wrapper.h"
#include "erl_proc_sig_queue.h"

/* #define HARDDEBUG 1 */

#if defined(NO_JUMP_TABLE)
# define OpCase(OpCode) case op_##OpCode
# define CountCase(OpCode) case op_count_##OpCode
# define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)op_##OpCode)
# define Goto(Rel) {Go = BeamCodeAddr(Rel); goto emulator_loop;}
# define GotoPF(Rel) Goto(Rel)
#else
# define OpCase(OpCode) lb_##OpCode
# define CountCase(OpCode) lb_count_##OpCode
# define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)&&lb_##OpCode)
# define Goto(Rel) goto *((void *)BeamCodeAddr(Rel))
# define GotoPF(Rel) goto *((void *)Rel)
# define LabelAddr(Label) &&Label
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
# define PROCESS_MAIN_CHK_LOCKS(P) \
    do { \
        if ((P)) \
            erts_proc_lc_chk_only_proc_main((P)); \
        ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \
    } while (0)
# define ERTS_REQ_PROC_MAIN_LOCK(P) \
    do { \
        if ((P)) \
            erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
                                      __FILE__, __LINE__); \
    } while (0)
# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
    do { \
        if ((P)) \
            erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
    } while (0)
#else
# define PROCESS_MAIN_CHK_LOCKS(P)
# define ERTS_REQ_PROC_MAIN_LOCK(P)
# define ERTS_UNREQ_PROC_MAIN_LOCK(P)
#endif

/*
 * Define macros for deep checking of terms.
 */
#if defined(HARDDEBUG)
# define CHECK_TERM(T) size_object(T)
# define CHECK_ARGS(PC) \
    do { \
        int i_; \
        int Arity_ = PC[-1]; \
        for (i_ = 0; i_ < Arity_; i_++) { \
            CHECK_TERM(x(i_)); \
        } \
    } while (0)
#else
# define CHECK_TERM(T) ASSERT(!is_CP(T))
# define CHECK_ARGS(T)
#endif

#define GET_EXPORT_MODULE(p)   ((p)->info.mfa.module)
#define GET_EXPORT_FUNCTION(p) ((p)->info.mfa.function)
#define GET_EXPORT_ARITY(p)    ((p)->info.mfa.arity)
/*
 * We reuse some of the fields in the save area in the process structure.
 * This is safe to do, since this space is only actively used when
 * the process is switched out.
 */
#define REDS_IN(p)  ((p)->def_arg_reg[5])

/*
 * Add a byte offset to a pointer to Eterm. This is useful when the
 * loader has precalculated a byte offset.
 */
#define ADD_BYTE_OFFSET(ptr, offset) \
    ((Eterm *) (((unsigned char *)ptr) + (offset)))
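/*
 * Illustrative example (not part of the emulator): the loader stores
 * register operands as byte offsets rather than indexes, so the emulator
 * can add them to a base pointer without an extra shift. On a 64-bit
 * machine, where sizeof(Eterm) == 8, the offset 16 selects reg[2]:
 *
 *     ADD_BYTE_OFFSET(reg, 16) == &reg[2]
 */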
/* We don't check the range if an ordinary switch is used */
#ifdef NO_JUMP_TABLE
# define VALID_INSTR(IP) (BeamCodeAddr(IP) < (NUMBER_OF_OPCODES*2+10))
#else
# define VALID_INSTR(IP) \
    ((BeamInstr)LabelAddr(emulator_loop) <= BeamCodeAddr(IP) && \
     BeamCodeAddr(IP) < (BeamInstr)LabelAddr(end_emulator_loop))
#endif /* NO_JUMP_TABLE */

#define SET_I(ip) \
    ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
    I = (ip)

/*
 * Register target (X or Y register).
 */
#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target))
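/*
 * Illustrative note (an assumption about how the loader packs these
 * operands, consistent with the macro above and with xb()/yb() below):
 * a register target is a byte offset whose low bit tags the register
 * file. On a 64-bit machine, the even value 16 selects x(2) via &xb(16),
 * while 17 selects y(2) via &yb(16).
 */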
/*
 * Special Beam instructions.
 */
BeamInstr beam_apply[2];
BeamInstr beam_exit[1];
BeamInstr beam_continue_exit[1];

/* NOTE These should be the only variables containing trace instructions.
** Sometimes tests are for the instruction value, and sometimes
** for the referring variable (one of these), and rogue references
** will most likely cause chaos.
*/
BeamInstr beam_return_to_trace[1];   /* OpCode(i_return_to_trace) */
BeamInstr beam_return_trace[1];      /* OpCode(i_return_trace) */
BeamInstr beam_exception_trace[1];   /* UGLY also OpCode(i_return_trace) */
BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/*
 * All Beam instructions in numerical order.
 */
#ifndef NO_JUMP_TABLE
void** beam_ops;
#endif

#define SWAPIN \
    HTOP = HEAP_TOP(c_p); \
    E = c_p->stop

#define SWAPOUT \
    HEAP_TOP(c_p) = HTOP; \
    c_p->stop = E

#define HEAVY_SWAPIN \
    SWAPIN; \
    FCALLS = c_p->fcalls

#define HEAVY_SWAPOUT \
    SWAPOUT; \
    c_p->fcalls = FCALLS

/*
 * Use LIGHT_SWAPOUT when the called function
 * will call HeapOnlyAlloc() (and never HAlloc()).
 */
#ifdef DEBUG
# /* The stack pointer is used in an assertion. */
# define LIGHT_SWAPOUT SWAPOUT
# define DEBUG_SWAPOUT SWAPOUT
# define DEBUG_SWAPIN  SWAPIN
#else
# define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
# define DEBUG_SWAPOUT
# define DEBUG_SWAPIN
#endif

/*
 * Use LIGHT_SWAPIN when we know that c_p->stop cannot
 * have been updated (i.e. if there cannot have been
 * a garbage-collection).
 */
#define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)

#ifdef FORCE_HEAP_FRAGS
# define HEAP_SPACE_VERIFIED(Words) do { \
        c_p->space_verified = (Words); \
        c_p->space_verified_from = HTOP; \
    } while (0)
#else
# define HEAP_SPACE_VERIFIED(Words) ((void)0)
#endif
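/*
 * Illustrative sketch (not part of the emulator): the usual discipline
 * around a call that may allocate on the heap and consume reductions.
 * 'do_something_heavy' is a hypothetical helper, shown only to make the
 * swap-out/swap-in pairing concrete.
 */
#if 0
    HEAVY_SWAPOUT;                    /* publish HTOP, E and FCALLS      */
    result = do_something_heavy(c_p); /* may HAlloc() and trigger GC     */
    HEAVY_SWAPIN;                     /* reload them; GC may have moved
                                       * the heap and the stack          */
#endif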
#define PRE_BIF_SWAPOUT(P) \
    HEAP_TOP((P)) = HTOP; \
    (P)->stop = E; \
    PROCESS_MAIN_CHK_LOCKS((P)); \
    ERTS_UNREQ_PROC_MAIN_LOCK((P))

#define db(N) (N)
#define fb(N) ((Sint)(Sint32)(N))
#define jb(N) ((Sint)(Sint32)(N))
#define tb(N) (N)
#define xb(N) (*ADD_BYTE_OFFSET(reg, N))
#define yb(N) (*ADD_BYTE_OFFSET(E, N))
#define Sb(N) (*REG_TARGET_PTR(N))
#define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
#define Qb(N) (N)
#define Ib(N) (N)

#define x(N) reg[N]
#define y(N) E[N]
#define r(N) x(N)
#define Q(N) (N*sizeof(Eterm *))
#define l(N) (freg[N].fd)

#define Arg(N) I[(N)+1]

#define GetSource(raw, dst) \
    do { \
        dst = raw; \
        switch (loader_tag(dst)) { \
        case LOADER_X_REG: \
            dst = x(loader_x_reg_index(dst)); \
            break; \
        case LOADER_Y_REG: \
            ASSERT(loader_y_reg_index(dst) >= 1); \
            dst = y(loader_y_reg_index(dst)); \
            break; \
        } \
        CHECK_TERM(dst); \
    } while (0)

#define PUT_TERM_REG(term, desc) \
    do { \
        switch (loader_tag(desc)) { \
        case LOADER_X_REG: \
            x(loader_x_reg_index(desc)) = (term); \
            break; \
        case LOADER_Y_REG: \
            y(loader_y_reg_index(desc)) = (term); \
            break; \
        default: \
            ASSERT(0); \
            break; \
        } \
    } while (0)

#ifdef DEBUG
/* Better static type testing by the C compiler */
# define BEAM_IS_TUPLE(Src) is_tuple(Src)
#else
/* Better performance */
# define BEAM_IS_TUPLE(Src) is_boxed(Src)
#endif

/*
 * process_main() is already huge, so we want to avoid inlining
 * seldom used functions into it.
 */
static void init_emulator_finish(void) ERTS_NOINLINE;
static ErtsCodeMFA *ubif2mfa(void* uf) ERTS_NOINLINE;
static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
                               Eterm* reg, ErtsCodeMFA* bif_mfa) ERTS_NOINLINE;
static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
                                     Eterm* reg, Eterm func) ERTS_NOINLINE;
static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
                              BeamInstr *I, Uint offs) ERTS_NOINLINE;
static BeamInstr* apply(Process* p, Eterm* reg,
                        BeamInstr *I, Uint offs) ERTS_NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
                           Eterm* reg, Eterm args) ERTS_NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
                            Eterm args, Eterm* reg) ERTS_NOINLINE;
static Eterm new_fun(Process* p, Eterm* reg,
                     ErlFunEntry* fe, int num_free) ERTS_NOINLINE;
static int is_function2(Eterm Term, Uint arity);
static Eterm erts_gc_new_map(Process* p, Eterm* reg, Uint live,
                             Uint n, BeamInstr* ptr) ERTS_NOINLINE;
static Eterm erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
                                       Uint live, BeamInstr* ptr) ERTS_NOINLINE;
static Eterm erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
                                      Uint n, BeamInstr* new_p) ERTS_NOINLINE;
static Eterm erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live,
                                      Uint n, Eterm* new_p) ERTS_NOINLINE;
static Eterm get_map_element(Eterm map, Eterm key);
static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);

/*
 * Functions not directly called by process_main(). OK to inline.
 */
static BeamInstr* next_catch(Process* c_p, Eterm *reg);
static void terminate_proc(Process* c_p, Eterm Value);
static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
                            ErtsCodeMFA *bif_mfa, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm *get_freason_ptr_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);

void
init_emulator(void)
{
    process_main(0, 0);
}
/*
 * On certain platforms, make sure that the main variables really are placed
 * in registers.
 */
#if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
# define REG_xregs asm("%l1")
# define REG_htop asm("%l2")
# define REG_stop asm("%l3")
# define REG_I asm("%l4")
# define REG_fcalls asm("%l5")
#elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG)
# define REG_xregs asm("%r12")
# define REG_htop
# define REG_stop asm("%r13")
# define REG_I asm("%rbx")
# define REG_fcalls asm("%r14")
#else
# define REG_xregs
# define REG_htop
# define REG_stop
# define REG_I
# define REG_fcalls
#endif

#ifdef USE_VM_PROBES
# define USE_VM_CALL_PROBES
#endif

#ifdef USE_VM_CALL_PROBES

#define DTRACE_LOCAL_CALL(p, mfa) \
    if (DTRACE_ENABLED(local_function_entry)) { \
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
        int depth = STACK_START(p) - STACK_TOP(p); \
        dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
        DTRACE3(local_function_entry, process_name, mfa_buf, depth); \
    }

#define DTRACE_GLOBAL_CALL(p, mfa) \
    if (DTRACE_ENABLED(global_function_entry)) { \
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
        int depth = STACK_START(p) - STACK_TOP(p); \
        dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
        DTRACE3(global_function_entry, process_name, mfa_buf, depth); \
    }

#define DTRACE_RETURN(p, mfa) \
    if (DTRACE_ENABLED(function_return)) { \
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
        int depth = STACK_START(p) - STACK_TOP(p); \
        dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
        DTRACE3(function_return, process_name, mfa_buf, depth); \
    }

#define DTRACE_BIF_ENTRY(p, mfa) \
    if (DTRACE_ENABLED(bif_entry)) { \
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
        dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
        DTRACE2(bif_entry, process_name, mfa_buf); \
    }

#define DTRACE_BIF_RETURN(p, mfa) \
    if (DTRACE_ENABLED(bif_return)) { \
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
        dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
        DTRACE2(bif_return, process_name, mfa_buf); \
    }

#define DTRACE_NIF_ENTRY(p, mfa) \
    if (DTRACE_ENABLED(nif_entry)) { \
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
        dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
        DTRACE2(nif_entry, process_name, mfa_buf); \
    }

#define DTRACE_NIF_RETURN(p, mfa) \
    if (DTRACE_ENABLED(nif_return)) { \
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
        dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
        DTRACE2(nif_return, process_name, mfa_buf); \
    }

#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) \
    do { \
        if (DTRACE_ENABLED(global_function_entry)) { \
            BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
            DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp)); \
        } \
    } while (0)

#define DTRACE_RETURN_FROM_PC(p, i) \
    do { \
        ErtsCodeMFA* cmfa; \
        if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc(i))) { \
            DTRACE_RETURN((p), cmfa); \
        } \
    } while (0)

#else /* USE_VM_PROBES */

#define DTRACE_LOCAL_CALL(p, mfa)            do {} while (0)
#define DTRACE_GLOBAL_CALL(p, mfa)           do {} while (0)
#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
#define DTRACE_RETURN(p, mfa)                do {} while (0)
#define DTRACE_RETURN_FROM_PC(p, i)          do {} while (0)
#define DTRACE_BIF_ENTRY(p, mfa)             do {} while (0)
#define DTRACE_BIF_RETURN(p, mfa)            do {} while (0)
#define DTRACE_NIF_ENTRY(p, mfa)             do {} while (0)
#define DTRACE_NIF_RETURN(p, mfa)            do {} while (0)

#endif /* USE_VM_PROBES */

#ifdef DEBUG
#define ERTS_DBG_CHK_REDS(P, FC) \
    do { \
        if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
            ASSERT(FC <= 0); \
            ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
                   <= 0 - (FC)); \
        } \
        else { \
            ASSERT(FC <= CONTEXT_REDS); \
            ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
                   <= CONTEXT_REDS - (FC)); \
        } \
    } while (0)
#else
#define ERTS_DBG_CHK_REDS(P, FC)
#endif

#ifdef NO_FPE_SIGNALS
# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
#else
# define ERTS_NO_FPE_CHECK_INIT(p)
# define ERTS_NO_FPE_ERROR(p, a, b)
#endif
/*
 * process_main() is called twice:
 * The first call performs some initialisation, including exporting
 * the instructions' C labels to the loader.
 * The second call starts execution of BEAM code. This call never returns.
 */
ERTS_NO_RETPOLINE
void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
    static int init_done = 0;
    Process* c_p = NULL;
    int reds_used;
#ifdef DEBUG
    ERTS_DECLARE_DUMMY(Eterm pid);
#endif

    /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
     * in all other cases x0 is used.
     */
    register Eterm* reg REG_xregs = x_reg_array;

    /*
     * Top of heap (next free location); grows upwards.
     */
    register Eterm* HTOP REG_htop = NULL;

    /* Stack pointer. Grows downwards; points
     * to last item pushed (normally a saved
     * continuation pointer).
     */
    register Eterm* E REG_stop = NULL;

    /*
     * Pointer to next threaded instruction.
     */
    register BeamInstr *I REG_I = NULL;

    /* Number of reductions left. This function
     * returns to the scheduler when FCALLS reaches zero.
     */
    register Sint FCALLS REG_fcalls = 0;

    /*
     * X registers and floating point registers are located in
     * scheduler specific data.
     */
    register FloatDef *freg = f_reg_array;

    /*
     * For keeping the negative old value of 'reds' when call saving is active.
     */
    int neg_o_reds = 0;

#ifdef ERTS_OPCODE_COUNTER_SUPPORT
    static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
#else
#ifndef NO_JUMP_TABLE
    static void* opcodes[] = { DEFINE_OPCODES };
#else
    register BeamInstr Go;
#endif
#endif

    Uint64 start_time = 0;          /* Monitor long schedule */
    BeamInstr* start_time_i = NULL;

    ERTS_MSACC_DECLARE_CACHE_X()    /* a cached value of the tsd pointer for msacc */

    ERL_BITS_DECLARE_STATEP;        /* Has to be last declaration */

    /*
     * Note: In this function, we attempt to place rarely executed code towards
     * the end of the function, in the hope that the cache hit rate will be better.
     * The initialization code is only run once, so it is at the very end.
     *
     * Note: c_p->arity must be set to reflect the number of useful terms in
     * c_p->arg_reg before calling the scheduler.
     */
    if (ERTS_UNLIKELY(!init_done)) {
        /* This should only be reached during the init phase when only the main
         * process is running. I.e. there is no race for init_done.
         */
        init_done = 1;
        goto init_emulator;
    }

    c_p = NULL;
    reds_used = 0;

    goto do_schedule1;

 do_schedule:
    ASSERT(c_p->arity < 6);
    ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
        reds_used = REDS_IN(c_p) - FCALLS;
    else
        reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
    ASSERT(reds_used >= 0);
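    /*
     * Worked example (illustrative; CONTEXT_REDS is currently 4000): a
     * process scheduled in with REDS_IN(c_p) == 4000 that reaches this
     * point with FCALLS == 3990 has used 10 reductions. With call saving
     * active, FCALLS instead starts at REDS_IN(c_p) - CONTEXT_REDS == 0
     * and goes negative, so after the same 10 reductions FCALLS == -10
     * and reds_used == 4000 - (4000 + (-10)) == 10.
     */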
 do_schedule1:

    if (start_time != 0) {
        Sint64 diff = erts_timestamp_millis() - start_time;
        if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
            ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
            ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
            monitor_long_schedule_proc(c_p, inptr, outptr, (Uint) diff);
        }
    }

    PROCESS_MAIN_CHK_LOCKS(c_p);
    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
    c_p = erts_schedule(NULL, c_p, reds_used);
    ASSERT(!(c_p->flags & F_HIPE_MODE));
    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
    start_time = 0;
#ifdef DEBUG
    pid = c_p->common.id; /* Save for debugging purposes */
#endif
    ERTS_REQ_PROC_MAIN_LOCK(c_p);
    PROCESS_MAIN_CHK_LOCKS(c_p);

    ERTS_MSACC_UPDATE_CACHE_X();

    if (erts_system_monitor_long_schedule != 0) {
        start_time = erts_timestamp_millis();
        start_time_i = c_p->i;
    }

    ERL_BITS_RELOAD_STATEP(c_p);
    {
        int reds;
        Eterm* argp;
        BeamInstr next;
        int i;

        argp = c_p->arg_reg;
        for (i = c_p->arity - 1; i >= 0; i--) {
            reg[i] = argp[i];
            CHECK_TERM(reg[i]);
        }

        /*
         * We put the original reduction count in the process structure, to reduce
         * the code size (referencing a field in a struct through a pointer stored
         * in a register gives smaller code than referencing a global variable).
         */

        SET_I(c_p->i);

        REDS_IN(c_p) = reds = c_p->fcalls;
#ifdef DEBUG
        c_p->debug_reds_in = reds;
#endif

        if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
            neg_o_reds = -CONTEXT_REDS;
            FCALLS = neg_o_reds + reds;
        } else {
            neg_o_reds = 0;
            FCALLS = reds;
        }

        ERTS_DBG_CHK_REDS(c_p, FCALLS);

        next = *I;
        SWAPIN;
        ASSERT(VALID_INSTR(next));

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(process_scheduled)) {
            DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
            DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
            dtrace_proc_str(c_p, process_buf);

            if (ERTS_PROC_IS_EXITING(c_p)) {
                sys_strcpy(fun_buf, "<exiting>");
            } else {
                ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
                if (cmfa) {
                    dtrace_fun_decode(c_p, cmfa,
                                      NULL, fun_buf);
                } else {
                    erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
                                  "<unknown/%p>", next);
                }
            }
            DTRACE2(process_scheduled, process_buf, fun_buf);
        }
#endif
        Goto(next);
    }

#if defined(DEBUG) || defined(NO_JUMP_TABLE)
 emulator_loop:
#endif

#ifdef NO_JUMP_TABLE
    switch (Go) {
#endif

#include "beam_hot.h"

    /*
     * The labels are jumped to from the $DISPATCH() macros when the reductions
     * are used up.
     *
     * Since the I register points just beyond the FuncBegin instruction, we
     * can get the module, function, and arity for the function being
     * called from I[-3], I[-2], and I[-1] respectively.
     */
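    /*
     * Illustrative sketch (not part of the emulator): given the layout
     * described above, decoding the callee's MFA relative to I amounts to
     * something like the following, which is effectively what
     * erts_code_to_codemfa() computes.
     */
#if 0
    Eterm module   = (Eterm) I[-3];
    Eterm function = (Eterm) I[-2];
    Uint  arity    = (Uint)  I[-1];
#endif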
 context_switch_fun:
    /* Add one for the environment of the fun */
    c_p->arity = erts_code_to_codemfa(I)->arity + 1;
    goto context_switch2;

 context_switch:
    c_p->arity = erts_code_to_codemfa(I)->arity;

 context_switch2:               /* Entry for fun calls. */
    c_p->current = erts_code_to_codemfa(I);

 context_switch3:

    {
        Eterm* argp;
        int i;

        if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
            c_p->i = beam_exit;
            c_p->arity = 0;
            c_p->current = NULL;
            goto do_schedule;
        }

        /*
         * Make sure that there is enough room for the argument registers to be saved.
         */
        if (c_p->arity > c_p->max_arg_reg) {
            /*
             * Yes, this is an expensive operation, but you only pay it the first
             * time you call a function with more than 6 arguments which is
             * scheduled out. This is better than paying for 26 words of wasted
             * space for most processes which never call functions with more than
             * 6 arguments.
             */
            Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
            if (c_p->arg_reg != c_p->def_arg_reg) {
                c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
                                                      (void *) c_p->arg_reg,
                                                      size);
            } else {
                c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
            }
            c_p->max_arg_reg = c_p->arity;
        }
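        /*
         * Worked example (illustrative): the first time a function of
         * arity 8 is scheduled out, size == 8*sizeof(Eterm), arg_reg is
         * switched from the default def_arg_reg area to a separately
         * allocated buffer, and max_arg_reg becomes 8, so later context
         * switches at arity <= 8 skip this branch entirely.
         */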
        /*
         * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it
         * now before saving registers.
         *
         * The '+ 1' compensates for the last increment which was not done
         * (because the code for the Dispatch() macro becomes shorter that way).
         */
        ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
        if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
            reds_used = REDS_IN(c_p) - FCALLS;
        else
            reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
        ASSERT(reds_used >= 0);

        /*
         * Save the argument registers and everything else.
         */
        argp = c_p->arg_reg;
        for (i = c_p->arity - 1; i >= 0; i--) {
            argp[i] = reg[i];
        }
        SWAPOUT;
        c_p->i = I;
        goto do_schedule1;
    }

#include "beam_warm.h"

 OpCase(normal_exit): {
     HEAVY_SWAPOUT;
     c_p->freason = EXC_NORMAL;
     c_p->arity = 0; /* In case this process will ever be garbed again. */
     ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
     erts_do_exit_process(c_p, am_normal);
     ERTS_REQ_PROC_MAIN_LOCK(c_p);
     HEAVY_SWAPIN;
     goto do_schedule;
 }

 OpCase(continue_exit): {
     HEAVY_SWAPOUT;
     ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
     erts_continue_exit_process(c_p);
     ERTS_REQ_PROC_MAIN_LOCK(c_p);
     HEAVY_SWAPIN;
     goto do_schedule;
 }

 find_func_info: {
     SWAPOUT;
     I = handle_error(c_p, I, reg, NULL);
     goto post_error_handling;
 }

 OpCase(call_error_handler):
    /*
     * At this point, I points to the code[3] in the export entry for
     * a function which is not loaded.
     *
     * code[0]: Module
     * code[1]: Function
     * code[2]: Arity
     * code[3]: &&call_error_handler
     * code[4]: Not used
     */
    HEAVY_SWAPOUT;
    I = call_error_handler(c_p, erts_code_to_codemfa(I),
                           reg, am_undefined_function);
    HEAVY_SWAPIN;
    if (I) {
        Goto(*I);
    }

 /* Fall through */
 OpCase(error_action_code): {
 handle_error:
    SWAPOUT;
    I = handle_error(c_p, NULL, reg, NULL);
 post_error_handling:
    if (I == 0) {
        goto do_schedule;
    } else {
        ASSERT(!is_value(r(0)));
        SWAPIN;
        Goto(*I);
    }
 }

 OpCase(i_func_info_IaaI): {
     ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
     c_p->freason = EXC_FUNCTION_CLAUSE;
     c_p->current = &ci->mfa;
     goto handle_error;
 }

#include "beam_cold.h"

#ifdef ERTS_OPCODE_COUNTER_SUPPORT
    DEFINE_COUNTING_LABELS;
#endif

#ifndef NO_JUMP_TABLE
#ifdef DEBUG
 end_emulator_loop:
#endif
#endif

 OpCase(int_code_end):
 OpCase(label_L):
 OpCase(on_load):
 OpCase(line_I):
    erts_exit(ERTS_ERROR_EXIT, "meta op\n");

    /*
     * One-time initialization of Beam emulator.
     */
 init_emulator:
    {
#ifndef NO_JUMP_TABLE
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
#ifdef DEBUG
        counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
#endif
        counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
        beam_ops = counting_opcodes;
#else /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
        beam_ops = opcodes;
#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
#endif /* NO_JUMP_TABLE */

        init_emulator_finish();
        return;
    }

#ifdef NO_JUMP_TABLE
 default:
    erts_exit(ERTS_ERROR_EXIT, "unexpected op code %d\n", Go);
    }
#endif
    return; /* Never executed */
}

/*
 * Enter all BIFs into the export table.
 *
 * Note that they will all call the error_handler until their modules have been
 * loaded, which may prevent the system from booting if BIFs from non-preloaded
 * modules are apply/3'd while loading code. Ordinary BIF calls will work fine
 * however since they won't go through export entries.
 */
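/*
 * Illustrative example of the caveat above ('some_module' and 'some_bif'
 * are hypothetical names): a call such as apply(some_module, some_bif,
 * Args) made before 'some_module' is loaded goes through the export
 * entry, hits the call_error_handler trampoline installed below, and is
 * resolved by the error handler (typically
 * error_handler:undefined_function/3), whereas a directly compiled BIF
 * call never takes that path.
 */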
static void install_bifs(void) {
    int i;

    for (i = 0; i < BIF_SIZE; i++) {
        BifEntry *entry;
        Export *ep;
        int j;

        entry = &bif_table[i];

        ep = erts_export_put(entry->module, entry->name, entry->arity);

        ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
        ep->info.mfa.module = entry->module;
        ep->info.mfa.function = entry->name;
        ep->info.mfa.arity = entry->arity;
        ep->bif_number = i;

        memset(&ep->trampoline, 0, sizeof(ep->trampoline));
        ep->trampoline.op = BeamOpCodeAddr(op_call_error_handler);

        for (j = 0; j < ERTS_NUM_CODE_IX; j++) {
            ep->addressv[j] = ep->trampoline.raw;
        }

        /* Set up a hidden export entry so we can trap to this BIF without
         * it being seen when tracing. */
        erts_init_trap_export(&bif_trap_export[i],
                              entry->module, entry->name, entry->arity,
                              entry->f);
    }
}

/*
 * One-time initialization of emulator. Does not need to be
 * in process_main().
 */
static void
init_emulator_finish(void)
{
#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
    int i;

    for (i = 0; i < NUMBER_OF_OPCODES; i++) {
        BeamInstr instr = BeamOpCodeAddr(i);
        if (instr >= (1ull << 32)) {
            erts_exit(ERTS_ERROR_EXIT,
                      "This run-time was supposed to be compiled with all code below 2Gb,\n"
                      "but the instruction '%s' is located at %016lx.\n",
                      opc[i].name, instr);
        }
    }
#endif

    beam_apply[0]             = BeamOpCodeAddr(op_i_apply);
    beam_apply[1]             = BeamOpCodeAddr(op_normal_exit);
    beam_exit[0]              = BeamOpCodeAddr(op_error_action_code);
    beam_continue_exit[0]     = BeamOpCodeAddr(op_continue_exit);
    beam_return_to_trace[0]   = BeamOpCodeAddr(op_i_return_to_trace);
    beam_return_trace[0]      = BeamOpCodeAddr(op_return_trace);
    beam_exception_trace[0]   = BeamOpCodeAddr(op_return_trace); /* UGLY */
    beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace);

    install_bifs();
}
/*
 * erts_dirty_process_main() is what dirty schedulers execute. Since they handle
 * only NIF calls they do not need to be able to execute all BEAM
 * instructions.
 */
void erts_dirty_process_main(ErtsSchedulerData *esdp)
{
    Process* c_p = NULL;
    ErtsMonotonicTime start_time;
#ifdef DEBUG
    ERTS_DECLARE_DUMMY(Eterm pid);
#endif

    /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
     * in all other cases x0 is used.
     */
    register Eterm* reg REG_xregs = NULL;

    /*
     * Top of heap (next free location); grows upwards.
     */
    register Eterm* HTOP REG_htop = NULL;

    /* Stack pointer. Grows downwards; points
     * to last item pushed (normally a saved
     * continuation pointer).
     */
    register Eterm* E REG_stop = NULL;

    /*
     * Pointer to next threaded instruction.
     */
    register BeamInstr *I REG_I = NULL;

    ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */

    /*
     * start_time always positive for dirty CPU schedulers,
     * and negative for dirty I/O schedulers.
     */
    if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp)) {
        start_time = erts_get_monotonic_time(NULL);
        ASSERT(start_time >= 0);
    }
    else {
        start_time = ERTS_SINT64_MIN;
        ASSERT(start_time < 0);
    }
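    /*
     * Note (illustrative): the sign of start_time doubles as the scheduler
     * type tag below. Dirty I/O schedulers use the sentinel ERTS_SINT64_MIN,
     * so 'start_time < 0' cheaply selects the fixed one-reduction accounting
     * path, while a non-negative value is an actual timestamp used to
     * convert elapsed time into reductions.
     */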
    goto do_dirty_schedule;

 context_switch:
    c_p->current = erts_code_to_codemfa(I); /* Pointer to Mod, Func, Arity */
    c_p->arity = c_p->current->arity;

    {
        int reds_used;
        Eterm* argp;
        int i;

        /*
         * Make sure that there is enough room for the argument registers to be saved.
         */
        if (c_p->arity > c_p->max_arg_reg) {
            /*
             * Yes, this is an expensive operation, but you only pay it the first
             * time you call a function with more than 6 arguments which is
             * scheduled out. This is better than paying for 26 words of wasted
             * space for most processes which never call functions with more than
             * 6 arguments.
             */
            Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
            if (c_p->arg_reg != c_p->def_arg_reg) {
                c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
                                                      (void *) c_p->arg_reg,
                                                      size);
            } else {
                c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
            }
            c_p->max_arg_reg = c_p->arity;
        }

        /*
         * Save the argument registers and everything else.
         */
        argp = c_p->arg_reg;
        for (i = c_p->arity - 1; i >= 0; i--) {
            argp[i] = reg[i];
        }
        SWAPOUT;
        c_p->i = I;

    do_dirty_schedule:

        if (start_time < 0) {
            /*
             * Dirty I/O scheduler:
             * One reduction consumed regardless of
             * time spent in the dirty NIF.
             */
            reds_used = esdp->virtual_reds + 1;
        }
        else {
            /*
             * Dirty CPU scheduler:
             * Reductions based on time consumed by
             * the dirty NIF.
             */
            Sint64 treds;
            treds = erts_time2reds(start_time,
                                   erts_get_monotonic_time(esdp));
            treds += esdp->virtual_reds;
            reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
        }
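        /*
         * Worked example (illustrative): if a dirty CPU NIF ran for a few
         * milliseconds, erts_time2reds() converts that monotonic-time
         * interval into an equivalent reduction count, virtual_reds is
         * added on top, and the sum is clamped to INT_MAX before being
         * reported to erts_schedule() below.
         */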
        if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
            erts_proc_sig_handle_pending_suspend(c_p);

        PROCESS_MAIN_CHK_LOCKS(c_p);
        ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
        c_p = erts_schedule(esdp, c_p, reds_used);

        if (start_time >= 0) {
            start_time = erts_get_monotonic_time(esdp);
            ASSERT(start_time >= 0);
        }
    }

    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
#ifdef DEBUG
    pid = c_p->common.id; /* Save for debugging purposes */
#endif
    ERTS_REQ_PROC_MAIN_LOCK(c_p);
    PROCESS_MAIN_CHK_LOCKS(c_p);

    ASSERT(!(c_p->flags & F_HIPE_MODE));
    ERTS_MSACC_UPDATE_CACHE_X();

    /*
     * Set fcalls even though we ignore it, so we don't
     * confuse code accessing it...
     */
    if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
        c_p->fcalls = 0;
    else
        c_p->fcalls = CONTEXT_REDS;

    if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
        erts_execute_dirty_system_task(c_p);
        goto do_dirty_schedule;
    }
    else {
        ErtsCodeMFA *codemfa;
        Eterm* argp;
        int i, exiting;

        reg = esdp->x_reg_array;

        argp = c_p->arg_reg;
        for (i = c_p->arity - 1; i >= 0; i--) {
            reg[i] = argp[i];
            CHECK_TERM(reg[i]);
        }

        /*
         * We put the original reduction count in the process structure, to reduce
         * the code size (referencing a field in a struct through a pointer stored
         * in a register gives smaller code than referencing a global variable).
         */

        I = c_p->i;

        SWAPIN;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(process_scheduled)) {
            DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
            DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
            dtrace_proc_str(c_p, process_buf);

            if (ERTS_PROC_IS_EXITING(c_p)) {
                sys_strcpy(fun_buf, "<exiting>");
            } else {
                ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
                if (cmfa) {
                    dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
                } else {
                    erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
                                  "<unknown/%p>", *I);
                }
            }
            DTRACE2(process_scheduled, process_buf, fun_buf);
        }
#endif

        /*
         * call_nif is always first instruction in function:
         *
         * I[-3]: Module
         * I[-2]: Function
         * I[-1]: Arity
         * I[0]:  &&call_nif
         * I[1]:  Function pointer to NIF function
         * I[2]:  Pointer to erl_module_nif
         * I[3]:  Function pointer to dirty NIF
         *
         * This layout is determined by the ErtsNativeFunc struct
         */
        ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_NIF);
        codemfa = erts_code_to_codemfa(I);

        DTRACE_NIF_ENTRY(c_p, codemfa);
        c_p->current = codemfa;
        SWAPOUT;
        PROCESS_MAIN_CHK_LOCKS(c_p);
        ERTS_UNREQ_PROC_MAIN_LOCK(c_p);

        ASSERT(!ERTS_PROC_IS_EXITING(c_p));
        if (BeamIsOpCode(*I, op_call_bif_W)) {
            exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
        }
        else {
            ASSERT(BeamIsOpCode(*I, op_call_nif_WWW));
            exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
        }

        ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));

        PROCESS_MAIN_CHK_LOCKS(c_p);
        ERTS_REQ_PROC_MAIN_LOCK(c_p);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
        ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_EMULATOR);
        if (exiting)
            goto do_dirty_schedule;
        ASSERT(!ERTS_PROC_IS_EXITING(c_p));

        DTRACE_NIF_RETURN(c_p, codemfa);
        ERTS_HOLE_CHECK(c_p);
        SWAPIN;
        I = c_p->i;
        goto context_switch;
    }
}
static ErtsCodeMFA *
ubif2mfa(void* uf)
{
    int i;
    for (i = 0; erts_u_bifs[i].bif; i++) {
        if (erts_u_bifs[i].bif == uf)
            return &bif_trap_export[erts_u_bifs[i].exp_ix].info.mfa;
    }
    erts_exit(ERTS_ERROR_EXIT, "bad u bif: %p\n", uf);
    return NULL;
}

/*
 * Mapping from the error code 'class tag' to atoms.
 */
Eterm exception_tag[NUMBER_EXC_TAGS] = {
    am_error, /* 0 */
    am_exit,  /* 1 */
    am_throw, /* 2 */
};

/*
 * Mapping from error code 'index' to atoms.
 */
Eterm error_atom[NUMBER_EXIT_CODES] = {
    am_internal_error,  /* 0 */
    am_normal,          /* 1 */
    am_internal_error,  /* 2 */
    am_badarg,          /* 3 */
    am_badarith,        /* 4 */
    am_badmatch,        /* 5 */
    am_function_clause, /* 6 */
    am_case_clause,     /* 7 */
    am_if_clause,       /* 8 */
    am_undef,           /* 9 */
    am_badfun,          /* 10 */
    am_badarity,        /* 11 */
    am_timeout_value,   /* 12 */
    am_noproc,          /* 13 */
    am_notalive,        /* 14 */
    am_system_limit,    /* 15 */
    am_try_clause,      /* 16 */
    am_notsup,          /* 17 */
    am_badmap,          /* 18 */
    am_badkey,          /* 19 */
};

/* Returns the return address at E[0] in printable form, skipping tracing in
 * the same manner as gather_stacktrace.
 *
 * This is needed to generate correct stacktraces when throwing errors from
 * instructions that return like an ordinary function, such as call_nif. */
BeamInstr *erts_printable_return_address(Process* p, Eterm *E) {
    Eterm *ptr = E;

    ASSERT(is_CP(*ptr));

    while (ptr < STACK_START(p)) {
        BeamInstr *cp = cp_val(*ptr);

        if (cp == beam_exception_trace || cp == beam_return_trace) {
            ptr += 3;
        } else if (cp == beam_return_time_trace) {
            ptr += 2;
        } else if (cp == beam_return_to_trace) {
            ptr += 1;
        } else {
            return cp;
        }
    }

    ERTS_ASSERT(!"No continuation pointer on stack");
}
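/*
 * Illustrative note: the skip distances above mirror the trace frame
 * layouts used elsewhere in this file (see next_catch() below, which
 * reads the MFA and tracer words of a return_trace frame). A
 * return_trace/exception_trace frame occupies three stack words, so a
 * stack shaped like
 *
 *     [beam_return_trace, MFA, Tracer, RealCP, ...]
 *
 * yields RealCP after a single 3-word skip.
 */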
/*
 * To fully understand the error handling, one must keep in mind that
 * when an exception is thrown, the search for a handler can jump back
 * and forth between Beam and native code. Upon each mode switch, a
 * dummy handler is inserted so that if an exception reaches that point,
 * the handler is invoked (like any handler) and transfers control so
 * that the search for a real handler is continued in the other mode.
 * Therefore, c_p->freason and c_p->fvalue must still hold the exception
 * info when the handler is executed, but normalized so that creation of
 * error terms and saving of the stack trace is only done once, even if
 * we pass through the error handling code several times.
 *
 * When a new exception is raised, the current stack trace information
 * is quick-saved in a small structure allocated on the heap. Depending
 * on how the exception is eventually caught (perhaps by causing the
 * current process to terminate), the saved information may be used to
 * create a symbolic (human-readable) representation of the stack trace
 * at the point of the original exception.
 */
static BeamInstr*
handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
{
    Eterm* hp;
    Eterm Value = c_p->fvalue;
    Eterm Args = am_true;

    ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */

    if (c_p->freason & EXF_RESTORE_NFUNC)
        erts_nfunc_restore_error(c_p, &pc, reg, &bif_mfa);

#ifdef DEBUG
    if (bif_mfa) {
        /* Verify that bif_mfa does not point into our native function wrapper */
        ErtsNativeFunc *nep = ERTS_PROC_GET_NFUNC_TRAP_WRAPPER(c_p);
        ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(ErtsNativeFunc)));
    }
#endif

    c_p->i = pc; /* In case we call erts_exit(). */

    /*
     * Check if we have an arglist for the top level call. If so, this
     * is encoded in Value, so we have to dig out the real Value as well
     * as the Arglist.
     */
    if (c_p->freason & EXF_ARGLIST) {
        Eterm* tp;
        ASSERT(is_tuple(Value));
        tp = tuple_val(Value);
        Value = tp[1];
        Args = tp[2];
    }

    /*
     * Save the stack trace info if the EXF_SAVETRACE flag is set. The
     * main reason for doing this separately is to allow throws to later
     * become promoted to errors without losing the original stack
     * trace, even if they have passed through one or more catch and
     * rethrow. It also makes the creation of symbolic stack traces much
     * more modular.
     */
    if (c_p->freason & EXF_SAVETRACE) {
        save_stacktrace(c_p, pc, reg, bif_mfa, Args);
    }

    /*
     * Throws that are not caught are turned into 'nocatch' errors
     */
    if ((c_p->freason & EXF_THROWN) && (c_p->catches <= 0) ) {
        hp = HAlloc(c_p, 3);
        Value = TUPLE2(hp, am_nocatch, Value);
        c_p->freason = EXC_ERROR;
    }

    /* Get the fully expanded error term */
    Value = expand_error_value(c_p, c_p->freason, Value);

    /* Save final error term and stabilize the exception flags so no
       further expansion is done. */
    c_p->fvalue = Value;
    c_p->freason = PRIMARY_EXCEPTION(c_p->freason);

    /* Find a handler or die */
    if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
        && !(c_p->freason & EXF_PANIC)) {
        BeamInstr *new_pc;
        /* The Beam handler code (catch_end or try_end) checks reg[0]
           for THE_NON_VALUE to see if the previous code finished
           abnormally. If so, reg[1], reg[2] and reg[3] should hold the
           exception class, term and trace, respectively. (If the
           handler is just a trap to native code, these registers will
           be ignored.) */
        reg[0] = THE_NON_VALUE;
        reg[1] = exception_tag[GET_EXC_CLASS(c_p->freason)];
        reg[2] = Value;
        reg[3] = c_p->ftrace;
        if ((new_pc = next_catch(c_p, reg))) {
            c_p->stop[0] = NIL;        /* To avoid keeping stale references. */
            ERTS_RECV_MARK_CLEAR(c_p); /* No longer safe to use this position */
            return new_pc;
        }
        if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found");
    }
    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
    terminate_proc(c_p, Value);
    ERTS_REQ_PROC_MAIN_LOCK(c_p);
    return NULL;
}
/*
 * Find the nearest catch handler
 */
static BeamInstr*
next_catch(Process* c_p, Eterm *reg) {
    int active_catches = c_p->catches > 0;
    int have_return_to_trace = 0;
    Eterm *ptr, *prev, *return_to_trace_ptr = NULL;

    BeamInstr i_return_trace      = beam_return_trace[0];
    BeamInstr i_return_to_trace   = beam_return_to_trace[0];
    BeamInstr i_return_time_trace = beam_return_time_trace[0];

    ptr = prev = c_p->stop;
    ASSERT(ptr <= STACK_START(c_p));

    /* This function is only called if we have active catch tags or have
     * previously called a function that was exception-traced. As the exception
     * trace flag isn't cleared after the traced function returns (and the
     * catch tag inserted by it is gone), it's possible to land here with an
     * empty stack, and the process should simply die when that happens. */
    if (ptr == STACK_START(c_p)) {
        ASSERT(!active_catches && IS_TRACED_FL(c_p, F_EXCEPTION_TRACE));
        return NULL;
    }

    while (ptr < STACK_START(c_p)) {
        if (is_catch(*ptr)) {
            if (active_catches) goto found_catch;
            ptr++;
        }
        else if (is_CP(*ptr)) {
            prev = ptr;
            if (*cp_val(*prev) == i_return_trace) {
                /* Skip stack frame variables */
                while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
                    if (is_catch(*ptr) && active_catches) goto found_catch;
                }
                if (cp_val(*prev) == beam_exception_trace) {
                    ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
                    erts_trace_exception(c_p, mfa,
                                         reg[1], reg[2],
                                         ERTS_TRACER_FROM_ETERM(ptr+1));
                }
                /* Skip return_trace parameters */
                ptr += 2;
            } else if (*cp_val(*prev) == i_return_to_trace) {
                /* Skip stack frame variables */
                while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
                    if (is_catch(*ptr) && active_catches) goto found_catch;
                }
                have_return_to_trace = !0; /* Record next cp */
                return_to_trace_ptr = NULL;
            } else if (*cp_val(*prev) == i_return_time_trace) {
                /* Skip stack frame variables */
                while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
                    if (is_catch(*ptr) && active_catches) goto found_catch;
                }
                /* Skip return_trace parameters */
                ptr += 1;
            } else {
                if (have_return_to_trace) {
                    /* Record this cp as possible return_to trace cp */
                    have_return_to_trace = 0;
                    return_to_trace_ptr = ptr;
                } else return_to_trace_ptr = NULL;
                ptr++;
            }
        } else ptr++;
    }
    return NULL;
 found_catch:
    ASSERT(ptr < STACK_START(c_p));
    c_p->stop = prev;
    if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO) && return_to_trace_ptr) {
        /* The stack frame closest to the catch contained a
         * return_to_trace entry, so since the execution now
         * continues after the catch, a return_to trace message
         * would be appropriate.
         */
        erts_trace_return_to(c_p, cp_val(*return_to_trace_ptr));
    }
    return catch_pc(*ptr);
}
/*
 * Terminating the process when an exception is not caught
 */
static void
terminate_proc(Process* c_p, Eterm Value)
{
    Eterm *hp;
    Eterm Args = NIL;

    /* Add a stacktrace if this is an error. */
    if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
        Value = add_stacktrace(c_p, Value, c_p->ftrace);
    }
    c_p->ftrace = NIL;

    /* EXF_LOG is a primary exception flag */
    if (c_p->freason & EXF_LOG) {
        int alive = erts_is_alive;
        erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();

        /* Build the format message */
        erts_dsprintf(dsbufp, "Error in process ~p ");
        if (alive)
            erts_dsprintf(dsbufp, "on node ~p ");
        erts_dsprintf(dsbufp, "with exit value:~n~p~n");

        /* Build the args in reverse order */
        hp = HAlloc(c_p, 2);
        Args = CONS(hp, Value, Args);
        if (alive) {
            hp = HAlloc(c_p, 2);
            Args = CONS(hp, erts_this_node->sysname, Args);
        }
        hp = HAlloc(c_p, 2);
        Args = CONS(hp, c_p->common.id, Args);

        erts_send_error_term_to_logger(c_p->group_leader, dsbufp, Args);
    }

    /*
     * If we use a shared heap, the process will be garbage-collected.
     * Must zero c_p->arity to indicate that there are no live registers.
     */
    c_p->arity = 0;
    erts_do_exit_process(c_p, Value);
}

/*
 * Build and add a symbolic stack trace to the error value.
 */
static Eterm
add_stacktrace(Process* c_p, Eterm Value, Eterm exc) {
    Eterm Where = build_stacktrace(c_p, exc);
    Eterm* hp = HAlloc(c_p, 3);
    return TUPLE2(hp, Value, Where);
}

/*
 * Forming the correct error value from the internal error code.
 * This does not update c_p->fvalue or c_p->freason.
 */
Eterm
expand_error_value(Process* c_p, Uint freason, Eterm Value) {
    Eterm* hp;
    Uint r;

    r = GET_EXC_INDEX(freason);
    ASSERT(r < NUMBER_EXIT_CODES); /* range check */
    ASSERT(is_value(Value));

    switch (r) {
    case (GET_EXC_INDEX(EXC_PRIMARY)):
        /* Primary exceptions use fvalue as it is */
        break;
    case (GET_EXC_INDEX(EXC_BADMATCH)):
    case (GET_EXC_INDEX(EXC_CASE_CLAUSE)):
    case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
    case (GET_EXC_INDEX(EXC_BADFUN)):
    case (GET_EXC_INDEX(EXC_BADARITY)):
    case (GET_EXC_INDEX(EXC_BADMAP)):
    case (GET_EXC_INDEX(EXC_BADKEY)):
        /* Some common exceptions: value -> {atom, value} */
        ASSERT(is_value(Value));
        hp = HAlloc(c_p, 3);
        Value = TUPLE2(hp, error_atom[r], Value);
        break;
    default:
        /* Other exceptions just use an atom as descriptor */
        Value = error_atom[r];
        break;
    }
#ifdef DEBUG
    ASSERT(Value != am_internal_error);
#endif
    return Value;
}
static void
gather_stacktrace(Process* p, struct StackTrace* s, int depth)
{
    BeamInstr *prev;
    Eterm *ptr;

    if (depth == 0) {
        return;
    }

    prev = s->depth ? s->trace[s->depth - 1] : s->pc;
    ptr = p->stop;

    /*
     * Traverse the stack backwards and add all unique continuation
     * pointers to the buffer, up to the maximum stack trace size.
     *
     * Skip trace stack frames.
     */
    ASSERT(ptr >= STACK_TOP(p) && ptr <= STACK_START(p));

    while (ptr < STACK_START(p) && depth > 0) {
        if (is_CP(*ptr)) {
            BeamInstr *cp = cp_val(*ptr);

            if (cp == beam_exception_trace || cp == beam_return_trace) {
                ptr += 3;
            } else if (cp == beam_return_time_trace) {
                ptr += 2;
            } else if (cp == beam_return_to_trace) {
                ptr += 1;
            } else {
                if (cp != prev) {
                    /* Record non-duplicates only */
                    prev = cp;
                    s->trace[s->depth++] = cp - 1;
                    depth--;
                }
                ptr++;
            }
        } else {
            ptr++;
        }
    }
}
/*
 * Quick-saving the stack trace in an internal form on the heap. Note
 * that c_p->ftrace will point to a cons cell which holds the given args
 * and the saved data (encoded as a bignum).
 *
 * There is an issue with line number information. Line number
 * information is associated with the address *before* an operation
 * that may fail or be stored on the stack. But continuation
 * pointers point after its call instruction, not before. To avoid
 * finding the wrong line number, we'll need to adjust them so that
 * they point at the beginning of the call instruction or inside the
 * call instruction. Since it's impractical to point at the beginning,
 * we'll do the simplest thing and decrement the continuation pointers
 * by one.
 *
 * Here is an example of what can go wrong. Without the adjustment
 * of continuation pointers, the call at line 42 below would seem to
 * be at line 43:
 *
 *   line 42
 *   call ...
 *   line 43
 *   gc_bif ...
 *
 * (It would be much better to put the arglist - when it exists - in the
 * error value instead of in the actual trace; e.g. '{badarg, Args}'
 * instead of using 'badarg' with Args in the trace. The arglist may
 * contain very large values, and right now they will be kept alive as
 * long as the stack trace is live. Preferably, the stack trace should
 * always be small, so that it does not matter if it is long-lived.
 * However, it is probably not possible to ever change the format of
 * error terms.)
 */
static void
save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
                ErtsCodeMFA *bif_mfa, Eterm args) {
    struct StackTrace* s;
    int sz;
    int depth = erts_backtrace_depth; /* max depth (never negative) */

    if (depth > 0) {
        /* There will always be a current function */
        depth--;
    }

    /* Create a container for the exception data */
    sz = (offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth
          + sizeof(Eterm) - 1) / sizeof(Eterm);
    s = (struct StackTrace *) HAlloc(c_p, 1 + sz);
    /* The following fields are inside the bignum */
    s->header = make_pos_bignum_header(sz);
    s->freason = c_p->freason;
    s->depth = 0;

    /*
     * If the failure was in a BIF other than 'error/1', 'error/2',
     * 'exit/1' or 'throw/1', save BIF-MFA and save the argument
     * registers by consing up an arglist.
     */
    if (bif_mfa) {
        if (bif_mfa->module == am_erlang) {
            switch (bif_mfa->function) {
            case am_error:
                if (bif_mfa->arity == 1 || bif_mfa->arity == 2)
                    goto non_bif_stacktrace;
                break;
            case am_exit:
                if (bif_mfa->arity == 1)
                    goto non_bif_stacktrace;
                break;
            case am_throw:
                if (bif_mfa->arity == 1)
                    goto non_bif_stacktrace;
                break;
            default:
                break;
            }
        }
        s->current = bif_mfa;
        /* Save first stack entry */
        ASSERT(pc);
        if (depth > 0) {
            s->trace[s->depth++] = pc;
            depth--;
        }
        s->pc = NULL;
        args = make_arglist(c_p, reg, bif_mfa->arity); /* Overwrite CAR(c_p->ftrace) */
    } else {
    non_bif_stacktrace:
        s->current = c_p->current;
        /*
         * For a function_clause error, the arguments are in the beam
         * registers and c_p->current is set.
         */
        if ( (GET_EXC_INDEX(s->freason)) ==
             (GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
            int a;
            ASSERT(s->current);
            a = s->current->arity;
            args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
            s->pc = NULL; /* Ignore pc */
        } else {
            s->pc = pc;
        }
    }

    /* Package args and stack trace */
    {
        Eterm *hp;
        hp = HAlloc(c_p, 2);
        c_p->ftrace = CONS(hp, args, make_big((Eterm *) s));
    }

    /* Save the actual stack trace */
    gather_stacktrace(c_p, s, depth);
}
void
erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
{
    gather_stacktrace(p, s, depth);
}

/*
 * Getting the relevant fields from the term pointed to by ftrace
 */
static struct StackTrace *get_trace_from_exc(Eterm exc) {
    if (exc == NIL) {
        return NULL;
    } else {
        ASSERT(is_list(exc));
        return (struct StackTrace *) big_val(CDR(list_val(exc)));
    }
}

static Eterm get_args_from_exc(Eterm exc) {
    if (exc == NIL) {
        return NIL;
    } else {
        ASSERT(is_list(exc));
        return CAR(list_val(exc));
    }
}

static int is_raised_exc(Eterm exc) {
    if (exc == NIL) {
        return 0;
    } else {
        ASSERT(is_list(exc));
        return bignum_header_is_neg(*big_val(CDR(list_val(exc))));
    }
}
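/*
 * Illustrative summary of the encoding read by the accessors above:
 *
 *   exc == NIL              no saved trace
 *   exc == [Args | Bignum]  CAR holds the saved argument list (or am_true
 *                           when there is none); CDR is a bignum whose
 *                           digits are the struct StackTrace quick-saved
 *                           by save_stacktrace()
 *
 * A negative bignum header marks an already-raised exception whose Args
 * field holds the complete, symbolic stacktrace (see build_stacktrace()
 * below).
 */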
static Eterm *get_freason_ptr_from_exc(Eterm exc) {
    static Eterm dummy_freason;
    struct StackTrace* s;

    if (exc == NIL) {
        /*
         * It is not exactly clear when exc can be NIL. Probably only
         * when the exception has been generated from native code.
         * Return a pointer to an Eterm that can be safely written and
         * ignored.
         */
        return &dummy_freason;
    } else {
        ASSERT(is_list(exc));
        s = (struct StackTrace *) big_val(CDR(list_val(exc)));
        return &s->freason;
    }
}

/*
 * Creating a list with the argument registers
 */
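/*
 * For example (illustrative): with a == 3 and x registers holding A1,
 * A2 and A3, the loop conses from the last register backwards and
 * returns [A1, A2, A3], so reg[0] ends up first.
 */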
static Eterm
make_arglist(Process* c_p, Eterm* reg, int a) {
    Eterm args = NIL;
    Eterm* hp = HAlloc(c_p, 2*a);
    while (a > 0) {
        args = CONS(hp, reg[a-1], args);
        hp += 2;
        a--;
    }
    return args;
}

/*
 * Building a symbolic representation of a saved stack trace. Note that
 * the exception object 'exc', unless NIL, points to a cons cell which
 * holds the given args and the quick-saved data (encoded as a bignum).
 *
 * If the bignum is negative, the given args is a complete stacktrace.
 */
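/*
 * The result is a list with the innermost frame first, each element
 * being a tuple of the shape {Module, Function, ArityOrArgs, Location}
 * as built by erts_build_mfa_item().
 */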
Eterm
build_stacktrace(Process* c_p, Eterm exc) {
    struct StackTrace* s;
    Eterm args;
    int depth;
    FunctionInfo fi;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Eterm res = NIL;
    Uint heap_size;
    Eterm* hp;
    Eterm mfa;
    int i;

    if (!(s = get_trace_from_exc(exc))) {
        return NIL;
    }
#ifdef HIPE
    if (s->freason & EXF_NATIVE) {
        return hipe_build_stacktrace(c_p, s);
    }
#endif
    if (is_raised_exc(exc)) {
        return get_args_from_exc(exc);
    }

    /*
     * Find the current function. If the saved s->pc is null, then the
     * saved s->current should already contain the proper value.
     */
    if (s->pc != NULL) {
        erts_lookup_function_info(&fi, s->pc, 1);
    } else if (GET_EXC_INDEX(s->freason) ==
               GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
        erts_lookup_function_info(&fi, erts_codemfa_to_code(s->current), 1);
    } else {
        erts_set_current_function(&fi, s->current);
    }
    depth = s->depth;

    /*
     * If fi.mfa is still NULL, and we have no
     * stack at all, default to the initial function
     * (e.g. spawn_link(erlang, abs, [1])).
     */
    if (fi.mfa == NULL) {
        if (depth <= 0)
            erts_set_current_function(&fi, &c_p->u.initial);
        args = am_true;         /* Just in case */
    } else {
        args = get_args_from_exc(exc);
    }

    /*
     * Look up all saved continuation pointers and calculate
     * needed heap space.
     */
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
                                             depth*sizeof(FunctionInfo));
    heap_size = fi.mfa ? fi.needed + 2 : 0;
    for (i = 0; i < depth; i++) {
        erts_lookup_function_info(stkp, s->trace[i], 1);
        if (stkp->mfa) {
            heap_size += stkp->needed + 2;
            stkp++;
        }
    }

    /*
     * Allocate heap space and build the stacktrace.
     */
    hp = HAlloc(c_p, heap_size);
    while (stkp > stk) {
        stkp--;
        hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
        res = CONS(hp, mfa, res);
        hp += 2;
    }
    if (fi.mfa) {
        hp = erts_build_mfa_item(&fi, hp, args, &mfa);
        res = CONS(hp, mfa, res);
    }

    erts_free(ERTS_ALC_T_TMP, (void *) stk);
    return res;
}
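
/*
 * Set up a call to ErrorHandler:Func(Module, Function, Args), where
 * ErrorHandler is the error handler module set for the process (the
 * 'error_handler' module by default) and Args is a list built from the
 * x registers. Returns the address to jump to, or 0 if no error
 * handler module is loaded.
 */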
static BeamInstr*
call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func)
{
    Eterm* hp;
    Export* ep;
    int arity;
    Eterm args;
    Uint sz;
    int i;

    DBG_TRACE_MFA_P(mfa, "call_error_handler");
    /*
     * Search for the error_handler module.
     */
    ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
                            erts_active_code_ix());
    if (ep == NULL) {           /* No error handler */
        p->current = mfa;
        p->freason = EXC_UNDEF;
        return 0;
    }

    /*
     * Create a list with all arguments in the x registers.
     */
    arity = mfa->arity;
    sz = 2 * arity;
    if (HeapWordsLeft(p) < sz) {
        erts_garbage_collect(p, sz, reg, arity);
    }
    hp = HEAP_TOP(p);
    HEAP_TOP(p) += sz;
    args = NIL;
    for (i = arity-1; i >= 0; i--) {
        args = CONS(hp, reg[i], args);
        hp += 2;
    }

    /*
     * Set up registers for call to error_handler:<func>/3.
     */
    reg[0] = mfa->module;
    reg[1] = mfa->function;
    reg[2] = args;
    return ep->addressv[erts_active_code_ix()];
}

static Export*
apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
{
    Export* ep;

    /*
     * Find the export table index for the error handler. Return NULL if
     * there is no error handler module.
     */
    if ((ep = erts_active_export_entry(erts_proc_get_error_handler(p),
                                       am_undefined_function, 3)) == NULL) {
        return NULL;
    } else {
        int i;
        Uint sz = 2*arity;
        Eterm* hp;
        Eterm args = NIL;

        /*
         * Always copy args from registers to a new list; this ensures
         * that we have the same behaviour whether this was called from
         * apply or fixed_apply (any additional last THIS-argument will
         * be included, assuming that arity has been properly adjusted).
         */
        if (HeapWordsLeft(p) < sz) {
            erts_garbage_collect(p, sz, reg, arity);
        }
        hp = HEAP_TOP(p);
        HEAP_TOP(p) += sz;
        for (i = arity-1; i >= 0; i--) {
            args = CONS(hp, reg[i], args);
            hp += 2;
        }
        reg[0] = module;
        reg[1] = function;
        reg[2] = args;
    }
    return ep;
}

static ERTS_INLINE void
apply_bif_error_adjustment(Process *p, Export *ep,
                           Eterm *reg, Uint arity,
                           BeamInstr *I, Uint stack_offset)
{
    int apply_only;
    Uint need;

    need = stack_offset /* bytes */ / sizeof(Eterm);
    apply_only = stack_offset == 0;

    /*
     * I is only set when the apply is a tail call, i.e.,
     * from the instructions i_apply_only, i_apply_last_P,
     * and apply_last_IP.
     */
    if (!(I && (ep->bif_number == BIF_error_1 ||
                ep->bif_number == BIF_error_2 ||
                ep->bif_number == BIF_exit_1 ||
                ep->bif_number == BIF_throw_1))) {
        return;
    }

    /*
     * We are about to tail apply one of the BIFs erlang:error/1,
     * erlang:error/2, erlang:exit/1, or erlang:throw/1. Error handling of
     * these BIFs is special!
     *
     * We need the topmost continuation pointer to point into the calling
     * function when handling the error after the BIF has been applied. This
     * is in order to get the topmost stackframe correct.
     *
     * Note that these BIFs will unconditionally cause an exception to be
     * raised. That is, our modifications of the stack will be corrected by
     * the error handling code.
     */
    if (need == 0) {
        need = 1;               /* i_apply_only */
    }
    if (p->stop - p->htop < need) {
        erts_garbage_collect(p, (int) need, reg, arity+1);
    }
    if (apply_only) {
        /*
         * Called from the i_apply_only instruction.
         *
         * Push the continuation pointer for the current function to the stack.
         */
        p->stop -= need;
        p->stop[0] = make_cp(I);
    } else {
        /*
         * Called from an i_apply_last_* instruction.
         *
         * The calling instruction will deallocate a stack frame of size
         * 'stack_offset'.
         *
         * Push the continuation pointer for the current function to the stack,
         * and then add a dummy stackframe for the i_apply_last* instruction
         * to discard.
         */
        p->stop[0] = make_cp(I);
        p->stop -= need;
    }
}

static BeamInstr*
apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
{
    int arity;
    Export* ep;
    Eterm tmp;
    Eterm module = reg[0];
    Eterm function = reg[1];
    Eterm args = reg[2];

    /*
     * Check the arguments which should be of the form apply(Module,
     * Function, Arguments) where Function is an atom and
     * Arguments is an arity long list of terms.
     */
    if (is_not_atom(function)) {
        /*
         * No need to test args here -- done below.
         */
    error:
        p->freason = BADARG;

    error2:
        reg[0] = module;
        reg[1] = function;
        reg[2] = args;
        return 0;
    }
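
    /*
     * The loop below unfolds nested applications of apply/3. For
     * example (illustrative),
     *     apply(erlang, apply, [lists, reverse, [[1,2,3]]])
     * ends up as a direct call to lists:reverse([1,2,3]).
     */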
    while (1) {
        Eterm m, f, a;

        if (is_not_atom(module)) goto error;

        if (module != am_erlang || function != am_apply)
            break;

        /* Adjust for multiple apply of apply/3... */
        a = args;
        if (is_list(a)) {
            Eterm *consp = list_val(a);
            m = CAR(consp);
            a = CDR(consp);
            if (is_list(a)) {
                consp = list_val(a);
                f = CAR(consp);
                a = CDR(consp);
                if (is_list(a)) {
                    consp = list_val(a);
                    a = CAR(consp);
                    if (is_nil(CDR(consp))) {
                        /* erlang:apply/3 */
                        module = m;
                        function = f;
                        args = a;
                        if (is_not_atom(f))
                            goto error;
                        continue;
                    }
                }
            }
        }
        break;                  /* != erlang:apply/3 */
    }

    /*
     * Walk down the 3rd parameter of apply (the argument list) and copy
     * the parameters to the x registers (reg[]).
     */
    tmp = args;
    arity = 0;
    while (is_list(tmp)) {
        if (arity < (MAX_REG - 1)) {
            reg[arity++] = CAR(list_val(tmp));
            tmp = CDR(list_val(tmp));
        } else {
            p->freason = SYSTEM_LIMIT;
            goto error2;
        }
    }
    if (is_not_nil(tmp)) {      /* Must be well-formed list */
        goto error;
    }

    /*
     * Get the index into the export table, or failing that the export
     * entry for the error handler.
     *
     * Note: All BIFs have export entries; thus, no special case is needed.
     */
    if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
        if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
            goto error;
    } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
        save_calls(p, ep);
    }
    apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
    DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
    return ep->addressv[erts_active_code_ix()];
}

static BeamInstr*
fixed_apply(Process* p, Eterm* reg, Uint arity,
            BeamInstr *I, Uint stack_offset)
{
    Export* ep;
    Eterm module;
    Eterm function;

    module = reg[arity];        /* The THIS pointer already in place */
    function = reg[arity+1];

    if (is_not_atom(function)) {
        Eterm bad_args;
    error:
        bad_args = make_arglist(p, reg, arity);

        p->freason = BADARG;
        reg[0] = module;
        reg[1] = function;
        reg[2] = bad_args;

        return 0;
    }

    if (is_not_atom(module)) goto error;

    /* Handle apply of apply/3... */
    if (module == am_erlang && function == am_apply && arity == 3) {
        return apply(p, reg, I, stack_offset);
    }

    /*
     * Get the index into the export table, or failing that the export
     * entry for the error handler module.
     *
     * Note: All BIFs have export entries; thus, no special case is needed.
     */
    if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
        if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
            goto error;
    } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
        save_calls(p, ep);
    }
    apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
    DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
    return ep->addressv[erts_active_code_ix()];
}
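
/*
 * Implements the scheduling part of erlang:hibernate/3: validates the
 * MFA, discards the stack (keeping only the outermost continuation
 * pointer), shrinks the heap when no message is waiting, and arranges
 * for the process to resume at apply(Module, Function, Args) when it
 * is woken up.
 */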
int
erts_hibernate(Process* c_p, Eterm* reg)
{
    int arity;
    Eterm tmp;
    Eterm module = reg[0];
    Eterm function = reg[1];
    Eterm args = reg[2];

    if (is_not_atom(module) || is_not_atom(function)) {
        /*
         * No need to test args here -- done below.
         */
    error:
        c_p->freason = BADARG;

    error2:
        reg[0] = module;
        reg[1] = function;
        reg[2] = args;
        return 0;
    }

    arity = 0;
    tmp = args;
    while (is_list(tmp)) {
        if (arity < MAX_REG) {
            tmp = CDR(list_val(tmp));
            arity++;
        } else {
            c_p->freason = SYSTEM_LIMIT;
            goto error2;
        }
    }
    if (is_not_nil(tmp)) {      /* Must be well-formed list */
        goto error;
    }

    /*
     * At this point, arguments are known to be good.
     */
    if (c_p->arg_reg != c_p->def_arg_reg) {
        /* Save some memory */
        erts_free(ERTS_ALC_T_ARG_REG, c_p->arg_reg);
        c_p->arg_reg = c_p->def_arg_reg;
        c_p->max_arg_reg = sizeof(c_p->def_arg_reg)/sizeof(c_p->def_arg_reg[0]);
    }

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(process_hibernate)) {
        ErtsCodeMFA cmfa = { module, function, arity};
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
        dtrace_fun_decode(c_p, &cmfa, process_name, mfa_buf);
        DTRACE2(process_hibernate, process_name, mfa_buf);
    }
#endif
    /*
     * Arrange for the process to be resumed at the given MFA with
     * the stack cleared.
     */
    c_p->arity = 3;
    c_p->arg_reg[0] = module;
    c_p->arg_reg[1] = function;
    c_p->arg_reg[2] = args;
    c_p->stop = c_p->hend - 1;  /* Keep first continuation pointer */
    ASSERT(c_p->stop[0] == make_cp(beam_apply+1));
    c_p->catches = 0;
    c_p->i = beam_apply;

    /*
     * If there are no waiting messages, garbage collect and
     * shrink the heap.
     */
    erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    if (!erts_proc_sig_fetch(c_p)) {
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
        c_p->fvalue = NIL;
        PROCESS_MAIN_CHK_LOCKS(c_p);
        erts_garbage_collect_hibernate(c_p);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
        PROCESS_MAIN_CHK_LOCKS(c_p);
        erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
        if (!erts_proc_sig_fetch(c_p))
            erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
        ASSERT(!ERTS_PROC_IS_EXITING(c_p));
    }
    erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    c_p->current = &bif_trap_export[BIF_hibernate_3].info.mfa;
    c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */
    return 1;
}
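
/*
 * Resolve a call through a fun term. For a local fun with matching
 * arity, the free variables are copied into the x registers after the
 * arguments (the fun itself is placed in the register following them)
 * and the address of the fun's code is returned. Export funs are
 * dispatched through their export entry. On a bad fun, wrong arity, or
 * an unloaded module, NULL is returned with p->freason set, or control
 * is redirected to the error handler.
 */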
static BeamInstr*
call_fun(Process* p,            /* Current process. */
         int arity,             /* Number of arguments for Fun. */
         Eterm* reg,            /* Contents of registers. */
         Eterm args)            /* THE_NON_VALUE or pre-built list of arguments. */
{
    Eterm fun = reg[arity];
    Eterm hdr;
    int i;
    Eterm* hp;

    if (!is_boxed(fun)) {
        goto badfun;
    }
    hdr = *boxed_val(fun);

    if (is_fun_header(hdr)) {
        ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
        ErlFunEntry* fe = funp->fe;
        BeamInstr* code_ptr = fe->address;
        Eterm* var_ptr;
        unsigned num_free = funp->num_free;
        ErtsCodeMFA *mfa = erts_code_to_codemfa(code_ptr);
        int actual_arity = mfa->arity;

        if (actual_arity == arity+num_free) {
            DTRACE_LOCAL_CALL(p, mfa);
            if (num_free == 0) {
                return code_ptr;
            } else {
                var_ptr = funp->env;
                reg += arity;
                i = 0;
                do {
                    reg[i] = var_ptr[i];
                    i++;
                } while (i < num_free);
                reg[i] = fun;
                return code_ptr;
            }
        } else {
            /*
             * Something wrong here. First build a list of the arguments.
             */
            if (is_non_value(args)) {
                Uint sz = 2 * arity;
                args = NIL;
                if (HeapWordsLeft(p) < sz) {
                    erts_garbage_collect(p, sz, reg, arity+1);
                    fun = reg[arity];
                }
                hp = HEAP_TOP(p);
                HEAP_TOP(p) += sz;
                for (i = arity-1; i >= 0; i--) {
                    args = CONS(hp, reg[i], args);
                    hp += 2;
                }
            }

            if (actual_arity >= 0) {
                /*
                 * There is a fun defined, but the call has the wrong arity.
                 */
                hp = HAlloc(p, 3);
                p->freason = EXC_BADARITY;
                p->fvalue = TUPLE2(hp, fun, args);
                return NULL;
            } else {
                Export* ep;
                Module* modp;
                Eterm module;
                ErtsCodeIndex code_ix = erts_active_code_ix();

                /*
                 * No arity. There is no module loaded that defines the fun,
                 * either because the fun is newly created from the external
                 * representation (the module has never been loaded),
                 * or the module defining the fun has been unloaded.
                 */
                module = fe->module;

                ERTS_THR_READ_MEMORY_BARRIER;
                if (fe->pend_purge_address) {
                    /*
                     * The system is currently trying to purge the
                     * module containing this fun. Suspend the process
                     * and let it try again when the purge operation is
                     * done (may succeed or not).
                     */
                    ep = erts_suspend_process_on_pending_purge_lambda(p, fe);
                    ASSERT(ep);
                }
                else {
                    if ((modp = erts_get_module(module, code_ix)) != NULL
                        && modp->curr.code_hdr != NULL) {
                        /*
                         * There is a module loaded, but obviously the fun is not
                         * defined in it. We must not call the error_handler
                         * (or we will get into an infinite loop).
                         */
                        goto badfun;
                    }

                    /*
                     * No current code for this module. Call the error_handler
                     * module to attempt loading the module.
                     */
                    ep = erts_find_function(erts_proc_get_error_handler(p),
                                            am_undefined_lambda, 3, code_ix);
                    if (ep == NULL) {   /* No error handler */
                        p->current = NULL;
                        p->freason = EXC_UNDEF;
                        return NULL;
                    }
                }
                reg[0] = module;
                reg[1] = fun;
                reg[2] = args;
                reg[3] = NIL;
                return ep->addressv[code_ix];
            }
        }
    } else if (is_export_header(hdr)) {
        Export *ep;
        int actual_arity;

        ep = *((Export **) (export_val(fun) + 1));
        actual_arity = ep->info.mfa.arity;

        if (arity == actual_arity) {
            DTRACE_GLOBAL_CALL(p, &ep->info.mfa);
            return ep->addressv[erts_active_code_ix()];
        } else {
            /*
             * Wrong arity. First build a list of the arguments.
             */
            if (is_non_value(args)) {
                args = NIL;
                hp = HAlloc(p, arity*2);
                for (i = arity-1; i >= 0; i--) {
                    args = CONS(hp, reg[i], args);
                    hp += 2;
                }
            }
            hp = HAlloc(p, 3);
            p->freason = EXC_BADARITY;
            p->fvalue = TUPLE2(hp, fun, args);
            return NULL;
        }
    } else {
    badfun:
        p->current = NULL;
        p->freason = EXC_BADFUN;
        p->fvalue = fun;
        return NULL;
    }
}

static BeamInstr*
apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
{
    int arity;
    Eterm tmp;

    /*
     * Walk down the 3rd parameter of apply (the argument list) and copy
     * the parameters to the x registers (reg[]).
     */
    tmp = args;
    arity = 0;
    while (is_list(tmp)) {
        if (arity < MAX_REG-1) {
            reg[arity++] = CAR(list_val(tmp));
            tmp = CDR(list_val(tmp));
        } else {
            p->freason = SYSTEM_LIMIT;
            return NULL;
        }
    }

    if (is_not_nil(tmp)) {      /* Must be well-formed list */
        p->freason = EXC_BADARG;
        return NULL;
    }
    reg[arity] = fun;
    return call_fun(p, arity, reg, args);
}
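
/*
 * Build a fun object (an ErlFunThing) on the process heap for the
 * given fun entry, capturing num_free free variables from the x
 * registers into the fun's environment, linking the object into the
 * off-heap list, and bumping the fun entry's reference count.
 */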
static Eterm
new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
{
    unsigned needed = ERL_FUN_SIZE + num_free;
    ErlFunThing* funp;
    Eterm* hp;
    int i;

    if (HEAP_LIMIT(p) - HEAP_TOP(p) <= needed) {
        PROCESS_MAIN_CHK_LOCKS(p);
        erts_garbage_collect(p, needed, reg, num_free);
        ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
        PROCESS_MAIN_CHK_LOCKS(p);
    }
    hp = p->htop;
    p->htop = hp + needed;
    funp = (ErlFunThing *) hp;
    hp = funp->env;
    erts_refc_inc(&fe->refc, 2);
    funp->thing_word = HEADER_FUN;
    funp->next = MSO(p).first;
    MSO(p).first = (struct erl_off_heap_header*) funp;
    funp->fe = fe;
    funp->num_free = num_free;
    funp->creator = p->common.id;
    funp->arity = (int)fe->address[-1] - num_free;
    for (i = 0; i < num_free; i++) {
        *hp++ = reg[i];
    }
    return make_fun(funp);
}
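
/*
 * Backs the is_function/2 test: a local fun matches if its callable
 * arity (the code arity minus the number of free variables) equals the
 * given arity; an export fun matches on the arity of its MFA.
 */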
static int
is_function2(Eterm Term, Uint arity)
{
    if (is_fun(Term)) {
        ErlFunThing* funp = (ErlFunThing *) fun_val(Term);
        return funp->arity == arity;
    } else if (is_export(Term)) {
        Export* exp = (Export *) (export_val(Term)[1]);
        return exp->info.mfa.arity == arity;
    }
    return 0;
}
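
/*
 * Look up a key in a map in either representation: a linear scan over
 * the key tuple for a flatmap (word equality for immediate keys, full
 * term equality otherwise), or a hash lookup for a hashmap. Returns
 * THE_NON_VALUE when the key is not present.
 */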
static Eterm get_map_element(Eterm map, Eterm key)
{
    Uint32 hx;
    const Eterm *vs;

    if (is_flatmap(map)) {
        flatmap_t *mp;
        Eterm *ks;
        Uint i;
        Uint n;

        mp = (flatmap_t *)flatmap_val(map);
        ks = flatmap_get_keys(mp);
        vs = flatmap_get_values(mp);
        n = flatmap_get_size(mp);
        if (is_immed(key)) {
            for (i = 0; i < n; i++) {
                if (ks[i] == key) {
                    return vs[i];
                }
            }
        } else {
            for (i = 0; i < n; i++) {
                if (EQ(ks[i], key)) {
                    return vs[i];
                }
            }
        }
        return THE_NON_VALUE;
    }
    ASSERT(is_hashmap(map));
    hx = hashmap_make_hash(key);
    vs = erts_hashmap_get(hx, key, map);
    return vs ? *vs : THE_NON_VALUE;
}

static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx)
{
    const Eterm *vs;

    if (is_flatmap(map)) {
        flatmap_t *mp;
        Eterm *ks;
        Uint i;
        Uint n;

        mp = (flatmap_t *)flatmap_val(map);
        ks = flatmap_get_keys(mp);
        vs = flatmap_get_values(mp);
        n = flatmap_get_size(mp);
        if (is_immed(key)) {
            for (i = 0; i < n; i++) {
                if (ks[i] == key) {
                    return vs[i];
                }
            }
        } else {
            for (i = 0; i < n; i++) {
                if (EQ(ks[i], key)) {
                    return vs[i];
                }
            }
        }
        return THE_NON_VALUE;
    }

    ASSERT(is_hashmap(map));
    ASSERT(hx == hashmap_make_hash(key));
    vs = erts_hashmap_get(hx, key, map);
    return vs ? *vs : THE_NON_VALUE;
}
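
/*
 * Fetch an instruction operand, which the loader may have tagged as an
 * x register, a y register, or left as a literal term.
 */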
#define GET_TERM(term, dest)                    \
do {                                            \
    Eterm src = (Eterm)(term);                  \
    switch (loader_tag(src)) {                  \
    case LOADER_X_REG:                          \
        dest = x(loader_x_reg_index(src));      \
        break;                                  \
    case LOADER_Y_REG:                          \
        dest = y(loader_y_reg_index(src));      \
        break;                                  \
    default:                                    \
        dest = src;                             \
        break;                                  \
    }                                           \
} while(0)
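
/*
 * Build a fresh map from n operands holding n/2 key-value pairs. Large
 * maps are handed to the hashmap builder; small ones are laid out as a
 * key tuple immediately followed by a flatmap whose value array
 * parallels the keys.
 */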
static Eterm
erts_gc_new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr)
{
    Uint i;
    Uint need = n + 1 /* hdr */ + 1 /* size */ + 1 /* ptr */ + 1 /* arity */;
    Eterm keys;
    Eterm *mhp,*thp;
    Eterm *E;
    flatmap_t *mp;
    ErtsHeapFactory factory;

    if (n > 2*MAP_SMALL_MAP_LIMIT) {
        Eterm res;
        if (HeapWordsLeft(p) < n) {
            erts_garbage_collect(p, n, reg, live);
        }

        mhp = p->htop;
        thp = p->htop;
        E = p->stop;

        for (i = 0; i < n/2; i++) {
            GET_TERM(*ptr++, *mhp++);
            GET_TERM(*ptr++, *mhp++);
        }

        p->htop = mhp;

        erts_factory_proc_init(&factory, p);
        res = erts_hashmap_from_array(&factory, thp, n/2, 0);
        erts_factory_close(&factory);
        return res;
    }

    if (HeapWordsLeft(p) < need) {
        erts_garbage_collect(p, need, reg, live);
    }

    thp = p->htop;
    mhp = thp + 1 + n/2;
    E = p->stop;
    keys = make_tuple(thp);
    *thp++ = make_arityval(n/2);

    mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = n/2;
    mp->keys = keys;

    for (i = 0; i < n/2; i++) {
        GET_TERM(*ptr++, *thp++);
        GET_TERM(*ptr++, *mhp++);
    }
    p->htop = mhp;
    return make_flatmap(mp);
}

static Eterm
erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
                          Uint live, BeamInstr* ptr)
{
    Eterm* keys = tuple_val(keys_literal);
    Uint n = arityval(*keys);
    Uint need = n + 1 /* hdr */ + 1 /* size */ + 1 /* ptr */ + 1 /* arity */;
    Uint i;
    flatmap_t *mp;
    Eterm *mhp;
    Eterm *E;

    ASSERT(n <= MAP_SMALL_MAP_LIMIT);

    if (HeapWordsLeft(p) < need) {
        erts_garbage_collect(p, need, reg, live);
    }

    mhp = p->htop;
    E = p->stop;

    mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = n;
    mp->keys = keys_literal;
    for (i = 0; i < n; i++) {
        GET_TERM(*ptr++, *mhp++);
    }

    p->htop = mhp;
    return make_flatmap(mp);
}

static Eterm
erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
                         Uint n, BeamInstr* new_p)
{
    Uint num_old;
    Uint num_updates;
    Uint need;
    flatmap_t *old_mp, *mp;
    Eterm res;
    Eterm* hp;
    Eterm* E;
    Eterm* old_keys;
    Eterm* old_vals;
    Eterm new_key;
    Eterm* kp;
    Eterm map;

    num_updates = n / 2;
    map = reg[live];

    if (is_not_flatmap(map)) {
        Uint32 hx;
        Eterm val;

        ASSERT(is_hashmap(map));
        res = map;
        E = p->stop;
        while(num_updates--) {
            /* assoc can't fail */
            GET_TERM(new_p[0], new_key);
            GET_TERM(new_p[1], val);
            hx = hashmap_make_hash(new_key);

            res = erts_hashmap_insert(p, hx, new_key, val, res, 0);

            new_p += 2;
        }
        return res;
    }

    old_mp = (flatmap_t *) flatmap_val(map);
    num_old = flatmap_get_size(old_mp);

    /*
     * If the old map is empty, create a new map.
     */
    if (num_old == 0) {
        return erts_gc_new_map(p, reg, live, n, new_p);
    }

    /*
     * Allocate heap space for the worst case (i.e. all keys in the
     * update list are new).
     */
    need = 2*(num_old+num_updates) + 1 + MAP_HEADER_FLATMAP_SZ;
    if (HeapWordsLeft(p) < need) {
        erts_garbage_collect(p, need, reg, live+1);
        map = reg[live];
        old_mp = (flatmap_t *)flatmap_val(map);
    }

    /*
     * Build the skeleton for the map, ready to be filled in.
     *
     * +-----------------------------------+
     * | (Space for arityval for keys)     | <-----------+
     * +-----------------------------------+             |
     * | (Space for key 1)                 |             |  <-- kp
     * +-----------------------------------+             |
     *         .                                         |
     *         .                                         |
     *         .                                         |
     * +-----------------------------------+             |
     * | (Space for last key)              |             |
     * +-----------------------------------+             |
     * | MAP_HEADER                        |             |
     * +-----------------------------------+             |
     * | (Space for number of keys/values) |             |
     * +-----------------------------------+             |
     * | Boxed tuple pointer               | >-----------+
     * +-----------------------------------+
     * | (Space for value 1)               |  <-- hp
     * +-----------------------------------+
     */
    E = p->stop;
    kp = p->htop + 1;           /* Point to first key */
    hp = kp + num_old + num_updates;

    res = make_flatmap(hp);
    mp = (flatmap_t *)hp;
    hp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->keys = make_tuple(kp-1);

    old_vals = flatmap_get_values(old_mp);
    old_keys = flatmap_get_keys(old_mp);

    GET_TERM(*new_p, new_key);
    n = num_updates;

    /*
     * Fill in keys and values, until we run out of either updates
     * or old values and keys.
     */
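    /*
     * This is a sorted merge: the old key tuple is kept sorted in term
     * order, and the update list is assumed to arrive sorted the same
     * way. Illustrative example: merging old keys {a, c} with updates
     * [{b, V1}, {c, V2}] copies a, inserts b, and replaces the value
     * for c.
     */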
    for (;;) {
        Eterm key;
        Sint c;

        ASSERT(kp < (Eterm *)mp);
        key = *old_keys;
        if ((c = CMP_TERM(key, new_key)) < 0) {
            /* Copy old key and value */
            *kp++ = key;
            *hp++ = *old_vals;
            old_keys++, old_vals++, num_old--;
        } else {                /* Replace or insert new */
            GET_TERM(new_p[1], *hp++);
            if (c > 0) {        /* Insert new key */
                *kp++ = new_key;
            } else {            /* Replace existing key */
                *kp++ = key;
                old_keys++, old_vals++, num_old--;
            }
            n--;
            if (n == 0) {
                break;
            } else {
                new_p += 2;
                GET_TERM(*new_p, new_key);
            }
        }
        if (num_old == 0) {
            break;
        }
    }

    /*
     * At this point, we have run out of either old keys and values,
     * or the update list. In other words, at least one of n and
     * num_old must be zero.
     */
    if (n > 0) {
        /*
         * All old keys and values have been copied, but there
         * are still new keys and values in the update list that
         * must be copied.
         */
        ASSERT(num_old == 0);
        while (n-- > 0) {
            GET_TERM(new_p[0], *kp++);
            GET_TERM(new_p[1], *hp++);
            new_p += 2;
        }
    } else {
        /*
         * All updates are now done. We may still have old
         * keys and values that we must copy.
         */
        ASSERT(n == 0);
        while (num_old-- > 0) {
            ASSERT(kp < (Eterm *)mp);
            *kp++ = *old_keys++;
            *hp++ = *old_vals++;
        }
    }

    /*
     * Calculate how many words are unused at the end of the key tuple
     * and fill them out with a bignum header.
     */
    if ((n = (Eterm *)mp - kp) > 0) {
        *kp = make_pos_bignum_header(n-1);
    }

    /*
     * Fill in the size of the map in both the key tuple and in the map.
     */
    n = kp - p->htop - 1;       /* Actual number of keys/values */
    *p->htop = make_arityval(n);
    p->htop = hp;
    mp->size = n;

    /* The expensive case, need to build a hashmap */
    if (n > MAP_SMALL_MAP_LIMIT) {
        ErtsHeapFactory factory;
        erts_factory_proc_init(&factory, p);
        res = erts_hashmap_from_ks_and_vs(&factory, flatmap_get_keys(mp),
                                          flatmap_get_values(mp), n);
        erts_factory_close(&factory);
    }
    return res;
}

/*
 * Update values for keys that already exist in the map.
 */
static Eterm
erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p)
{
    Uint i;
    Uint num_old;
    Uint need;
    flatmap_t *old_mp, *mp;
    Eterm res;
    Eterm* old_hp;
    Eterm* hp;
    Eterm* E;
    Eterm* old_keys;
    Eterm* old_vals;
    Eterm new_key;
    Eterm map;
    int changed = 0;

    n /= 2;                     /* Number of values to be updated */
    ASSERT(n > 0);
    map = reg[live];

    if (is_not_flatmap(map)) {
        Uint32 hx;
        Eterm val;

        /* apparently the compiler does not emit is_map instructions,
         * bad compiler */
        if (is_not_hashmap(map)) {
            p->freason = BADMAP;
            p->fvalue = map;
            return THE_NON_VALUE;
        }

        res = map;
        E = p->stop;
        while(n--) {
            GET_TERM(new_p[0], new_key);
            GET_TERM(new_p[1], val);
            hx = hashmap_make_hash(new_key);

            res = erts_hashmap_insert(p, hx, new_key, val, res, 1);
            if (is_non_value(res)) {
                p->fvalue = new_key;
                p->freason = BADKEY;
                return res;
            }

            new_p += 2;
        }
        return res;
    }

    old_mp = (flatmap_t *) flatmap_val(map);
    num_old = flatmap_get_size(old_mp);

    /*
     * If the old map is empty, fail.
     */
    if (num_old == 0) {
        E = p->stop;
        p->freason = BADKEY;
        GET_TERM(new_p[0], p->fvalue);
        return THE_NON_VALUE;
    }

    /*
     * Allocate the exact heap space needed.
     */
    need = num_old + MAP_HEADER_FLATMAP_SZ;
    if (HeapWordsLeft(p) < need) {
        erts_garbage_collect(p, need, reg, live+1);
        map = reg[live];
        old_mp = (flatmap_t *)flatmap_val(map);
    }

    /*
     * Update map, keeping the old key tuple.
     */
    old_hp = p->htop;
    hp = p->htop;
    E = p->stop;

    old_vals = flatmap_get_values(old_mp);
    old_keys = flatmap_get_keys(old_mp);

    res = make_flatmap(hp);
    mp = (flatmap_t *)hp;
    hp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = num_old;
    mp->keys = old_mp->keys;

    /* Get array of key/value pairs to be updated */
    GET_TERM(*new_p, new_key);

    /* Update all values */
    for (i = 0; i < num_old; i++) {
        if (!EQ(*old_keys, new_key)) {
            /* Not same keys */
            *hp++ = *old_vals;
        } else {
            GET_TERM(new_p[1], *hp);
            if (*hp != *old_vals) changed = 1;
            hp++;
            n--;
            if (n == 0) {
                /*
                 * All updates done. Copy remaining values
                 * if any changed or return the original one.
                 */
                if (changed) {
                    for (i++, old_vals++; i < num_old; i++) {
                        *hp++ = *old_vals++;
                    }
                    ASSERT(hp == p->htop + need);
                    p->htop = hp;
                    return res;
                } else {
                    p->htop = old_hp;
                    return map;
                }
            } else {
                new_p += 2;
                GET_TERM(*new_p, new_key);
            }
        }
        old_vals++, old_keys++;
    }

    /*
     * Updates left. That means that at least one of the keys in the
     * update list did not previously exist.
     */
    ASSERT(hp == p->htop + need);
    p->freason = BADKEY;
    p->fvalue = new_key;
    return THE_NON_VALUE;
}

#undef GET_TERM

int catchlevel(Process *p)
{
    return p->catches;
}

/*
 * Check if the given function is built-in (i.e. a BIF implemented in C).
 *
 * Returns 0 if not built-in, and a non-zero value if built-in.
 */
int
erts_is_builtin(Eterm Mod, Eterm Name, int arity)
{
    Export e;
    Export* ep;

    if (Mod == am_erlang) {
        /*
         * Special case for built-in functions that are implemented
         * as instructions as opposed to SNIFs.
         */
        if (Name == am_apply && (arity == 2 || arity == 3)) {
            return 1;
        } else if (Name == am_yield && arity == 0) {
            return 1;
        }
    }

    e.info.mfa.module = Mod;
    e.info.mfa.function = Name;
    e.info.mfa.arity = arity;

    if ((ep = export_get(&e)) == NULL) {
        return 0;
    }
    return ep->bif_number != -1;
}

/*
 * Return the current number of reductions consumed by the given process.
 * To get the total number of reductions, p->reds must be added.
 */
Uint
erts_current_reductions(Process *c_p, Process *p)
{
    Sint reds_left;
    if (c_p != p || !(erts_atomic32_read_nob(&c_p->state)
                      & ERTS_PSFLG_RUNNING)) {
        return 0;
    } else if (c_p->fcalls < 0 && ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
        reds_left = c_p->fcalls + CONTEXT_REDS;
    } else {
        reds_left = c_p->fcalls;
    }
    return REDS_IN(c_p) - reds_left - erts_proc_sched_data(p)->virtual_reds;
}

int
erts_beam_jump_table(void)
{
#if defined(NO_JUMP_TABLE)
    return 0;
#else
    return 1;
#endif
}