
/erts/emulator/beam/beam_emu.c

http://github.com/erlang/otp
C | 3152 lines | 2114 code | 388 blank | 650 comment | 436 complexity | de4d55a8f5f824f8ab6ea3661394b156 MD5

Large files are truncated, so only the beginning of this file is shown below.

   1/*
   2 * %CopyrightBegin%
   3 *
   4 * Copyright Ericsson AB 1996-2020. All Rights Reserved.
   5 *
   6 * Licensed under the Apache License, Version 2.0 (the "License");
   7 * you may not use this file except in compliance with the License.
   8 * You may obtain a copy of the License at
   9 *
  10 *     http://www.apache.org/licenses/LICENSE-2.0
  11 *
  12 * Unless required by applicable law or agreed to in writing, software
  13 * distributed under the License is distributed on an "AS IS" BASIS,
  14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15 * See the License for the specific language governing permissions and
  16 * limitations under the License.
  17 *
  18 * %CopyrightEnd%
  19 */
  20
  21#ifdef HAVE_CONFIG_H
  22#  include "config.h"
  23#endif
  24
  25#include <stddef.h> /* offsetof() */
  26#include "sys.h"
  27#include "erl_vm.h"
  28#include "global.h"
  29#include "erl_process.h"
  30#include "error.h"
  31#include "bif.h"
  32#include "big.h"
  33#include "beam_load.h"
  34#include "erl_binary.h"
  35#include "erl_map.h"
  36#include "erl_bits.h"
  37#include "dist.h"
  38#include "beam_bp.h"
  39#include "beam_catches.h"
  40#include "erl_thr_progress.h"
  41#include "erl_nfunc_sched.h"
  42#ifdef HIPE
  43#include "hipe_mode_switch.h"
  44#include "hipe_bif1.h"
  45#endif
  46#include "dtrace-wrapper.h"
  47#include "erl_proc_sig_queue.h"
  48
  49/* #define HARDDEBUG 1 */
  50
  51#if defined(NO_JUMP_TABLE)
  52#  define OpCase(OpCode)    case op_##OpCode
  53#  define CountCase(OpCode) case op_count_##OpCode
  54#  define IsOpCode(InstrWord, OpCode)  (BeamCodeAddr(InstrWord) == (BeamInstr)op_##OpCode)
  55#  define Goto(Rel)         {Go = BeamCodeAddr(Rel); goto emulator_loop;}
  56#  define GotoPF(Rel)       Goto(Rel)
  57#else
  58#  define OpCase(OpCode)    lb_##OpCode
  59#  define CountCase(OpCode) lb_count_##OpCode
  60#  define IsOpCode(InstrWord, OpCode)  (BeamCodeAddr(InstrWord) == (BeamInstr)&&lb_##OpCode)
  61#  define Goto(Rel)         goto *((void *)BeamCodeAddr(Rel))
  62#  define GotoPF(Rel)       goto *((void *)Rel)
  63#  define LabelAddr(Label)  &&Label
  64#endif
  65
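/*
 * Illustrative sketch (not part of beam_emu.c): the macros above select
 * between a plain switch and GCC's "labels as values" extension for
 * instruction dispatch. The tiny standalone interpreter below shows the
 * threaded-dispatch pattern that OpCase()/Goto()/LabelAddr() expand to in
 * the jump-table case; all names and opcode values here are invented for
 * the example.
 */
#include <stdio.h>

static long run_demo(const int *prog)
{
    /* One label per instruction; the table plays the role of beam_ops. */
    static void *ops[] = { &&op_halt, &&op_inc, &&op_dec };
    const int *ip = prog;
    long acc = 0;

    goto *ops[*ip];                  /* initial dispatch, like Goto(next) */

op_inc:  acc++; goto *ops[*++ip];    /* each handler dispatches the next op */
op_dec:  acc--; goto *ops[*++ip];
op_halt: return acc;
}

int main(void)
{
    int prog[] = { 1, 1, 2, 0 };     /* inc, inc, dec, halt */
    printf("%ld\n", run_demo(prog)); /* prints 1 */
    return 0;
}
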
  66#ifdef ERTS_ENABLE_LOCK_CHECK
  67#    define PROCESS_MAIN_CHK_LOCKS(P)                   \
  68do {                                                    \
  69    if ((P))                                            \
  70	erts_proc_lc_chk_only_proc_main((P));           \
  71    ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());   \
  72} while (0)
  73#    define ERTS_REQ_PROC_MAIN_LOCK(P)				\
  74do {                                                            \
  75    if ((P))                                                    \
  76	erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,     \
  77				  __FILE__, __LINE__);          \
  78} while (0)
  79#    define ERTS_UNREQ_PROC_MAIN_LOCK(P)				\
  80do {									\
  81    if ((P))								\
  82	erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN);		\
  83} while (0)
  84#else
  85#  define PROCESS_MAIN_CHK_LOCKS(P)
  86#  define ERTS_REQ_PROC_MAIN_LOCK(P)
  87#  define ERTS_UNREQ_PROC_MAIN_LOCK(P)
  88#endif
  89
  90/*
  91 * Define macros for deep checking of terms.
  92 */
  93
  94#if defined(HARDDEBUG)
  95
  96#  define CHECK_TERM(T) size_object(T)
  97
  98#  define CHECK_ARGS(PC)                 \
  99do {                                     \
 100  int i_;                                \
 101  int Arity_ = PC[-1];                   \
 102  for (i_ = 0; i_ < Arity_; i_++) {      \
 103	CHECK_TERM(x(i_));               \
 104  }                                      \
 105} while (0)
 106    
 107#else
 108#  define CHECK_TERM(T) ASSERT(!is_CP(T))
 109#  define CHECK_ARGS(T)
 110#endif
 111
 112#define GET_EXPORT_MODULE(p)  ((p)->info.mfa.module)
 113#define GET_EXPORT_FUNCTION(p)  ((p)->info.mfa.function)
 114#define GET_EXPORT_ARITY(p)  ((p)->info.mfa.arity)
 115
 116/*
  117 * We reuse some of the fields in the save area of the process structure.
 118 * This is safe to do, since this space is only actively used when
 119 * the process is switched out.
 120 */
 121#define REDS_IN(p)  ((p)->def_arg_reg[5])
 122
 123/*
 124 * Add a byte offset to a pointer to Eterm.  This is useful when the
  125 * loader has precalculated a byte offset.
 126 */
 127#define ADD_BYTE_OFFSET(ptr, offset) \
 128   ((Eterm *) (((unsigned char *)ptr) + (offset)))
 129
 130/* We don't check the range if an ordinary switch is used */
 131#ifdef NO_JUMP_TABLE
 132#  define VALID_INSTR(IP) (BeamCodeAddr(IP) < (NUMBER_OF_OPCODES*2+10))
 133#else
 134#  define VALID_INSTR(IP) \
 135    ((BeamInstr)LabelAddr(emulator_loop) <= BeamCodeAddr(IP) && \
 136     BeamCodeAddr(IP) < (BeamInstr)LabelAddr(end_emulator_loop))
 137#endif /* NO_JUMP_TABLE */
 138
 139#define SET_I(ip) \
 140   ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
 141   I = (ip)
 142
 143/*
 144 * Register target (X or Y register).
 145 */
 146
 147#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target))
 148
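/*
 * Illustrative sketch (not part of beam_emu.c): what REG_TARGET_PTR() above
 * decodes, restated as a standalone function. An even target is a byte
 * offset into the X register array; an odd target is a byte offset + 1
 * relative to the stack pointer (a Y register), matching the xb()/yb()
 * operand macros defined further down. The names below are invented for
 * the example.
 */
typedef unsigned long DemoWord;           /* stand-in for Eterm */

static DemoWord *demo_reg_target(DemoWord target,
                                 DemoWord *x_regs, DemoWord *stack)
{
    if (target & 1) {
        /* Odd: Y register, byte offset (target - 1) from the stack pointer. */
        return (DemoWord *) ((char *) stack + (target - 1));
    } else {
        /* Even: X register, byte offset into the X register array. */
        return (DemoWord *) ((char *) x_regs + target);
    }
}
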
 149/*
 150 * Special Beam instructions.
 151 */
 152
 153BeamInstr beam_apply[2];
 154BeamInstr beam_exit[1];
 155BeamInstr beam_continue_exit[1];
 156
 157
 158/* NOTE These should be the only variables containing trace instructions.
 159**      Sometimes tests are for the instruction value, and sometimes
  160**      for the referring variable (one of these), and rogue references
 161**      will most likely cause chaos.
 162*/
 163BeamInstr beam_return_to_trace[1];   /* OpCode(i_return_to_trace) */
 164BeamInstr beam_return_trace[1];      /* OpCode(i_return_trace) */
 165BeamInstr beam_exception_trace[1];   /* UGLY also OpCode(i_return_trace) */
 166BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
 167
 168
 169/*
 170 * All Beam instructions in numerical order.
 171 */
 172
 173#ifndef NO_JUMP_TABLE
 174void** beam_ops;
 175#endif
 176
 177#define SWAPIN             \
 178    HTOP = HEAP_TOP(c_p);  \
 179    E = c_p->stop
 180
 181#define SWAPOUT            \
 182    HEAP_TOP(c_p) = HTOP;  \
 183    c_p->stop = E
 184
 185#define HEAVY_SWAPIN       \
 186    SWAPIN;		   \
 187    FCALLS = c_p->fcalls
 188
 189#define HEAVY_SWAPOUT      \
 190    SWAPOUT;		   \
 191    c_p->fcalls = FCALLS
 192
 193/*
 194 * Use LIGHT_SWAPOUT when the called function
 195 * will call HeapOnlyAlloc() (and never HAlloc()).
 196 */
 197#ifdef DEBUG
 198#  /* The stack pointer is used in an assertion. */
 199#  define LIGHT_SWAPOUT SWAPOUT
 200#  define DEBUG_SWAPOUT SWAPOUT
 201#  define DEBUG_SWAPIN  SWAPIN
 202#else
 203#  define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
 204#  define DEBUG_SWAPOUT
 205#  define DEBUG_SWAPIN
 206#endif
 207
 208/*
 209 * Use LIGHT_SWAPIN when we know that c_p->stop cannot
 210 * have been updated (i.e. if there cannot have been
 211 * a garbage-collection).
 212 */
 213
 214#define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
 215
 216#ifdef FORCE_HEAP_FRAGS
 217#  define HEAP_SPACE_VERIFIED(Words) do { \
 218      c_p->space_verified = (Words);	  \
 219      c_p->space_verified_from = HTOP;	  \
 220    }while(0)
 221#else
 222#  define HEAP_SPACE_VERIFIED(Words) ((void)0)
 223#endif
 224
 225#define PRE_BIF_SWAPOUT(P)						\
 226     HEAP_TOP((P)) = HTOP;  						\
 227     (P)->stop = E;  							\
 228     PROCESS_MAIN_CHK_LOCKS((P));					\
 229     ERTS_UNREQ_PROC_MAIN_LOCK((P))
 230
 231#define db(N) (N)
 232#define fb(N) ((Sint)(Sint32)(N))
 233#define jb(N) ((Sint)(Sint32)(N))
 234#define tb(N) (N)
 235#define xb(N) (*ADD_BYTE_OFFSET(reg, N))
 236#define yb(N) (*ADD_BYTE_OFFSET(E, N))
 237#define Sb(N) (*REG_TARGET_PTR(N))
 238#define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
 239#define Qb(N) (N)
 240#define Ib(N) (N)
 241
 242#define x(N) reg[N]
 243#define y(N) E[N]
 244#define r(N) x(N)
 245#define Q(N) (N*sizeof(Eterm *))
 246#define l(N) (freg[N].fd)
 247
 248#define Arg(N)       I[(N)+1]
 249
 250#define GetSource(raw, dst)			\
 251   do {						\
 252     dst = raw;                                 \
 253     switch (loader_tag(dst)) {			\
 254     case LOADER_X_REG:				\
 255        dst = x(loader_x_reg_index(dst));       \
 256        break;					\
 257     case LOADER_Y_REG:				\
 258        ASSERT(loader_y_reg_index(dst) >= 1);	\
 259        dst = y(loader_y_reg_index(dst));       \
 260        break;					\
 261     }						\
 262     CHECK_TERM(dst);				\
 263   } while (0)
 264
 265#define PUT_TERM_REG(term, desc)		\
 266do {						\
 267    switch (loader_tag(desc)) {			\
 268    case LOADER_X_REG:				\
 269	x(loader_x_reg_index(desc)) = (term);	\
 270	break;					\
 271    case LOADER_Y_REG:				\
 272	y(loader_y_reg_index(desc)) = (term);	\
 273	break;					\
 274    default:					\
 275	ASSERT(0);				\
 276	break;					\
 277    }						\
 278} while(0)
 279
 280#ifdef DEBUG
 281/* Better static type testing by the C compiler */
 282#  define BEAM_IS_TUPLE(Src) is_tuple(Src)
 283#else
 284/* Better performance */
 285# define BEAM_IS_TUPLE(Src) is_boxed(Src)
 286#endif
 287
 288/*
 289 * process_main() is already huge, so we want to avoid inlining
 290 * seldom used functions into it.
 291 */
 292static void init_emulator_finish(void) ERTS_NOINLINE;
 293static ErtsCodeMFA *ubif2mfa(void* uf) ERTS_NOINLINE;
 294static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
 295			       Eterm* reg, ErtsCodeMFA* bif_mfa) ERTS_NOINLINE;
 296static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
 297				     Eterm* reg, Eterm func) ERTS_NOINLINE;
 298static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
 299			      BeamInstr *I, Uint offs) ERTS_NOINLINE;
 300static BeamInstr* apply(Process* p, Eterm* reg,
 301                        BeamInstr *I, Uint offs) ERTS_NOINLINE;
 302static BeamInstr* call_fun(Process* p, int arity,
 303			   Eterm* reg, Eterm args) ERTS_NOINLINE;
 304static BeamInstr* apply_fun(Process* p, Eterm fun,
 305			    Eterm args, Eterm* reg) ERTS_NOINLINE;
 306static Eterm new_fun(Process* p, Eterm* reg,
 307		     ErlFunEntry* fe, int num_free) ERTS_NOINLINE;
 308static int is_function2(Eterm Term, Uint arity);
 309static Eterm erts_gc_new_map(Process* p, Eterm* reg, Uint live,
 310                             Uint n, BeamInstr* ptr) ERTS_NOINLINE;
 311static Eterm erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
 312                               Uint live, BeamInstr* ptr) ERTS_NOINLINE;
 313static Eterm erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
 314                              Uint n, BeamInstr* new_p) ERTS_NOINLINE;
 315static Eterm erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live,
 316                              Uint n, Eterm* new_p) ERTS_NOINLINE;
 317static Eterm get_map_element(Eterm map, Eterm key);
 318static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);
 319
 320/*
 321 * Functions not directly called by process_main(). OK to inline.
 322 */
 323static BeamInstr* next_catch(Process* c_p, Eterm *reg);
 324static void terminate_proc(Process* c_p, Eterm Value);
 325static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
 326static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
 327			    ErtsCodeMFA *bif_mfa, Eterm args);
 328static struct StackTrace * get_trace_from_exc(Eterm exc);
 329static Eterm *get_freason_ptr_from_exc(Eterm exc);
 330static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
 331
 332void
 333init_emulator(void)
 334{
 335    process_main(0, 0);
 336}
 337
 338/*
 339 * On certain platforms, make sure that the main variables really are placed
 340 * in registers.
 341 */
 342
 343#if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
 344#  define REG_xregs asm("%l1")
 345#  define REG_htop asm("%l2")
 346#  define REG_stop asm("%l3")
 347#  define REG_I asm("%l4")
 348#  define REG_fcalls asm("%l5")
 349#elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG)
 350#  define REG_xregs asm("%r12")
 351#  define REG_htop
 352#  define REG_stop asm("%r13")
 353#  define REG_I asm("%rbx")
 354#  define REG_fcalls asm("%r14")
 355#else
 356#  define REG_xregs
 357#  define REG_htop
 358#  define REG_stop
 359#  define REG_I
 360#  define REG_fcalls
 361#endif
 362
 363#ifdef USE_VM_PROBES
 364#  define USE_VM_CALL_PROBES
 365#endif
 366
 367#ifdef USE_VM_CALL_PROBES
 368
 369#define DTRACE_LOCAL_CALL(p, mfa)					\
 370    if (DTRACE_ENABLED(local_function_entry)) {				\
 371        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);		\
 372        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);			\
 373        int depth = STACK_START(p) - STACK_TOP(p);			\
 374        dtrace_fun_decode(p, mfa, process_name, mfa_buf);               \
 375        DTRACE3(local_function_entry, process_name, mfa_buf, depth);	\
 376    }
 377
 378#define DTRACE_GLOBAL_CALL(p, mfa)					\
 379    if (DTRACE_ENABLED(global_function_entry)) {			\
 380        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);		\
 381        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);			\
 382        int depth = STACK_START(p) - STACK_TOP(p);			\
 383        dtrace_fun_decode(p, mfa, process_name, mfa_buf);               \
 384        DTRACE3(global_function_entry, process_name, mfa_buf, depth);	\
 385    }
 386
 387#define DTRACE_RETURN(p, mfa)                                    \
 388    if (DTRACE_ENABLED(function_return)) {                      \
 389        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);     \
 390        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);          \
 391        int depth = STACK_START(p) - STACK_TOP(p);              \
 392        dtrace_fun_decode(p, mfa, process_name, mfa_buf);       \
 393        DTRACE3(function_return, process_name, mfa_buf, depth); \
 394    }
 395
 396#define DTRACE_BIF_ENTRY(p, mfa)                                    \
 397    if (DTRACE_ENABLED(bif_entry)) {                                \
 398        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);         \
 399        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);              \
 400        dtrace_fun_decode(p, mfa, process_name, mfa_buf);           \
 401        DTRACE2(bif_entry, process_name, mfa_buf);                  \
 402    }
 403
 404#define DTRACE_BIF_RETURN(p, mfa)                                   \
 405    if (DTRACE_ENABLED(bif_return)) {                               \
 406        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);         \
 407        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);              \
 408        dtrace_fun_decode(p, mfa, process_name, mfa_buf);           \
 409        DTRACE2(bif_return, process_name, mfa_buf);                 \
 410    }
 411
 412#define DTRACE_NIF_ENTRY(p, mfa)                                        \
 413    if (DTRACE_ENABLED(nif_entry)) {                                    \
 414        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);             \
 415        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);                  \
 416        dtrace_fun_decode(p, mfa, process_name, mfa_buf);               \
 417        DTRACE2(nif_entry, process_name, mfa_buf);                      \
 418    }
 419
 420#define DTRACE_NIF_RETURN(p, mfa)                                       \
 421    if (DTRACE_ENABLED(nif_return)) {                                   \
 422        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);             \
 423        DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);                  \
 424        dtrace_fun_decode(p, mfa, process_name, mfa_buf);               \
 425        DTRACE2(nif_return, process_name, mfa_buf);                     \
 426    }
 427
 428#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e)                                                    \
 429    do {                                                                                       \
 430        if (DTRACE_ENABLED(global_function_entry)) {                                           \
 431            BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
 432            DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp));          \
 433        }                                                                                      \
 434    } while(0)
 435
 436#define DTRACE_RETURN_FROM_PC(p, i)                                                        \
 437    do {                                                                                \
 438        ErtsCodeMFA* cmfa;                                                                  \
 439        if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc(i))) { \
 440            DTRACE_RETURN((p), cmfa);                               \
 441        }                                                                               \
 442    } while(0)
 443
 444#else /* USE_VM_PROBES */
 445#define DTRACE_LOCAL_CALL(p, mfa)        do {} while (0)
 446#define DTRACE_GLOBAL_CALL(p, mfa)       do {} while (0)
 447#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
 448#define DTRACE_RETURN(p, mfa)            do {} while (0)
 449#define DTRACE_RETURN_FROM_PC(p, i)      do {} while (0)
 450#define DTRACE_BIF_ENTRY(p, mfa)         do {} while (0)
 451#define DTRACE_BIF_RETURN(p, mfa)        do {} while (0)
 452#define DTRACE_NIF_ENTRY(p, mfa)         do {} while (0)
 453#define DTRACE_NIF_RETURN(p, mfa)        do {} while (0)
 454#endif /* USE_VM_PROBES */
 455
 456#ifdef DEBUG
 457#define ERTS_DBG_CHK_REDS(P, FC)					\
 458    do {								\
 459	if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) {			\
 460	    ASSERT(FC <= 0);						\
 461	    ASSERT(erts_proc_sched_data(c_p)->virtual_reds		\
 462		   <= 0 - (FC));					\
 463	}								\
 464	else {								\
 465	    ASSERT(FC <= CONTEXT_REDS);					\
 466	    ASSERT(erts_proc_sched_data(c_p)->virtual_reds		\
 467		   <= CONTEXT_REDS - (FC));				\
 468	}								\
 469} while (0)
 470#else
 471#define ERTS_DBG_CHK_REDS(P, FC)
 472#endif
 473
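/*
 * Illustrative sketch (not part of beam_emu.c): the reduction bookkeeping
 * that ERTS_DBG_CHK_REDS() above asserts. When call saving is active,
 * FCALLS is biased by -CONTEXT_REDS when a process is scheduled in, so the
 * reductions actually used must be recovered with the same bias when it is
 * scheduled out (see do_schedule and context_switch3 below). The constant
 * and names here are invented for the example.
 */
#include <assert.h>

#define DEMO_CONTEXT_REDS 4000

static int demo_reds_used(int reds_in, int fcalls, int call_saving)
{
    /* Mirrors the two branches used at do_schedule and context_switch3. */
    return call_saving ? reds_in - (DEMO_CONTEXT_REDS + fcalls)
                       : reds_in - fcalls;
}

int main(void)
{
    int reds_in = DEMO_CONTEXT_REDS;

    /* Normal case: FCALLS starts at reds_in and counts down. */
    assert(demo_reds_used(reds_in, reds_in - 7, 0) == 7);

    /* Call saving: FCALLS starts at reds_in - CONTEXT_REDS (i.e. <= 0). */
    assert(demo_reds_used(reds_in, (reds_in - DEMO_CONTEXT_REDS) - 7, 1) == 7);

    return 0;
}
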
 474#ifdef NO_FPE_SIGNALS
 475#  define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
 476#  define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
 477#else
 478#  define ERTS_NO_FPE_CHECK_INIT(p)
 479#  define ERTS_NO_FPE_ERROR(p, a, b)
 480#endif
 481
 482/*
 483 * process_main() is called twice:
 484 * The first call performs some initialisation, including exporting
 485 * the instructions' C labels to the loader.
 486 * The second call starts execution of BEAM code. This call never returns.
 487 */
 488ERTS_NO_RETPOLINE
 489void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
 490{
 491    static int init_done = 0;
 492    Process* c_p = NULL;
 493    int reds_used;
 494#ifdef DEBUG
 495    ERTS_DECLARE_DUMMY(Eterm pid);
 496#endif
 497
 498    /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
 499     * in all other cases x0 is used.
 500     */
 501    register Eterm* reg REG_xregs = x_reg_array;
 502
 503    /*
 504     * Top of heap (next free location); grows upwards.
 505     */
 506    register Eterm* HTOP REG_htop = NULL;
 507
 508    /* Stack pointer.  Grows downwards; points
 509     * to last item pushed (normally a saved
 510     * continuation pointer).
 511     */
 512    register Eterm* E REG_stop = NULL;
 513
 514    /*
 515     * Pointer to next threaded instruction.
 516     */
 517    register BeamInstr *I REG_I = NULL;
 518
 519    /* Number of reductions left.  This function
 520     * returns to the scheduler when FCALLS reaches zero.
 521     */
 522    register Sint FCALLS REG_fcalls = 0;
 523
 524    /*
 525     * X registers and floating point registers are located in
 526     * scheduler specific data.
 527     */
 528    register FloatDef *freg = f_reg_array;
 529
 530    /*
 531     * For keeping the negative old value of 'reds' when call saving is active.
 532     */
 533    int neg_o_reds = 0;
 534
 535#ifdef ERTS_OPCODE_COUNTER_SUPPORT
 536    static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
 537#else
 538#ifndef NO_JUMP_TABLE
 539    static void* opcodes[] = { DEFINE_OPCODES };
 540#else
 541    register BeamInstr Go;
 542#endif
 543#endif
 544
 545    Uint64 start_time = 0;          /* Monitor long schedule */
 546    BeamInstr* start_time_i = NULL;
 547
 548    ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
 549
 550    ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
 551
 552
 553    /*
 554     * Note: In this function, we attempt to place rarely executed code towards
 555     * the end of the function, in the hope that the cache hit rate will be better.
 556     * The initialization code is only run once, so it is at the very end.
 557     *
 558     * Note: c_p->arity must be set to reflect the number of useful terms in
 559     * c_p->arg_reg before calling the scheduler.
 560     */
 561    if (ERTS_UNLIKELY(!init_done)) {
 562       /* This should only be reached during the init phase when only the main
 563        * process is running. I.e. there is no race for init_done.
 564        */
 565	init_done = 1;
 566	goto init_emulator;
 567    }
 568
 569    c_p = NULL;
 570    reds_used = 0;
 571
 572    goto do_schedule1;
 573
 574 do_schedule:
 575    ASSERT(c_p->arity < 6);
 576    ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
 577    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
 578	reds_used = REDS_IN(c_p) - FCALLS;
 579    else
 580	reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
 581    ASSERT(reds_used >= 0);
 582 do_schedule1:
 583
 584    if (start_time != 0) {
 585        Sint64 diff = erts_timestamp_millis() - start_time;
 586	if (diff > 0 && (Uint) diff >  erts_system_monitor_long_schedule) {
 587	    ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
 588	    ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
 589	    monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
 590	}
 591    }
 592
 593    PROCESS_MAIN_CHK_LOCKS(c_p);
 594    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
 595    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
 596    c_p = erts_schedule(NULL, c_p, reds_used);
 597    ASSERT(!(c_p->flags & F_HIPE_MODE));
 598    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
 599    start_time = 0;
 600#ifdef DEBUG
 601    pid = c_p->common.id; /* Save for debugging purposes */
 602#endif
 603    ERTS_REQ_PROC_MAIN_LOCK(c_p);
 604    PROCESS_MAIN_CHK_LOCKS(c_p);
 605
 606    ERTS_MSACC_UPDATE_CACHE_X();
 607
 608    if (erts_system_monitor_long_schedule != 0) {
 609	start_time = erts_timestamp_millis();
 610	start_time_i = c_p->i;
 611    }
 612
 613    ERL_BITS_RELOAD_STATEP(c_p);
 614    {
 615	int reds;
 616	Eterm* argp;
 617	BeamInstr next;
 618	int i;
 619
 620	argp = c_p->arg_reg;
 621	for (i = c_p->arity - 1; i >= 0; i--) {
 622	    reg[i] = argp[i];
 623	    CHECK_TERM(reg[i]);
 624	}
 625
 626	/*
 627	 * We put the original reduction count in the process structure, to reduce
 628	 * the code size (referencing a field in a struct through a pointer stored
 629	 * in a register gives smaller code than referencing a global variable).
 630	 */
 631
 632	SET_I(c_p->i);
 633
 634	REDS_IN(c_p) = reds = c_p->fcalls;
 635#ifdef DEBUG
 636	c_p->debug_reds_in = reds;
 637#endif
 638
 639	if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
 640	    neg_o_reds = -CONTEXT_REDS;
 641	    FCALLS = neg_o_reds + reds;
 642	} else {
 643	    neg_o_reds = 0;
 644	    FCALLS = reds;
 645	}
 646
 647	ERTS_DBG_CHK_REDS(c_p, FCALLS);
 648
 649	next = *I;
 650	SWAPIN;
 651	ASSERT(VALID_INSTR(next));
 652
 653#ifdef USE_VM_PROBES
 654        if (DTRACE_ENABLED(process_scheduled)) {
 655            DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
 656            DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
 657            dtrace_proc_str(c_p, process_buf);
 658
 659            if (ERTS_PROC_IS_EXITING(c_p)) {
 660                sys_strcpy(fun_buf, "<exiting>");
 661            } else {
 662                ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
 663                if (cmfa) {
 664                    dtrace_fun_decode(c_p, cmfa,
 665                                      NULL, fun_buf);
 666                } else {
 667                    erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
 668                                  "<unknown/%p>", next);
 669                }
 670            }
 671
 672            DTRACE2(process_scheduled, process_buf, fun_buf);
 673        }
 674#endif
 675	Goto(next);
 676    }
 677
 678#if defined(DEBUG) || defined(NO_JUMP_TABLE)
 679 emulator_loop:
 680#endif
 681
 682#ifdef NO_JUMP_TABLE
 683    switch (Go) {
 684#endif
 685
 686#include "beam_hot.h"
 687    /*
 688     * The labels are jumped to from the $DISPATCH() macros when the reductions
 689     * are used up.
 690     *
 691     * Since the I register points just beyond the FuncBegin instruction, we
 692     * can get the module, function, and arity for the function being
 693     * called from I[-3], I[-2], and I[-1] respectively.
 694     */
 695 context_switch_fun:
 696    /* Add one for the environment of the fun */
 697    c_p->arity = erts_code_to_codemfa(I)->arity + 1;
 698    goto context_switch2;
 699
 700 context_switch:
 701    c_p->arity = erts_code_to_codemfa(I)->arity;
 702
 703 context_switch2: 		/* Entry for fun calls. */
 704    c_p->current = erts_code_to_codemfa(I);
 705
 706 context_switch3:
 707
 708 {
 709     Eterm* argp;
 710     int i;
 711
 712     if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
 713         c_p->i = beam_exit;
 714         c_p->arity = 0;
 715         c_p->current = NULL;
 716         goto do_schedule;
 717     }
 718
 719     /*
 720      * Make sure that there is enough room for the argument registers to be saved.
 721      */
 722     if (c_p->arity > c_p->max_arg_reg) {
 723	 /*
 724	  * Yes, this is an expensive operation, but you only pay it the first
 725	  * time you call a function with more than 6 arguments which is
 726	  * scheduled out.  This is better than paying for 26 words of wasted
 727	  * space for most processes which never call functions with more than
 728	  * 6 arguments.
 729	  */
 730	 Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
 731	 if (c_p->arg_reg != c_p->def_arg_reg) {
 732	     c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
 733						   (void *) c_p->arg_reg,
 734						   size);
 735	 } else {
 736	     c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
 737	 }
 738	 c_p->max_arg_reg = c_p->arity;
 739     }
 740
 741     /*
 742      * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it
 743      * now before saving registers.
 744      *
 745      * The '+ 1' compensates for the last increment which was not done
  746      * (because the code for the Dispatch() macro becomes shorter that way).
 747      */
 748
 749     ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
 750    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
 751	reds_used = REDS_IN(c_p) - FCALLS;
 752    else
 753	reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
 754    ASSERT(reds_used >= 0);
 755
 756     /*
 757      * Save the argument registers and everything else.
 758      */
 759
 760     argp = c_p->arg_reg;
 761     for (i = c_p->arity - 1; i >= 0; i--) {
 762	 argp[i] = reg[i];
 763     }
 764     SWAPOUT;
 765     c_p->i = I;
 766     goto do_schedule1;
 767 }
 768
 769#include "beam_warm.h"
 770
 771 OpCase(normal_exit): {
 772     HEAVY_SWAPOUT;
 773     c_p->freason = EXC_NORMAL;
 774     c_p->arity = 0; /* In case this process will ever be garbed again. */
 775     ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
 776     erts_do_exit_process(c_p, am_normal);
 777     ERTS_REQ_PROC_MAIN_LOCK(c_p);
 778     HEAVY_SWAPIN;
 779     goto do_schedule;
 780 }
 781
 782 OpCase(continue_exit): {
 783     HEAVY_SWAPOUT;
 784     ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
 785     erts_continue_exit_process(c_p);
 786     ERTS_REQ_PROC_MAIN_LOCK(c_p);
 787     HEAVY_SWAPIN;
 788     goto do_schedule;
 789 }
 790
 791 find_func_info: {
 792     SWAPOUT;
 793     I = handle_error(c_p, I, reg, NULL);
 794     goto post_error_handling;
 795 }
 796
 797 OpCase(call_error_handler):
 798    /*
 799     * At this point, I points to the code[3] in the export entry for
 800     * a function which is not loaded.
 801     *
 802     * code[0]: Module
 803     * code[1]: Function
 804     * code[2]: Arity
 805     * code[3]: &&call_error_handler
 806     * code[4]: Not used
 807     */
 808    HEAVY_SWAPOUT;
 809    I = call_error_handler(c_p, erts_code_to_codemfa(I),
 810                           reg, am_undefined_function);
 811    HEAVY_SWAPIN;
 812    if (I) {
 813	Goto(*I);
 814    }
 815
 816 /* Fall through */
 817 OpCase(error_action_code): {
 818    handle_error:
 819     SWAPOUT;
 820     I = handle_error(c_p, NULL, reg, NULL);
 821 post_error_handling:
 822     if (I == 0) {
 823	 goto do_schedule;
 824     } else {
 825	 ASSERT(!is_value(r(0)));
 826	 SWAPIN;
 827	 Goto(*I);
 828     }
 829 }
 830
 831 OpCase(i_func_info_IaaI): {
 832     ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
 833     c_p->freason = EXC_FUNCTION_CLAUSE;
 834     c_p->current = &ci->mfa;
 835     goto handle_error;
 836 }
 837
 838#include "beam_cold.h"
 839
 840#ifdef ERTS_OPCODE_COUNTER_SUPPORT
 841    DEFINE_COUNTING_LABELS;
 842#endif
 843
 844#ifndef NO_JUMP_TABLE
 845#ifdef DEBUG
 846 end_emulator_loop:
 847#endif
 848#endif
 849
 850 OpCase(int_code_end):
 851 OpCase(label_L):
 852 OpCase(on_load):
 853 OpCase(line_I):
 854    erts_exit(ERTS_ERROR_EXIT, "meta op\n");
 855
 856    /*
 857     * One-time initialization of Beam emulator.
 858     */
 859
 860 init_emulator:
 861 {
 862#ifndef NO_JUMP_TABLE
 863#ifdef ERTS_OPCODE_COUNTER_SUPPORT
 864#ifdef DEBUG
 865     counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
 866#endif
 867     counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
 868     beam_ops = counting_opcodes;
 869#else /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
 870     beam_ops = opcodes;
 871#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
 872#endif /* NO_JUMP_TABLE */
 873
 874     init_emulator_finish();
 875     return;
 876 }
 877#ifdef NO_JUMP_TABLE
 878 default:
 879    erts_exit(ERTS_ERROR_EXIT, "unexpected op code %d\n",Go);
 880  }
 881#endif
 882    return;			/* Never executed */
 883}
 884
 885/*
 886 * Enter all BIFs into the export table.
 887 *
 888 * Note that they will all call the error_handler until their modules have been
 889 * loaded, which may prevent the system from booting if BIFs from non-preloaded
 890 * modules are apply/3'd while loading code. Ordinary BIF calls will work fine
 891 * however since they won't go through export entries.
 892 */
 893static void install_bifs(void) {
 894    int i;
 895
 896    for (i = 0; i < BIF_SIZE; i++) {
 897        BifEntry *entry;
 898        Export *ep;
 899        int j;
 900
 901        entry = &bif_table[i];
 902
 903        ep = erts_export_put(entry->module, entry->name, entry->arity);
 904
 905        ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
 906        ep->info.mfa.module = entry->module;
 907        ep->info.mfa.function = entry->name;
 908        ep->info.mfa.arity = entry->arity;
 909        ep->bif_number = i;
 910
 911        memset(&ep->trampoline, 0, sizeof(ep->trampoline));
 912        ep->trampoline.op = BeamOpCodeAddr(op_call_error_handler);
 913
 914        for (j = 0; j < ERTS_NUM_CODE_IX; j++) {
 915            ep->addressv[j] = ep->trampoline.raw;
 916        }
 917
 918        /* Set up a hidden export entry so we can trap to this BIF without
 919         * it being seen when tracing. */
 920        erts_init_trap_export(&bif_trap_export[i],
 921                              entry->module, entry->name, entry->arity,
 922                              entry->f);
 923    }
 924}
 925
 926/*
 927 * One-time initialization of emulator. Does not need to be
 928 * in process_main().
 929 */
 930static void
 931init_emulator_finish(void)
 932{
 933#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
 934    int i;
 935
 936    for (i = 0; i < NUMBER_OF_OPCODES; i++) {
 937        BeamInstr instr = BeamOpCodeAddr(i);
 938        if (instr >= (1ull << 32)) {
 939            erts_exit(ERTS_ERROR_EXIT,
  940                      "This run-time was supposed to be compiled with all code below 2Gb,\n"
 941                      "but the instruction '%s' is located at %016lx.\n",
 942                      opc[i].name, instr);
 943        }
 944    }
 945#endif
 946
 947    beam_apply[0]             = BeamOpCodeAddr(op_i_apply);
 948    beam_apply[1]             = BeamOpCodeAddr(op_normal_exit);
 949    beam_exit[0]              = BeamOpCodeAddr(op_error_action_code);
 950    beam_continue_exit[0]     = BeamOpCodeAddr(op_continue_exit);
 951    beam_return_to_trace[0]   = BeamOpCodeAddr(op_i_return_to_trace);
 952    beam_return_trace[0]      = BeamOpCodeAddr(op_return_trace);
 953    beam_exception_trace[0]   = BeamOpCodeAddr(op_return_trace); /* UGLY */
 954    beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace);
 955
 956    install_bifs();
 957}
 958
 959/*
 960 * erts_dirty_process_main() is what dirty schedulers execute. Since they handle
 961 * only NIF calls they do not need to be able to execute all BEAM
 962 * instructions.
 963 */
 964void erts_dirty_process_main(ErtsSchedulerData *esdp)
 965{
 966    Process* c_p = NULL;
 967    ErtsMonotonicTime start_time;
 968#ifdef DEBUG
 969    ERTS_DECLARE_DUMMY(Eterm pid);
 970#endif
 971
 972    /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
 973     * in all other cases x0 is used.
 974     */
 975    register Eterm* reg REG_xregs = NULL;
 976
 977    /*
 978     * Top of heap (next free location); grows upwards.
 979     */
 980    register Eterm* HTOP REG_htop = NULL;
 981
 982    /* Stack pointer.  Grows downwards; points
 983     * to last item pushed (normally a saved
 984     * continuation pointer).
 985     */
 986    register Eterm* E REG_stop = NULL;
 987
 988    /*
 989     * Pointer to next threaded instruction.
 990     */
 991    register BeamInstr *I REG_I = NULL;
 992
 993    ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
 994
 995    /*
  996     * start_time is always positive for dirty CPU schedulers,
 997     * and negative for dirty I/O schedulers.
 998     */
 999
1000    if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp)) {
1001	start_time = erts_get_monotonic_time(NULL);
1002	ASSERT(start_time >= 0);
1003    }
1004    else {
1005	start_time = ERTS_SINT64_MIN;
1006	ASSERT(start_time < 0);
1007    }
1008
1009    goto do_dirty_schedule;
1010
1011 context_switch:
1012    c_p->current = erts_code_to_codemfa(I);	/* Pointer to Mod, Func, Arity */
1013    c_p->arity = c_p->current->arity;
1014
1015    {
1016	int reds_used;
1017	Eterm* argp;
1018	int i;
1019
1020	/*
1021	 * Make sure that there is enough room for the argument registers to be saved.
1022	 */
1023	if (c_p->arity > c_p->max_arg_reg) {
1024	    /*
1025	     * Yes, this is an expensive operation, but you only pay it the first
1026	     * time you call a function with more than 6 arguments which is
1027	     * scheduled out.  This is better than paying for 26 words of wasted
1028	     * space for most processes which never call functions with more than
1029	     * 6 arguments.
1030	     */
1031	    Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
1032	    if (c_p->arg_reg != c_p->def_arg_reg) {
1033		c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
1034						      (void *) c_p->arg_reg,
1035						      size);
1036	    } else {
1037		c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
1038	    }
1039	    c_p->max_arg_reg = c_p->arity;
1040	}
1041
1042	/*
1043	 * Save the argument registers and everything else.
1044	 */
1045
1046	argp = c_p->arg_reg;
1047	for (i = c_p->arity - 1; i >= 0; i--) {
1048	    argp[i] = reg[i];
1049	}
1050	SWAPOUT;
1051	c_p->i = I;
1052
1053    do_dirty_schedule:
1054
1055	if (start_time < 0) {
1056	    /*
1057	     * Dirty I/O scheduler:
1058	     *   One reduction consumed regardless of
1059	     *   time spent in the dirty NIF.
1060	     */
1061	    reds_used = esdp->virtual_reds + 1;
1062	}
1063	else {
1064	    /*
1065	     * Dirty CPU scheduler:
1066	     *   Reductions based on time consumed by
1067	     *   the dirty NIF.
1068	     */
1069	    Sint64 treds;
1070	    treds = erts_time2reds(start_time,
1071				   erts_get_monotonic_time(esdp));
1072	    treds += esdp->virtual_reds;
1073	    reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
1074	}
1075
1076        if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
1077            erts_proc_sig_handle_pending_suspend(c_p);
1078
1079	PROCESS_MAIN_CHK_LOCKS(c_p);
1080	ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
1081	ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
1082	c_p = erts_schedule(esdp, c_p, reds_used);
1083
1084	if (start_time >= 0) {
1085	    start_time = erts_get_monotonic_time(esdp);
1086	    ASSERT(start_time >= 0);
1087	}
1088    }
1089
1090    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
1091#ifdef DEBUG
1092    pid = c_p->common.id; /* Save for debugging purposes */
1093#endif
1094    ERTS_REQ_PROC_MAIN_LOCK(c_p);
1095    PROCESS_MAIN_CHK_LOCKS(c_p);
1096
1097    ASSERT(!(c_p->flags & F_HIPE_MODE));
1098    ERTS_MSACC_UPDATE_CACHE_X();
1099
1100    /*
1101     * Set fcalls even though we ignore it, so we don't
1102     * confuse code accessing it...
1103     */
1104    if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
1105	c_p->fcalls = 0;
1106    else
1107	c_p->fcalls = CONTEXT_REDS;
1108
1109    if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
1110	erts_execute_dirty_system_task(c_p);
1111	goto do_dirty_schedule;
1112    }
1113    else {
1114	ErtsCodeMFA *codemfa;
1115	Eterm* argp;
1116	int i, exiting;
1117
1118	reg = esdp->x_reg_array;
1119
1120	argp = c_p->arg_reg;
1121	for (i = c_p->arity - 1; i >= 0; i--) {
1122	    reg[i] = argp[i];
1123	    CHECK_TERM(reg[i]);
1124	}
1125
1126	/*
1127	 * We put the original reduction count in the process structure, to reduce
1128	 * the code size (referencing a field in a struct through a pointer stored
1129	 * in a register gives smaller code than referencing a global variable).
1130	 */
1131
1132	I = c_p->i;
1133
1134	SWAPIN;
1135
1136#ifdef USE_VM_PROBES
1137        if (DTRACE_ENABLED(process_scheduled)) {
1138            DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
1139            DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
1140            dtrace_proc_str(c_p, process_buf);
1141
1142            if (ERTS_PROC_IS_EXITING(c_p)) {
1143                sys_strcpy(fun_buf, "<exiting>");
1144            } else {
1145                ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
1146                if (cmfa) {
1147		    dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
1148                } else {
1149                    erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
1150                                  "<unknown/%p>", *I);
1151                }
1152            }
1153
1154            DTRACE2(process_scheduled, process_buf, fun_buf);
1155        }
1156#endif
1157
1158	/*
 1159	 * call_nif is always the first instruction in the function:
1160	 *
1161	 * I[-3]: Module
1162	 * I[-2]: Function
1163	 * I[-1]: Arity
1164	 * I[0]: &&call_nif
1165	 * I[1]: Function pointer to NIF function
1166	 * I[2]: Pointer to erl_module_nif
1167	 * I[3]: Function pointer to dirty NIF
1168	 *
1169	 * This layout is determined by the ErtsNativeFunc struct
1170	 */
1171
1172	ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_NIF);
1173
1174	codemfa = erts_code_to_codemfa(I);
1175
1176	DTRACE_NIF_ENTRY(c_p, codemfa);
1177	c_p->current = codemfa;
1178	SWAPOUT;
1179	PROCESS_MAIN_CHK_LOCKS(c_p);
1180	ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
1181
1182	ASSERT(!ERTS_PROC_IS_EXITING(c_p));
1183	if (BeamIsOpCode(*I, op_call_bif_W)) {
1184	    exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
1185	}
1186	else {
1187	    ASSERT(BeamIsOpCode(*I, op_call_nif_WWW));
1188            exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
1189	}
1190
1191	ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
1192
1193	PROCESS_MAIN_CHK_LOCKS(c_p);
1194	ERTS_REQ_PROC_MAIN_LOCK(c_p);
1195	ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
1196	ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_EMULATOR);
1197	if (exiting)
1198	    goto do_dirty_schedule;
1199	ASSERT(!ERTS_PROC_IS_EXITING(c_p));
1200
1201	DTRACE_NIF_RETURN(c_p, codemfa);
1202	ERTS_HOLE_CHECK(c_p);
1203	SWAPIN;
1204	I = c_p->i;
1205	goto context_switch;
1206    }
1207}
1208
1209static ErtsCodeMFA *
1210ubif2mfa(void* uf)
1211{
1212    int i;
1213    for (i = 0; erts_u_bifs[i].bif; i++) {
1214	if (erts_u_bifs[i].bif == uf)
1215	    return &bif_trap_export[erts_u_bifs[i].exp_ix].info.mfa;
1216    }
1217    erts_exit(ERTS_ERROR_EXIT, "bad u bif: %p\n", uf);
1218    return NULL;
1219}
1220
1221/*
1222 * Mapping from the error code 'class tag' to atoms.
1223 */
1224Eterm exception_tag[NUMBER_EXC_TAGS] = {
1225  am_error,	/* 0 */
1226  am_exit,	/* 1 */
1227  am_throw,	/* 2 */
1228};
1229
1230/*
1231 * Mapping from error code 'index' to atoms.
1232 */
1233Eterm error_atom[NUMBER_EXIT_CODES] = {
1234  am_internal_error,	/* 0 */
1235  am_normal,		/* 1 */
1236  am_internal_error,	/* 2 */
1237  am_badarg,		/* 3 */
1238  am_badarith,		/* 4 */
1239  am_badmatch,		/* 5 */
1240  am_function_clause,	/* 6 */
1241  am_case_clause,	/* 7 */
1242  am_if_clause,		/* 8 */
1243  am_undef,		/* 9 */
1244  am_badfun,		/* 10 */
1245  am_badarity,		/* 11 */
1246  am_timeout_value,	/* 12 */
1247  am_noproc,		/* 13 */
1248  am_notalive,		/* 14 */
1249  am_system_limit,	/* 15 */
1250  am_try_clause,	/* 16 */
1251  am_notsup,		/* 17 */
1252  am_badmap,		/* 18 */
1253  am_badkey,		/* 19 */
1254};
1255
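/*
 * Illustrative sketch (not part of beam_emu.c): how the two tables above are
 * consumed. c_p->freason packs an exception class and an exit-code index;
 * GET_EXC_CLASS() and GET_EXC_INDEX() (used further down in this file)
 * extract them to index exception_tag[] and error_atom[], as handle_error()
 * and expand_error_value() do below. The bit layout and names used here are
 * invented purely for the example.
 */
#include <stdio.h>

#define DEMO_CLASS_BITS 2
#define DEMO_MAKE_FREASON(ix, cls) (((unsigned)(ix) << DEMO_CLASS_BITS) | (cls))
#define DEMO_GET_CLASS(fr)         ((fr) & ((1u << DEMO_CLASS_BITS) - 1))
#define DEMO_GET_INDEX(fr)         ((fr) >> DEMO_CLASS_BITS)

static const char *demo_tag[]  = { "error", "exit", "throw" };
static const char *demo_atom[] = { "internal_error", "normal",
                                   "internal_error", "badarg" };

int main(void)
{
    unsigned fr = DEMO_MAKE_FREASON(3, 0);   /* index 3, class "error" */
    printf("{%s, %s}\n",                     /* prints {error, badarg} */
           demo_tag[DEMO_GET_CLASS(fr)], demo_atom[DEMO_GET_INDEX(fr)]);
    return 0;
}
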
1256/* Returns the return address at E[0] in printable form, skipping tracing in
1257 * the same manner as gather_stacktrace.
1258 *
1259 * This is needed to generate correct stacktraces when throwing errors from
1260 * instructions that return like an ordinary function, such as call_nif. */
1261BeamInstr *erts_printable_return_address(Process* p, Eterm *E) {
1262    Eterm *ptr = E;
1263
1264    ASSERT(is_CP(*ptr));
1265
1266    while (ptr < STACK_START(p)) {
1267        BeamInstr *cp = cp_val(*ptr);
1268
1269        if (cp == beam_exception_trace || cp == beam_return_trace) {
1270            ptr += 3;
1271        } else if (cp == beam_return_time_trace) {
1272            ptr += 2;
1273        } else if (cp == beam_return_to_trace) {
1274            ptr += 1;
1275        } else {
1276            return cp;
1277        }
1278    }
1279
1280    ERTS_ASSERT(!"No continuation pointer on stack");
1281}
1282
1283/*
1284 * To fully understand the error handling, one must keep in mind that
1285 * when an exception is thrown, the search for a handler can jump back
1286 * and forth between Beam and native code. Upon each mode switch, a
1287 * dummy handler is inserted so that if an exception reaches that point,
1288 * the handler is invoked (like any handler) and transfers control so
1289 * that the search for a real handler is continued in the other mode.
1290 * Therefore, c_p->freason and c_p->fvalue must still hold the exception
1291 * info when the handler is executed, but normalized so that creation of
1292 * error terms and saving of the stack trace is only done once, even if
1293 * we pass through the error handling code several times.
1294 *
1295 * When a new exception is raised, the current stack trace information
1296 * is quick-saved in a small structure allocated on the heap. Depending
1297 * on how the exception is eventually caught (perhaps by causing the
1298 * current process to terminate), the saved information may be used to
1299 * create a symbolic (human-readable) representation of the stack trace
1300 * at the point of the original exception.
1301 */
1302
1303static BeamInstr*
1304handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
1305{
1306    Eterm* hp;
1307    Eterm Value = c_p->fvalue;
1308    Eterm Args = am_true;
1309
1310    ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
1311
1312    if (c_p->freason & EXF_RESTORE_NFUNC)
1313	erts_nfunc_restore_error(c_p, &pc, reg, &bif_mfa);
1314
1315#ifdef DEBUG
1316    if (bif_mfa) {
1317	/* Verify that bif_mfa does not point into our native function wrapper */
1318	ErtsNativeFunc *nep = ERTS_PROC_GET_NFUNC_TRAP_WRAPPER(c_p);
1319	ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(ErtsNativeFunc)));
1320    }
1321#endif
1322
1323    c_p->i = pc;    /* In case we call erts_exit(). */
1324
1325    /*
1326     * Check if we have an arglist for the top level call. If so, this
1327     * is encoded in Value, so we have to dig out the real Value as well
1328     * as the Arglist.
1329     */
1330    if (c_p->freason & EXF_ARGLIST) {
1331	  Eterm* tp;
1332	  ASSERT(is_tuple(Value));
1333	  tp = tuple_val(Value);
1334	  Value = tp[1];
1335	  Args = tp[2];
1336    }
1337
1338    /*
1339     * Save the stack trace info if the EXF_SAVETRACE flag is set. The
1340     * main reason for doing this separately is to allow throws to later
1341     * become promoted to errors without losing the original stack
1342     * trace, even if they have passed through one or more catch and
1343     * rethrow. It also makes the creation of symbolic stack traces much
1344     * more modular.
1345     */
1346    if (c_p->freason & EXF_SAVETRACE) {
1347        save_stacktrace(c_p, pc, reg, bif_mfa, Args);
1348    }
1349
1350    /*
1351     * Throws that are not caught are turned into 'nocatch' errors
1352     */
1353    if ((c_p->freason & EXF_THROWN) && (c_p->catches <= 0) ) {
1354	hp = HAlloc(c_p, 3);
1355        Value = TUPLE2(hp, am_nocatch, Value);
1356        c_p->freason = EXC_ERROR;
1357    }
1358
1359    /* Get the fully expanded error term */
1360    Value = expand_error_value(c_p, c_p->freason, Value);
1361
1362    /* Save final error term and stabilize the exception flags so no
1363       further expansion is done. */
1364    c_p->fvalue = Value;
1365    c_p->freason = PRIMARY_EXCEPTION(c_p->freason);
1366
1367    /* Find a handler or die */
1368    if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
1369	&& !(c_p->freason & EXF_PANIC)) {
1370	BeamInstr *new_pc;
1371        /* The Beam handler code (catch_end or try_end) checks reg[0]
1372	   for THE_NON_VALUE to see if the previous code finished
1373	   abnormally. If so, reg[1], reg[2] and reg[3] should hold the
1374	   exception class, term and trace, respectively. (If the
1375	   handler is just a trap to native code, these registers will
1376	   be ignored.) */
1377	reg[0] = THE_NON_VALUE;
1378	reg[1] = exception_tag[GET_EXC_CLASS(c_p->freason)];
1379	reg[2] = Value;
1380	reg[3] = c_p->ftrace;
1381        if ((new_pc = next_catch(c_p, reg))) {
1382            c_p->stop[0] = NIL;  /* To avoid keeping stale references. */
1383            ERTS_RECV_MARK_CLEAR(c_p); /* No longer safe to use this position */
1384	    return new_pc;
1385	}
1386	if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found");
1387    }
1388    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
1389    terminate_proc(c_p, Value);
1390    ERTS_REQ_PROC_MAIN_LOCK(c_p);
1391    return NULL;
1392}
1393
1394/*
1395 * Find the nearest catch handler
1396 */
1397static BeamInstr*
1398next_catch(Process* c_p, Eterm *reg) {
1399    int active_catches = c_p->catches > 0;
1400    int have_return_to_trace = 0;
1401    Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
1402
1403    BeamInstr i_return_trace      = beam_return_trace[0];
1404    BeamInstr i_return_to_trace   = beam_return_to_trace[0];
1405    BeamInstr i_return_time_trace = beam_return_time_trace[0];
1406
1407    ptr = prev = c_p->stop;
1408    ASSERT(ptr <= STACK_START(c_p));
1409
1410    /* This function is only called if we have active catch tags or have
1411     * previously called a function that was exception-traced. As the exception
1412     * trace flag isn't cleared after the traced function returns (and the
1413     * catch tag inserted by it is gone), it's possible to land here with an
1414     * empty stack, and the process should simply die when that happens. */
1415    if (ptr == STACK_START(c_p)) {
1416        ASSERT(!active_catches && IS_TRACED_FL(c_p, F_EXCEPTION_TRACE));
1417        return NULL;
1418    }
1419
1420    while (ptr < STACK_START(c_p)) {
1421	if (is_catch(*ptr)) {
1422	    if (active_catches) goto found_catch;
1423	    ptr++;
1424	}
1425	else if (is_CP(*ptr)) {
1426	    prev = ptr;
1427	    if (*cp_val(*prev) == i_return_trace) {
1428		/* Skip stack frame variables */
1429		while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
1430		    if (is_catch(*ptr) && active_catches) goto found_catch;
1431		}
1432		if (cp_val(*prev) == beam_exception_trace) {
1433                    ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
1434		    erts_trace_exception(c_p, mfa,
1435					 reg[1], reg[2],
1436                                         ERTS_TRACER_FROM_ETERM(ptr+1));
1437		}
1438		/* Skip return_trace parameters */
1439		ptr += 2;
1440	    } else if (*cp_val(*prev) == i_return_to_trace) {
1441		/* Skip stack frame variables */
1442		while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
1443		    if (is_catch(*ptr) && active_catches) goto found_catch;
1444		}
1445		have_return_to_trace = !0; /* Record next cp */
1446		return_to_trace_ptr = NULL;
1447	    } else if (*cp_val(*prev) == i_return_time_trace) {
1448		/* Skip stack frame variables */
1449		while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
1450		    if (is_catch(*ptr) && active_catches) goto found_catch;
1451		}
1452		/* Skip return_trace parameters */
1453		ptr += 1;
1454	    } else {
1455		if (have_return_to_trace) {
1456		    /* Record this cp as possible return_to trace cp */
1457		    have_return_to_trace = 0;
1458		    return_to_trace_ptr = ptr;
1459		} else return_to_trace_ptr = NULL;
1460		ptr++;
1461	    }
1462	} else ptr++;
1463    }
1464    return NULL;
1465    
1466 found_catch:
1467    ASSERT(ptr < STACK_START(c_p));
1468    c_p->stop = prev;
1469    if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO) && return_to_trace_ptr) {
 1470	/* The stackframe closest to the catch contained a
1471	 * return_to_trace entry, so since the execution now
1472	 * continues after the catch, a return_to trace message 
1473	 * would be appropriate.
1474	 */
1475	erts_trace_return_to(c_p, cp_val(*return_to_trace_ptr));
1476    }
1477    return catch_pc(*ptr);
1478}
1479
1480/*
1481 * Terminating the process when an exception is not caught
1482 */
1483static void
1484terminate_proc(Process* c_p, Eterm Value)
1485{
1486    Eterm *hp;
1487    Eterm Args = NIL;
1488
1489    /* Add a stacktrace if this is an error. */
1490    if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
1491        Value = add_stacktrace(c_p, Value, c_p->ftrace);
1492    }
1493    c_p->ftrace = NIL;
1494
1495    /* EXF_LOG is a primary exception flag */
1496    if (c_p->freason & EXF_LOG) {
1497	int alive = erts_is_alive;
1498	erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
1499
1500        /* Build the format message */
1501	erts_dsprintf(dsbufp, "Error in process ~p ");
1502	if (alive)
1503	    erts_dsprintf(dsbufp, "on node ~p ");
1504	erts_dsprintf(dsbufp, "with exit value:~n~p~n");
1505
1506        /* Build the args in reverse order */
1507	hp = HAlloc(c_p, 2);
1508	Args = CONS(hp, Value, Args);
1509	if (alive) {
1510	    hp = HAlloc(c_p, 2);
1511	    Args = CONS(hp, erts_this_node->sysname, Args);
1512	}
1513	hp = HAlloc(c_p, 2);
1514	Args = CONS(hp, c_p->common.id, Args);
1515
1516	erts_send_error_term_to_logger(c_p->group_leader, dsbufp, Args);
1517    }
1518    /*
1519     * If we use a shared heap, the process will be garbage-collected.
1520     * Must zero c_p->arity to indicate that there are no live registers.
1521     */
1522    c_p->arity = 0;
1523    erts_do_exit_process(c_p, Value);
1524}
1525
1526/*
1527 * Build and add a symbolic stack trace to the error value.
1528 */
1529static Eterm
1530add_stacktrace(Process* c_p, Eterm Value, Eterm exc) {
1531    Eterm Where = build_stacktrace(c_p, exc);
1532    Eterm* hp = HAlloc(c_p, 3);
1533    return TUPLE2(hp, Value, Where);
1534}
1535
1536/*
1537 * Forming the correct error value from the internal error code.
1538 * This does not update c_p->fvalue or c_p->freason.
1539 */
1540Eterm
1541expand_error_value(Process* c_p, Uint freason, Eterm Value) {
1542    Eterm* hp;
1543    Uint r;
1544
1545    r = GET_EXC_INDEX(freason);
1546    ASSERT(r < NUMBER_EXIT_CODES); /* range check */
1547    ASSERT(is_value(Value));
1548
1549    switch (r) {
1550    case (GET_EXC_INDEX(EXC_PRIMARY)):
1551        /* Primary exceptions use fvalue as it is */
1552	break;
1553    case (GET_EXC_INDEX(EXC_BADMATCH)):
1554    case (GET_EXC_INDEX(EXC_CASE_CLAUSE)):
1555    case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
1556    case (GET_EXC_INDEX(EXC_BADFUN)):
1557    case (GET_EXC_INDEX(EXC_BADARITY)):
1558    case (GET_EXC_INDEX(EXC_BADMAP)):
1559    case (GET_EXC_INDEX(EXC_BADKEY)):
1560        /* Some common exceptions: value -> {atom, value} */
1561        ASSERT(is_value(Value));
1562	hp = HAlloc(c_p, 3);
1563	Value = TUPLE2(hp, error_atom[r], Value);
1564	break;
1565    default:
1566        /* Other exceptions just use an atom as descriptor */
1567        Value = error_atom[r];
1568	break;
1569    }
1570#ifdef DEBUG
1571    ASSERT(Value != am_internal_error);
1572#endif
1573    return Value;
1574}
1575
1576
1577static void
1578gather_stacktrace(Process* p, struct StackTrace* s, int depth)
1579{
1580    BeamInstr *prev;
1581    Eterm *ptr;
1582
1583    if (depth == 0) {
1584        return;
1585    }
1586
1587    prev = s->depth ? s->trace[s->depth - 1] : s->pc;
1588    ptr = p->stop;
1589
1590    /*
1591     * Traverse the stack backwards and add all unique continuation
1592     * pointers to the buffer, up to the maximum stack trace size.
1593     *
1594     * Skip trace stack frames.
1595     */
1596
1597    ASSERT(ptr >= STACK_TOP(p) && ptr <= STACK_START(p));
1598
1599    while (ptr < STACK_START(p) && depth > 0) {
1600        if (is_CP(*ptr)) {
1601            BeamInstr *cp = cp_val(*ptr);
1602
1603            if (cp == beam_exception_trace || cp == beam_return_trace) {
1604                ptr += 3;
1605            } else if (cp == beam_return_time_trace) {
1606                ptr += 2;
1607            } else if (cp == beam_return_to_trace) {
1608                ptr += 1;
1609            } else {
1610                if (cp != prev) {
1611                    /* Record non-duplicates only */
1612                    prev = cp;
1613                    s->trace[s->depth++] = cp - 1;
1614                    depth--;
1615                }
1616                ptr++;
1617            }
1618        } else {
1619            ptr++;
1620        }
1621    }
1622}
1623
1624/*
1625 * Quick-saving the stack trace in an internal form on the heap. Note
1626 * that c_p->ftrace will point to a cons cell which holds the given args
1627 * and the saved data (encoded as a bignum).
1628 *
1629 * There is an issue with line number information. Line number
1630 * information is associated with the address *before* an operation
 1631 * that may fail or be stored on the stack. But a continuation
 1632 * pointer points after its call instruction, not before. To avoid
1633 * finding the wrong line number, we'll need to adjust them so that
1634 * they point at the beginning of the call instruction or inside the
 1635 * call instruction. Since it's impractical to point at the beginning,
1636 * we'll do the simplest thing and decrement the continuation pointers
1637 * by one.
1638 *
1639 * Here is an example of what can go wrong. Without the adjustment
1640 * of continuation pointers, the call at line 42 below would seem to
1641 * be at line 43:
1642 *
1643 * line 42
1644 * call ...
1645 * line 43
1646 * gc_bif ...
1647 *
1648 * (It would be much better to put the arglist - when it exists - in the
1649 * error value instead of in the actual trace; e.g. '{badarg, Args}'
1650 * instead of using 'badarg' with Args in the trace. The arglist may
1651 * contain very large values, and right now they will be kept alive as
1652 * long as the stack trace is live. Preferably, the stack trace should
1653 * always be small, so that it does not matter if it is long-lived.
1654 * However, it is probably not possible to ever change the format of
1655 * error terms.)
1656 */
1657
1658static void
1659save_stacktrace(Process* c_p, BeamInstr*

Large files are truncated; the listing ends here and the remainder of beam_emu.c is not shown.