/erts/emulator/beam/erl_process.c
C | 9869 lines | 7843 code | 1360 blank | 666 comment | 1576 complexity | 1f9bd5fd36712c5cabe6c033675490be MD5 | raw file
Possible License(s): BSD-2-Clause
Large files are truncated, but you can click here to view the full file
- /*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
- #define ERL_PROCESS_C__
- #ifdef HAVE_CONFIG_H
- # include "config.h"
- #endif
- #include <stddef.h> /* offsetof() */
- #include "sys.h"
- #include "erl_vm.h"
- #include "global.h"
- #include "erl_process.h"
- #include "erl_nmgc.h"
- #include "error.h"
- #include "bif.h"
- #include "erl_db.h"
- #include "dist.h"
- #include "beam_catches.h"
- #include "erl_instrument.h"
- #include "erl_threads.h"
- #include "erl_binary.h"
- #include "beam_bp.h"
- #include "erl_cpu_topology.h"
- #include "erl_thr_progress.h"
- #include "erl_thr_queue.h"
- #include "erl_async.h"
- #define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
- #define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
- (ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED/2)
- #define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
- #define ERTS_SCHED_SPIN_UNTIL_YIELD 100
- #define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10
- #define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000
- #define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \
- (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT)
- #define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0
- #define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS)
- #define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS)
- #define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM (10*CONTEXT_REDS)
- #define ERTS_WAKEUP_OTHER_LIMIT_LOW (CONTEXT_REDS)
- #define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW (CONTEXT_REDS/10)
- #define ERTS_WAKEUP_OTHER_DEC 10
- #define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10)
- #if 0 || defined(DEBUG)
- #define ERTS_FAKE_SCHED_BIND_PRINT_SORTED_CPU_DATA
- #endif
- #if defined(DEBUG) && 0
- #define HARDDEBUG
- #else
- #undef HARDDEBUG
- #endif
- #ifdef HARDDEBUG
- #define HARDDEBUG_RUNQS
- #endif
- #ifdef HIPE
- #include "hipe_mode_switch.h" /* for hipe_init_process() */
- #include "hipe_signal.h" /* for hipe_thread_signal_init() */
- #endif
- #ifdef ERTS_ENABLE_LOCK_COUNT
- #include "erl_lock_count.h"
- #endif
- #define MAX_BIT (1 << PRIORITY_MAX)
- #define HIGH_BIT (1 << PRIORITY_HIGH)
- #define NORMAL_BIT (1 << PRIORITY_NORMAL)
- #define LOW_BIT (1 << PRIORITY_LOW)
- #define ERTS_MAYBE_SAVE_TERMINATING_PROCESS(P) \
- do { \
- ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx)); \
- if (saved_term_procs.end) \
- save_terminating_process((P)); \
- } while (0)
- #define ERTS_EMPTY_RUNQ(RQ) \
- ((RQ)->len == 0 && (RQ)->misc.start == NULL)
- extern BeamInstr beam_apply[];
- extern BeamInstr beam_exit[];
- extern BeamInstr beam_continue_exit[];
- static Sint p_last;
- static Sint p_next;
- static Sint p_serial;
- static Uint p_serial_mask;
- static Uint p_serial_shift;
- int erts_sched_compact_load;
- Uint erts_no_schedulers;
- Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
- Uint erts_process_tab_index_mask;
- static int wakeup_other_limit;
- int erts_sched_thread_suggested_stack_size = -1;
- #ifdef ERTS_ENABLE_LOCK_CHECK
- ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
- #endif
- #ifdef ERTS_SMP
- int erts_disable_proc_not_running_opt;
- static ErtsAuxWorkData *aux_thread_aux_work_data;
- #define ERTS_SCHDLR_SSPND_CHNG_WAITER (((erts_aint32_t) 1) << 0)
- #define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
- #define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
- #ifndef DEBUG
- #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL))
- #else
- #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
- do { \
- erts_aint32_t old_val__; \
- old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.changing, \
- (VAL)); \
- ASSERT(old_val__ == (OLD_VAL)); \
- } while (0)
- #endif
- static struct {
- erts_smp_mtx_t mtx;
- erts_smp_cnd_t cnd;
- int online;
- int curr_online;
- int wait_curr_online;
- erts_smp_atomic32_t changing;
- erts_smp_atomic32_t active;
- struct {
- int ongoing;
- long wait_active;
- ErtsProcList *procs;
- } msb; /* Multi Scheduling Block */
- } schdlr_sspnd;
- static struct {
- erts_smp_mtx_t update_mtx;
- erts_smp_atomic32_t no_runqs;
- int last_active_runqs;
- int forced_check_balance;
- erts_smp_atomic32_t checking_balance;
- int halftime;
- int full_reds_history_index;
- struct {
- int active_runqs;
- int reds;
- int max_len;
- } prev_rise;
- Uint n;
- } balance_info;
- #define ERTS_BLNCE_SAVE_RISE(ACTIVE, MAX_LEN, REDS) \
- do { \
- balance_info.prev_rise.active_runqs = (ACTIVE); \
- balance_info.prev_rise.max_len = (MAX_LEN); \
- balance_info.prev_rise.reds = (REDS); \
- } while (0)
- #endif
- erts_sched_stat_t erts_sched_stat;
- #ifdef USE_THREADS
- static erts_tsd_key_t sched_data_key;
- #endif
- static erts_smp_mtx_t proc_tab_mtx;
- static erts_smp_atomic32_t function_calls;
- #ifdef ERTS_SMP
- static erts_smp_atomic32_t doing_sys_schedule;
- static erts_smp_atomic32_t no_empty_run_queues;
- #else /* !ERTS_SMP */
- ErtsSchedulerData *erts_scheduler_data;
- #endif
- ErtsAlignedRunQueue *erts_aligned_run_queues;
- Uint erts_no_run_queues;
- ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
- typedef union {
- ErtsSchedulerSleepInfo ssi;
- char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
- } ErtsAlignedSchedulerSleepInfo;
- static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
- Process** process_tab;
- static Uint last_reductions;
- static Uint last_exact_reductions;
- Uint erts_default_process_flags;
- Eterm erts_system_monitor;
- Eterm erts_system_monitor_long_gc;
- Eterm erts_system_monitor_large_heap;
- struct erts_system_monitor_flags_t erts_system_monitor_flags;
- /* system performance monitor */
- Eterm erts_system_profile;
- struct erts_system_profile_flags_t erts_system_profile_flags;
- #ifdef HYBRID
- Uint erts_num_active_procs;
- Process** erts_active_procs;
- #endif
- #if ERTS_MAX_PROCESSES > 0x7fffffff
- #error "Need to store process_count in another type"
- #endif
- static erts_smp_atomic32_t process_count;
- typedef struct ErtsTermProcElement_ ErtsTermProcElement;
- struct ErtsTermProcElement_ {
- ErtsTermProcElement *next;
- ErtsTermProcElement *prev;
- int ix;
- union {
- struct {
- Eterm pid;
- SysTimeval spawned;
- SysTimeval exited;
- } process;
- struct {
- SysTimeval time;
- } bif_invocation;
- } u;
- };
- static struct {
- ErtsTermProcElement *start;
- ErtsTermProcElement *end;
- } saved_term_procs;
- ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list,
- ErtsMiscOpList,
- 10,
- ERTS_ALC_T_MISC_OP_LIST)
- ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
- ErtsProcList,
- 200,
- ERTS_ALC_T_PROC_LIST)
- #define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT_EXPR(-1 <= ((int) (IX)) \
- && ((int) (IX)) < ((int) erts_no_schedulers)), \
- &aligned_sched_sleep_info[(IX)].ssi)
- #define ERTS_FOREACH_RUNQ(RQVAR, DO) \
- do { \
- ErtsRunQueue *RQVAR; \
- int ix__; \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
- RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
- { DO; } \
- erts_smp_runq_unlock(RQVAR); \
- } \
- } while (0)
- #define ERTS_FOREACH_OP_RUNQ(RQVAR, DO) \
- do { \
- ErtsRunQueue *RQVAR; \
- int ix__; \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \
- for (ix__ = 0; ix__ < schdlr_sspnd.online; ix__++) { \
- RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
- { DO; } \
- erts_smp_runq_unlock(RQVAR); \
- } \
- } while (0)
- #define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, DOX) \
- do { \
- ErtsRunQueue *RQVAR; \
- int ix__; \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
- RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
- { DO; } \
- } \
- { DOX; } \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) \
- erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \
- } while (0)
- #define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
- ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, )
- /*
- * Local functions.
- */
- static void init_processes_bif(void);
- static void save_terminating_process(Process *p);
- static void exec_misc_ops(ErtsRunQueue *);
- static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
- static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
- int yreg);
- static void aux_work_timeout(void *unused);
- static void aux_work_timeout_early_init(int no_schedulers);
- static void aux_work_timeout_late_init(void);
- static void setup_aux_work_timer(void);
- #if defined(DEBUG) || 0
- #define ERTS_DBG_CHK_AUX_WORK_VAL(V) dbg_chk_aux_work_val((V))
/*
 * Debug-only sanity check: abort the emulator if 'value' contains any
 * aux-work flag bit that is not valid for this build configuration.
 * The set of valid bits depends on compile-time features (async ready
 * queue, SMP, mseg, child checking).
 */
static void
dbg_chk_aux_work_val(erts_aint32_t value)
{
    erts_aint32_t valid = 0;

    /* Flags valid in every configuration. */
    valid |= ERTS_SSI_AUX_WORK_SET_TMO;
    valid |= ERTS_SSI_AUX_WORK_MISC;
    valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM;
    valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
#if ERTS_USE_ASYNC_READY_Q
    valid |= ERTS_SSI_AUX_WORK_ASYNC_READY;
    valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
#endif
#ifdef ERTS_SMP
    valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
    valid |= ERTS_SSI_AUX_WORK_DD;
    valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
#endif
#if HAVE_ERTS_MSEG
    valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
#endif
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
    valid |= ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
#endif

    /* Any bit outside 'valid' is a corruption/programming error. */
    if (~valid & value)
	erl_exit(ERTS_ABORT_EXIT,
		 "Invalid aux_work value found: 0x%x\n",
		 ~valid & value);
}
- #define ERTS_DBG_CHK_SSI_AUX_WORK(SSI) \
- ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&(SSI)->aux_work))
- #else
- #define ERTS_DBG_CHK_AUX_WORK_VAL(V)
- #define ERTS_DBG_CHK_SSI_AUX_WORK(SSI)
- #endif
- #ifdef ERTS_SMP
- static void handle_pending_exiters(ErtsProcList *);
- #endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
/* Lock-checker helper: non-zero when the calling thread holds the
 * given run queue's mutex. Compiled only in SMP lock-check builds. */
int
erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
{
    return erts_smp_lc_mtx_is_locked(&runq->mtx);
}
#endif
/*
 * Very early process-subsystem initialization. Creates the TSD key
 * used to find the current scheduler's data and, in lock-check builds,
 * fills in the table of locks required to get/set each per-process
 * specific-data (PSD) slot.
 */
void
erts_pre_init_process(void)
{
#ifdef USE_THREADS
    erts_tsd_key_create(&sched_data_key);
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    {
	int ix;
	/* Required lock sets per PSD slot, used by the lock checker to
	 * validate accesses via erts_psd_get/set. */
	erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].get_locks
	    = ERTS_PSD_ERROR_HANDLER_BUF_GET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].set_locks
	    = ERTS_PSD_ERROR_HANDLER_BUF_SET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].get_locks
	    = ERTS_PSD_SAVED_CALLS_BUF_GET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].set_locks
	    = ERTS_PSD_SAVED_CALLS_BUF_SET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_SCHED_ID].get_locks
	    = ERTS_PSD_SCHED_ID_GET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_SCHED_ID].set_locks
	    = ERTS_PSD_SCHED_ID_SET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
	    = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
	    = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].get_locks
	    = ERTS_PSD_CALL_TIME_BP_GET_LOCKS;
	erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks
	    = ERTS_PSD_CALL_TIME_BP_SET_LOCKS;

	/* Check that we have locks for all entries */
	for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
	    ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks);
	    ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks);
	}
    }
#endif
}
- /* initialize the scheduler */
/* initialize the scheduler */
/*
 * Sets up the process table and the pid layout. A pid's data field is
 * split into an index part (low p_serial_shift bits, selecting the
 * process_tab slot) and a serial part (remaining proc_bits bits,
 * incremented each time a slot is reused). R9 compatibility mode
 * shrinks proc_bits.
 */
void
erts_init_process(int ncpu)
{
    Uint proc_bits = ERTS_PROC_BITS;

#ifdef ERTS_SMP
    erts_disable_proc_not_running_opt = 0;
    erts_init_proc_lock(ncpu);
#endif

    init_proclist_alloc();

    erts_smp_atomic32_init_nob(&process_count, 0);

    if (erts_use_r9_pids_ports) {
	/* Old (R9) pid format has fewer bits available. */
	proc_bits = ERTS_R9_PROC_BITS;
	ASSERT(erts_max_processes <= (1 << ERTS_R9_PROC_BITS));
    }

    process_tab = (Process**) erts_alloc(ERTS_ALC_T_PROC_TABLE,
					 erts_max_processes*sizeof(Process*));
    sys_memzero(process_tab, erts_max_processes * sizeof(Process*));
#ifdef HYBRID
    erts_active_procs = (Process**)
        erts_alloc(ERTS_ALC_T_ACTIVE_PROCS,
                   erts_max_processes * sizeof(Process*));
    erts_num_active_procs = 0;
#endif

    erts_smp_mtx_init(&proc_tab_mtx, "proc_tab");
    p_last = -1;
    p_next = 0;
    p_serial = 0;

    /* Index field is just wide enough for erts_max_processes slots;
     * the serial field gets whatever is left of proc_bits. */
    p_serial_shift = erts_fit_in_bits(erts_max_processes - 1);
    p_serial_mask = ((~(~((Uint) 0) << proc_bits)) >> p_serial_shift);
    erts_process_tab_index_mask = ~(~((Uint) 0) << p_serial_shift);
    last_reductions = 0;
    last_exact_reductions = 0;
    erts_default_process_flags = 0;
}
- void
- erts_late_init_process(void)
- {
- int ix;
- init_processes_bif();
- erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
- for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
- Eterm atom;
- char *atom_str;
- switch (ix) {
- case PRIORITY_MAX:
- atom_str = "process_max";
- break;
- case PRIORITY_HIGH:
- atom_str = "process_high";
- break;
- case PRIORITY_NORMAL:
- atom_str = "process_normal";
- break;
- case PRIORITY_LOW:
- atom_str = "process_low";
- break;
- case ERTS_PORT_PRIO_LEVEL:
- atom_str = "port";
- break;
- default:
- atom_str = "bad_prio";
- ASSERT(!"bad prio");
- break;
- }
- atom = am_atom_put(atom_str, sys_strlen(atom_str));
- erts_sched_stat.prio[ix].name = atom;
- erts_sched_stat.prio[ix].total_executed = 0;
- erts_sched_stat.prio[ix].executed = 0;
- erts_sched_stat.prio[ix].total_migrated = 0;
- erts_sched_stat.prio[ix].migrated = 0;
- }
- }
- static void
- init_sched_wall_time(ErtsSchedWallTime *swtp)
- {
- swtp->enabled = 0;
- swtp->start = 0;
- swtp->working.total = 0;
- swtp->working.start = 0;
- swtp->working.currently = 0;
- }
- static ERTS_INLINE Uint64
- sched_wall_time_ts(void)
- {
- #ifdef HAVE_GETHRTIME
- return (Uint64) sys_gethrtime();
- #else
- Uint64 res;
- SysTimeval tv;
- sys_gettimeofday(&tv);
- res = (Uint64) tv.tv_sec*1000000;
- res += (Uint64) tv.tv_usec;
- return res;
- #endif
- }
/*
 * Record a transition between "working" and "not working" for this
 * scheduler's wall-time statistics. Timestamps are kept relative to
 * sched_wall_time.start. No-op while measurement is disabled. The
 * 'currently' flag is only maintained (and asserted) in DEBUG builds.
 */
static ERTS_INLINE void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
{
    if (esdp->sched_wall_time.enabled) {
	Uint64 ts = sched_wall_time_ts();
	if (working) {
#ifdef DEBUG
	    ASSERT(!esdp->sched_wall_time.working.currently);
	    esdp->sched_wall_time.working.currently = 1;
#endif
	    /* Remember (relative) point at which work started. */
	    ts -= esdp->sched_wall_time.start;
	    esdp->sched_wall_time.working.start = ts;
	}
	else {
#ifdef DEBUG
	    ASSERT(esdp->sched_wall_time.working.currently);
	    esdp->sched_wall_time.working.currently = 0;
#endif
	    /* Add the just-finished working interval to the total. */
	    ts -= esdp->sched_wall_time.start;
	    ts -= esdp->sched_wall_time.working.start;
	    esdp->sched_wall_time.working.total += ts;
	}
    }
}
/*
 * Request descriptor for scheduler_wall_time queries. One request is
 * shared by all schedulers; 'refc' counts outstanding replies and the
 * last scheduler to answer frees the request.
 */
typedef struct {
    int set;                           /* non-zero: enable/disable request */
    int enable;                        /* new enabled state when set != 0 */
    Process *proc;                     /* requesting process (refc held) */
    Eterm ref;                         /* request reference (on ref_heap) */
    Eterm ref_heap[REF_THING_SIZE];
    Uint req_sched;                    /* scheduler id of the requester */
    erts_smp_atomic32_t refc;
} ErtsSchedWallTimeReq;

#if !HALFWORD_HEAP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(swtreq,
				 ErtsSchedWallTimeReq,
				 5,
				 ERTS_ALC_T_SCHED_WTIME_REQ)
#else
/* Halfword heap: plain allocation instead of the quick allocator. */
static ERTS_INLINE ErtsSchedWallTimeReq *
swtreq_alloc(void)
{
    return erts_alloc(ERTS_ALC_T_SCHED_WTIME_REQ,
		      sizeof(ErtsSchedWallTimeReq));
}

static ERTS_INLINE void
swtreq_free(ErtsSchedWallTimeReq *ptr)
{
    erts_free(ERTS_ALC_T_SCHED_WTIME_REQ, ptr);
}
#endif
/*
 * Executed on each scheduler (via misc aux work) in response to an
 * erts_sched_wall_time_request(). Optionally flips this scheduler's
 * measurement state, then sends either {Ref, {SchedNo, Working, Total}}
 * or just Ref (for set-requests) back to the requesting process.
 *
 * The message is built with the usual two-pass pattern: first pass
 * computes the needed heap size (hpp == NULL, szp set), second pass
 * builds the term on the allocated heap.
 *
 * The last scheduler to reply (refc reaches zero) frees the request.
 */
static void
reply_sched_wall_time(void *vswtrp)
{
    Uint64 working = 0, total = 0;
    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    ErtsSchedWallTimeReq *swtrp = (ErtsSchedWallTimeReq *) vswtrp;
    /* If we are the requester's own scheduler its main lock is held. */
    ErtsProcLocks rp_locks = (swtrp->req_sched == esdp->no
			      ? ERTS_PROC_LOCK_MAIN
			      : 0);
    Process *rp = swtrp->proc;
    Eterm ref_copy = NIL, msg;
    Eterm *hp = NULL;
    Eterm **hpp;
    Uint sz, *szp;
    ErlOffHeap *ohp = NULL;
    ErlHeapFragment *bp = NULL;

    ASSERT(esdp);

    if (swtrp->set) {
	if (!swtrp->enable && esdp->sched_wall_time.enabled)
	    esdp->sched_wall_time.enabled = 0;
	else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
	    /* Start measuring from now; the scheduler is working
	     * (it is executing this request). */
	    Uint64 ts = sched_wall_time_ts();
	    esdp->sched_wall_time.enabled = 1;
	    esdp->sched_wall_time.start = ts;
	    esdp->sched_wall_time.working.total = 0;
	    esdp->sched_wall_time.working.start = 0;
	    esdp->sched_wall_time.working.currently = 1;
	}
    }

    if (esdp->sched_wall_time.enabled) {
	Uint64 ts = sched_wall_time_ts();
	ASSERT(esdp->sched_wall_time.working.currently);
	ts -= esdp->sched_wall_time.start;
	total = ts;
	ts -= esdp->sched_wall_time.working.start;
	working = esdp->sched_wall_time.working.total + ts;
    }

    sz = 0;
    hpp = NULL;
    szp = &sz;

    /* Two passes: size calculation, then term construction. */
    while (1) {
	if (hpp)
	    ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
	else
	    *szp += REF_THING_SIZE;

	if (swtrp->set)
	    msg = ref_copy;
	else {
	    msg = (!esdp->sched_wall_time.enabled
		   ? am_notsup
		   : erts_bld_tuple(hpp, szp, 3,
				    make_small(esdp->no),
				    erts_bld_uint64(hpp, szp, working),
				    erts_bld_uint64(hpp, szp, total)));

	    msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
	}
	if (hpp)
	    break;

	hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
	szp = NULL;
	hpp = &hp;
    }

    erts_queue_message(rp, &rp_locks, bp, msg, NIL);

    if (swtrp->req_sched == esdp->no)
	rp_locks &= ~ERTS_PROC_LOCK_MAIN;

    if (rp_locks)
	erts_smp_proc_unlock(rp, rp_locks);

    erts_smp_proc_dec_refc(rp);

    /* Last replier frees the shared request. */
    if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0)
	swtreq_free(vswtrp);
}
/*
 * Initiate a scheduler_wall_time request from process c_p. Fans the
 * request out to all schedulers (each will call reply_sched_wall_time)
 * and returns the reference the replies will be tagged with, or
 * THE_NON_VALUE when reading while measurement is disabled.
 *
 * A reference on c_p is taken per expected reply; each replier drops
 * one.
 */
Eterm
erts_sched_wall_time_request(Process *c_p, int set, int enable)
{
    ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
    Eterm ref;
    ErtsSchedWallTimeReq *swtrp;
    Eterm *hp;

    if (!set && !esdp->sched_wall_time.enabled)
	return THE_NON_VALUE;

    swtrp = swtreq_alloc();
    ref = erts_make_ref(c_p);
    hp = &swtrp->ref_heap[0];

    swtrp->set = set;
    swtrp->enable = enable;
    swtrp->proc = c_p;
    swtrp->ref = STORE_NC(&hp, NULL, ref);
    swtrp->req_sched = esdp->no;
    erts_smp_atomic32_init_nob(&swtrp->refc,
			       (erts_aint32_t) erts_no_schedulers);
    erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);

#ifdef ERTS_SMP
    /* Ask every other scheduler to reply... */
    if (erts_no_schedulers > 1)
	erts_schedule_multi_misc_aux_work(1,
					  erts_no_schedulers,
					  reply_sched_wall_time,
					  (void *) swtrp);
#endif

    /* ...and answer for the current scheduler directly. */
    reply_sched_wall_time((void *) swtrp);

    return ref;
}
/* Allocate a proclist element recording p's pid and start time; the
 * pair identifies a specific incarnation of the process. */
static ERTS_INLINE ErtsProcList *
proclist_create(Process *p)
{
    ErtsProcList *plp = proclist_alloc();
    plp->pid = p->id;
    plp->started = p->started;
    return plp;
}
/* Release a proclist element created by proclist_create(). */
static ERTS_INLINE void
proclist_destroy(ErtsProcList *plp)
{
    proclist_free(plp);
}
/* True when plp refers to the same process incarnation as p: both the
 * pid and the start time must match (pids can be reused). */
static ERTS_INLINE int
proclist_same(ErtsProcList *plp, Process *p)
{
    return (plp->pid == p->id
	    && erts_cmp_timeval(&plp->started, &p->started) == 0);
}
/* Externally visible wrappers around the inline proclist helpers. */
ErtsProcList *
erts_proclist_create(Process *p)
{
    return proclist_create(p);
}

void
erts_proclist_destroy(ErtsProcList *plp)
{
    proclist_destroy(plp);
}

int
erts_proclist_same(ErtsProcList *plp, Process *p)
{
    return proclist_same(plp, p);
}
/*
 * Slow path of setting a per-process specific-data (PSD) slot when the
 * PSD block does not exist yet. Allocates and installs a fresh ErtsPSD,
 * then stores 'data' in slot 'ix' and returns the previous value.
 *
 * Installing requires all process locks; the function upgrades from
 * the caller's 'plocks' while respecting lock order. If the main lock
 * must be acquired, all held locks are released first and a reference
 * is taken so the process cannot disappear meanwhile. Another thread
 * may install a PSD block while our locks are dropped, in which case
 * our allocation is freed and theirs is used.
 *
 * Caller must hold (exactly) 'plocks' on entry; the same locks are
 * held on return.
 */
void *
erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
{
    void *old;
    ErtsProcLocks xplocks;
    int refc = 0;
    ErtsPSD *psd = erts_alloc(ERTS_ALC_T_PSD, sizeof(ErtsPSD));
    int i;
    for (i = 0; i < ERTS_PSD_SIZE; i++)
	psd->data[i] = NULL;

    ERTS_SMP_LC_ASSERT(plocks);
    ERTS_SMP_LC_ASSERT(plocks == erts_proc_lc_my_proc_locks(p));

    /* Locks we still need on top of what the caller holds. */
    xplocks = ERTS_PROC_LOCKS_ALL;
    xplocks &= ~plocks;
    if (xplocks && erts_smp_proc_trylock(p, xplocks) == EBUSY) {
	if (xplocks & ERTS_PROC_LOCK_MAIN) {
	    /* Must release everything and re-acquire in lock order;
	     * hold a reference so p survives the unlocked window. */
	    erts_smp_proc_inc_refc(p);
	    erts_smp_proc_unlock(p, plocks);
	    erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL);
	    refc = 1;
	}
	else {
	    /* Main lock already held; only juggle the minor locks. */
	    if (plocks & ERTS_PROC_LOCKS_ALL_MINOR)
		erts_smp_proc_unlock(p, plocks & ERTS_PROC_LOCKS_ALL_MINOR);
	    erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
	}
    }

    /* Install our block unless someone beat us to it. */
    if (!p->psd)
	p->psd = psd;
    if (xplocks)
	erts_smp_proc_unlock(p, xplocks);
    if (refc)
	erts_smp_proc_dec_refc(p);
    ASSERT(p->psd);
    if (p->psd != psd)
	erts_free(ERTS_ALC_T_PSD, psd);

    old = p->psd->data[ix];
    p->psd->data[ix] = data;

    ERTS_SMP_LC_ASSERT(plocks == erts_proc_lc_my_proc_locks(p));
    return old;
}
#ifdef ERTS_SMP
/*
 * Wake a sleeping scheduler according to the sleep-type bits found in
 * 'flags': interrupt the poll set for a poll-sleeper, set the thread
 * event for a tse-sleeper, or both when the scheduler is blocked on
 * thread progress while poll sleeping. flags == 0 means the scheduler
 * was not sleeping; nothing to do.
 */
void
erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
{
    switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
    case ERTS_SSI_FLG_POLL_SLEEPING:
	erts_sys_schedule_interrupt(1);
	break;
    case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING:
	/*
	 * Thread progress blocking while poll sleeping; need
	 * to signal on both...
	 */
	erts_sys_schedule_interrupt(1);
	/* fall through */
    case ERTS_SSI_FLG_TSE_SLEEPING:
	erts_tse_set(ssi->event);
	break;
    case 0:
	break;
    default:
	erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
		 __FILE__, __LINE__);
	break;
    }
}
#endif
/*
 * Set aux-work flag bits on a scheduler and wake it if the bits were
 * not already set. The plain read first avoids the atomic RMW in the
 * common case where the flags are already pending; only the thread
 * that actually transitions a bit from 0 to 1 performs the wakeup.
 * ("nob" presumably means no memory barrier ordering — confirm against
 * the erts atomics API.)
 */
static ERTS_INLINE void
set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
			      erts_aint32_t flgs)
{
    erts_aint32_t old_flgs;

    ERTS_DBG_CHK_SSI_AUX_WORK(ssi);

    old_flgs = erts_atomic32_read_nob(&ssi->aux_work);
    if ((old_flgs & flgs) == 0) {

	old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);

	if ((old_flgs & flgs) == 0) {
#ifdef ERTS_SMP
	    erts_sched_poke(ssi);
#else
	    erts_sys_schedule_interrupt(1);
#endif
	}
    }
}
#if 0 /* Currently not used */
/* Variant of set_aux_work_flags_wakeup_nob() using release-barrier
 * ordering on the bor; kept for future use, compiled out. */
static ERTS_INLINE void
set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
			       erts_aint32_t flgs)
{
    erts_aint32_t old_flgs;

    ERTS_DBG_CHK_SSI_AUX_WORK(ssi);

    old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs);

    if ((old_flgs & flgs) == 0) {
#ifdef ERTS_SMP
	erts_sched_poke(ssi);
#else
	erts_sys_schedule_interrupt(1);
#endif
    }
}
#endif
/* Atomically OR 'flgs' into the scheduler's aux-work flags; returns
 * the previous flag value (no wakeup is performed). */
static ERTS_INLINE erts_aint32_t
set_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
{
    return erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
}
/* Atomically clear 'flgs' from the scheduler's aux-work flags; returns
 * the previous flag value. */
static ERTS_INLINE erts_aint32_t
unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
{
    return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs);
}
#ifdef ERTS_SMP
/* Invalidate the cached thread-progress value; the next call to
 * thr_prgr_current() will re-read it. */
static ERTS_INLINE void
thr_prgr_current_reset(ErtsAuxWorkData *awdp)
{
    awdp->current_thr_prgr = ERTS_THR_PRGR_INVALID;
}

/* Return the current thread-progress value, cached per aux-work pass
 * so repeated handlers don't re-read the global value. */
static ERTS_INLINE ErtsThrPrgrVal
thr_prgr_current(ErtsAuxWorkData *awdp)
{
    ErtsThrPrgrVal current = awdp->current_thr_prgr;
    if (current == ERTS_THR_PRGR_INVALID) {
	current = erts_thr_progress_current();
	awdp->current_thr_prgr = current;
    }
    return current;
}
#endif
/* A unit of "misc aux work": a callback plus its argument, queued to
 * run on a specific scheduler (or the aux thread). */
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
struct erts_misc_aux_work_t_ {
    void (*func)(void *);
    void *arg;
};

ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_aux_work,
				 erts_misc_aux_work_t,
				 200,
				 ERTS_ALC_T_MISC_AUX_WORK)

/* One cache-line-aligned thread queue per consumer thread. */
typedef union {
    ErtsThrQ_t q;
    char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrQ_t))];
} erts_algnd_misc_aux_work_q_t;

/* Index 0 is the aux thread (SMP only); 1..erts_no_schedulers are the
 * scheduler threads. */
static erts_algnd_misc_aux_work_q_t *misc_aux_work_queues;
- static void
- notify_aux_work(void *vssi)
- {
- set_aux_work_flags_wakeup_nob((ErtsSchedulerSleepInfo *) vssi,
- ERTS_SSI_AUX_WORK_MISC);
- }
/*
 * Set up one misc-aux-work thread queue per consumer. In SMP builds
 * queue index 0 belongs to the aux thread; indices 1..erts_no_schedulers
 * belong to the scheduler threads. Each queue's notify callback pokes
 * the corresponding sleep-info entry (index -1 is the aux thread's,
 * which the ERTS_SCHED_SLEEP_INFO_IX macro explicitly allows).
 */
static void
init_misc_aux_work(void)
{
    int ix;
    ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;

    qinit.notify = notify_aux_work;

    init_misc_aux_work_alloc();

    misc_aux_work_queues = 
	erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MISC_AUX_WORK_Q,
					   sizeof(erts_algnd_misc_aux_work_q_t)
					   * (erts_no_schedulers+1));

#ifdef ERTS_SMP
    ix = 0; /* aux_thread + schedulers */
#else
    ix = 1; /* scheduler only */
#endif

    for (; ix <= erts_no_schedulers; ix++) {
	qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1);
	erts_thr_q_initialize(&misc_aux_work_queues[ix].q, &qinit);
    }
}
/*
 * Clean the misc-aux-work thread queue after draining it. Depending on
 * the queue state we either re-arm the MISC flag (more entries became
 * visible), or schedule a thread-progress-gated re-clean, or are done.
 * Returns the updated aux_work flag set for the caller's loop.
 */
static erts_aint32_t
misc_aux_work_clean(ErtsThrQ_t *q,
		    ErtsAuxWorkData *awdp,
		    erts_aint32_t aux_work)
{
    switch (erts_thr_q_clean(q)) {
    case ERTS_THR_Q_DIRTY:
	/* Entries remain; make sure we are called again. */
	set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
	return aux_work | ERTS_SSI_AUX_WORK_MISC;
    case ERTS_THR_Q_NEED_THR_PRGR:
#ifdef ERTS_SMP
	/* Re-clean once thread progress has been made. */
	set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
	erts_thr_progress_wakeup(awdp->esdp,
				 erts_thr_q_need_thr_progress(q));
#endif
	/* fall through (no thread progress handling without SMP) */
    case ERTS_THR_Q_CLEAN:
	break;
    }
    return aux_work;
}
- static ERTS_INLINE erts_aint32_t
- handle_misc_aux_work(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
- {
- ErtsThrQ_t *q = &misc_aux_work_queues[awdp->sched_id].q;
- unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
- while (1) {
- erts_misc_aux_work_t *mawp = erts_thr_q_dequeue(q);
- if (!mawp)
- break;
- mawp->func(mawp->arg);
- misc_aux_work_free(mawp);
- }
- return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC);
- }
#ifdef ERTS_SMP
/*
 * Handle the MISC_THR_PRGR flag: once thread progress has reached the
 * value recorded when cleaning was deferred, retry the queue clean.
 * If progress has not been reached yet, just drop the bit from this
 * pass; the flag remains set in ssi->aux_work so we'll be back.
 */
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
			      erts_aint32_t aux_work)
{
    if (!erts_thr_progress_has_reached_this(thr_prgr_current(awdp),
					    awdp->misc.thr_prgr))
	return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;

    unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);

    return misc_aux_work_clean(&misc_aux_work_queues[awdp->sched_id].q,
			       awdp,
			       aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
}
#endif
- static ERTS_INLINE void
- schedule_misc_aux_work(int sched_id,
- void (*func)(void *),
- void *arg)
- {
- ErtsThrQ_t *q;
- erts_misc_aux_work_t *mawp;
- #ifdef ERTS_SMP
- ASSERT(0 <= sched_id && sched_id <= erts_no_schedulers);
- #else
- ASSERT(sched_id == 1);
- #endif
- q = &misc_aux_work_queues[sched_id].q;
- mawp = misc_aux_work_alloc();
- mawp->func = func;
- mawp->arg = arg;
- erts_thr_q_enqueue(q, mawp);
- }
/* Public wrapper: schedule a misc-aux-work callback on one thread. */
void
erts_schedule_misc_aux_work(int sched_id,
			    void (*func)(void *),
			    void *arg)
{
    schedule_misc_aux_work(sched_id, func, arg);
}
- void
- erts_schedule_multi_misc_aux_work(int ignore_self,
- int max_sched,
- void (*func)(void *),
- void *arg)
- {
- int id, self = 0;
- if (ignore_self) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- if (esdp)
- self = (int) esdp->no;
- }
- ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
- for (id = 1; id <= max_sched; id++) {
- if (id == self)
- continue;
- schedule_misc_aux_work(id, func, arg);
- }
- }
- #if ERTS_USE_ASYNC_READY_Q
- void
- erts_notify_check_async_ready_queue(void *vno)
- {
- int ix = ((int) (SWord) vno) -1;
- set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix),
- ERTS_SSI_AUX_WORK_ASYNC_READY);
- }
/*
 * Handle the ASYNC_READY flag: process completed async jobs for this
 * scheduler. If more jobs remained, re-arm ASYNC_READY (and cancel a
 * pending CLEAN, which would be redundant). Otherwise switch over to
 * the ASYNC_READY_CLEAN phase.
 */
static ERTS_INLINE erts_aint32_t
handle_async_ready(ErtsAuxWorkData *awdp,
		   erts_aint32_t aux_work)
{
    ErtsSchedulerSleepInfo *ssi = awdp->ssi;
    unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
    if (erts_check_async_ready(awdp->async_ready.queue)) {
	/* More ready jobs; keep the flag set for another pass. */
	if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
	    & ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN) {
	    unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
	    aux_work &= ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
	}
	return aux_work;
    }
#ifdef ERTS_SMP
    awdp->async_ready.need_thr_prgr = 0;
#endif
    set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
    return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY)
	    | ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
}
/*
 * Handle the ASYNC_READY_CLEAN flag: clean the scheduler's async-ready
 * queue. In SMP builds cleaning may have to wait for thread progress;
 * in that case we record the progress value to wait for, request a
 * wakeup, and retry on a later pass.
 */
static ERTS_INLINE erts_aint32_t
handle_async_ready_clean(ErtsAuxWorkData *awdp,
			 erts_aint32_t aux_work)
{
    void *thr_prgr_p;

#ifdef ERTS_SMP
    if (awdp->async_ready.need_thr_prgr
	&& !erts_thr_progress_has_reached_this(thr_prgr_current(awdp),
					       awdp->async_ready.thr_prgr)) {
	/* Still waiting for thread progress; try again later. */
	return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
    }

    awdp->async_ready.need_thr_prgr = 0;
    thr_prgr_p = (void *) &awdp->async_ready.thr_prgr;
#else
    thr_prgr_p = NULL;
#endif

    switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) {
    case ERTS_ASYNC_READY_CLEAN:
	unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
	return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
#ifdef ERTS_SMP
    case ERTS_ASYNC_READY_NEED_THR_PRGR:
	erts_thr_progress_wakeup(awdp->esdp,
				 awdp->async_ready.thr_prgr);
	awdp->async_ready.need_thr_prgr = 1;
	return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
#endif
    default:
	return aux_work;
    }
}

#endif /* ERTS_USE_ASYNC_READY_Q */
/*
 * Handle the fix-alloc aux-work flags: ask the allocator to shrink its
 * fixed-size blocks for this scheduler. The allocator returns the flag
 * bits that should remain pending (work not finished), which are then
 * re-armed.
 */
static ERTS_INLINE erts_aint32_t
handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
    ErtsSchedulerSleepInfo *ssi = awdp->ssi;
    erts_aint32_t res;

    unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
			       | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
    aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
		  | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC);
    res = erts_alloc_fix_alloc_shrink(awdp->sched_id, aux_work);
    if (res) {
	/* Unfinished; keep the returned bits pending. */
	set_aux_work_flags(ssi, res);
	aux_work |= res;
    }

    return aux_work;
}
#ifdef ERTS_SMP

/* Called by the allocator: flag scheduler 'ix' (1-based id) that it
 * has delayed deallocation work, waking it if asleep. */
void
erts_alloc_notify_delayed_dealloc(int ix)
{
    set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
				  ERTS_SSI_AUX_WORK_DD);
}
- static ERTS_INLINE erts_aint32_t
- handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
- {
- ErtsSchedulerSleepInfo *ssi = awdp->ssi;
- int need_thr_progress = 0;
- ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
- int more_work = 0;
- unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
- erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
- &need_thr_progress,
- &wakeup,
- &more_work);
- if (more_work) {
- if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD)
- & ERTS_SSI_AUX_WORK_DD_THR_PRGR) {
- unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
- aux_work &= ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
- }
- return aux_work;
- }
- if (need_thr_progress) {
- if (wakeup == ERTS_THR_PRGR_INVALID)
- wakeup = erts_thr_progress_later_than(thr_prgr_current(awdp));
- awdp->dd.thr_prgr = wakeup;
- set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
- awdp->dd.thr_prgr = wakeup;
- erts_thr_progress_wakeup(awdp->esdp, wakeup);
- }
- else if (awdp->dd.completed_callback) {
- awdp->dd.completed_callback(awdp->dd.completed_arg);
- awdp->dd.completed_callback = NULL;
- awdp->dd.completed_arg = NULL;
- }
- return aux_work & ~ERTS_SSI_AUX_WORK_DD;
- }
/*
 * Handle the DD_THR_PRGR flag: once thread progress has reached the
 * value recorded by handle_delayed_dealloc(), retry the delayed
 * deallocation. May hand back to the plain DD flag (more work), defer
 * again on a new progress value, or finish and fire the completion
 * callback.
 */
static ERTS_INLINE erts_aint32_t
handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
    ErtsSchedulerSleepInfo *ssi;
    int need_thr_progress;
    int more_work;
    ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
    ErtsThrPrgrVal current = thr_prgr_current(awdp);

    /* Progress not reached yet; retry on a later pass. */
    if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
	return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;

    ssi = awdp->ssi;
    need_thr_progress = 0;
    more_work = 0;

    erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
						&need_thr_progress,
						&wakeup,
						&more_work);
    if (more_work) {
	/* Switch back to the plain DD flag. */
	set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
	unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
	return ((aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR)
		| ERTS_SSI_AUX_WORK_DD);
    }

    if (need_thr_progress) {
	/* Defer again until further thread progress has been made. */
	if (wakeup == ERTS_THR_PRGR_INVALID)
	    wakeup = erts_thr_progress_later_than(current);
	awdp->dd.thr_prgr = wakeup;
	erts_thr_progress_wakeup(awdp->esdp, wakeup);
    }
    else {
	unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
	if (awdp->dd.completed_callback) {
	    awdp->dd.completed_callback(awdp->dd.completed_arg);
	    awdp->dd.completed_callback = NULL;
	    awdp->dd.completed_arg = NULL;
	}
    }

    return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
}
/* Countdown used by erts_debug_wait_deallocations(); see below. */
static erts_atomic32_t completed_dealloc_count;

/* Completion callback installed on every scheduler/aux thread: the
 * last thread to finish resumes the waiting process and drops the
 * reference taken on it. */
static void
completed_dealloc(void *vproc)
{
    if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) {
	erts_resume((Process *) vproc, (ErtsProcLocks) 0);
	erts_smp_proc_dec_refc((Process *) vproc);
    }
}
/* Phase two of erts_debug_wait_deallocations(): runs on each
 * scheduler (and the aux thread, where esdp is NULL). Forces a
 * fix-alloc shrink, arms the DD aux work and installs
 * completed_dealloc() as the completion callback. */
static void
setup_completed_dealloc(void *vproc)
{
    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    ErtsAuxWorkData *awdp = (esdp
			     ? &esdp->aux_work_data
			     : aux_thread_aux_work_data);
    erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
    set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DD);
    awdp->dd.completed_callback = completed_dealloc;
    awdp->dd.completed_arg = vproc;
}
/*
 * First phase of erts_debug_wait_deallocations(): runs once on every
 * scheduler thread and on the aux thread.  The last of the
 * erts_no_schedulers+1 threads to decrement the counter down to
 * erts_no_schedulers+1 (from the initial 2*(erts_no_schedulers+1))
 * schedules the second phase, setup_completed_dealloc(), everywhere.
 */
static void
prep_setup_completed_dealloc(void *vproc)
{
    erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers+1);
    if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == count) {
	/* scheduler threads */
	erts_schedule_multi_misc_aux_work(0,
					  erts_no_schedulers,
					  setup_completed_dealloc,
					  vproc);
	/* aux_thread */
	erts_schedule_misc_aux_work(0,
				    setup_completed_dealloc,
				    vproc);
    }
}
- #endif /* ERTS_SMP */
/*
 * Debug helper: suspend the calling process until all schedulers and
 * the aux thread have completed their pending delayed deallocations.
 *
 * Returns 1 if the operation was initiated (or, in non-SMP builds,
 * performed synchronously), 0 if another process is already waiting
 * on this mechanism.
 */
int
erts_debug_wait_deallocations(Process *c_p)
{
#ifndef ERTS_SMP
    erts_alloc_fix_alloc_shrink(1, 0);
    return 1;
#else
    /* Only one process at a time can do this */
    erts_aint32_t count = (erts_aint32_t) (2*(erts_no_schedulers+1));
    /* The counter is claimed by moving it from 0 to twice the number
     * of participating threads: one decrement per thread in each of
     * the two phases (prep_setup_* and completed_dealloc()). */
    if (0 == erts_atomic32_cmpxchg_mb(&completed_dealloc_count,
				      count,
				      0)) {
	erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
	/* Keep the process alive until completed_dealloc() resumes it. */
	erts_smp_proc_inc_refc(c_p);
	/* scheduler threads */
	erts_schedule_multi_misc_aux_work(0,
					  erts_no_schedulers,
					  prep_setup_completed_dealloc,
					  (void *) c_p);
	/* aux_thread */
	erts_schedule_misc_aux_work(0,
				    prep_setup_completed_dealloc,
				    (void *) c_p);
	return 1;
    }
    return 0;
#endif
}
- #ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- void
- erts_smp_notify_check_children_needed(void)
- {
- int i;
- for (i = 0; i < erts_no_schedulers; i++)
- set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- }
/*
 * Aux work handler: check for exited child (OS) processes.  Clears
 * the ssi flag before doing the work so a new request raised while
 * checking is not lost.
 */
static ERTS_INLINE erts_aint32_t
handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
    unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
    erts_check_children();
    return aux_work & ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
}
- #endif
- #if HAVE_ERTS_MSEG
/*
 * Aux work handler: let the mseg allocator inspect/trim its segment
 * cache.  Flag is cleared before the work, as for all handlers here.
 */
static ERTS_INLINE erts_aint32_t
handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
    unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK);
    erts_mseg_cache_check();
    return aux_work & ~ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
}
- #endif
/*
 * Aux work handler: (re)arm the one-second aux work timer.  Used to
 * move the erts_set_timer() call onto a scheduler thread (see
 * setup_aux_work_timer()).
 */
static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
    unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
    setup_aux_work_timer();
    return aux_work & ~ERTS_SSI_AUX_WORK_SET_TMO;
}
/*
 * Dispatch all pending aux work flags to their handlers, most
 * frequent flag first.  'ignore' accumulates the flags already
 * dispatched so we can return early once no undispatched flags
 * remain.  Returns the flags still pending after handling (work that
 * can continue immediately).
 */
static erts_aint32_t
handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work)
{
#undef HANDLE_AUX_WORK
#define HANDLE_AUX_WORK(FLG, HNDLR) \
    ignore |= FLG; \
    if (aux_work & FLG) { \
	aux_work = HNDLR(awdp, aux_work);   \
	ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
	if (!(aux_work & ~ignore)) { \
	    ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
	    return aux_work; \
	} \
    }

    erts_aint32_t aux_work = orig_aux_work;
    erts_aint32_t ignore = 0;

#ifdef ERTS_SMP
    thr_prgr_current_reset(awdp);
#endif

    ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
    ASSERT(aux_work);

    /*
     * Handlers are *only* allowed to modify flags in return value
     * and ssi flags that are explicity handled by the handler.
     * Handlers are, e.g., not allowed to read the ssi flag field and
     * then unconditionally return that value.
     *
     * Flag field returned should only contain flags for work that
     * can continue immediately.
     */

    /*
     * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
     * eachother. Most frequent first.
     */
#ifdef ERTS_SMP
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD,
		    handle_delayed_dealloc);
    /* DD must be before DD_THR_PRGR */
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD_THR_PRGR,
		    handle_delayed_dealloc_thr_prgr);
#endif

    HANDLE_AUX_WORK((ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
		     | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
		    handle_fix_alloc);

#if ERTS_USE_ASYNC_READY_Q
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY,
		    handle_async_ready);
    /* ASYNC_READY must be before ASYNC_READY_CLEAN */
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN,
		    handle_async_ready_clean);
#endif

#ifdef ERTS_SMP
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC_THR_PRGR,
		    handle_misc_aux_work_thr_prgr);
#endif
    /* MISC_THR_PRGR must be before MISC */
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
		    handle_misc_aux_work);

#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CHECK_CHILDREN,
		    handle_check_children);
#endif

    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
		    handle_setup_aux_work_timer);

#if HAVE_ERTS_MSEG
    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
		    handle_mseg_cache_check);
#endif

    ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);

    return aux_work;

#undef HANDLE_AUX_WORK
}
/*
 * State for the one-second aux work timer that periodically raises
 * timed aux work flags on schedulers.  Allocated cache-line aligned
 * very early during boot (see aux_work_timeout_early_init()).
 */
typedef struct {
    union {
	ErlTimer data;
	char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErlTimer))];
    } timer;			/* timer entry, padded to a cache line */
    int initialized;		/* set once the timer service is usable */
    erts_atomic32_t refc;	/* pending timed-work requests; the timer
				 * re-arms itself while refc > 0 */
    erts_atomic32_t type[1];	/* per-slot aux work flag sets; actually
				 * allocated with no_schedulers+1 entries */
} ErtsAuxWorkTmo;

/* Singleton instance, set up by aux_work_timeout_early_init(). */
static ErtsAuxWorkTmo *aux_work_tmo;
/*
 * Allocate and zero the aux work timeout structure with room for
 * no_schedulers+1 'type' slots.
 *
 * This is done really early. Our own allocators have not been started
 * yet, hence plain malloc() plus manual cache-line alignment of the
 * returned pointer.
 *
 * NOTE(review): the malloc() result is not checked for NULL before it
 * is aligned and dereferenced -- presumably considered acceptable this
 * early in boot, but worth confirming.
 */
static void
aux_work_timeout_early_init(int no_schedulers)
{
    int i;
    UWord p;

    /*
     * This is done really early. Our own allocators have
     * not been started yet.
     */

    p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
			+ sizeof(erts_atomic32_t)*(no_schedulers+1))
		       + ERTS_CACHE_LINE_SIZE-1);
    /* Round up to the next cache-line boundary (over-allocated above). */
    if (p & ERTS_CACHE_LINE_MASK)
	p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
    ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);

    aux_work_tmo = (ErtsAuxWorkTmo *) p;
    aux_work_tmo->initialized = 0;
    erts_atomic32_init_nob(&aux_work_tmo->refc, 0);
    for (i = 0; i <= no_schedulers; i++)
	erts_atomic32_init_nob(&aux_work_tmo->type[i], 0);
}
/*
 * Called once the timer service is available.  Marks the structure
 * initialized and, if timed aux work was already requested before
 * this point (refc != 0), arms the one-second timer now.
 */
void
aux_work_timeout_late_init(void)
{
    aux_work_tmo->initialized = 1;
    if (erts_atomic32_read_nob(&aux_work_tmo->refc)) {
	aux_work_tmo->timer.data.active = 0;
	erts_set_timer(&aux_work_tmo->timer.data,
		       aux_work_timeout,
		       NULL,
		       NULL,
		       1000);
    }
}
/*
 * Timer callback, fired every 1000 ms while timed aux work is
 * requested.  Raises each slot's registered aux work flags on the
 * corresponding sleep info and re-arms the timer unless the reference
 * count can be dropped from 1 to 0.
 */
static void
aux_work_timeout(void *unused)
{
    erts_aint32_t refc;
    int i;

#ifdef ERTS_SMP
    /* Slot 0 maps to sleep info index -1, i.e. the aux thread
     * (cf. aux_thread() using ERTS_SCHED_SLEEP_INFO_IX(-1)). */
    i = 0;
#else
    i = 1;
#endif

    for (; i <= erts_no_schedulers; i++) {
	erts_aint32_t type;
	type = erts_atomic32_read_acqb(&aux_work_tmo->type[i]);
	if (type)
	    set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i-1),
					  type);
    }

    refc = erts_atomic32_read_nob(&aux_work_tmo->refc);
    ASSERT(refc >= 1);
    /* Only stop the timer if we can atomically take refc from 1 to 0;
     * otherwise some request is still (or again) outstanding. */
    if (refc != 1
	|| 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) {
	/* Setup next timeout... */
	aux_work_tmo->timer.data.active = 0;
	erts_set_timer(&aux_work_tmo->timer.data,
		       aux_work_timeout,
		       NULL,
		       NULL,
		       1000);
    }
}
/*
 * Arm the one-second aux work timer.  In non-SMP builds a non-scheduler
 * caller cannot touch the timer service directly, so the SET_TMO aux
 * work flag is raised instead and scheduler 1 performs the call (via
 * handle_setup_aux_work_timer()).
 */
static void
setup_aux_work_timer(void)
{
#ifndef ERTS_SMP
    if (!erts_get_scheduler_data())
	set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(0),
				      ERTS_SSI_AUX_WORK_SET_TMO);
    else
#endif
    {
	aux_work_tmo->timer.data.active = 0;
	erts_set_timer(&aux_work_tmo->timer.data,
		       aux_work_timeout,
		       NULL,
		       NULL,
		       1000);
    }
}
/*
 * Enable or disable timed aux work flags 'type' for scheduler slot
 * 'ix'.  Returns the slot's previous flag set.
 *
 * The timer reference count tracks slots with at least one enabled
 * flag: it is decremented when a slot's last flag is cleared, and
 * incremented when a slot goes from empty to non-empty.  The extra
 * increment when refc reaches 1 appears to keep the timer referenced
 * while it is being (re)armed -- TODO(review): confirm against
 * aux_work_timeout()'s 1->0 cmpxchg shutdown protocol.
 */
erts_aint32_t
erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
{
    erts_aint32_t old, refc;

#ifndef ERTS_SMP
    ix = 1;	/* only one scheduler slot in non-SMP builds */
#endif

    ERTS_DBG_CHK_AUX_WORK_VAL(type);
    ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
//    erts_fprintf(stderr, "t(%d, 0x%x, %d)\n", ix, type, enable);

    if (!enable) {
	old = erts_atomic32_read_band_mb(&aux_work_tmo->type[ix], ~type);
	ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
	/* Slot went from non-empty to empty: drop timer reference. */
	if (old != 0 && (old & ~type) == 0)
	    erts_atomic32_dec_relb(&aux_work_tmo->refc);
	return old;
    }

    old = erts_atomic32_read_bor_mb(&aux_work_tmo->type[ix], type);
    ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
    if (old == 0 && type != 0) {
	/* Slot went from empty to non-empty: take timer reference. */
	refc = erts_atomic32_inc_read_acqb(&aux_work_tmo->refc);
	if (refc == 1) {
	    erts_atomic32_inc_acqb(&aux_work_tmo->refc);
	    if (aux_work_tmo->initialized) 
		setup_aux_work_timer();
	}
    }
    return old;
}
/*
 * Mark scheduler 'no' as waiting in sys scheduling on run queue 'rq'.
 * A negative 'waiting' count encodes "waiting in sys schedule"
 * (cf. sched_change_waiting_sys_to_waiting()).  Caller holds the
 * run queue lock.
 */
static ERTS_INLINE void
sched_waiting_sys(Uint no, ErtsRunQueue *rq)
{
    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));

    ASSERT(rq->waiting >= 0);
    rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK
		  | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
    rq->waiting++;
    rq->waiting *= -1;	/* flip sign: negative marks sys waiting */
    rq->woken = 0;
    if (erts_system_profile_flags.scheduler)
	profile_scheduler(make_small(no), am_inactive);
}
/*
 * Undo sched_waiting_sys(): mark scheduler 'no' active again.
 * Caller holds the run queue lock.
 */
static ERTS_INLINE void
sched_active_sys(Uint no, ErtsRunQueue *rq)
{
    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
    ASSERT(rq->waiting < 0);
    rq->waiting *= -1;	/* back to positive... */
    rq->waiting--;	/* ...and one fewer waiter */
    if (erts_system_profile_flags.scheduler)
	profile_scheduler(make_small(no), am_active);
}
- Uint
- erts_active_schedulers(void)
- {
- Uint as = erts_no_schedulers;
- ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
- ASSERT(as >= 0);
- return as;
- }
- #ifdef ERTS_SMP
/* Release the "a scheduler is doing sys scheduling" flag (full
 * memory barrier so prior work is visible before the release). */
static ERTS_INLINE void
clear_sys_scheduling(void)
{
    erts_smp_atomic32_set_mb(&doing_sys_schedule, 0);
}
/* Try to claim the sys-scheduling flag; nonzero on success.  Only
 * one scheduler at a time may sit in erl_sys_schedule(). */
static ERTS_INLINE int
try_set_sys_scheduling(void)
{
    return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
}
- #endif
/*
 * Decide whether this scheduler should enter sys scheduling
 * (erl_sys_schedule()).  Nonzero means go ahead; in SMP builds the
 * caller then owns the doing_sys_schedule flag.
 *
 * The outstanding-I/O check is repeated after claiming the flag
 * (double-check): a task may have become outstanding between the
 * first check and the claim, in which case the flag is released and
 * the test retried.
 */
static ERTS_INLINE int
prepare_for_sys_schedule(void)
{
#ifdef ERTS_SMP
    while (!erts_port_task_have_outstanding_io_tasks()
	   && try_set_sys_scheduling()) {
	if (!erts_port_task_have_outstanding_io_tasks())
	    return 1;
	clear_sys_scheduling();
    }
    return 0;
#else
    return !erts_port_task_have_outstanding_io_tasks();
#endif
}
- #ifdef ERTS_SMP
/*
 * Reclassify a scheduler from "waiting in sys schedule" (negative
 * count, see sched_waiting_sys()) to plain waiting (positive count).
 * Caller holds the run queue lock.
 */
static ERTS_INLINE void
sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
{
    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
    ASSERT(rq->waiting < 0);
    rq->waiting *= -1;
}
/*
 * Mark scheduler 'no' as waiting on run queue 'rq', preserving the
 * sign convention: the magnitude of 'waiting' is the waiter count and
 * a negative sign marks sys waiting.  Caller holds the run queue lock.
 */
static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
    rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK
		  | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
    if (rq->waiting < 0)
	rq->waiting--;	/* negative: grow magnitude */
    else
	rq->waiting++;
    rq->woken = 0;
    if (erts_system_profile_flags.scheduler)
	profile_scheduler(make_small(no), am_inactive);
}
/*
 * Undo sched_waiting(): one fewer waiter (in magnitude), keeping the
 * sign convention.  Caller holds the run queue lock.
 */
static ERTS_INLINE void
sched_active(Uint no, ErtsRunQueue *rq)
{
    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
    if (rq->waiting < 0)
	rq->waiting++;	/* negative: shrink magnitude */
    else
	rq->waiting--;
    if (erts_system_profile_flags.scheduler)
	profile_scheduler(make_small(no), am_active);
}
/* Nonzero while a multi-scheduling block is in effect.  Caller must
 * hold schdlr_sspnd.mtx. */
static int ERTS_INLINE
ongoing_multi_scheduling_block(void)
{
    ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx));
    return schdlr_sspnd.msb.ongoing;
}
/*
 * Record that run queue 'rq' became empty: clear its NONEMPTY info
 * flag and, if it was previously set, bump the global empty-run-queue
 * counter.  The flag test makes the counter update idempotent for
 * repeated calls.
 */
static ERTS_INLINE void
empty_runq(ErtsRunQueue *rq)
{
    erts_aint32_t oifls = erts_smp_atomic32_read_band_nob(&rq->info_flags,
							  ~ERTS_RUNQ_IFLG_NONEMPTY);
    if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
	erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
	/*
	 * For a short period of time no_empty_run_queues may have
	 * been increased twice for a specific run queue.
	 */
	ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
	erts_smp_atomic32_inc_relb(&no_empty_run_queues);
    }
}
/*
 * Record that run queue 'rq' became non-empty: set its NONEMPTY info
 * flag and, if it was previously clear, decrement the global
 * empty-run-queue counter.  Mirror image of empty_runq().
 */
static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
    erts_aint32_t oifls = erts_smp_atomic32_read_bor_nob(&rq->info_flags,
							 ERTS_RUNQ_IFLG_NONEMPTY);
    if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
	erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
	/*
	 * For a short period of time no_empty_run_queues may have
	 * been increased twice for a specific run queue.
	 */
	ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
	erts_smp_atomic32_dec_relb(&no_empty_run_queues);
    }
}
/*
 * Try to move the sleep info from "no flags" to SLEEPING|WAITING
 * before spinning.  Returns the new flags on success; on failure
 * (another thread set flags concurrently) retries until either the
 * CAS succeeds or SUSPENDED is observed, in which case the observed
 * flags are returned so the caller can go suspend instead.
 */
static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
    erts_aint32_t oflgs;
    erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
			   | ERTS_SSI_FLG_WAITING);
    erts_aint32_t xflgs = 0;

    do {
	oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
	if (oflgs == xflgs)
	    return nflgs;
	xflgs = oflgs;
    } while (!(oflgs & ERTS_SSI_FLG_SUSPENDED));
    return oflgs;
}
/*
 * Like sched_prep_spin_wait(), but for re-entering the spin loop from
 * a state where WAITING is already set.  Preserves an observed
 * SUSPENDED flag in the value it installs.  Gives up (returning the
 * observed flags) once WAITING has been cleared by a waker.
 */
static erts_aint32_t
sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
    erts_aint32_t oflgs;
    erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
			   | ERTS_SSI_FLG_WAITING);
    erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING;

    do {
	oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
	if (oflgs == xflgs)
	    return nflgs;
	xflgs = oflgs;
	nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
    } while (oflgs & ERTS_SSI_FLG_WAITING);
    return oflgs;
}
/*
 * Spin for up to 'spincount' iterations waiting for SLEEPING|WAITING
 * to be cleared by a waker, yielding the CPU every
 * ERTS_SCHED_SPIN_UNTIL_YIELD iterations.  Returns the last observed
 * flags.  Note: the do-while performs at least one flag read even
 * when spincount is 0 (callers rely on this, e.g. aux_thread()).
 */
static erts_aint32_t
sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
{
    int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
    int sc = spincount;
    erts_aint32_t flgs;

    do {
	flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
	if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
	    != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
	    break;
	}
	ERTS_SPIN_BODY;
	if (--until_yield == 0) {
	    until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
	    erts_thr_yield();
	}
    } while (--sc > 0);
    return flgs;
}
/*
 * Upgrade SLEEPING|WAITING to also carry a sleep-type bit
 * (TSE_SLEEPING or POLL_SLEEPING) before actually blocking.  Prepares
 * the corresponding wakeup mechanism first (thread event reset, or
 * clearing any pending sys-schedule interrupt).  Returns the new
 * flags on success, or the observed flags if the sleep was already
 * aborted (SLEEPING|WAITING no longer both set); SUSPENDED is
 * preserved across retries.
 */
static erts_aint32_t
sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
{
    erts_aint32_t oflgs;
    erts_aint32_t nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
    erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;

    if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
	erts_tse_reset(ssi->event);
    else {
	ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING);
	erts_sys_schedule_interrupt(0);
    }

    while (1) {
	oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
	if (oflgs == xflgs)
	    return nflgs;
	if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
	    != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
	    return oflgs;
	}
	xflgs = oflgs;
	nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
    }
}
/* Nonzero when the wait is over: WAITING has been cleared (woken) or
 * SUSPENDED has been set -- i.e. flags are not exactly WAITING. */
#define ERTS_SCHED_WAIT_WOKEN(FLGS) \
  (((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
   != ERTS_SSI_FLG_WAITING)
/* Thread-progress callback: wake the thread owning this sleep info. */
static void
thr_prgr_wakeup(void *vssi)
{
    erts_sched_poke((ErtsSchedulerSleepInfo *) vssi);
}
/* Thread-progress callback: announce intent to block by setting the
 * SLEEPING flag. */
static void
thr_prgr_prep_wait(void *vssi)
{
    ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
    erts_smp_atomic32_read_bor_acqb(&ssi->flags,
				    ERTS_SSI_FLG_SLEEPING);
}
/*
 * Thread-progress callback: block on the thread event until woken.
 * Installs TSE_SLEEPING via CAS; bails out without blocking if
 * SLEEPING has already been cleared by a waker in the meantime.
 */
static void
thr_prgr_wait(void *vssi)
{
    ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
    erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING;

    erts_tse_reset(ssi->event);

    while (1) {
	erts_aint32_t aflgs, nflgs;
	nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING;
	aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
	if (aflgs == xflgs) {
	    erts_tse_wait(ssi->event);
	    break;
	}
	if ((aflgs & ERTS_SSI_FLG_SLEEPING) == 0)
	    break;	/* already woken; do not block */
	xflgs = aflgs;
    }
}
/* Thread-progress callback: clear the sleep flags after waking. */
static void
thr_prgr_fin_wait(void *vssi)
{
    ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
    erts_smp_atomic32_read_band_nob(&ssi->flags,
				    ~(ERTS_SSI_FLG_SLEEPING
				      | ERTS_SSI_FLG_TSE_SLEEPING));
}
- static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp);
/*
 * Main loop of the aux thread (sleep info index -1): a managed
 * thread-progress participant that exists only to execute aux work.
 * Handles pending aux work while active; otherwise deactivates
 * itself in the thread-progress subsystem and blocks on its thread
 * event until poked.  Never returns in practice.
 */
static void *
aux_thread(void *unused)
{
    ErtsAuxWorkData *awdp = aux_thread_aux_work_data;
    ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(-1);
    erts_aint32_t aux_work;
    ErtsThrPrgrCallbacks callbacks;
    int thr_prgr_active = 1;

    ssi->event = erts_tse_fetch();

    callbacks.arg = (void *) ssi;
    callbacks.wakeup = thr_prgr_wakeup;
    callbacks.prepare_wait = thr_prgr_prep_wait;
    callbacks.wait = thr_prgr_wait;
    callbacks.finalize_wait = thr_prgr_fin_wait;

    erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
    init_aux_work_data(awdp, NULL);
    awdp->ssi = ssi;

    sched_prep_spin_wait(ssi);

    while (1) {
	erts_aint32_t flgs;

	aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
	if (aux_work) {
	    /* Must be an active thread-progress participant while
	     * performing work. */
	    if (!thr_prgr_active)
		erts_thr_progress_active(NULL, thr_prgr_active = 1);
	    aux_work = handle_aux_work(awdp, aux_work);
	    if (aux_work && erts_thr_progress_update(NULL))
		erts_thr_progress_leader_update(NULL);
	}

	if (!aux_work) {
	    /* Nothing to do: deactivate and go to sleep on the thread
	     * event (spincount 0 -- a single flag check before
	     * committing to sleep). */
	    if (thr_prgr_active)
		erts_thr_progress_active(NULL, thr_prgr_active = 0);
	    erts_thr_progress_prepare_wait(NULL);

	    flgs = sched_spin_wait(ssi, 0);

	    if (flgs & ERTS_SSI_FLG_SLEEPING) {
		ASSERT(flgs & ERTS_SSI_FLG_WAITING);
		flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
		if (flgs & ERTS_SSI_FLG_SLEEPING) {
		    int res;
		    ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
		    ASSERT(flgs & ERTS_SSI_FLG_WAITING);
		    do {
			res = erts_tse_wait(ssi->event);
		    } while (res == EINTR);
		}
	    }
	    erts_thr_progress_finalize_wait(NULL);
	}

	flgs = sched_prep_spin_wait(ssi);
    }
    return NULL;
}
- #endif /* ERTS_SMP */
- static void
- scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
- {
- int working = 1;
- ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- int spincount;
- erts_aint32_t aux_work = 0;
- #ifdef ERTS_SMP
- int thr_prgr_active = 1;
- erts_aint32_t flgs;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- flgs = sched_prep_spin_wait(ssi);
- if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- /* Go suspend instead... */
- return;
- }
- /*
- * If all schedulers are waiting, one of them *should*
- * be waiting in erl_sys_schedule()
- */
- if (!prepare_for_sys_schedule()) {
- sched_waiting(esdp->no, rq);
- erts_smp_runq_unlock(rq);
- spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
- tse_wait:
- if (thr_prgr_active != working)
- s…
Large files files are truncated, but you can click here to view the full file