
/erts/emulator/beam/erl_init.c

https://github.com/notarf/otp
C | 1674 lines | 1327 code | 198 blank | 149 comment | 247 complexity | 5e675c02d92e5d611fcd848d05b5bd10 MD5
Possible License(s): ErlPL-1.1 (Erlang Public License, Version 1.1)
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1997-2011. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include "sys.h"
  23. #include <ctype.h>
  24. #include "erl_vm.h"
  25. #include "global.h"
  26. #include "erl_process.h"
  27. #include "error.h"
  28. #include "erl_version.h"
  29. #include "erl_db.h"
  30. #include "beam_bp.h"
  31. #include "erl_bits.h"
  32. #include "erl_binary.h"
  33. #include "dist.h"
  34. #include "erl_mseg.h"
  35. #include "erl_nmgc.h"
  36. #include "erl_threads.h"
  37. #include "erl_bif_timer.h"
  38. #include "erl_instrument.h"
  39. #include "erl_printf_term.h"
  40. #include "erl_misc_utils.h"
  41. #include "packet_parser.h"
  42. #include "erl_cpu_topology.h"
  43. #include "erl_thr_progress.h"
  44. #include "erl_thr_queue.h"
  45. #include "erl_async.h"
  46. #ifdef HIPE
  47. #include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
  48. #include "hipe_signal.h" /* for hipe_signal_init() */
  49. #endif
  50. #ifdef HAVE_SYS_RESOURCE_H
  51. # include <sys/resource.h>
  52. #endif
  53. /*
  54. * Note about VxWorks: All variables must be initialized by executable code,
  55. * not by an initializer. Otherwise a new instance of the emulator will
  56. * inherit previous values.
  57. */
  58. extern void erl_crash_dump_v(char *, int, char *, va_list);
  59. #ifdef __WIN32__
  60. extern void ConNormalExit(void);
  61. extern void ConWaitForExit(void);
  62. #endif
  63. static void erl_init(int ncpu);
  64. #define ERTS_MIN_COMPAT_REL 7
  65. static erts_atomic_t exiting;
  66. #ifdef ERTS_SMP
  67. erts_smp_atomic32_t erts_writing_erl_crash_dump;
  68. erts_tsd_key_t erts_is_crash_dumping_key;
  69. #else
  70. volatile int erts_writing_erl_crash_dump = 0;
  71. #endif
  72. int erts_initialized = 0;
  73. #if defined(USE_THREADS) && !defined(ERTS_SMP)
  74. static erts_tid_t main_thread;
  75. #endif
  76. int erts_use_sender_punish;
  77. /*
  78. * Configurable parameters.
  79. */
  80. Uint display_items; /* no of items to display in traces etc */
  81. int H_MIN_SIZE; /* The minimum heap grain */
  82. int BIN_VH_MIN_SIZE; /* The minimum binary virtual heap size */
  83. Uint32 erts_debug_flags; /* Debug flags. */
  84. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  85. int count_instructions;
  86. #endif
  87. int erts_backtrace_depth; /* How many functions to show in a backtrace
  88. * in error codes.
  89. */
  90. erts_smp_atomic32_t erts_max_gen_gcs;
  91. Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
  92. am_info or am_warning, am_error is
  93. the default for BC */
  94. int erts_compat_rel;
  95. static int no_schedulers;
  96. static int no_schedulers_online;
  97. #ifdef DEBUG
  98. Uint32 verbose; /* See erl_debug.h for information about verbose */
  99. #endif
  100. int erts_disable_tolerant_timeofday; /* Time correction can be disabled if it is
  101. * not wanted and/or it is too slow.
  102. */
  103. int erts_atom_table_size = ATOM_LIMIT; /* Maximum number of atoms */
  104. int erts_modified_timing_level;
  105. int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
  106. int erts_no_line_info = 0; /* -L: Don't load line information */
  107. /*
  108. * Other global variables.
  109. */
  110. ErtsModifiedTimings erts_modified_timings[] = {
  111. /* 0 */ {make_small(0), CONTEXT_REDS, INPUT_REDUCTIONS},
  112. /* 1 */ {make_small(0), 2*CONTEXT_REDS, 2*INPUT_REDUCTIONS},
  113. /* 2 */ {make_small(0), CONTEXT_REDS/2, INPUT_REDUCTIONS/2},
  114. /* 3 */ {make_small(0), 3*CONTEXT_REDS, 3*INPUT_REDUCTIONS},
  115. /* 4 */ {make_small(0), CONTEXT_REDS/3, 3*INPUT_REDUCTIONS},
  116. /* 5 */ {make_small(0), 4*CONTEXT_REDS, INPUT_REDUCTIONS/2},
  117. /* 6 */ {make_small(1), CONTEXT_REDS/4, 2*INPUT_REDUCTIONS},
  118. /* 7 */ {make_small(1), 5*CONTEXT_REDS, INPUT_REDUCTIONS/3},
  119. /* 8 */ {make_small(10), CONTEXT_REDS/5, 3*INPUT_REDUCTIONS},
  120. /* 9 */ {make_small(10), 6*CONTEXT_REDS, INPUT_REDUCTIONS/4}
  121. };
  122. #define ERTS_MODIFIED_TIMING_LEVELS \
  123. (sizeof(erts_modified_timings)/sizeof(ErtsModifiedTimings))
  124. Export *erts_delay_trap = NULL;
  125. int erts_use_r9_pids_ports;
  126. #ifdef HYBRID
  127. Eterm *global_heap;
  128. Eterm *global_hend;
  129. Eterm *global_htop;
  130. Eterm *global_saved_htop;
  131. Eterm *global_old_heap;
  132. Eterm *global_old_hend;
  133. ErlOffHeap erts_global_offheap;
  134. Uint global_heap_sz = SH_DEFAULT_SIZE;
  135. #ifndef INCREMENTAL
  136. Eterm *global_high_water;
  137. Eterm *global_old_htop;
  138. #endif
  139. Uint16 global_gen_gcs;
  140. Uint16 global_max_gen_gcs;
  141. Uint global_gc_flags;
  142. Uint global_heap_min_sz = SH_DEFAULT_SIZE;
  143. #endif
  144. int ignore_break;
  145. int replace_intr;
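/* Return 1 if string starts with prefix, otherwise 0. */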
  146. static ERTS_INLINE int
  147. has_prefix(const char *prefix, const char *string)
  148. {
  149. int i;
  150. for (i = 0; prefix[i]; i++)
  151. if (prefix[i] != string[i])
  152. return 0;
  153. return 1;
  154. }
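/* Return a pointer to the last component of fullname, i.e. everything after the final '/' or '\\'. */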
  155. static char*
  156. progname(char *fullname)
  157. {
  158. int i;
  159. i = strlen(fullname);
  160. while (i >= 0) {
  161. if ((fullname[i] != '/') && (fullname[i] != '\\'))
  162. i--;
  163. else
  164. break;
  165. }
  166. return fullname+i+1;
  167. }
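/* Return the numeric part of ERLANG_OTP_RELEASE; the value is computed once and cached in a static variable. */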
  168. static int
  169. this_rel_num(void)
  170. {
  171. static int this_rel = -1;
  172. if (this_rel < 1) {
  173. int i;
  174. char this_rel_str[] = ERLANG_OTP_RELEASE;
  175. i = 0;
  176. while (this_rel_str[i] && !isdigit((int) this_rel_str[i]))
  177. i++;
  178. this_rel = atoi(&this_rel_str[i]);
  179. if (this_rel < 1)
  180. erl_exit(-1, "Unexpected ERLANG_OTP_RELEASE format\n");
  181. }
  182. return this_rel;
  183. }
  184. /*
  185. * Common error printout function, all error messages
  186. * that don't go to the error logger go through here.
  187. */
  188. void erl_error(char *fmt, va_list args)
  189. {
  190. erts_vfprintf(stderr, fmt, args);
  191. }
  192. static int early_init(int *argc, char **argv);
  193. void
  194. erts_short_init(void)
  195. {
  196. int ncpu = early_init(NULL, NULL);
  197. erl_init(ncpu);
  198. erts_initialized = 1;
  199. }
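/* Initialize the core subsystems of the VM; called after early_init() from both erts_short_init() and erl_start(). */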
  200. static void
  201. erl_init(int ncpu)
  202. {
  203. init_benchmarking();
  204. erts_init_monitors();
  205. erts_init_gc();
  206. erts_init_time();
  207. erts_init_sys_common_misc();
  208. erts_init_process(ncpu);
  209. erts_init_scheduling(no_schedulers,
  210. no_schedulers_online);
  211. erts_init_cpu_topology(); /* Must be after init_scheduling */
  212. erts_alloc_late_init();
  213. H_MIN_SIZE = erts_next_heap_size(H_MIN_SIZE, 0);
  214. BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
  215. erts_init_trace();
  216. erts_init_binary();
  217. erts_init_bits();
  218. erts_init_fun_table();
  219. init_atom_table();
  220. init_export_table();
  221. init_module_table();
  222. init_register_table();
  223. init_message();
  224. erts_bif_info_init();
  225. erts_ddll_init();
  226. init_emulator();
  227. erts_bp_init();
  228. init_db(); /* Must be after init_emulator */
  229. erts_bif_timer_init();
  230. erts_init_node_tables();
  231. init_dist();
  232. erl_drv_thr_init();
  233. erts_init_async();
  234. init_io();
  235. init_copy();
  236. init_load();
  237. erts_init_bif();
  238. erts_init_bif_chksum();
  239. erts_init_bif_binary();
  240. erts_init_bif_re();
  241. erts_init_unicode(); /* after RE to get access to PCRE unicode */
  242. erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2);
  243. erts_late_init_process();
  244. #if HAVE_ERTS_MSEG
  245. erts_mseg_late_init(); /* Must be after timer (erts_init_time()) and thread
  246. initializations */
  247. #endif
  248. #ifdef HIPE
  249. hipe_mode_switch_init(); /* Must be after init_load/beam_catches/init */
  250. #endif
  251. packet_parser_init();
  252. erl_nif_init();
  253. }
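/* Set up the shared heap (HYBRID) and the incremental collector (INCREMENTAL) when the emulator is built with those features; otherwise this is a no-op. */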
  254. static void
  255. init_shared_memory(int argc, char **argv)
  256. {
  257. #ifdef HYBRID
  258. int arg_size = 0;
  259. global_heap_sz = erts_next_heap_size(global_heap_sz,0);
  260. /* Make sure arguments will fit on the heap, no one else will check! */
  261. while (argc--)
  262. arg_size += 2 + strlen(argv[argc]);
  263. if (global_heap_sz < arg_size)
  264. global_heap_sz = erts_next_heap_size(arg_size,1);
  265. #ifndef INCREMENTAL
  266. global_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
  267. sizeof(Eterm) * global_heap_sz);
  268. global_hend = global_heap + global_heap_sz;
  269. global_htop = global_heap;
  270. global_high_water = global_heap;
  271. global_old_hend = global_old_htop = global_old_heap = NULL;
  272. #endif
  273. global_gen_gcs = 0;
  274. global_max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
  275. global_gc_flags = erts_default_process_flags;
  276. erts_global_offheap.mso = NULL;
  277. #ifndef HYBRID /* FIND ME! */
  278. erts_global_offheap.funs = NULL;
  279. #endif
  280. erts_global_offheap.overhead = 0;
  281. #endif
  282. #ifdef INCREMENTAL
  283. erts_init_incgc();
  284. #endif
  285. }
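/* Spawn the first Erlang process, which calls modname:start/2 with the (possibly empty) preloaded code binary and the boot arguments as a list of binaries. */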
  286. static void
  287. erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
  288. {
  289. int i;
  290. Eterm start_mod;
  291. Eterm args;
  292. Eterm* hp;
  293. Process parent;
  294. ErlSpawnOpts so;
  295. Eterm env;
  296. start_mod = am_atom_put(modname, sys_strlen(modname));
  297. if (erts_find_function(start_mod, am_start, 2) == NULL) {
  298. erl_exit(5, "No function %s:start/2\n", modname);
  299. }
  300. /*
  301. * We need a dummy parent process to be able to call erl_create_process().
  302. */
  303. erts_init_empty_process(&parent);
  304. erts_smp_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
  305. hp = HAlloc(&parent, argc*2 + 4);
  306. args = NIL;
  307. for (i = argc-1; i >= 0; i--) {
  308. int len = sys_strlen(argv[i]);
  309. args = CONS(hp, new_binary(&parent, (byte*)argv[i], len), args);
  310. hp += 2;
  311. }
  312. env = new_binary(&parent, code, size);
  313. args = CONS(hp, args, NIL);
  314. hp += 2;
  315. args = CONS(hp, env, args);
  316. so.flags = 0;
  317. (void) erl_create_process(&parent, start_mod, am_start, args, &so);
  318. erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
  319. erts_cleanup_empty_process(&parent);
  320. }
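/* Build a list on p's heap containing the names of all preloaded modules as atoms. */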
  321. Eterm
  322. erts_preloaded(Process* p)
  323. {
  324. Eterm previous;
  325. int j;
  326. int need;
  327. Eterm mod;
  328. Eterm* hp;
  329. char* name;
  330. const Preload *preload = sys_preloaded();
  331. j = 0;
  332. while (preload[j].name != NULL) {
  333. j++;
  334. }
  335. previous = NIL;
  336. need = 2*j;
  337. hp = HAlloc(p, need);
  338. j = 0;
  339. while ((name = preload[j].name) != NULL) {
  340. mod = am_atom_put(name, sys_strlen(name));
  341. previous = CONS(hp, mod, previous);
  342. hp += 2;
  343. j++;
  344. }
  345. return previous;
  346. }
  347. /* static variables that must not change (use same values at restart) */
  348. static char* program;
  349. static char* init = "init";
  350. static char* boot = "boot";
  351. static int boot_argc;
  352. static char** boot_argv;
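/* Return the value of a command line option: the remainder of the current argument if non-empty, otherwise the following argument (which is consumed by advancing *ip). */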
  353. static char *
  354. get_arg(char* rest, char* next, int* ip)
  355. {
  356. if (*rest == '\0') {
  357. if (next == NULL) {
  358. erts_fprintf(stderr, "too few arguments\n");
  359. erts_usage();
  360. }
  361. (*ip)++;
  362. return next;
  363. }
  364. return rest;
  365. }
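/* Load all modules that are compiled into the emulator (the preloaded modules); any failure here is fatal. */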
  366. static void
  367. load_preloaded(void)
  368. {
  369. int i;
  370. Eterm res;
  371. Preload* preload_p;
  372. Eterm module_name;
  373. byte* code;
  374. char* name;
  375. int length;
  376. if ((preload_p = sys_preloaded()) == NULL) {
  377. return;
  378. }
  379. i = 0;
  380. while ((name = preload_p[i].name) != NULL) {
  381. length = preload_p[i].size;
  382. module_name = am_atom_put(name, sys_strlen(name));
  383. if ((code = sys_preload_begin(&preload_p[i])) == 0)
  384. erl_exit(1, "Failed to find preloaded code for module %s\n",
  385. name);
  386. res = erts_load_module(NULL, 0, NIL, &module_name, code, length);
  387. sys_preload_end(&preload_p[i]);
  388. if (res != NIL)
  389. erl_exit(1,"Failed loading preloaded module %s (%T)\n",
  390. name, res);
  391. i++;
  392. }
  393. }
  394. /* be helpful (or maybe downright rude:-) */
  395. void erts_usage(void)
  396. {
  397. erts_fprintf(stderr, "Usage: %s [flags] [ -- [init_args] ]\n", progname(program));
  398. erts_fprintf(stderr, "The flags are:\n\n");
  399. /* erts_fprintf(stderr, "-# number set the number of items to be used in traces etc\n"); */
  400. erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n");
  401. erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n",
  402. ERTS_ASYNC_THREAD_MIN_STACK_SIZE,
  403. ERTS_ASYNC_THREAD_MAX_STACK_SIZE);
  404. erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n");
  405. erts_fprintf(stderr, " valid range is [0-%d]\n",
  406. ERTS_MAX_NO_OF_ASYNC_THREADS);
  407. erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n");
  408. erts_fprintf(stderr, " d (or no extra option) to disable the break\n");
  409. erts_fprintf(stderr, " handler, i to ignore break signals\n");
  410. /* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */
  411. erts_fprintf(stderr, "-c disable continuous date/time correction with\n");
  412. erts_fprintf(stderr, " respect to uptime\n");
  413. erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n");
  414. erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n");
  415. erts_fprintf(stderr, "-hms size set minimum heap size in words (default %d)\n",
  416. H_DEFAULT_SIZE);
  417. erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n",
  418. VH_DEFAULT_SIZE);
  419. /* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
  420. erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
  421. erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
  422. erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
  423. erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n");
  424. erts_fprintf(stderr, " valid range is [%d-%d]\n",
  425. ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES);
  426. erts_fprintf(stderr, "-R number set compatibility release number,\n");
  427. erts_fprintf(stderr, " valid range [%d-%d]\n",
  428. ERTS_MIN_COMPAT_REL, this_rel_num());
  429. erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
  430. erts_fprintf(stderr, "-rg amount set reader groups limit\n");
  431. erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
  432. erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
  433. erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n");
  434. erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
  435. erts_fprintf(stderr, "-sct cput set cpu topology,\n");
  436. erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
  437. erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
  438. erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
  439. erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
  440. erts_fprintf(stderr, " valid range is [%d-%d]\n",
  441. ERTS_SCHED_THREAD_MIN_STACK_SIZE,
  442. ERTS_SCHED_THREAD_MAX_STACK_SIZE);
  443. erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
  444. erts_fprintf(stderr, " schedulers online (n2), valid range for both\n");
  445. erts_fprintf(stderr, " numbers is [1-%d]\n",
  446. ERTS_MAX_NO_OF_SCHEDULERS);
  447. erts_fprintf(stderr, "-t size set the maximum number of atoms the "
  448. "emulator can handle\n");
  449. erts_fprintf(stderr, " valid range is [%d-%d]\n",
  450. MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE);
  451. erts_fprintf(stderr, "-T number set modified timing level,\n");
  452. erts_fprintf(stderr, " valid range is [0-%d]\n",
  453. ERTS_MODIFIED_TIMING_LEVELS-1);
  454. erts_fprintf(stderr, "-V print Erlang version\n");
  455. erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n");
  456. erts_fprintf(stderr, "-W<i|w> set error logger warnings mapping,\n");
  457. erts_fprintf(stderr, " see error_logger documentation for details\n");
  458. erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n");
  459. erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024);
  460. erts_fprintf(stderr, "\n");
  461. erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n");
  462. erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n");
  463. erts_fprintf(stderr, "\n\n");
  464. erl_exit(-1, "");
  465. }
  466. #ifdef USE_THREADS
  467. /*
  468. * allocators for thread lib
  469. */
  470. static void *ethr_std_alloc(size_t size)
  471. {
  472. return erts_alloc_fnf(ERTS_ALC_T_ETHR_STD, (Uint) size);
  473. }
  474. static void *ethr_std_realloc(void *ptr, size_t size)
  475. {
  476. return erts_realloc_fnf(ERTS_ALC_T_ETHR_STD, ptr, (Uint) size);
  477. }
  478. static void ethr_std_free(void *ptr)
  479. {
  480. erts_free(ERTS_ALC_T_ETHR_STD, ptr);
  481. }
  482. static void *ethr_sl_alloc(size_t size)
  483. {
  484. return erts_alloc_fnf(ERTS_ALC_T_ETHR_SL, (Uint) size);
  485. }
  486. static void *ethr_sl_realloc(void *ptr, size_t size)
  487. {
  488. return erts_realloc_fnf(ERTS_ALC_T_ETHR_SL, ptr, (Uint) size);
  489. }
  490. static void ethr_sl_free(void *ptr)
  491. {
  492. erts_free(ERTS_ALC_T_ETHR_SL, ptr);
  493. }
  494. static void *ethr_ll_alloc(size_t size)
  495. {
  496. return erts_alloc_fnf(ERTS_ALC_T_ETHR_LL, (Uint) size);
  497. }
  498. static void *ethr_ll_realloc(void *ptr, size_t size)
  499. {
  500. return erts_realloc_fnf(ERTS_ALC_T_ETHR_LL, ptr, (Uint) size);
  501. }
  502. static void ethr_ll_free(void *ptr)
  503. {
  504. erts_free(ERTS_ALC_T_ETHR_LL, ptr);
  505. }
  506. #endif
  507. static int
  508. early_init(int *argc, char **argv) /*
  509. * Only put things here which are
  510. * really important to initialize
  511. * early!
  512. */
  513. {
  514. ErtsAllocInitOpts alloc_opts = ERTS_ALLOC_INIT_DEF_OPTS_INITER;
  515. int ncpu;
  516. int ncpuonln;
  517. int ncpuavail;
  518. int schdlrs;
  519. int schdlrs_onln;
  520. int max_main_threads;
  521. int max_reader_groups;
  522. int reader_groups;
  523. char envbuf[21]; /* enough for any 64-bit integer */
  524. size_t envbufsz;
  525. erts_sched_compact_load = 1;
  526. erts_printf_eterm_func = erts_printf_term;
  527. erts_disable_tolerant_timeofday = 0;
  528. display_items = 200;
  529. erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
  530. erts_async_max_threads = 0;
  531. erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
  532. H_MIN_SIZE = H_DEFAULT_SIZE;
  533. BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE;
  534. erts_initialized = 0;
  535. erts_use_sender_punish = 1;
  536. erts_pre_early_init_cpu_topology(&max_reader_groups,
  537. &ncpu,
  538. &ncpuonln,
  539. &ncpuavail);
  540. #ifndef ERTS_SMP
  541. ncpu = 1;
  542. ncpuonln = 1;
  543. ncpuavail = 1;
  544. #endif
  545. ignore_break = 0;
  546. replace_intr = 0;
  547. program = argv[0];
  548. erts_modified_timing_level = -1;
  549. erts_compat_rel = this_rel_num();
  550. erts_use_r9_pids_ports = 0;
  551. erts_sys_pre_init();
  552. erts_atomic_init_nob(&exiting, 0);
  553. #ifdef ERTS_SMP
  554. erts_thr_progress_pre_init();
  555. #endif
  556. #ifdef ERTS_ENABLE_LOCK_CHECK
  557. erts_lc_init();
  558. #endif
  559. #ifdef ERTS_SMP
  560. erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
  561. erts_tsd_key_create(&erts_is_crash_dumping_key);
  562. #else
  563. erts_writing_erl_crash_dump = 0;
  564. #endif
  565. erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
  566. (erts_aint32_t) ((Uint16) -1));
  567. erts_pre_init_process();
  568. #if defined(USE_THREADS) && !defined(ERTS_SMP)
  569. main_thread = erts_thr_self();
  570. #endif
  571. /*
  572. * We need to know the number of schedulers to use before we
  573. * can initialize the allocators.
  574. */
  575. no_schedulers = (Uint) (ncpu > 0 ? ncpu : 1);
  576. no_schedulers_online = (ncpuavail > 0
  577. ? ncpuavail
  578. : (ncpuonln > 0 ? ncpuonln : no_schedulers));
  579. schdlrs = no_schedulers;
  580. schdlrs_onln = no_schedulers_online;
  581. envbufsz = sizeof(envbuf);
  582. /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
  583. if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
  584. erts_async_max_threads = atoi(envbuf);
  585. else
  586. erts_async_max_threads = 0;
  587. if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)
  588. erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS;
  589. if (argc && argv) {
  590. int i = 1;
  591. while (i < *argc) {
  592. if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
  593. i++;
  594. break;
  595. }
  596. if (argv[i][0] == '-') {
  597. switch (argv[i][1]) {
  598. case 'r': {
  599. char *sub_param = argv[i]+2;
  600. if (has_prefix("g", sub_param)) {
  601. char *arg = get_arg(sub_param+1, argv[i+1], &i);
  602. if (sscanf(arg, "%d", &max_reader_groups) != 1) {
  603. erts_fprintf(stderr,
  604. "bad reader groups limit: %s\n", arg);
  605. erts_usage();
  606. }
  607. if (max_reader_groups < 0) {
  608. erts_fprintf(stderr,
  609. "bad reader groups limit: %d\n",
  610. max_reader_groups);
  611. erts_usage();
  612. }
  613. }
  614. break;
  615. }
  616. case 'A': {
  617. /* set number of threads in thread pool */
  618. char *arg = get_arg(argv[i]+2, argv[i+1], &i);
  619. if (((erts_async_max_threads = atoi(arg)) < 0) ||
  620. (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
  621. erts_fprintf(stderr,
  622. "bad number of async threads %s\n",
  623. arg);
  624. erts_usage();
  625. VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
  626. erts_async_max_threads));
  627. }
  628. break;
  629. }
  630. case 'S' : {
  631. int tot, onln;
  632. char *arg = get_arg(argv[i]+2, argv[i+1], &i);
  633. switch (sscanf(arg, "%d:%d", &tot, &onln)) {
  634. case 0:
  635. switch (sscanf(arg, ":%d", &onln)) {
  636. case 1:
  637. tot = no_schedulers;
  638. goto chk_S;
  639. default:
  640. goto bad_S;
  641. }
  642. case 1:
  643. onln = tot < schdlrs_onln ? tot : schdlrs_onln;
  644. case 2:
  645. chk_S:
  646. if (tot > 0)
  647. schdlrs = tot;
  648. else
  649. schdlrs = no_schedulers + tot;
  650. if (onln > 0)
  651. schdlrs_onln = onln;
  652. else
  653. schdlrs_onln = no_schedulers_online + onln;
  654. if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
  655. erts_fprintf(stderr,
  656. "bad amount of schedulers %d\n",
  657. tot);
  658. erts_usage();
  659. }
  660. if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
  661. erts_fprintf(stderr,
  662. "bad amount of schedulers online %d "
  663. "(total amount of schedulers %d)\n",
  664. schdlrs_onln, schdlrs);
  665. erts_usage();
  666. }
  667. break;
  668. default:
  669. bad_S:
  670. erts_fprintf(stderr,
  671. "bad amount of schedulers %s\n",
  672. arg);
  673. erts_usage();
  674. break;
  675. }
  676. VERBOSE(DEBUG_SYSTEM,
  677. ("using %d:%d scheduler(s)\n", tot, onln));
  678. break;
  679. }
  680. default:
  681. break;
  682. }
  683. }
  684. i++;
  685. }
  686. }
  687. #ifdef ERTS_SMP
  688. no_schedulers = schdlrs;
  689. no_schedulers_online = schdlrs_onln;
  690. erts_no_schedulers = (Uint) no_schedulers;
  691. #endif
  692. erts_early_init_scheduling(no_schedulers);
  693. alloc_opts.ncpu = ncpu;
  694. erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
  695. -M flags. */
  696. /* Require allocators */
  697. #ifdef ERTS_SMP
  698. /*
  699. * Thread progress management:
  700. *
  701. * * Managed threads:
  702. * ** Scheduler threads (see erl_process.c)
  703. * ** Aux thread (see erl_process.c)
  704. * ** Sys message dispatcher thread (see erl_trace.c)
  705. *
  706. * * Unmanaged threads that need to register:
  707. * ** Async threads (see erl_async.c)
  708. */
  709. erts_thr_progress_init(no_schedulers,
  710. no_schedulers+2,
  711. erts_async_max_threads);
  712. #endif
  713. erts_thr_q_init();
  714. erts_init_utils();
  715. erts_early_init_cpu_topology(no_schedulers,
  716. &max_main_threads,
  717. max_reader_groups,
  718. &reader_groups);
  719. #ifdef USE_THREADS
  720. {
  721. erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
  722. elid.mem.std.alloc = ethr_std_alloc;
  723. elid.mem.std.realloc = ethr_std_realloc;
  724. elid.mem.std.free = ethr_std_free;
  725. elid.mem.sl.alloc = ethr_sl_alloc;
  726. elid.mem.sl.realloc = ethr_sl_realloc;
  727. elid.mem.sl.free = ethr_sl_free;
  728. elid.mem.ll.alloc = ethr_ll_alloc;
  729. elid.mem.ll.realloc = ethr_ll_realloc;
  730. elid.mem.ll.free = ethr_ll_free;
  731. elid.main_threads = max_main_threads;
  732. elid.reader_groups = reader_groups;
  733. erts_thr_late_init(&elid);
  734. }
  735. #endif
  736. #ifdef ERTS_ENABLE_LOCK_CHECK
  737. erts_lc_late_init();
  738. #endif
  739. #ifdef ERTS_ENABLE_LOCK_COUNT
  740. erts_lcnt_late_init();
  741. #endif
  742. #if defined(HIPE)
  743. hipe_signal_init(); /* must be done very early */
  744. #endif
  745. erl_sys_args(argc, argv);
  746. /* Creates threads on Windows that depend on the arguments, so has to be after erl_sys_args */
  747. erl_sys_init();
  748. erts_ets_realloc_always_moves = 0;
  749. erts_ets_always_compress = 0;
  750. erts_dist_buf_busy_limit = ERTS_DE_BUSY_LIMIT;
  751. return ncpu;
  752. }
  753. #ifndef ERTS_SMP
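/* In the non-SMP emulator the scheduler runs in the main thread; try to raise the main thread's stack limit with setrlimit() when a scheduler stack size has been requested. */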
  754. static void set_main_stack_size(void)
  755. {
  756. if (erts_sched_thread_suggested_stack_size > 0) {
  757. # if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK
  758. struct rlimit rl;
  759. int bytes = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024;
  760. if (getrlimit(RLIMIT_STACK, &rl) != 0 ||
  761. (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) {
  762. erts_fprintf(stderr, "failed to set stack size for scheduler "
  763. "thread to %d bytes\n", bytes);
  764. erts_usage();
  765. }
  766. # else
  767. erts_fprintf(stderr, "no OS support for dynamic stack size limit\n");
  768. erts_usage();
  769. # endif
  770. }
  771. }
  772. #endif
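/* Main entry point for starting the runtime system: run early_init(), parse the remaining emulator flags, initialize the VM (erl_init()), load the preloaded modules, create the first process (otp_ring0) and start the schedulers. */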
  773. void
  774. erl_start(int argc, char **argv)
  775. {
  776. int i = 1;
  777. char* arg=NULL;
  778. char* Parg = NULL;
  779. int have_break_handler = 1;
  780. char envbuf[21]; /* enough for any 64-bit integer */
  781. size_t envbufsz;
  782. int ncpu = early_init(&argc, argv);
  783. envbufsz = sizeof(envbuf);
  784. if (erts_sys_getenv(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
  785. user_requested_db_max_tabs = atoi(envbuf);
  786. else
  787. user_requested_db_max_tabs = 0;
  788. envbufsz = sizeof(envbuf);
  789. if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
  790. Uint16 max_gen_gcs = atoi(envbuf);
  791. erts_smp_atomic32_set_nob(&erts_max_gen_gcs,
  792. (erts_aint32_t) max_gen_gcs);
  793. }
  794. #if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
  795. /*
  796. * The default stack size on MacOS X is too small for pcre.
  797. */
  798. erts_sched_thread_suggested_stack_size = 256;
  799. #endif
  800. #ifdef DEBUG
  801. verbose = DEBUG_DEFAULT;
  802. #endif
  803. erts_error_logger_warnings = am_error;
  804. while (i < argc) {
  805. if (argv[i][0] != '-') {
  806. erts_usage();
  807. }
  808. if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
  809. i++;
  810. break;
  811. }
  812. switch (argv[i][1]) {
  813. /*
  814. * NOTE: -M flags are handled (and removed from argv) by
  815. * erts_alloc_init().
  816. *
  817. * The -d, -m, -S, -t, and -T flags were removed in
  818. * Erlang 5.3/OTP R9C.
  819. *
  820. * -S and -T have been reused in Erlang 5.5/OTP R11B.
  821. *
  822. * -d has been reused in a patch for R12B-4.
  823. */
  824. case '#' :
  825. arg = get_arg(argv[i]+2, argv[i+1], &i);
  826. if ((display_items = atoi(arg)) == 0) {
  827. erts_fprintf(stderr, "bad display items %s\n", arg);
  828. erts_usage();
  829. }
  830. VERBOSE(DEBUG_SYSTEM,
  831. ("using display items %d\n",display_items));
  832. break;
  833. case 'f':
  834. if (!strncmp(argv[i],"-fn",3)) {
  835. arg = get_arg(argv[i]+3, argv[i+1], &i);
  836. switch (*arg) {
  837. case 'u':
  838. erts_set_user_requested_filename_encoding(ERL_FILENAME_UTF8);
  839. break;
  840. case 'l':
  841. erts_set_user_requested_filename_encoding(ERL_FILENAME_LATIN1);
  842. break;
  843. case 'a':
  844. erts_set_user_requested_filename_encoding(ERL_FILENAME_UNKNOWN); break;
  845. default:
  846. erts_fprintf(stderr, "bad filename encoding %s, can be (l,u or a)\n", arg);
  847. erts_usage();
  848. }
  849. break;
  850. } else {
  851. erts_fprintf(stderr, "%s unknown flag %s\n", argv[0], argv[i]);
  852. erts_usage();
  853. }
  854. case 'L':
  855. erts_no_line_info = 1;
  856. break;
  857. case 'v':
  858. #ifdef DEBUG
  859. if (argv[i][2] == '\0') {
  860. verbose |= DEBUG_SYSTEM;
  861. } else {
  862. char *ch;
  863. for (ch = argv[i]+2; *ch != '\0'; ch++) {
  864. switch (*ch) {
  865. case 's': verbose |= DEBUG_SYSTEM; break;
  866. case 'g': verbose |= DEBUG_PRIVATE_GC; break;
  867. case 'h': verbose |= DEBUG_HYBRID_GC; break;
  868. case 'M': verbose |= DEBUG_MEMORY; break;
  869. case 'a': verbose |= DEBUG_ALLOCATION; break;
  870. case 't': verbose |= DEBUG_THREADS; break;
  871. case 'p': verbose |= DEBUG_PROCESSES; break;
  872. case 'm': verbose |= DEBUG_MESSAGES; break;
  873. default : erts_fprintf(stderr,"Unknown verbose option: %c\n",*ch);
  874. }
  875. }
  876. }
  877. erts_printf("Verbose level: ");
  878. if (verbose & DEBUG_SYSTEM) erts_printf("SYSTEM ");
  879. if (verbose & DEBUG_PRIVATE_GC) erts_printf("PRIVATE_GC ");
  880. if (verbose & DEBUG_HYBRID_GC) erts_printf("HYBRID_GC ");
  881. if (verbose & DEBUG_MEMORY) erts_printf("PARANOID_MEMORY ");
  882. if (verbose & DEBUG_ALLOCATION) erts_printf("ALLOCATION ");
  883. if (verbose & DEBUG_THREADS) erts_printf("THREADS ");
  884. if (verbose & DEBUG_PROCESSES) erts_printf("PROCESSES ");
  885. if (verbose & DEBUG_MESSAGES) erts_printf("MESSAGES ");
  886. erts_printf("\n");
  887. #else
  888. erts_fprintf(stderr, "warning: -v (only in debug compiled code)\n");
  889. #endif
  890. break;
  891. case 'V' :
  892. {
  893. char tmp[256];
  894. tmp[0] = tmp[1] = '\0';
  895. #ifdef DEBUG
  896. strcat(tmp, ",DEBUG");
  897. #endif
  898. #ifdef ERTS_SMP
  899. strcat(tmp, ",SMP");
  900. #endif
  901. #ifdef USE_THREADS
  902. strcat(tmp, ",ASYNC_THREADS");
  903. #endif
  904. #ifdef HIPE
  905. strcat(tmp, ",HIPE");
  906. #endif
  907. #ifdef INCREMENTAL
  908. strcat(tmp, ",INCREMENTAL_GC");
  909. #endif
  910. #ifdef HYBRID
  911. strcat(tmp, ",HYBRID");
  912. #endif
  913. erts_fprintf(stderr, "Erlang ");
  914. if (tmp[1]) {
  915. erts_fprintf(stderr, "(%s) ", tmp+1);
  916. }
  917. erts_fprintf(stderr, "(" EMULATOR ") emulator version "
  918. ERLANG_VERSION "\n");
  919. erl_exit(0, "");
  920. }
  921. break;
  922. case 'H': /* undocumented */
  923. fprintf(stderr, "The undocumented +H option has been removed (R10B-6).\n\n");
  924. break;
  925. case 'h': {
  926. char *sub_param = argv[i]+2;
  927. /* set default heap size
  928. *
  929. * h|ms - min_heap_size
  930. * h|mbs - min_bin_vheap_size
  931. *
  932. */
  933. if (has_prefix("mbs", sub_param)) {
  934. arg = get_arg(sub_param+3, argv[i+1], &i);
  935. if ((BIN_VH_MIN_SIZE = atoi(arg)) <= 0) {
  936. erts_fprintf(stderr, "bad heap size %s\n", arg);
  937. erts_usage();
  938. }
  939. VERBOSE(DEBUG_SYSTEM, ("using minimum binary virtual heap size %d\n", BIN_VH_MIN_SIZE));
  940. } else if (has_prefix("ms", sub_param)) {
  941. arg = get_arg(sub_param+2, argv[i+1], &i);
  942. if ((H_MIN_SIZE = atoi(arg)) <= 0) {
  943. erts_fprintf(stderr, "bad heap size %s\n", arg);
  944. erts_usage();
  945. }
  946. VERBOSE(DEBUG_SYSTEM, ("using minimum heap size %d\n", H_MIN_SIZE));
  947. } else {
  948. /* backward compatibility */
  949. arg = get_arg(argv[i]+2, argv[i+1], &i);
  950. if ((H_MIN_SIZE = atoi(arg)) <= 0) {
  951. erts_fprintf(stderr, "bad heap size %s\n", arg);
  952. erts_usage();
  953. }
  954. VERBOSE(DEBUG_SYSTEM, ("using minimum heap size %d\n", H_MIN_SIZE));
  955. }
  956. break;
  957. }
  958. case 'd':
  959. /*
  960. * Never produce crash dumps for internally detected
  961. * errors; only produce a core dump. (Generation of
  962. * crash dumps is destructive and makes it impossible
  963. * to inspect the contents of process heaps in the
  964. * core dump.)
  965. */
  966. erts_no_crash_dump = 1;
  967. break;
  968. case 'e':
  969. if (sys_strcmp("c", argv[i]+2) == 0) {
  970. erts_ets_always_compress = 1;
  971. }
  972. else {
  973. /* set maximum number of ets tables */
  974. arg = get_arg(argv[i]+2, argv[i+1], &i);
  975. if (( user_requested_db_max_tabs = atoi(arg) ) < 0) {
  976. erts_fprintf(stderr, "bad maximum number of ets tables %s\n", arg);
  977. erts_usage();
  978. }
  979. VERBOSE(DEBUG_SYSTEM,
  980. ("using maximum number of ets tables %d\n",
  981. user_requested_db_max_tabs));
  982. }
  983. break;
  984. case 'i':
  985. /* define name of module for initial function */
  986. init = get_arg(argv[i]+2, argv[i+1], &i);
  987. break;
  988. case 'b':
  989. /* define name of initial function */
  990. boot = get_arg(argv[i]+2, argv[i+1], &i);
  991. break;
  992. case 'B':
  993. if (argv[i][2] == 'i') /* +Bi */
  994. ignore_break = 1;
  995. else if (argv[i][2] == 'c') /* +Bc */
  996. replace_intr = 1;
  997. else if (argv[i][2] == 'd') /* +Bd */
  998. have_break_handler = 0;
  999. else if (argv[i+1][0] == 'i') { /* +B i */
  1000. get_arg(argv[i]+2, argv[i+1], &i);
  1001. ignore_break = 1;
  1002. }
  1003. else if (argv[i+1][0] == 'c') { /* +B c */
  1004. get_arg(argv[i]+2, argv[i+1], &i);
  1005. replace_intr = 1;
  1006. }
  1007. else if (argv[i+1][0] == 'd') { /* +B d */
  1008. get_arg(argv[i]+2, argv[i+1], &i);
  1009. have_break_handler = 0;
  1010. }
  1011. else /* +B */
  1012. have_break_handler = 0;
  1013. break;
  1014. case 'K':
  1015. /* If kernel poll support is present,
  1016. erl_sys_args() will remove the K parameter
  1017. and value */
  1018. get_arg(argv[i]+2, argv[i+1], &i);
  1019. erts_fprintf(stderr,
  1020. "kernel-poll not supported; \"K\" parameter ignored\n",
  1021. arg);
  1022. break;
  1023. case 'P':
  1024. /* set maximum number of processes */
  1025. Parg = get_arg(argv[i]+2, argv[i+1], &i);
  1026. erts_max_processes = atoi(Parg);
  1027. /* Check of result is delayed until later. This is because +R
  1028. may be given after +P. */
  1029. break;
  1030. case 'S' : /* Was handled in early_init() just read past it */
  1031. (void) get_arg(argv[i]+2, argv[i+1], &i);
  1032. break;
  1033. case 's' : {
  1034. char *estr;
  1035. int res;
  1036. char *sub_param = argv[i]+2;
  1037. if (has_prefix("bt", sub_param)) {
  1038. arg = get_arg(sub_param+2, argv[i+1], &i);
  1039. res = erts_init_scheduler_bind_type_string(arg);
  1040. if (res != ERTS_INIT_SCHED_BIND_TYPE_SUCCESS) {
  1041. switch (res) {
  1042. case ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED:
  1043. estr = "not supported";
  1044. break;
  1045. case ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY:
  1046. estr = "no cpu topology available";
  1047. break;
  1048. case ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE:
  1049. estr = "invalid type";
  1050. break;
  1051. default:
  1052. estr = "undefined error";
  1053. break;
  1054. }
  1055. erts_fprintf(stderr,
  1056. "setting scheduler bind type '%s' failed: %s\n",
  1057. arg,
  1058. estr);
  1059. erts_usage();
  1060. }
  1061. }
  1062. else if (has_prefix("cl", sub_param)) {
  1063. arg = get_arg(sub_param+2, argv[i+1], &i);
  1064. if (sys_strcmp("true", arg) == 0)
  1065. erts_sched_compact_load = 1;
  1066. else if (sys_strcmp("false", arg) == 0)
  1067. erts_sched_compact_load = 0;
  1068. else {
  1069. erts_fprintf(stderr,
  1070. "bad scheduler compact load value '%s'\n",
  1071. arg);
  1072. erts_usage();
  1073. }
  1074. }
  1075. else if (has_prefix("ct", sub_param)) {
  1076. arg = get_arg(sub_param+2, argv[i+1], &i);
  1077. res = erts_init_cpu_topology_string(arg);
  1078. if (res != ERTS_INIT_CPU_TOPOLOGY_OK) {
  1079. switch (res) {
  1080. case ERTS_INIT_CPU_TOPOLOGY_INVALID_ID:
  1081. estr = "invalid identifier";
  1082. break;
  1083. case ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE:
  1084. estr = "invalid identifier range";
  1085. break;
  1086. case ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY:
  1087. estr = "invalid hierarchy";
  1088. break;
  1089. case ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_TYPE:
  1090. estr = "invalid identifier type";
  1091. break;
  1092. case ERTS_INIT_CPU_TOPOLOGY_INVALID_NODES:
  1093. estr = "invalid nodes declaration";
  1094. break;
  1095. case ERTS_INIT_CPU_TOPOLOGY_MISSING_LID:
  1096. estr = "missing logical identifier";
  1097. break;
  1098. case ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_LIDS:
  1099. estr = "not unique logical identifiers";
  1100. break;
  1101. case ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_ENTITIES:
  1102. estr = "not unique entities";
  1103. break;
  1104. case ERTS_INIT_CPU_TOPOLOGY_MISSING:
  1105. estr = "missing cpu topology";
  1106. break;
  1107. default:
  1108. estr = "undefined error";
  1109. break;
  1110. }
  1111. erts_fprintf(stderr,
  1112. "bad cpu topology '%s': %s\n",
  1113. arg,
  1114. estr);
  1115. erts_usage();
  1116. }
  1117. }
  1118. else if (sys_strcmp("nsp", sub_param) == 0)
  1119. erts_use_sender_punish = 0;
  1120. else if (sys_strcmp("wt", sub_param) == 0) {
  1121. arg = get_arg(sub_param+2, argv[i+1], &i);
  1122. if (erts_sched_set_wakeup_limit(arg) != 0) {
  1123. erts_fprintf(stderr, "scheduler wakeup threshold: %s\n",
  1124. arg);
  1125. erts_usage();
  1126. }
  1127. VERBOSE(DEBUG_SYSTEM,
  1128. ("scheduler wakeup threshold: %s\n", arg));
  1129. }
  1130. else if (has_prefix("ss", sub_param)) {
  1131. /* suggested stack size (Kilo Words) for scheduler threads */
  1132. arg = get_arg(sub_param+2, argv[i+1], &i);
  1133. erts_sched_thread_suggested_stack_size = atoi(arg);
  1134. if ((erts_sched_thread_suggested_stack_size
  1135. < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
  1136. || (erts_sched_thread_suggested_stack_size >
  1137. ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
  1138. erts_fprintf(stderr, "bad stack size for scheduler threads %s\n",
  1139. arg);
  1140. erts_usage();
  1141. }
  1142. VERBOSE(DEBUG_SYSTEM,
  1143. ("suggested scheduler thread stack size %d kilo words\n",
  1144. erts_sched_thread_suggested_stack_size));
  1145. }
  1146. else {
  1147. erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]);
  1148. erts_usage();
  1149. }
  1150. break;
  1151. }
  1152. case 't':
  1153. /* set atom table size */
  1154. arg = get_arg(argv[i]+2, argv[i+1], &i);
  1155. errno = 0;
  1156. erts_atom_table_size = strtol(arg, NULL, 10);
  1157. if (errno != 0 ||
  1158. erts_atom_table_size < MIN_ATOM_TABLE_SIZE ||
  1159. erts_atom_table_size > MAX_ATOM_TABLE_SIZE) {
  1160. erts_fprintf(stderr, "bad atom table size %s\n", arg);
  1161. erts_usage();
  1162. }
  1163. VERBOSE(DEBUG_SYSTEM,
  1164. ("setting maximum number of atoms to %d\n",
  1165. erts_atom_table_size));
  1166. break;
  1167. case 'T' :
  1168. arg = get_arg(argv[i]+2, argv[i+1], &i);
  1169. errno = 0;
  1170. erts_modified_timing_level = atoi(arg);
  1171. if ((erts_modified_timing_level == 0 && errno != 0)
  1172. || erts_modified_timing_level < 0
  1173. || erts_modified_timing_level >= ERTS_MODIFIED_TIMING_LEVELS) {
  1174. erts_fprintf(stderr, "bad modified timing level %s\n", arg);
  1175. erts_usage();
  1176. }
  1177. else {
  1178. VERBOSE(DEBUG_SYSTEM,
  1179. ("using modified timing level %d\n",
  1180. erts_modified_timing_level));
  1181. }
  1182. break;
  1183. case 'R': {
  1184. /* set compatibility release */
  1185. arg = get_arg(argv[i]+2, argv[i+1], &i);
  1186. erts_compat_rel = atoi(arg);
  1187. if (erts_compat_rel < ERTS_MIN_COMPAT_REL
  1188. || erts_compat_rel > this_rel_num()) {
  1189. erts_fprintf(stderr, "bad compatibility release number %s\n", arg);
  1190. erts_usage();
  1191. }
  1192. ASSERT(ERTS_MIN_COMPAT_REL >= 7);
  1193. switch (erts_compat_rel) {
  1194. case 7:
  1195. case 8:
  1196. case 9:
  1197. erts_use_r9_pids_ports = 1;
  1198. default:
  1199. break;
  1200. }
  1201. break;
  1202. }
  1203. case 'A': /* Was handled in early init just read past it */
  1204. (void) get_arg(argv[i]+2, argv[i+1], &i);
  1205. break;
  1206. case 'a':
  1207. /* suggested stack size (Kilo Words) for threads in thread pool */
  1208. arg = get_arg(argv[i]+2, argv[i+1], &i);
  1209. erts_async_thread_suggested_stack_size = atoi(arg);
  1210. if ((erts_async_thread_suggested_stack_size
  1211. < ERTS_ASYNC_THREAD_MIN_STACK_SIZE)
  1212. || (erts_async_thread_suggested_stack_size >
  1213. ERTS_ASYNC_THREAD_MAX_STACK_SIZE)) {
  1214. erts_fprintf(stderr, "bad stack size for async threads %s\n",
  1215. arg);
  1216. erts_usage();
  1217. }
  1218. VERBOSE(DEBUG_SYSTEM,
  1219. ("suggested async-thread stack size %d kilo words\n",
  1220. erts_async_thread_suggested_stack_size));
  1221. break;
  1222. case 'r': {
  1223. char *sub_param = argv[i]+2;
  1224. if (has_prefix("g", sub_param)) {
  1225. get_arg(sub_param+1, argv[i+1], &i);
  1226. /* already handled */
  1227. }
  1228. else {
  1229. erts_ets_realloc_always_moves = 1;
  1230. }
  1231. break;
  1232. }
  1233. case 'n': /* XXX obsolete */
  1234. break;
  1235. case 'c':
  1236. if (argv[i][2] == 0) { /* -c: documented option */
  1237. erts_disable_tolerant_timeofday = 1;
  1238. }
  1239. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  1240. else if (argv[i][2] == 'i') { /* -ci: undocumented option */
  1241. count_instructions = 1;
  1242. }
  1243. #endif
  1244. break;
  1245. case 'W':
  1246. arg = get_arg(argv[i]+2, argv[i+1], &i);
  1247. switch (arg[0]) {
  1248. case 'i':
  1249. erts_error_logger_warnings = am_info;
  1250. break;
  1251. case 'w':
  1252. erts_error_logger_warnings = am_warning;
  1253. break;
  1254. case 'e': /* The default */
  1255. erts_error_logger_warnings = am_error; break;
  1256. default:
  1257. erts_fprintf(stderr, "unrecognized warning_map option %s\n", arg);
  1258. erts_usage();
  1259. }
  1260. break;
  1261. case 'z': {
  1262. char *sub_param = argv[i]+2;
  1263. int new_limit;
  1264. if (has_prefix("dbbl", sub_param)) {
  1265. arg = get_arg(sub_param+4, argv[i+1], &i);
  1266. new_limit = atoi(arg);
  1267. if (new_limit < 1 || INT_MAX/1024 < new_limit) {
  1268. erts_fprintf(stderr, "Invalid dbbl limit: %d\n", new_limit);
  1269. erts_usage();
  1270. } else {
  1271. erts_dist_buf_busy_limit = new_limit*1024;
  1272. }
  1273. } else {
  1274. erts_fprintf(stderr, "bad -z option %s\n", argv[i]);
  1275. erts_usage();
  1276. }
  1277. break;
  1278. }
  1279. default:
  1280. erts_fprintf(stderr, "%s unknown flag %s\n", argv[0], argv[i]);
  1281. erts_usage();
  1282. }
  1283. i++;
  1284. }
  1285. /* Delayed check of +P flag */
  1286. if (erts_max_processes < ERTS_MIN_PROCESSES
  1287. || erts_max_processes > ERTS_MAX_PROCESSES
  1288. || (erts_use_r9_pids_ports
  1289. && erts_max_processes > ERTS_MAX_R9_PROCESSES)) {
  1290. erts_fprintf(stderr, "bad number of processes %s\n", Parg);
  1291. erts_usage();
  1292. }
  1293. /* Restart will not reinstall the break handler */
  1294. #ifdef __WIN32__
  1295. if (ignore_break)
  1296. erts_set_ignore_break();
  1297. else if (replace_intr)
  1298. erts_replace_intr();
  1299. else
  1300. init_break_handler();
  1301. #else
  1302. if (ignore_break)
  1303. erts_set_ignore_break();
  1304. else if (have_break_handler)
  1305. init_break_handler();
  1306. if (replace_intr)
  1307. erts_replace_intr();
  1308. #endif
  1309. boot_argc = argc - i; /* Number of arguments to init */
  1310. boot_argv = &argv[i];
  1311. erl_init(ncpu);
  1312. init_shared_memory(boot_argc, boot_argv);
  1313. load_preloaded();
  1314. erts_initialized = 1;
  1315. erl_first_process_otp("otp_ring0", NULL, 0, boot_argc, boot_argv);
  1316. #ifdef ERTS_SMP
  1317. erts_start_schedulers();
  1318. /* Let system specific code decide what to do with the main thread... */
  1319. erts_sys_main_thread(); /* May or may not return! */
  1320. #else
  1321. erts_thr_set_main_status(1, 1);
  1322. #if ERTS_USE_ASYNC_READY_Q
  1323. erts_get_scheduler_data()->aux_work_data.async_ready.queue
  1324. = erts_get_async_ready_queue(1);
  1325. #endif
  1326. set_main_stack_size();
  1327. process_main();
  1328. #endif
  1329. }
  1330. #ifdef USE_THREADS
  1331. __decl_noreturn void erts_thr_fatal_error(int err, char *what)
  1332. {
  1333. char *errstr = err ? strerror(err) : NULL;
  1334. erts_fprintf(stderr,
  1335. "Failed to %s: %s%s(%d)\n",
  1336. what,
  1337. errstr ? errstr : "",
  1338. errstr ? " " : "",
  1339. err);
  1340. abort();
  1341. }
  1342. #endif
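/* Run at exit: make sure only one thread shuts the system down and, for a normal exit from a fully initialized system, release global resources and flush pending async jobs. */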
  1343. static void
  1344. system_cleanup(int exit_code)
  1345. {
  1346. /*
  1347. * Make sure only one thread exits the runtime system.
  1348. */
  1349. if (erts_atomic_inc_read_nob(&exiting) != 1) {
  1350. /*
  1351. * Another thread is currently exiting the system;
  1352. * wait for it to do its job.
  1353. */
  1354. #ifdef ERTS_SMP
  1355. if (erts_thr_progress_is_managed_thread()) {
  1356. /*
  1357. * The exiting thread might be waiting for
  1358. * us to block; need to update status...
  1359. */
  1360. erts_thr_progress_active(NULL, 0);
  1361. erts_thr_progress_prepare_wait(NULL);
  1362. }
  1363. #endif
  1364. /* Wait forever... */
  1365. while (1)
  1366. erts_milli_sleep(10000000);
  1367. }
  1368. /* No cleanup wanted if ...
  1369. * 1. we are about to do an abnormal exit
  1370. * 2. we haven't finished initializing, or
  1371. * 3. a thread other than the main thread is performing the exit
  1372. * (in threaded non smp case).
  1373. */
  1374. if (exit_code != 0
  1375. || !erts_initialized
  1376. #if defined(USE_THREADS) && !defined(ERTS_SMP)
  1377. || !erts_equal_tids(main_thread, erts_thr_self())
  1378. #endif
  1379. )
  1380. return;
  1381. #ifdef ERTS_SMP
  1382. #ifdef ERTS_ENABLE_LOCK_CHECK
  1383. erts_lc_check_exact(NULL, 0);
  1384. #endif
  1385. #endif
  1386. #ifdef HYBRID
  1387. if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
  1388. (void *)ma_src_stack);
  1389. if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
  1390. (void *)ma_dst_stack);
  1391. if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
  1392. (void *)ma_offset_stack);
  1393. ma_src_stack = NULL;
  1394. ma_dst_stack = NULL;
  1395. ma_offset_stack = NULL;
  1396. erts_cleanup_offheap(&erts_global_offheap);
  1397. #endif
  1398. #if defined(HYBRID) && !defined(INCREMENTAL)
  1399. if (global_heap) {
  1400. ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
  1401. (void*) global_heap,
  1402. sizeof(Eterm) * global_heap_sz);
  1403. }
  1404. global_heap = NULL;
  1405. #endif
  1406. #ifdef INCREMENTAL
  1407. erts_cleanup_incgc();
  1408. #endif
  1409. erts_exit_flush_async();
  1410. }
  1411. /*
  1412. * Common exit function, all exits from the system go through here.
  1413. * n <= 0 -> normal exit with status n;
  1414. * n = 127 -> Erlang crash dump produced, exit with status 1;
  1415. * other positive n -> Erlang crash dump and core dump produced.
  1416. */
  1417. __decl_noreturn void erl_exit0(char *file, int line, int n, char *fmt,...)
  1418. {
  1419. unsigned int an;
  1420. va_list args;
  1421. va_start(args, fmt);
  1422. system_cleanup(n);
  1423. save_statistics();
  1424. an = abs(n);
  1425. if (erts_mtrace_enabled)
  1426. erts_mtrace_exit((Uint32) an);
  1427. /* Produce an Erlang crash dump if error */
  1428. if (n > 0 && erts_initialized &&
  1429. (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
  1430. erl_crash_dump_v(file, line, fmt, args);
  1431. }
  1432. /* need to reinitialize va_args thing */
  1433. va_end(args);
  1434. va_start(args, fmt);
  1435. if (fmt != NULL && *fmt != '\0')
  1436. erl_error(fmt, args); /* Print error message. */
  1437. va_end(args);
  1438. sys_tty_reset(n);
  1439. if (n == ERTS_INTR_EXIT)
  1440. exit(0);
  1441. else if (n == 127)
  1442. ERTS_EXIT_AFTER_DUMP(1);
  1443. else if (n > 0 || n == ERTS_ABORT_EXIT)
  1444. abort();
  1445. exit(an);
  1446. }
  1447. __decl_noreturn void erl_exit(int n, char *fmt,...)
  1448. {
  1449. unsigned int an;
  1450. va_list args;
  1451. va_start(args, fmt);
  1452. system_cleanup(n);
  1453. save_statistics();
  1454. an = abs(n);
  1455. if (erts_mtrace_enabled)
  1456. erts_mtrace_exit((Uint32) an);
  1457. /* Produce an Erlang crash dump if error */
  1458. if (n > 0 && erts_initialized &&
  1459. (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
  1460. erl_crash_dump_v((char*) NULL, 0, fmt, args);
  1461. }
  1462. /* need to reinitialize va_args thing */
  1463. va_end(args);
  1464. va_start(args, fmt);
  1465. if (fmt != NULL && *fmt != '\0')
  1466. erl_error(fmt, args); /* Print error message. */
  1467. va_end(args);
  1468. sys_tty_reset(n);
  1469. if (n == ERTS_INTR_EXIT)
  1470. exit(0);
  1471. else if (n == ERTS_DUMP_EXIT)
  1472. ERTS_EXIT_AFTER_DUMP(1);
  1473. else if (n > 0 || n == ERTS_ABORT_EXIT)
  1474. abort();
  1475. exit(an);
  1476. }