
/erts/emulator/sys/unix/sys.c

https://github.com/dudefrommangalore/otp
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2010. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #ifdef ISC32
  23. #define _POSIX_SOURCE
  24. #define _XOPEN_SOURCE
  25. #endif
  26. #include <sys/times.h> /* ! */
  27. #include <time.h>
  28. #include <signal.h>
  29. #include <sys/wait.h>
  30. #include <sys/uio.h>
  31. #include <termios.h>
  32. #include <ctype.h>
  33. #include <sys/utsname.h>
  34. #ifdef ISC32
  35. #include <sys/bsdtypes.h>
  36. #endif
  37. #include <termios.h>
  38. #ifdef HAVE_FCNTL_H
  39. #include <fcntl.h>
  40. #endif
  41. #ifdef HAVE_SYS_IOCTL_H
  42. #include <sys/ioctl.h>
  43. #endif
  44. #define NEED_CHILD_SETUP_DEFINES
  45. #define ERTS_WANT_BREAK_HANDLING
  46. #define ERTS_WANT_GOT_SIGUSR1
  47. #define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
  48. #include "sys.h"
  49. #ifdef USE_THREADS
  50. #include "erl_threads.h"
  51. #endif
  52. #include "erl_mseg.h"
  53. extern char **environ;
  54. static erts_smp_rwmtx_t environ_rwmtx;
  55. #define MAX_VSIZE 16 /* Max number of entries allowed in an I/O
  56. * vector sock_sendv().
  57. */
  58. /*
  59. * Don't need global.h, but bif_table.h (included by bif.h),
  60. * won't compile otherwise
  61. */
  62. #include "global.h"
  63. #include "bif.h"
  64. #include "erl_sys_driver.h"
  65. #include "erl_check_io.h"
  66. #ifndef DISABLE_VFORK
  67. #define DISABLE_VFORK 0
  68. #endif
  69. #ifdef USE_THREADS
  70. # ifdef ENABLE_CHILD_WAITER_THREAD
  71. # define CHLDWTHR ENABLE_CHILD_WAITER_THREAD
  72. # else
  73. # define CHLDWTHR 0
  74. # endif
  75. #else
  76. # define CHLDWTHR 0
  77. #endif
  78. /*
  79. * [OTP-3906]
  80. * Solaris signal management gets confused when threads are used and a
  81. * lot of child processes die. The confusion results in SIGCHLD
  82. * signals not being delivered to the emulator, which in turn results in
  83. * a lot of defunct processes in the system.
  84. *
  85. * The problem seems to appear when a signal is frequently
  86. * blocked/unblocked at the same time as the signal is frequently
  87. * propagated. The child waiter thread is a workaround for this problem.
  88. * The SIGCHLD signal is always blocked (in all threads), and the child
  89. * waiter thread fetches the signal by a call to sigwait(). See
  90. * child_waiter().
  91. */
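/*
 * A rough sketch of that workaround (the real child_waiter() defined later
 * in this file is authoritative; the details below are illustrative only):
 *
 *   - SIGCHLD is kept blocked in every thread, so the signal is never
 *     delivered asynchronously and the block/unblock races described above
 *     cannot occur;
 *   - a dedicated thread (started from spawn_init() when CHLDWTHR is set)
 *     fetches the signal synchronously and reaps terminated children with
 *     waitpid(), recording pid and exit status so the owning port can be
 *     told about the exit.
 */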
  92. typedef struct ErtsSysReportExit_ ErtsSysReportExit;
  93. struct ErtsSysReportExit_ {
  94. ErtsSysReportExit *next;
  95. Eterm port;
  96. int pid;
  97. int ifd;
  98. int ofd;
  99. #if CHLDWTHR && !defined(ERTS_SMP)
  100. int status;
  101. #endif
  102. };
  103. static ErtsSysReportExit *report_exit_list;
  104. #if CHLDWTHR && !defined(ERTS_SMP)
  105. static ErtsSysReportExit *report_exit_transit_list;
  106. #endif
  107. extern int check_async_ready(void);
  108. extern int driver_interrupt(int, int);
  109. /*EXTERN_FUNCTION(void, increment_time, (int));*/
  110. /*EXTERN_FUNCTION(int, next_time, (_VOID_));*/
  111. extern void do_break(void);
  112. extern void erl_sys_args(int*, char**);
  113. /* The following two defs should probably be moved somewhere else */
  114. extern void erts_sys_init_float(void);
  115. extern void erl_crash_dump(char* file, int line, char* fmt, ...);
  116. #define DIR_SEPARATOR_CHAR '/'
  117. #if defined(DEBUG)
  118. #define ERL_BUILD_TYPE_MARKER ".debug"
  119. #elif defined(PURIFY)
  120. #define ERL_BUILD_TYPE_MARKER ".purify"
  121. #elif defined(QUANTIFY)
  122. #define ERL_BUILD_TYPE_MARKER ".quantify"
  123. #elif defined(PURECOV)
  124. #define ERL_BUILD_TYPE_MARKER ".purecov"
  125. #elif defined(VALGRIND)
  126. #define ERL_BUILD_TYPE_MARKER ".valgrind"
  127. #else /* opt */
  128. #define ERL_BUILD_TYPE_MARKER
  129. #endif
  130. #define CHILD_SETUP_PROG_NAME "child_setup" ERL_BUILD_TYPE_MARKER
  131. #if !DISABLE_VFORK
  132. static char *child_setup_prog;
  133. #endif
  134. #ifdef DEBUG
  135. static int debug_log = 0;
  136. #endif
  137. #ifdef ERTS_SMP
  138. erts_smp_atomic_t erts_got_sigusr1;
  139. #define ERTS_SET_GOT_SIGUSR1 \
  140. erts_smp_atomic_set(&erts_got_sigusr1, 1)
  141. #define ERTS_UNSET_GOT_SIGUSR1 \
  142. erts_smp_atomic_set(&erts_got_sigusr1, 0)
  143. static erts_smp_atomic_t have_prepared_crash_dump;
  144. #define ERTS_PREPARED_CRASH_DUMP \
  145. ((int) erts_smp_atomic_xchg(&have_prepared_crash_dump, 1))
  146. #else
  147. volatile int erts_got_sigusr1;
  148. #define ERTS_SET_GOT_SIGUSR1 (erts_got_sigusr1 = 1)
  149. #define ERTS_UNSET_GOT_SIGUSR1 (erts_got_sigusr1 = 0)
  150. static volatile int have_prepared_crash_dump;
  151. #define ERTS_PREPARED_CRASH_DUMP \
  152. (have_prepared_crash_dump++)
  153. #endif
  154. static erts_smp_atomic_t sys_misc_mem_sz;
  155. #if defined(ERTS_SMP)
  156. static void smp_sig_notify(char c);
  157. static int sig_notify_fds[2] = {-1, -1};
  158. #elif defined(USE_THREADS)
  159. static int async_fd[2];
  160. #endif
  161. #if CHLDWTHR || defined(ERTS_SMP)
  162. erts_mtx_t chld_stat_mtx;
  163. #endif
  164. #if CHLDWTHR
  165. static erts_tid_t child_waiter_tid;
  166. /* chld_stat_mtx is used to protect against concurrent accesses
  167. of the driver_data fields pid, alive, and status. */
  168. erts_cnd_t chld_stat_cnd;
  169. static long children_alive;
  170. #define CHLD_STAT_LOCK erts_mtx_lock(&chld_stat_mtx)
  171. #define CHLD_STAT_UNLOCK erts_mtx_unlock(&chld_stat_mtx)
  172. #define CHLD_STAT_WAIT erts_cnd_wait(&chld_stat_cnd, &chld_stat_mtx)
  173. #define CHLD_STAT_SIGNAL erts_cnd_signal(&chld_stat_cnd)
  174. #elif defined(ERTS_SMP) /* ------------------------------------------------- */
  175. #define CHLD_STAT_LOCK erts_mtx_lock(&chld_stat_mtx)
  176. #define CHLD_STAT_UNLOCK erts_mtx_unlock(&chld_stat_mtx)
  177. #else /* ------------------------------------------------------------------- */
  178. #define CHLD_STAT_LOCK
  179. #define CHLD_STAT_UNLOCK
  180. static volatile int children_died;
  181. #endif
  182. static struct fd_data {
  183. char pbuf[4]; /* hold partial packet bytes */
  184. int psz; /* size of pbuf */
  185. char *buf;
  186. char *cpos;
  187. int sz;
  188. int remain; /* for input on fd */
  189. } *fd_data; /* indexed by fd */
  190. /* static FUNCTION(int, write_fill, (int, char*, int)); unused? */
  191. static FUNCTION(void, note_child_death, (int, int));
  192. #if CHLDWTHR
  193. static FUNCTION(void *, child_waiter, (void *));
  194. #endif
  195. /********************* General functions ****************************/
  196. /* This is used by both the drivers and general I/O, must be set early */
  197. static int max_files = -1;
  198. /*
  199. * a few variables used by the break handler
  200. */
  201. #ifdef ERTS_SMP
  202. erts_smp_atomic_t erts_break_requested;
  203. #define ERTS_SET_BREAK_REQUESTED \
  204. erts_smp_atomic_set(&erts_break_requested, (long) 1)
  205. #define ERTS_UNSET_BREAK_REQUESTED \
  206. erts_smp_atomic_set(&erts_break_requested, (long) 0)
  207. #else
  208. volatile int erts_break_requested = 0;
  209. #define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
  210. #define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
  211. #endif
  212. /* set early so the break handler has access to initial mode */
  213. static struct termios initial_tty_mode;
  214. static int replace_intr = 0;
  215. /* assume yes initially, ttsl_init will clear it */
  216. int using_oldshell = 1;
  217. #ifdef ERTS_ENABLE_KERNEL_POLL
  218. int erts_use_kernel_poll = 0;
  219. struct {
  220. int (*select)(ErlDrvPort, ErlDrvEvent, int, int);
  221. int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
  222. void (*check_io_interrupt)(int);
  223. void (*check_io_interrupt_tmd)(int, long);
  224. void (*check_io)(int);
  225. Uint (*size)(void);
  226. Eterm (*info)(void *);
  227. int (*check_io_debug)(void);
  228. } io_func = {0};
  229. int
  230. driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on)
  231. {
  232. return (*io_func.select)(port, event, mode, on);
  233. }
  234. int
  235. driver_event(ErlDrvPort port, ErlDrvEvent event, ErlDrvEventData event_data)
  236. {
  237. return (*io_func.event)(port, event, event_data);
  238. }
  239. Eterm erts_check_io_info(void *p)
  240. {
  241. return (*io_func.info)(p);
  242. }
  243. int
  244. erts_check_io_debug(void)
  245. {
  246. return (*io_func.check_io_debug)();
  247. }
  248. static void
  249. init_check_io(void)
  250. {
  251. if (erts_use_kernel_poll) {
  252. io_func.select = driver_select_kp;
  253. io_func.event = driver_event_kp;
  254. io_func.check_io_interrupt = erts_check_io_interrupt_kp;
  255. io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp;
  256. io_func.check_io = erts_check_io_kp;
  257. io_func.size = erts_check_io_size_kp;
  258. io_func.info = erts_check_io_info_kp;
  259. io_func.check_io_debug = erts_check_io_debug_kp;
  260. erts_init_check_io_kp();
  261. max_files = erts_check_io_max_files_kp();
  262. }
  263. else {
  264. io_func.select = driver_select_nkp;
  265. io_func.event = driver_event_nkp;
  266. io_func.check_io_interrupt = erts_check_io_interrupt_nkp;
  267. io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp;
  268. io_func.check_io = erts_check_io_nkp;
  269. io_func.size = erts_check_io_size_nkp;
  270. io_func.info = erts_check_io_info_nkp;
  271. io_func.check_io_debug = erts_check_io_debug_nkp;
  272. erts_init_check_io_nkp();
  273. max_files = erts_check_io_max_files_nkp();
  274. }
  275. }
  276. #define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt)
  277. #define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd)
  278. #define ERTS_CHK_IO (*io_func.check_io)
  279. #define ERTS_CHK_IO_SZ (*io_func.size)
  280. #else /* !ERTS_ENABLE_KERNEL_POLL */
  281. static void
  282. init_check_io(void)
  283. {
  284. erts_init_check_io();
  285. max_files = erts_check_io_max_files();
  286. }
  287. #define ERTS_CHK_IO_INTR erts_check_io_interrupt
  288. #define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed
  289. #define ERTS_CHK_IO erts_check_io
  290. #define ERTS_CHK_IO_SZ erts_check_io_size
  291. #endif
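/*
 * In the ERTS_ENABLE_KERNEL_POLL build, two complete check-io
 * implementations are linked in: the *_kp variants (kernel poll, e.g.
 * epoll/kqueue//dev/poll depending on platform) and the *_nkp variants
 * (plain poll()/select()). init_check_io() picks one set at startup based
 * on erts_use_kernel_poll (normally selected with the +K emulator flag)
 * and stores the function pointers in io_func; the ERTS_CHK_IO* macros and
 * the driver_select()/driver_event() wrappers then dispatch through that
 * table, so a driver_select() call ends up in either driver_select_kp() or
 * driver_select_nkp(). Without kernel-poll support compiled in, the macros
 * above bind directly to the single erts_check_io* implementation and no
 * indirection is paid.
 */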
  292. #ifdef ERTS_SMP
  293. void
  294. erts_sys_schedule_interrupt(int set)
  295. {
  296. ERTS_CHK_IO_INTR(set);
  297. }
  298. void
  299. erts_sys_schedule_interrupt_timed(int set, long msec)
  300. {
  301. ERTS_CHK_IO_INTR_TMD(set, msec);
  302. }
  303. #endif
  304. Uint
  305. erts_sys_misc_mem_sz(void)
  306. {
  307. Uint res = ERTS_CHK_IO_SZ();
  308. res += erts_smp_atomic_read(&sys_misc_mem_sz);
  309. return res;
  310. }
  311. /*
  312. * reset the terminal to the original settings on exit
  313. */
  314. void sys_tty_reset(void)
  315. {
  316. if (using_oldshell && !replace_intr) {
  317. SET_BLOCKING(0);
  318. }
  319. else if (isatty(0)) {
  320. tcsetattr(0,TCSANOW,&initial_tty_mode);
  321. }
  322. }
  323. #ifdef __tile__
  324. /* Direct malloc to spread memory around the caches of multiple tiles. */
  325. #include <malloc.h>
  326. MALLOC_USE_HASH(1);
  327. #endif
  328. #ifdef USE_THREADS
  329. static void *ethr_internal_alloc(size_t size)
  330. {
  331. return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
  332. }
  333. static void *ethr_internal_realloc(void *ptr, size_t size)
  334. {
  335. return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
  336. }
  337. static void ethr_internal_free(void *ptr)
  338. {
  339. erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
  340. }
  341. #ifdef ERTS_THR_HAVE_SIG_FUNCS
  342. /*
  343. * A child thread inherits the parent's signal mask at creation. In order to
  344. * guarantee that the main thread will receive all SIGINT, SIGCHLD, and
  345. * SIGUSR1 signals sent to the process, we block these signals in the
  346. * parent thread when creating a new thread.
  347. */
  348. static sigset_t thr_create_sigmask;
  349. #endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
  350. typedef struct {
  351. #ifdef ERTS_THR_HAVE_SIG_FUNCS
  352. sigset_t saved_sigmask;
  353. #endif
  354. int unbind_child;
  355. } erts_thr_create_data_t;
  356. /*
  357. * thr_create_prepare() is called in parent thread before thread creation.
  358. * Returned value is passed as argument to thr_create_cleanup().
  359. */
  360. static void *
  361. thr_create_prepare(void)
  362. {
  363. erts_thr_create_data_t *tcdp;
  364. ErtsSchedulerData *esdp;
  365. tcdp = erts_alloc(ERTS_ALC_T_TMP, sizeof(erts_thr_create_data_t));
  366. #ifdef ERTS_THR_HAVE_SIG_FUNCS
  367. erts_thr_sigmask(SIG_BLOCK, &thr_create_sigmask, &tcdp->saved_sigmask);
  368. #endif
  369. esdp = erts_get_scheduler_data();
  370. tcdp->unbind_child = esdp && erts_is_scheduler_bound(esdp);
  371. return (void *) tcdp;
  372. }
  373. /* thr_create_cleanup() is called in parent thread after thread creation. */
  374. static void
  375. thr_create_cleanup(void *vtcdp)
  376. {
  377. erts_thr_create_data_t *tcdp = (erts_thr_create_data_t *) vtcdp;
  378. #ifdef ERTS_THR_HAVE_SIG_FUNCS
  379. /* Restore signalmask... */
  380. erts_thr_sigmask(SIG_SETMASK, &tcdp->saved_sigmask, NULL);
  381. #endif
  382. erts_free(ERTS_ALC_T_TMP, tcdp);
  383. }
  384. static void
  385. thr_create_prepare_child(void *vtcdp)
  386. {
  387. erts_thr_create_data_t *tcdp = (erts_thr_create_data_t *) vtcdp;
  388. #ifndef NO_FPE_SIGNALS
  389. /*
  390. * We do not want fp exceptions in threads other than the
  391. * scheduler threads. We enable fpe explicitly in the scheduler
  392. * threads after this.
  393. */
  394. erts_thread_disable_fpe();
  395. #endif
  396. if (tcdp->unbind_child) {
  397. erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
  398. erts_unbind_from_cpu(erts_cpuinfo);
  399. erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
  400. }
  401. }
  402. #endif /* #ifdef USE_THREADS */
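/*
 * Taken together, the three hooks above give roughly the following sequence
 * around every thread creation (a sketch; the hooks are registered in
 * erts_sys_pre_init() below and invoked by the ethread layer):
 *
 *   parent: tcdp = thr_create_prepare();     block SIGINT/SIGCHLD/SIGUSR1,
 *                                            note whether the creator is a
 *                                            CPU-bound scheduler
 *   parent: create the thread               the child inherits the blocked
 *                                            signal mask
 *   child : thr_create_prepare_child(tcdp);  disable FP exceptions, unbind
 *                                            from the creator's CPU
 *   parent: thr_create_cleanup(tcdp);        restore the saved sigmask and
 *                                            free tcdp
 *
 * so newly created threads never have SIGINT, SIGCHLD or SIGUSR1 unblocked,
 * and those signals keep being delivered to the threads that expect them.
 */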
  403. void
  404. erts_sys_pre_init(void)
  405. {
  406. erts_printf_add_cr_to_stdout = 1;
  407. erts_printf_add_cr_to_stderr = 1;
  408. #ifdef USE_THREADS
  409. {
  410. erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
  411. eid.alloc = ethr_internal_alloc;
  412. eid.realloc = ethr_internal_realloc;
  413. eid.free = ethr_internal_free;
  414. eid.thread_create_child_func = thr_create_prepare_child;
  415. /* Before creation in parent */
  416. eid.thread_create_prepare_func = thr_create_prepare;
  417. /* After creation in parent */
  418. eid.thread_create_parent_func = thr_create_cleanup;
  419. #ifdef ERTS_THR_HAVE_SIG_FUNCS
  420. sigemptyset(&thr_create_sigmask);
  421. sigaddset(&thr_create_sigmask, SIGINT); /* block interrupt */
  422. sigaddset(&thr_create_sigmask, SIGCHLD); /* block child signals */
  423. sigaddset(&thr_create_sigmask, SIGUSR1); /* block user defined signal */
  424. #endif
  425. erts_thr_init(&eid);
  426. report_exit_list = NULL;
  427. #ifdef ERTS_ENABLE_LOCK_COUNT
  428. erts_lcnt_init();
  429. #endif
  430. #if CHLDWTHR || defined(ERTS_SMP)
  431. erts_mtx_init(&chld_stat_mtx, "child_status");
  432. #endif
  433. #if CHLDWTHR
  434. #ifndef ERTS_SMP
  435. report_exit_transit_list = NULL;
  436. #endif
  437. erts_cnd_init(&chld_stat_cnd);
  438. children_alive = 0;
  439. #endif
  440. }
  441. #ifdef ERTS_SMP
  442. erts_smp_atomic_init(&erts_break_requested, 0);
  443. erts_smp_atomic_init(&erts_got_sigusr1, 0);
  444. erts_smp_atomic_init(&have_prepared_crash_dump, 0);
  445. #else
  446. erts_break_requested = 0;
  447. erts_got_sigusr1 = 0;
  448. have_prepared_crash_dump = 0;
  449. #endif
  450. #if !CHLDWTHR && !defined(ERTS_SMP)
  451. children_died = 0;
  452. #endif
  453. #endif /* USE_THREADS */
  454. erts_smp_atomic_init(&sys_misc_mem_sz, 0);
  455. erts_smp_rwmtx_init(&environ_rwmtx, "environ");
  456. }
  457. void
  458. erl_sys_init(void)
  459. {
  460. #if !DISABLE_VFORK
  461. int res;
  462. char bindir[MAXPATHLEN];
  463. size_t bindirsz = sizeof(bindir);
  464. Uint csp_path_sz;
  465. res = erts_sys_getenv("BINDIR", bindir, &bindirsz);
  466. if (res != 0) {
  467. if (res < 0)
  468. erl_exit(-1,
  469. "Environment variable BINDIR is not set\n");
  470. if (res > 0)
  471. erl_exit(-1,
  472. "Value of environment variable BINDIR is too large\n");
  473. }
  474. if (bindir[0] != DIR_SEPARATOR_CHAR)
  475. erl_exit(-1,
  476. "Environment variable BINDIR does not contain an"
  477. " absolute path\n");
  478. csp_path_sz = (strlen(bindir)
  479. + 1 /* DIR_SEPARATOR_CHAR */
  480. + sizeof(CHILD_SETUP_PROG_NAME)
  481. + 1);
  482. child_setup_prog = erts_alloc(ERTS_ALC_T_CS_PROG_PATH, csp_path_sz);
  483. erts_smp_atomic_add(&sys_misc_mem_sz, csp_path_sz);
  484. sprintf(child_setup_prog,
  485. "%s%c%s",
  486. bindir,
  487. DIR_SEPARATOR_CHAR,
  488. CHILD_SETUP_PROG_NAME);
  489. #endif
  490. #ifdef USE_SETLINEBUF
  491. setlinebuf(stdout);
  492. #else
  493. setvbuf(stdout, (char *)NULL, _IOLBF, BUFSIZ);
  494. #endif
  495. erts_sys_init_float();
  496. /* we save this so the break handler can set and reset it properly */
  497. /* also so that we can reset on exit (break handler or not) */
  498. if (isatty(0)) {
  499. tcgetattr(0,&initial_tty_mode);
  500. }
  501. tzset(); /* Required at least for NetBSD with localtime_r() */
  502. }
  503. /* signal handling */
  504. #ifdef SIG_SIGSET /* Old SysV */
  505. RETSIGTYPE (*sys_sigset(sig, func))()
  506. int sig;
  507. RETSIGTYPE (*func)();
  508. {
  509. return(sigset(sig, func));
  510. }
  511. void sys_sigblock(int sig)
  512. {
  513. sighold(sig);
  514. }
  515. void sys_sigrelease(int sig)
  516. {
  517. sigrelse(sig);
  518. }
  519. #else /* !SIG_SIGSET */
  520. #ifdef SIG_SIGNAL /* Old BSD */
  521. RETSIGTYPE (*sys_sigset(sig, func))(int, int)
  522. int sig;
  523. RETSIGTYPE (*func)();
  524. {
  525. return(signal(sig, func));
  526. }
  527. sys_sigblock(int sig)
  528. {
  529. sigblock(sig);
  530. }
  531. sys_sigrelease(int sig)
  532. {
  533. sigsetmask(sigblock(0) & ~sigmask(sig));
  534. }
  535. #else /* !SIG_SIGNAL */ /* The True Way - POSIX!:-) */
  536. RETSIGTYPE (*sys_sigset(int sig, RETSIGTYPE (*func)(int)))(int)
  537. {
  538. struct sigaction act, oact;
  539. sigemptyset(&act.sa_mask);
  540. act.sa_flags = 0;
  541. act.sa_handler = func;
  542. sigaction(sig, &act, &oact);
  543. return(oact.sa_handler);
  544. }
  545. #ifdef USE_THREADS
  546. #undef sigprocmask
  547. #define sigprocmask erts_thr_sigmask
  548. #endif
  549. void sys_sigblock(int sig)
  550. {
  551. sigset_t mask;
  552. sigemptyset(&mask);
  553. sigaddset(&mask, sig);
  554. sigprocmask(SIG_BLOCK, &mask, (sigset_t *)NULL);
  555. }
  556. void sys_sigrelease(int sig)
  557. {
  558. sigset_t mask;
  559. sigemptyset(&mask);
  560. sigaddset(&mask, sig);
  561. sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL);
  562. }
  563. #endif /* !SIG_SIGNAL */
  564. #endif /* !SIG_SIGSET */
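/*
 * Summary of the portability layer above (behaviour per branch):
 *
 *   SIG_SIGSET (old SysV):  sys_sigset()     -> sigset()
 *                           sys_sigblock()   -> sighold()
 *                           sys_sigrelease() -> sigrelse()
 *   SIG_SIGNAL (old BSD):   sys_sigset()     -> signal()
 *                           sys_sigblock()   -> sigblock()
 *                           sys_sigrelease() -> sigsetmask(sigblock(0) & ~sigmask(sig))
 *   default (POSIX):        sys_sigset()     -> sigaction()
 *                           sys_sigblock()/sys_sigrelease()
 *                                            -> sigprocmask(SIG_BLOCK/SIG_UNBLOCK)
 *                              (sigprocmask is remapped to erts_thr_sigmask
 *                               when USE_THREADS is defined)
 *
 * Typical use elsewhere in this file: sys_sigset(SIGCHLD, onchld) to install
 * a handler, and sys_sigblock(SIGCHLD)/sys_sigrelease(SIGCHLD) to bracket
 * regions where child status must not change underneath us.
 */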
  565. #if (0) /* not used? -- gordon */
  566. static void (*break_func)();
  567. static RETSIGTYPE break_handler(int sig)
  568. {
  569. #ifdef QNX
  570. /* Turn off SIGCHLD during break processing */
  571. sys_sigblock(SIGCHLD);
  572. #endif
  573. (*break_func)();
  574. #ifdef QNX
  575. sys_sigrelease(SIGCHLD);
  576. #endif
  577. }
  578. #endif /* 0 */
  579. static ERTS_INLINE void
  580. prepare_crash_dump(void)
  581. {
  582. int i, max;
  583. char env[21]; /* enough to hold any 64-bit integer */
  584. size_t envsz;
  585. if (ERTS_PREPARED_CRASH_DUMP)
  586. return; /* We have already been called */
  587. /* Make sure we unregister at epmd (unknown fd) and get at least
  588. one free file descriptor (for erl_crash.dump) */
  589. max = max_files;
  590. if (max < 1024)
  591. max = 1024;
  592. for (i = 3; i < max; i++) {
  593. #if defined(ERTS_SMP)
  594. /* We don't want to close the signal notification pipe... */
  595. if (i == sig_notify_fds[0] || i == sig_notify_fds[1])
  596. continue;
  597. #elif defined(USE_THREADS)
  598. /* We don't want to close the async notification pipe... */
  599. if (i == async_fd[0] || i == async_fd[1])
  600. continue;
  601. #endif
  602. close(i);
  603. }
  604. envsz = sizeof(env);
  605. i = erts_sys_getenv("ERL_CRASH_DUMP_NICE", env, &envsz);
  606. if (i >= 0) {
  607. int nice_val;
  608. nice_val = i != 0 ? 0 : atoi(env);
  609. if (nice_val > 39) {
  610. nice_val = 39;
  611. }
  612. erts_silence_warn_unused_result(nice(nice_val));
  613. }
  614. envsz = sizeof(env);
  615. i = erts_sys_getenv("ERL_CRASH_DUMP_SECONDS", env, &envsz);
  616. if (i >= 0) {
  617. unsigned sec;
  618. sec = (unsigned) i != 0 ? 0 : atoi(env);
  619. alarm(sec);
  620. }
  621. }
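/*
 * The two environment variables consulted above are the user-facing knobs
 * for crash-dump behaviour (the values below are only examples):
 *
 *   ERL_CRASH_DUMP_NICE=10     nice() the emulator before writing the dump
 *                              (values above 39 are clamped to 39), so the
 *                              dump does not monopolize the machine;
 *   ERL_CRASH_DUMP_SECONDS=5   arm alarm() for that many seconds, so an
 *                              emulator that hangs while dumping is
 *                              terminated by SIGALRM instead of lingering.
 *
 * Typical use:  env ERL_CRASH_DUMP_SECONDS=5 erl ...
 */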
  622. void
  623. erts_sys_prepare_crash_dump(void)
  624. {
  625. prepare_crash_dump();
  626. }
  627. static ERTS_INLINE void
  628. break_requested(void)
  629. {
  630. /*
  631. * just set a flag - checked for and handled by
  632. * scheduler threads erts_check_io() (not signal handler).
  633. */
  634. #ifdef DEBUG
  635. fprintf(stderr,"break!\n");
  636. #endif
  637. if (ERTS_BREAK_REQUESTED)
  638. erl_exit(ERTS_INTR_EXIT, "");
  639. ERTS_SET_BREAK_REQUESTED;
  640. ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
  641. }
  642. /* set up signal handlers for break and quit */
  643. #if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
  644. static RETSIGTYPE request_break(void)
  645. #else
  646. static RETSIGTYPE request_break(int signum)
  647. #endif
  648. {
  649. #ifdef ERTS_SMP
  650. smp_sig_notify('I');
  651. #else
  652. break_requested();
  653. #endif
  654. }
  655. static ERTS_INLINE void
  656. sigusr1_exit(void)
  657. {
  658. /* We do this at interrupt level, since the main reason for
  659. wanting to generate a crash dump in this way is that the emulator
  660. is hung somewhere, so it won't be able to poll any flag we set here.
  661. */
  662. ERTS_SET_GOT_SIGUSR1;
  663. prepare_crash_dump();
  664. erl_exit(1, "Received SIGUSR1\n");
  665. }
  666. #ifdef ETHR_UNUSABLE_SIGUSRX
  667. #warning "Unusable SIGUSR1 & SIGUSR2. Disabling use of these signals"
  668. #endif
  669. #ifndef ETHR_UNUSABLE_SIGUSRX
  670. #if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
  671. static RETSIGTYPE user_signal1(void)
  672. #else
  673. static RETSIGTYPE user_signal1(int signum)
  674. #endif
  675. {
  676. #ifdef ERTS_SMP
  677. smp_sig_notify('1');
  678. #else
  679. sigusr1_exit();
  680. #endif
  681. }
  682. #ifdef QUANTIFY
  683. #if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
  684. static RETSIGTYPE user_signal2(void)
  685. #else
  686. static RETSIGTYPE user_signal2(int signum)
  687. #endif
  688. {
  689. #ifdef ERTS_SMP
  690. smp_sig_notify('2');
  691. #else
  692. quantify_save_data();
  693. #endif
  694. }
  695. #endif
  696. #endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
  697. static void
  698. quit_requested(void)
  699. {
  700. erl_exit(ERTS_INTR_EXIT, "");
  701. }
  702. #if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
  703. static RETSIGTYPE do_quit(void)
  704. #else
  705. static RETSIGTYPE do_quit(int signum)
  706. #endif
  707. {
  708. #ifdef ERTS_SMP
  709. smp_sig_notify('Q');
  710. #else
  711. quit_requested();
  712. #endif
  713. }
  714. /* Disable break */
  715. void erts_set_ignore_break(void) {
  716. sys_sigset(SIGINT, SIG_IGN);
  717. sys_sigset(SIGQUIT, SIG_IGN);
  718. sys_sigset(SIGTSTP, SIG_IGN);
  719. }
  720. /* Don't use ctrl-c for break handler but let it be
  721. used by the shell instead (see user_drv.erl) */
  722. void erts_replace_intr(void) {
  723. struct termios mode;
  724. if (isatty(0)) {
  725. tcgetattr(0, &mode);
  726. /* here's an example of how to replace ctrl-c with ctrl-u */
  727. /* mode.c_cc[VKILL] = 0;
  728. mode.c_cc[VINTR] = CKILL; */
  729. mode.c_cc[VINTR] = 0; /* disable ctrl-c */
  730. tcsetattr(0, TCSANOW, &mode);
  731. replace_intr = 1;
  732. }
  733. }
  734. void init_break_handler(void)
  735. {
  736. sys_sigset(SIGINT, request_break);
  737. #ifndef ETHR_UNUSABLE_SIGUSRX
  738. sys_sigset(SIGUSR1, user_signal1);
  739. #ifdef QUANTIFY
  740. sys_sigset(SIGUSR2, user_signal2);
  741. #endif
  742. #endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
  743. sys_sigset(SIGQUIT, do_quit);
  744. }
  745. int sys_max_files(void)
  746. {
  747. return(max_files);
  748. }
  749. static void block_signals(void)
  750. {
  751. #if !CHLDWTHR
  752. sys_sigblock(SIGCHLD);
  753. #endif
  754. #ifndef ERTS_SMP
  755. sys_sigblock(SIGINT);
  756. #ifndef ETHR_UNUSABLE_SIGUSRX
  757. sys_sigblock(SIGUSR1);
  758. #endif
  759. #endif
  760. }
  761. static void unblock_signals(void)
  762. {
  763. /* Update erl_child_setup.c if changed */
  764. #if !CHLDWTHR
  765. sys_sigrelease(SIGCHLD);
  766. #endif
  767. #ifndef ERTS_SMP
  768. sys_sigrelease(SIGINT);
  769. #ifndef ETHR_UNUSABLE_SIGUSRX
  770. sys_sigrelease(SIGUSR1);
  771. #endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
  772. #endif
  773. }
  774. /************************** Time stuff **************************/
  775. #ifdef HAVE_GETHRTIME
  776. #ifdef GETHRTIME_WITH_CLOCK_GETTIME
  777. SysHrTime sys_gethrtime(void)
  778. {
  779. struct timespec ts;
  780. long long result;
  781. if (clock_gettime(CLOCK_MONOTONIC,&ts) != 0) {
  782. erl_exit(1,"Fatal, could not get clock_monotonic value!, "
  783. "errno = %d\n", errno);
  784. }
  785. result = ((long long) ts.tv_sec) * 1000000000LL +
  786. ((long long) ts.tv_nsec);
  787. return (SysHrTime) result;
  788. }
  789. #endif
  790. #endif
  791. /************************** OS info *******************************/
  792. /* Used by erlang:info/1. */
  793. /* (This code was formerly in drv.XXX/XXX_os_drv.c) */
  794. char os_type[] = "unix";
  795. static int
  796. get_number(char **str_ptr)
  797. {
  798. char* s = *str_ptr; /* Pointer to beginning of string. */
  799. char* dot; /* Pointer to dot in string or NULL. */
  800. if (!isdigit((int) *s))
  801. return 0;
  802. if ((dot = strchr(s, '.')) == NULL) {
  803. *str_ptr = s+strlen(s);
  804. return atoi(s);
  805. } else {
  806. *dot = '\0';
  807. *str_ptr = dot+1;
  808. return atoi(s);
  809. }
  810. }
  811. void
  812. os_flavor(char* namebuf, /* Where to return the name. */
  813. unsigned size) /* Size of name buffer. */
  814. {
  815. static int called = 0;
  816. static struct utsname uts; /* Information about the system. */
  817. if (!called) {
  818. char* s;
  819. (void) uname(&uts);
  820. called = 1;
  821. for (s = uts.sysname; *s; s++) {
  822. if (isupper((int) *s)) {
  823. *s = tolower((int) *s);
  824. }
  825. }
  826. }
  827. strcpy(namebuf, uts.sysname);
  828. }
  829. void
  830. os_version(pMajor, pMinor, pBuild)
  831. int* pMajor; /* Pointer to major version. */
  832. int* pMinor; /* Pointer to minor version. */
  833. int* pBuild; /* Pointer to build number. */
  834. {
  835. struct utsname uts; /* Information about the system. */
  836. char* release; /* Pointer to the release string:
  837. * X.Y or X.Y.Z.
  838. */
  839. (void) uname(&uts);
  840. release = uts.release;
  841. *pMajor = get_number(&release);
  842. *pMinor = get_number(&release);
  843. *pBuild = get_number(&release);
  844. }
  845. void init_getenv_state(GETENV_STATE *state)
  846. {
  847. erts_smp_rwmtx_rlock(&environ_rwmtx);
  848. *state = NULL;
  849. }
  850. char *getenv_string(GETENV_STATE *state0)
  851. {
  852. char **state = (char **) *state0;
  853. char *cp;
  854. ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
  855. if (state == NULL)
  856. state = environ;
  857. cp = *state++;
  858. *state0 = (GETENV_STATE) state;
  859. return cp;
  860. }
  861. void fini_getenv_state(GETENV_STATE *state)
  862. {
  863. *state = NULL;
  864. erts_smp_rwmtx_runlock(&environ_rwmtx);
  865. }
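/*
 * The three functions above form a small iteration protocol over the
 * process environment, holding environ_rwmtx read-locked for the whole
 * walk. A caller is expected to use them roughly like this (sketch):
 *
 *   GETENV_STATE state;
 *   char *p;
 *   init_getenv_state(&state);               takes the read lock
 *   while ((p = getenv_string(&state)) != NULL)
 *       ...                                  p is one "NAME=VALUE" string
 *   fini_getenv_state(&state);               releases the read lock
 *
 * The lock keeps the walk consistent with code elsewhere in this file that
 * modifies the environment under the write lock.
 */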
  866. /************************** Port I/O *******************************/
  867. /* I. Common stuff */
  868. /*
  869. * Decreasing ERTS_SYS_READ_BUF_SZ below 16384 is not allowed.
  870. */
  871. /* II. The spawn/fd/vanilla drivers */
  872. #define ERTS_SYS_READ_BUF_SZ (64*1024)
  873. /* This data is shared by these drivers - initialized by spawn_init() */
  874. static struct driver_data {
  875. int port_num, ofd, packet_bytes;
  876. ErtsSysReportExit *report_exit;
  877. int pid;
  878. int alive;
  879. int status;
  880. } *driver_data; /* indexed by fd */
  881. /* Driver interfaces */
  882. static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
  883. static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
  884. static int fd_control(ErlDrvData, unsigned int, char *, int, char **, int);
  885. static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*);
  886. static int spawn_init(void);
  887. static void fd_stop(ErlDrvData);
  888. static void stop(ErlDrvData);
  889. static void ready_input(ErlDrvData, ErlDrvEvent);
  890. static void ready_output(ErlDrvData, ErlDrvEvent);
  891. static void output(ErlDrvData, char*, int);
  892. static void outputv(ErlDrvData, ErlIOVec*);
  893. static void stop_select(ErlDrvEvent, void*);
  894. struct erl_drv_entry spawn_driver_entry = {
  895. spawn_init,
  896. spawn_start,
  897. stop,
  898. output,
  899. ready_input,
  900. ready_output,
  901. "spawn",
  902. NULL,
  903. NULL,
  904. NULL,
  905. NULL,
  906. NULL,
  907. NULL,
  908. NULL,
  909. NULL,
  910. NULL,
  911. ERL_DRV_EXTENDED_MARKER,
  912. ERL_DRV_EXTENDED_MAJOR_VERSION,
  913. ERL_DRV_EXTENDED_MINOR_VERSION,
  914. ERL_DRV_FLAG_USE_PORT_LOCKING,
  915. NULL, NULL,
  916. stop_select
  917. };
  918. struct erl_drv_entry fd_driver_entry = {
  919. NULL,
  920. fd_start,
  921. fd_stop,
  922. output,
  923. ready_input,
  924. ready_output,
  925. "fd",
  926. NULL,
  927. NULL,
  928. fd_control,
  929. NULL,
  930. outputv,
  931. NULL, /* ready_async */
  932. NULL, /* flush */
  933. NULL, /* call */
  934. NULL, /* event */
  935. ERL_DRV_EXTENDED_MARKER,
  936. ERL_DRV_EXTENDED_MAJOR_VERSION,
  937. ERL_DRV_EXTENDED_MINOR_VERSION,
  938. 0, /* ERL_DRV_FLAGs */
  939. NULL, /* handle2 */
  940. NULL, /* process_exit */
  941. stop_select
  942. };
  943. struct erl_drv_entry vanilla_driver_entry = {
  944. NULL,
  945. vanilla_start,
  946. stop,
  947. output,
  948. ready_input,
  949. ready_output,
  950. "vanilla",
  951. NULL,
  952. NULL,
  953. NULL,
  954. NULL,
  955. NULL,
  956. NULL,
  957. NULL, /* flush */
  958. NULL, /* call */
  959. NULL, /* event */
  960. ERL_DRV_EXTENDED_MARKER,
  961. ERL_DRV_EXTENDED_MAJOR_VERSION,
  962. ERL_DRV_EXTENDED_MINOR_VERSION,
  963. 0, /* ERL_DRV_FLAGs */
  964. NULL, /* handle2 */
  965. NULL, /* process_exit */
  966. stop_select
  967. };
  968. #if defined(USE_THREADS) && !defined(ERTS_SMP)
  969. static int async_drv_init(void);
  970. static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
  971. static void async_drv_stop(ErlDrvData);
  972. static void async_drv_input(ErlDrvData, ErlDrvEvent);
  973. /* INTERNAL use only */
  974. struct erl_drv_entry async_driver_entry = {
  975. async_drv_init,
  976. async_drv_start,
  977. async_drv_stop,
  978. NULL,
  979. async_drv_input,
  980. NULL,
  981. "async",
  982. NULL,
  983. NULL,
  984. NULL,
  985. NULL,
  986. NULL,
  987. NULL
  988. };
  989. #endif
  990. /* Handle SIGCHLD signals. */
  991. #if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
  992. static RETSIGTYPE onchld(void)
  993. #else
  994. static RETSIGTYPE onchld(int signum)
  995. #endif
  996. {
  997. #if CHLDWTHR
  998. ASSERT(0); /* We should *never* catch a SIGCHLD signal */
  999. #elif defined(ERTS_SMP)
  1000. smp_sig_notify('C');
  1001. #else
  1002. children_died = 1;
  1003. ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
  1004. #endif
  1005. }
  1006. static int set_driver_data(int port_num,
  1007. int ifd,
  1008. int ofd,
  1009. int packet_bytes,
  1010. int read_write,
  1011. int exit_status,
  1012. int pid)
  1013. {
  1014. ErtsSysReportExit *report_exit;
  1015. if (!exit_status)
  1016. report_exit = NULL;
  1017. else {
  1018. report_exit = erts_alloc(ERTS_ALC_T_PRT_REP_EXIT,
  1019. sizeof(ErtsSysReportExit));
  1020. report_exit->next = report_exit_list;
  1021. report_exit->port = erts_port[port_num].id;
  1022. report_exit->pid = pid;
  1023. report_exit->ifd = read_write & DO_READ ? ifd : -1;
  1024. report_exit->ofd = read_write & DO_WRITE ? ofd : -1;
  1025. #if CHLDWTHR && !defined(ERTS_SMP)
  1026. report_exit->status = 0;
  1027. #endif
  1028. report_exit_list = report_exit;
  1029. }
  1030. if (read_write & DO_READ) {
  1031. driver_data[ifd].packet_bytes = packet_bytes;
  1032. driver_data[ifd].port_num = port_num;
  1033. driver_data[ifd].report_exit = report_exit;
  1034. driver_data[ifd].pid = pid;
  1035. driver_data[ifd].alive = 1;
  1036. driver_data[ifd].status = 0;
  1037. if (read_write & DO_WRITE) {
  1038. driver_data[ifd].ofd = ofd;
  1039. if (ifd != ofd)
  1040. driver_data[ofd] = driver_data[ifd]; /* structure copy */
  1041. } else { /* DO_READ only */
  1042. driver_data[ifd].ofd = -1;
  1043. }
  1044. (void) driver_select(port_num, ifd, (ERL_DRV_READ|ERL_DRV_USE), 1);
  1045. return(ifd);
  1046. } else { /* DO_WRITE only */
  1047. driver_data[ofd].packet_bytes = packet_bytes;
  1048. driver_data[ofd].port_num = port_num;
  1049. driver_data[ofd].report_exit = report_exit;
  1050. driver_data[ofd].ofd = ofd;
  1051. driver_data[ofd].pid = pid;
  1052. driver_data[ofd].alive = 1;
  1053. driver_data[ofd].status = 0;
  1054. return(ofd);
  1055. }
  1056. }
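/*
 * Note on the bookkeeping above: driver_data[] (like fd_data[]) is indexed
 * by file descriptor. For a read+write port both driver_data[ifd] and
 * driver_data[ofd] hold a copy of the same record, and the value returned
 * (ifd for readable ports, otherwise ofd) is what the driver start
 * callbacks cast to ErlDrvData, so later callbacks can get back to the
 * record with driver_data[(int)(long)drv_data].
 */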
  1057. static int spawn_init()
  1058. {
  1059. int i;
  1060. #if CHLDWTHR
  1061. erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
  1062. thr_opts.detached = 0;
  1063. thr_opts.suggested_stack_size = 0; /* Smallest possible */
  1064. #endif
  1065. sys_sigset(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
  1066. driver_data = (struct driver_data *)
  1067. erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
  1068. erts_smp_atomic_add(&sys_misc_mem_sz,
  1069. max_files * sizeof(struct driver_data));
  1070. for (i = 0; i < max_files; i++)
  1071. driver_data[i].pid = -1;
  1072. #if CHLDWTHR
  1073. sys_sigblock(SIGCHLD);
  1074. #endif
  1075. sys_sigset(SIGCHLD, onchld); /* Reap children */
  1076. #if CHLDWTHR
  1077. erts_thr_create(&child_waiter_tid, child_waiter, NULL, &thr_opts);
  1078. #endif
  1079. return 1;
  1080. }
  1081. static void close_pipes(int ifd[2], int ofd[2], int read_write)
  1082. {
  1083. if (read_write & DO_READ) {
  1084. (void) close(ifd[0]);
  1085. (void) close(ifd[1]);
  1086. }
  1087. if (read_write & DO_WRITE) {
  1088. (void) close(ofd[0]);
  1089. (void) close(ofd[1]);
  1090. }
  1091. }
  1092. static void init_fd_data(int fd, int prt)
  1093. {
  1094. fd_data[fd].buf = NULL;
  1095. fd_data[fd].cpos = NULL;
  1096. fd_data[fd].remain = 0;
  1097. fd_data[fd].sz = 0;
  1098. fd_data[fd].psz = 0;
  1099. }
  1100. static char **build_unix_environment(char *block)
  1101. {
  1102. int i;
  1103. int j;
  1104. int len;
  1105. char *cp;
  1106. char **cpp;
  1107. char** old_env;
  1108. ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
  1109. cp = block;
  1110. len = 0;
  1111. while (*cp != '\0') {
  1112. cp += strlen(cp) + 1;
  1113. len++;
  1114. }
  1115. old_env = environ;
  1116. while (*old_env++ != NULL) {
  1117. len++;
  1118. }
  1119. cpp = (char **) erts_alloc_fnf(ERTS_ALC_T_ENVIRONMENT,
  1120. sizeof(char *) * (len+1));
  1121. if (cpp == NULL) {
  1122. return NULL;
  1123. }
  1124. cp = block;
  1125. len = 0;
  1126. while (*cp != '\0') {
  1127. cpp[len] = cp;
  1128. cp += strlen(cp) + 1;
  1129. len++;
  1130. }
  1131. i = len;
  1132. for (old_env = environ; *old_env; old_env++) {
  1133. char* old = *old_env;
  1134. for (j = 0; j < len; j++) {
  1135. char *s, *t;
  1136. s = cpp[j];
  1137. t = old;
  1138. while (*s == *t && *s != '=') {
  1139. s++, t++;
  1140. }
  1141. if (*s == '=' && *t == '=') {
  1142. break;
  1143. }
  1144. }
  1145. if (j == len) { /* New version not found */
  1146. cpp[len++] = old;
  1147. }
  1148. }
  1149. for (j = 0; j < i; j++) {
  1150. if (cpp[j][strlen(cpp[j])-1] == '=') {
  1151. cpp[j] = cpp[--len];
  1152. }
  1153. }
  1154. cpp[len] = NULL;
  1155. return cpp;
  1156. }
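/*
 * build_unix_environment() expects 'block' in the form passed down for the
 * port's environment option: a sequence of "NAME=VALUE\0" strings
 * terminated by an extra '\0'. The result is the inherited environ merged
 * with the block, where the block wins for names present in both, and
 * block entries of the form "NAME=" (trailing '=', empty value) are
 * dropped from the result, which is how a variable is unset for the child.
 * The returned vector is freed by the caller after the fork/vfork (see
 * spawn_start() below).
 */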
  1157. /*
  1158. [arndt] In most Unix systems, including Solaris 2.5, 'fork' allocates memory
  1159. in swap space for the child of a 'fork', whereas 'vfork' does not do this.
  1160. The natural call to use here is therefore 'vfork'. Due to a bug in
  1161. 'vfork' in Solaris 2.5 (apparently fixed in 2.6), using 'vfork'
  1162. can be dangerous in what seems to be these circumstances:
  1163. If the child code under a vfork sets the signal action to SIG_DFL
  1164. (or SIG_IGN)
  1165. for any signal which was previously set to a signal handler, the
  1166. state of the parent is clobbered, so that the later arrival of
  1167. such a signal yields a sigsegv in the parent. If the signal was
  1168. not set to a signal handler, but ignored, all seems to work.
  1169. If you change the forking code below, beware of this.
  1170. */
  1171. static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
  1172. {
  1173. #define CMD_LINE_PREFIX_STR "exec "
  1174. #define CMD_LINE_PREFIX_STR_SZ (sizeof(CMD_LINE_PREFIX_STR) - 1)
  1175. int ifd[2], ofd[2], len, pid, i;
  1176. char **volatile new_environ; /* volatile since a vfork() then cannot
  1177. cause 'new_environ' to be clobbered
  1178. in the parent process. */
  1179. int saved_errno;
  1180. long res;
  1181. char *cmd_line;
  1182. #ifndef QNX
  1183. int unbind;
  1184. #endif
  1185. #if !DISABLE_VFORK
  1186. int no_vfork;
  1187. size_t no_vfork_sz = sizeof(no_vfork);
  1188. no_vfork = (erts_sys_getenv("ERL_NO_VFORK",
  1189. (char *) &no_vfork,
  1190. &no_vfork_sz) >= 0);
  1191. #endif
  1192. switch (opts->read_write) {
  1193. case DO_READ:
  1194. if (pipe(ifd) < 0)
  1195. return ERL_DRV_ERROR_ERRNO;
  1196. if (ifd[0] >= max_files) {
  1197. close_pipes(ifd, ofd, opts->read_write);
  1198. errno = EMFILE;
  1199. return ERL_DRV_ERROR_ERRNO;
  1200. }
  1201. ofd[1] = -1; /* keep purify happy */
  1202. break;
  1203. case DO_WRITE:
  1204. if (pipe(ofd) < 0) return ERL_DRV_ERROR_ERRNO;
  1205. if (ofd[1] >= max_files) {
  1206. close_pipes(ifd, ofd, opts->read_write);
  1207. errno = EMFILE;
  1208. return ERL_DRV_ERROR_ERRNO;
  1209. }
  1210. ifd[0] = -1; /* keep purify happy */
  1211. break;
  1212. case DO_READ|DO_WRITE:
  1213. if (pipe(ifd) < 0) return ERL_DRV_ERROR_ERRNO;
  1214. errno = EMFILE; /* default for next two conditions */
  1215. if (ifd[0] >= max_files || pipe(ofd) < 0) {
  1216. close_pipes(ifd, ofd, DO_READ);
  1217. return ERL_DRV_ERROR_ERRNO;
  1218. }
  1219. if (ofd[1] >= max_files) {
  1220. close_pipes(ifd, ofd, opts->read_write);
  1221. errno = EMFILE;
  1222. return ERL_DRV_ERROR_ERRNO;
  1223. }
  1224. break;
  1225. default:
  1226. ASSERT(0);
  1227. return ERL_DRV_ERROR_GENERAL;
  1228. }
  1229. if (opts->spawn_type == ERTS_SPAWN_EXECUTABLE) {
  1230. /* started with spawn_executable, not with spawn */
  1231. len = strlen(name);
  1232. cmd_line = (char *) erts_alloc_fnf(ERTS_ALC_T_TMP, len + 1);
  1233. if (!cmd_line) {
  1234. close_pipes(ifd, ofd, opts->read_write);
  1235. errno = ENOMEM;
  1236. return ERL_DRV_ERROR_ERRNO;
  1237. }
  1238. memcpy((void *) cmd_line,(void *) name, len);
  1239. cmd_line[len] = '\0';
  1240. if (access(cmd_line,X_OK) != 0) {
  1241. int save_errno = errno;
  1242. erts_free(ERTS_ALC_T_TMP, cmd_line);
  1243. errno = save_errno;
  1244. return ERL_DRV_ERROR_ERRNO;
  1245. }
  1246. } else {
  1247. /* make the string suitable for giving to "sh" */
  1248. len = strlen(name);
  1249. cmd_line = (char *) erts_alloc_fnf(ERTS_ALC_T_TMP,
  1250. CMD_LINE_PREFIX_STR_SZ + len + 1);
  1251. if (!cmd_line) {
  1252. close_pipes(ifd, ofd, opts->read_write);
  1253. errno = ENOMEM;
  1254. return ERL_DRV_ERROR_ERRNO;
  1255. }
  1256. memcpy((void *) cmd_line,
  1257. (void *) CMD_LINE_PREFIX_STR,
  1258. CMD_LINE_PREFIX_STR_SZ);
  1259. memcpy((void *) (cmd_line + CMD_LINE_PREFIX_STR_SZ), (void *) name, len);
  1260. cmd_line[CMD_LINE_PREFIX_STR_SZ + len] = '\0';
  1261. }
  1262. erts_smp_rwmtx_rlock(&environ_rwmtx);
  1263. if (opts->envir == NULL) {
  1264. new_environ = environ;
  1265. } else if ((new_environ = build_unix_environment(opts->envir)) == NULL) {
  1266. erts_smp_rwmtx_runlock(&environ_rwmtx);
  1267. erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
  1268. errno = ENOMEM;
  1269. return ERL_DRV_ERROR_ERRNO;
  1270. }
  1271. #ifndef QNX
  1272. /* Block child from SIGINT and SIGUSR1. Must be before fork()
  1273. to be safe. */
  1274. block_signals();
  1275. CHLD_STAT_LOCK;
  1276. unbind = erts_is_scheduler_bound(NULL);
  1277. if (unbind)
  1278. erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
  1279. #if !DISABLE_VFORK
  1280. /* See fork/vfork discussion before this function. */
  1281. if (no_vfork) {
  1282. #endif
  1283. DEBUGF(("Using fork\n"));
  1284. pid = fork();
  1285. if (pid == 0) {
  1286. /* The child! Setup child... */
  1287. if (unbind && erts_unbind_from_cpu(erts_cpuinfo) != 0)
  1288. goto child_error;
  1289. /* OBSERVE!
  1290. * Keep child setup after vfork() (implemented below and in
  1291. * erl_child_setup.c) up to date if changes are made here.
  1292. */
  1293. if (opts->use_stdio) {
  1294. if (opts->read_write & DO_READ) {
  1295. /* stdout for process */
  1296. if (dup2(ifd[1], 1) < 0)
  1297. goto child_error;
  1298. if(opts->redir_stderr)
  1299. /* stderr for process */
  1300. if (dup2(ifd[1], 2) < 0)
  1301. goto child_error;
  1302. }
  1303. if (opts->read_write & DO_WRITE)
  1304. /* stdin for process */
  1305. if (dup2(ofd[0], 0) < 0)
  1306. goto child_error;
  1307. }
  1308. else { /* XXX will fail if ofd[0] == 4 (unlikely..) */
  1309. if (opts->read_write & DO_READ)
  1310. if (dup2(ifd[1], 4) < 0)
  1311. goto child_error;
  1312. if (opts->read_write & DO_WRITE)
  1313. if (dup2(ofd[0], 3) < 0)
  1314. goto child_error;
  1315. }
  1316. for (i = opts->use_stdio ? 3 : 5; i < max_files; i++)
  1317. (void) close(i);
  1318. if (opts->wd && chdir(opts->wd) < 0)
  1319. goto child_error;
  1320. #if defined(USE_SETPGRP_NOARGS) /* SysV */
  1321. (void) setpgrp();
  1322. #elif defined(USE_SETPGRP) /* BSD */
  1323. (void) setpgrp(0, getpid());
  1324. #else /* POSIX */
  1325. (void) setsid();
  1326. #endif
  1327. unblock_signals();
  1328. if (opts->spawn_type == ERTS_SPAWN_EXECUTABLE) {
  1329. if (opts->argv == NULL) {
  1330. execle(cmd_line,cmd_line,(char *) NULL, new_environ);
  1331. } else {
  1332. if (opts->argv[0] == erts_default_arg0) {
  1333. opts->argv[0] = cmd_line;
  1334. }
  1335. execve(cmd_line, opts->argv, new_environ);
  1336. if (opts->argv[0] == cmd_line) {
  1337. opts->argv[0] = erts_default_arg0;
  1338. }
  1339. }
  1340. } else {
  1341. execle("/bin/sh", "sh", "-c", cmd_line, (char *) NULL, new_environ);
  1342. }
  1343. child_error:
  1344. _exit(1);
  1345. }
  1346. #if !DISABLE_VFORK
  1347. }
  1348. else { /* Use vfork() */
  1349. char **cs_argv= erts_alloc(ERTS_ALC_T_TMP,(CS_ARGV_NO_OF_ARGS + 1)*
  1350. sizeof(char *));
  1351. char fd_close_range[44]; /* 44 bytes are enough to */
  1352. char dup2_op[CS_ARGV_NO_OF_DUP2_OPS][44]; /* hold any "%d:%d" string */
  1353. /* on a 64-bit machine. */
  1354. /* Setup argv[] for the child setup program (implemented in
  1355. erl_child_setup.c) */
  1356. i = 0;
  1357. if (opts->use_stdio) {
  1358. if (opts->read_write & DO_READ){
  1359. /* stdout for process */
  1360. sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 1);
  1361. if(opts->redir_stderr)
  1362. /* stderr for process */
  1363. sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 2);
  1364. }
  1365. if (opts->read_write & DO_WRITE)
  1366. /* stdin for process */
  1367. sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 0);
  1368. } else { /* XXX will fail if ofd[0] == 4 (unlikely..) */
  1369. if (opts->read_write & DO_READ)
  1370. sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 4);
  1371. if (opts->read_write & DO_WRITE)
  1372. sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 3);
  1373. }
  1374. for (; i < CS_ARGV_NO_OF_DUP2_OPS; i++)
  1375. strcpy(&dup2_op[i][0], "-");
  1376. sprintf(fd_close_range, "%d:%d", opts->use_stdio ? 3 : 5, max_files-1);
  1377. cs_argv[CS_ARGV_PROGNAME_IX] = child_setup_prog;
  1378. cs_argv[CS_ARGV_WD_IX] = opts->wd ? opts->wd : ".";
  1379. cs_argv[CS_ARGV_UNBIND_IX]
  1380. = (unbind ? erts_get_unbind_from_cpu_str(erts_cpuinfo) : "false");
  1381. cs_argv[CS_ARGV_FD_CR_IX] = fd_close_range;
  1382. for (i = 0; i < CS_ARGV_NO_OF_DUP2_OPS; i++)
  1383. cs_argv[CS_ARGV_DUP2_OP_IX(i)] = &dup2_op[i][0];
  1384. if (opts->spawn_type == ERTS_SPAWN_EXECUTABLE) {
  1385. int num = 0;
  1386. int j = 0;
  1387. if (opts->argv != NULL) {
  1388. for(; opts->argv[num] != NULL; ++num)
  1389. ;
  1390. }
  1391. cs_argv = erts_realloc(ERTS_ALC_T_TMP,cs_argv, (CS_ARGV_NO_OF_ARGS + 1 + num + 1) * sizeof(char *));
  1392. cs_argv[CS_ARGV_CMD_IX] = "-";
  1393. cs_argv[CS_ARGV_NO_OF_ARGS] = cmd_line;
  1394. if (opts->argv != NULL) {
  1395. for (;opts->argv[j] != NULL; ++j) {
  1396. if (opts->argv[j] == erts_default_arg0) {
  1397. cs_argv[CS_ARGV_NO_OF_ARGS + 1 + j] = cmd_line;
  1398. } else {
  1399. cs_argv[CS_ARGV_NO_OF_ARGS + 1 + j] = opts->argv[j];
  1400. }
  1401. }
  1402. }
  1403. cs_argv[CS_ARGV_NO_OF_ARGS + 1 + j] = NULL;
  1404. } else {
  1405. cs_argv[CS_ARGV_CMD_IX] = cmd_line; /* Command */
  1406. cs_argv[CS_ARGV_NO_OF_ARGS] = NULL;
  1407. }
  1408. DEBUGF(("Using vfork\n"));
  1409. pid = vfork();
  1410. if (pid == 0) {
  1411. /* The child! */
  1412. /* Observe!
  1413. * OTP-4389: The child setup program (implemented in
  1414. * erl_child_setup.c) will perform the necessary setup of the
  1415. * child before it execs to the user program. This because
  1416. * vfork() only allow an *immediate* execve() or _exit() in the
  1417. * child.
  1418. */
  1419. execve(child_setup_prog, cs_argv, new_environ);
  1420. _exit(1);
  1421. }
  1422. erts_free(ERTS_ALC_T_TMP,cs_argv);
  1423. }
  1424. #endif
  1425. if (unbind)
  1426. erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
  1427. if (pid == -1) {
  1428. saved_errno = errno;
  1429. CHLD_STAT_UNLOCK;
  1430. erts_smp_rwmtx_runlock(&environ_rwmtx);
  1431. erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
  1432. unblock_signals();
  1433. close_pipes(ifd, ofd, opts->read_write);
  1434. errno = saved_errno;
  1435. return ERL_DRV_ERROR_ERRNO;
  1436. }
  1437. #else /* QNX */
  1438. if (opts->use_stdio) {
  1439. if (opts->read_write & DO_READ)
  1440. qnx_spawn_options.iov[1] = ifd[1]; /* stdout for process */
  1441. if (opts->read_write & DO_WRITE)
  1442. qnx_spawn_options.iov[0] = ofd[0]; /* stdin for process */
  1443. }
  1444. else {
  1445. if (opts->read_write & DO_READ)
  1446. qnx_spawn_options.iov[4] = ifd[1];
  1447. if (opts->read_write & DO_WRITE)
  1448. qnx_spawn_options.iov[3] = ofd[0];
  1449. }
  1450. /* Close fds on exec */
  1451. for (i = 3; i < max_files; i++)
  1452. fcntl(i, F_SETFD, 1);
  1453. qnx_spawn_options.flags = _SPAWN_SETSID;
  1454. if ((pid = spawnl(P_NOWAIT, "/bin/sh", "/bin/sh", "-c", cmd_line,
  1455. (char *) 0)) < 0) {
  1456. erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
  1457. reset_qnx_spawn();
  1458. erts_smp_rwmtx_runlock(&environ_rwmtx);
  1459. close_pipes(ifd, ofd, opts->read_write);
  1460. return ERL_DRV_ERROR_GENERAL;
  1461. }
  1462. reset_qnx_spawn();
  1463. #endif /* QNX */
  1464. erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
  1465. if (new_environ != environ)
  1466. erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ);
  1467. if (opts->read_write & DO_READ)
  1468. (void) close(ifd[1]);
  1469. if (opts->read_write & DO_WRITE)
  1470. (void) close(ofd[0]);
  1471. if (opts->read_write & DO_READ) {
  1472. SET_NONBLOCKING(ifd[0]);
  1473. init_fd_data(ifd[0], port_num);
  1474. }
  1475. if (opts->read_write & DO_WRITE) {
  1476. SET_NONBLOCKING(ofd[1]);
  1477. init_fd_data(ofd[1], port_num);
  1478. }
  1479. res = set_driver_data(port_num, ifd[0], ofd[1], opts->packet_bytes,
  1480. opts->read_write, opts->exit_status, pid);
  1481. /* Don't unblock SIGCHLD until now, since the call above must
  1482. first complete putting away the info about our new subprocess. */
  1483. unblock_signals();
  1484. #if CHLDWTHR
  1485. ASSERT(children_alive >= 0);
  1486. if (!(children_alive++))
  1487. CHLD_STAT_SIGNAL; /* Wake up child waiter thread if no children
  1488. were alive before we fork()ed ... */
  1489. #endif
  1490. /* Don't unlock chld_stat_mtx until now, for the same reason as above */
  1491. CHLD_STAT_UNLOCK;
  1492. erts_smp_rwmtx_runlock(&environ_rwmtx);
  1493. return (ErlDrvData)res;
  1494. #undef CMD_LINE_PREFIX_STR
  1495. #undef CMD_LINE_PREFIX_STR_SZ
  1496. }
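/*
 * spawn_start() in summary (a sketch of the happy path, non-QNX):
 *
 *   1. create the pipe pair(s) requested by opts->read_write;
 *   2. build cmd_line ("exec <name>" for spawn, the verbatim path for
 *      spawn_executable) and, if an environment was given, a merged
 *      environment via build_unix_environment();
 *   3. block_signals() + CHLD_STAT_LOCK, then either fork() and do the
 *      dup2/close/setsid work inline, or vfork() and let the external
 *      child_setup program do it (OTP-4389), since vfork() only permits an
 *      immediate execve()/_exit() in the child;
 *   4. close the child's ends of the pipes, make our ends non-blocking and
 *      register them with set_driver_data();
 *   5. only then unblock_signals() and CHLD_STAT_UNLOCK, so a SIGCHLD for
 *      the new child cannot be processed before its bookkeeping exists.
 */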
  1497. #ifdef QNX
  1498. static reset_qnx_spawn()
  1499. {
  1500. int i;
  1501. /* Reset qnx_spawn_options */
  1502. qnx_spawn_options.flags = 0;
  1503. qnx_spawn_options.iov[0] = 0xff;
  1504. qnx_spawn_options.iov[1] = 0xff;
  1505. qnx_spawn_options.iov[2] = 0xff;
  1506. qnx_spawn_options.iov[3] = 0xff;
  1507. }
  1508. #endif
  1509. #define FD_DEF_HEIGHT 24
  1510. #define FD_DEF_WIDTH 80
  1511. /* Control op */
  1512. #define FD_CTRL_OP_GET_WINSIZE 100
  1513. static int fd_get_window_size(int fd, Uint32 *width, Uint32 *height)
  1514. {
  1515. #ifdef TIOCGWINSZ
  1516. struct winsize ws;
  1517. if (ioctl(fd,TIOCGWINSZ,&ws) == 0) {
  1518. *width = (Uint32) ws.ws_col;
  1519. *height = (Uint32) ws.ws_row;
  1520. return 0;
  1521. }
  1522. #endif
  1523. return -1;
  1524. }
  1525. static int fd_control(ErlDrvData drv_data,
  1526. unsigned int command,
  1527. char *buf, int len,
  1528. char **rbuf, int rlen)
  1529. {
  1530. int fd = (int)(long)drv_data;
  1531. char resbuff[2*sizeof(Uint32)];
  1532. switch (command) {
  1533. case FD_CTRL_OP_GET_WINSIZE:
  1534. {
  1535. Uint32 w,h;
  1536. if (fd_get_window_size(fd,&w,&h))
  1537. return 0;
  1538. memcpy(resbuff,&w,sizeof(Uint32));
  1539. memcpy(resbuff+sizeof(Uint32),&h,sizeof(Uint32));
  1540. }
  1541. break;
  1542. default:
  1543. return 0;
  1544. }
  1545. if (rlen < 2*sizeof(Uint32)) {
  1546. *rbuf = driver_alloc(2*sizeof(Uint32));
  1547. }
  1548. memcpy(*rbuf,resbuff,2*sizeof(Uint32));
  1549. return 2*sizeof(Uint32);
  1550. }
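/*
 * FD_CTRL_OP_GET_WINSIZE (op 100) answers with 8 bytes: the terminal width
 * followed by the height, each copied as a native Uint32, or with an empty
 * reply if the window size cannot be determined. From the Erlang side this
 * corresponds to something like port_control(Port, 100, []) on an fd port,
 * with the caller decoding the two 32-bit words itself (a usage sketch,
 * not a quote from the tty code).
 */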
  1551. static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
  1552. SysDriverOpts* opts)
  1553. {
  1554. ErlDrvData res;
  1555. if (((opts->read_write & DO_READ) && opts->ifd >= max_files) ||
  1556. ((opts->read_write & DO_WRITE) && opts->ofd >= max_files))
  1557. return ERL_DRV_ERROR_GENERAL;
  1558. /*
  1559. * Historical:
  1560. *
  1561. * "Note about nonblocking I/O.
  1562. *
  1563. * At least on Solaris, setting the write end of a TTY to nonblocking,
  1564. * will set the input end to nonblocking as well (and vice-versa).
  1565. * If erl is run in a pipeline like this: cat | erl
  1566. * the input end of the TTY will be the standard input of cat.
  1567. * And cat is not prepared to handle nonblocking I/O."
  1568. *
  1569. * Actually, the reason for this is not that the tty itself gets set
  1570. * in non-blocking mode, but that the "input end" (cat's stdin) and
  1571. * the "output end" (erlang's stdout) are typically the "same" file
  1572. * descriptor, dup()'ed from a single fd by one of this process'
  1573. * ancestors.
  1574. *
  1575. * The workaround for this problem used to be a rather bad kludge,
  1576. * interposing an extra process ("internal cat") between erlang's
  1577. * stdout and the original stdout, allowing erlang to set its stdout
  1578. * in non-blocking mode without affecting the stdin of the preceding
  1579. * process in the pipeline - and being a kludge, it caused all kinds
  1580. * of weird problems.
  1581. *
  1582. * So, this is the current logic:
  1583. *
  1584. * The only reason to set non-blocking mode on the output fd at all is
  1585. * if it's something that can cause a write() to block, of course,
  1586. * i.e. primarily if it points to a tty, socket, pipe, or fifo.
  1587. *
  1588. * If we don't set non-blocking mode when we "should" have, and output
  1589. * becomes blocked, the entire runtime system will be suspended - this
  1590. * is normally bad of course, and can happen fairly "easily" - e.g. user
  1591. * hits ^S on tty - but doesn't necessarily happen.
  1592. *
  1593. * If we do set non-blocking mode when we "shouldn't" have, the runtime
  1594. * system will end up seeing EOF on the input fd (due to the preceding
  1595. * process dying), which typically will cause the entire runtime system
  1596. * to terminate immediately (due to whatever erlang process is seeing
  1597. * the EOF taking it as a signal to halt the system). This is *very* bad.
  1598. *
  1599. * I.e. we should take a conservative approach, and only set non-
  1600. * blocking mode when we a) need to, and b) are reasonably certain
  1601. * that it won't be a problem. And as in the example above, the problem
  1602. * occurs when input fd and output fd point to different "things".
  1603. *
  1604. * However, determining that they are not just the same "type" of
  1605. * "thing", but actually the same instance of that type of thing, is
  1606. * unreasonably complex in many/most cases.
  1607. *
  1608. * Also, with pipes, sockets, and fifos it's far from obvious that the
  1609. * user *wants* non-blocking output: If you're running erlang inside
  1610. * some complex pipeline, you're probably not running a real-time system
  1611. * that must never stop, but rather *want* it to suspend if the output
  1612. * channel is "full".
  1613. *
  1614. * So, the bottom line: We will only set the output fd non-blocking if
  1615. * it points to a tty, and either a) the input fd also points to a tty,
  1616. * or b) we can make sure that setting the output fd non-blocking
  1617. * doesn't interfere with someone else's input, via a somewhat milder
  1618. * kludge than the above.
  1619. *
  1620. * Also keep in mind that while this code is almost exclusively run as
  1621. * a result of an erlang open_port({fd,0,1}, ...), that isn't the only
  1622. * case - it can be called with any old pre-existing file descriptors,
  1623. * the relations between which (if they're even two) we can only guess
  1624. * at - still, we try our best...
  1625. */
  1626. if (opts->read_write & DO_READ) {
  1627. init_fd_data(opts->ifd, port_num);
  1628. }
  1629. if (opts->read_write & DO_WRITE) {
  1630. init_fd_data(opts->ofd, port_num);
  1631. /* If we don't have a read end, all bets are off - no non-blocking. */
  1632. if (opts->read_write & DO_READ) {
  1633. if (isatty(opts->ofd)) { /* output fd is a tty:-) */
  1634. if (isatty(opts->ifd)) { /* input fd is also a tty */
  1635. /* To really do this "right", we should also check that
  1636. input and output fd point to the *same* tty - but
  1637. this seems like overkill; ttyname() isn't for free,
  1638. and this is a very common case - and it's hard to
  1639. imagine a scenario where setting non-blocking mode
  1640. here would cause problems - go ahead and do it. */
  1641. SET_NONBLOCKING(opts->ofd);
  1642. } else { /* output fd is a tty, input fd isn't */
  1643. /* This is a "problem case", but also common (see the
  1644. example above) - i.e. it makes sense to try a bit
  1645. harder before giving up on non-blocking mode: Try to
  1646. re-open the tty that the output fd points to, and if
  1647. successful replace the original one with the "new" fd
  1648. obtained this way, and set *that* one in non-blocking
  1649. mode. (Yes, this is a kludge.)
  1650. However, re-opening the tty may fail in a couple of
  1651. (unusual) cases:
  1652. 1) The name of the tty (or an equivalent one, i.e.
  1653. same major/minor number) can't be found, because
  1654. it actually lives somewhere other than /dev (or
  1655. wherever ttyname() looks for it), and isn't
  1656. equivalent to any of those that do live in the
  1657. "standard" place - this should be *very* unusual.
  1658. 2) Permissions on the tty don't allow us to open it -
  1659. it's perfectly possible to have an fd open to an
  1660. object whose permissions wouldn't allow us to open
  1661. it. This is not as unusual as it sounds, one case
  1662. is if the user has su'ed to someone else (not
  1663. root) - we have a read/write fd open to the tty
  1664. (because it has been inherited all the way down
  1665. here), but we have neither read nor write
  1666. permission for the tty.
  1667. In these cases, we finally give up, and don't set the
  1668. output fd in non-blocking mode. */
  1669. char *tty;
  1670. int nfd;
  1671. if ((tty = ttyname(opts->ofd)) != NULL &&
  1672. (nfd = open(tty, O_WRONLY)) != -1) {
  1673. dup2(nfd, opts->ofd);
  1674. close(nfd);
  1675. SET_NONBLOCKING(opts->ofd);
  1676. }
  1677. }
  1678. }
  1679. }
  1680. }
  1681. CHLD_STAT_LOCK;
  1682. res = (ErlDrvData)(long)set_driver_data(port_num, opts->ifd, opts->ofd,
  1683. opts->packet_bytes,
  1684. opts->read_write, 0, -1);
  1685. CHLD_STAT_UNLOCK;
  1686. return res;
  1687. }
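/*
 * A minimal sketch (not part of this driver) of what the SET_NONBLOCKING()/
 * SET_BLOCKING() macros used above typically expand to on POSIX systems,
 * assuming a plain fcntl()-based definition; the real macros are defined in
 * the system headers, not in this file, and may use another mechanism
 * (e.g. ioctl(FIONBIO)).
 */
#if 0
#include <fcntl.h>
static int example_set_nonblocking(int fd)
{
    int flags = fcntl(fd, F_GETFL, 0);
    if (flags < 0)
        return -1;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
static int example_set_blocking(int fd)
{
    int flags = fcntl(fd, F_GETFL, 0);
    if (flags < 0)
        return -1;
    return fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
}
#endif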
  1688. static void clear_fd_data(int fd)
  1689. {
  1690. if (fd_data[fd].sz > 0) {
  1691. erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf);
  1692. ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= fd_data[fd].sz);
  1693. erts_smp_atomic_add(&sys_misc_mem_sz, -1*fd_data[fd].sz);
  1694. }
  1695. fd_data[fd].buf = NULL;
  1696. fd_data[fd].sz = 0;
  1697. fd_data[fd].remain = 0;
  1698. fd_data[fd].cpos = NULL;
  1699. fd_data[fd].psz = 0;
  1700. }
  1701. static void nbio_stop_fd(int prt, int fd)
  1702. {
  1703. driver_select(prt,fd,DO_READ|DO_WRITE,0);
  1704. clear_fd_data(fd);
  1705. SET_BLOCKING(fd);
  1706. }
  1707. static void fd_stop(ErlDrvData fd) /* Does not close the fds */
  1708. {
  1709. int ofd;
  1710. nbio_stop_fd(driver_data[(int)(long)fd].port_num, (int)(long)fd);
  1711. ofd = driver_data[(int)(long)fd].ofd;
  1712. if (ofd != (int)(long)fd && ofd != -1)
  1713. nbio_stop_fd(driver_data[(int)(long)fd].port_num, (int)(long)ofd);
  1714. }
  1715. static ErlDrvData vanilla_start(ErlDrvPort port_num, char* name,
  1716. SysDriverOpts* opts)
  1717. {
  1718. int flags, fd;
  1719. ErlDrvData res;
  1720. flags = (opts->read_write == DO_READ ? O_RDONLY :
  1721. opts->read_write == DO_WRITE ? O_WRONLY|O_CREAT|O_TRUNC :
  1722. O_RDWR|O_CREAT);
  1723. if ((fd = open(name, flags, 0666)) < 0)
  1724. return ERL_DRV_ERROR_GENERAL;
  1725. if (fd >= max_files) {
  1726. close(fd);
  1727. return ERL_DRV_ERROR_GENERAL;
  1728. }
  1729. SET_NONBLOCKING(fd);
  1730. init_fd_data(fd, port_num);
  1731. CHLD_STAT_LOCK;
  1732. res = (ErlDrvData)(long)set_driver_data(port_num, fd, fd,
  1733. opts->packet_bytes,
  1734. opts->read_write, 0, -1);
  1735. CHLD_STAT_UNLOCK;
  1736. return res;
  1737. }
  1738. /* Note that driver_data[fd].ifd == fd if the port was opened for reading, */
  1739. /* otherwise (i.e. write only) driver_data[fd].ofd = fd. */
  1740. static void stop(ErlDrvData fd)
  1741. {
  1742. int prt, ofd;
  1743. prt = driver_data[(int)(long)fd].port_num;
  1744. nbio_stop_fd(prt, (int)(long)fd);
  1745. ofd = driver_data[(int)(long)fd].ofd;
  1746. if (ofd != (int)(long)fd && (int)(long)ofd != -1)
  1747. nbio_stop_fd(prt, ofd);
  1748. else
  1749. ofd = -1;
  1750. CHLD_STAT_LOCK;
  1751. /* Mark as unused. Maybe resetting the 'port_num' slot is better? */
  1752. driver_data[(int)(long)fd].pid = -1;
  1753. CHLD_STAT_UNLOCK;
  1754. /* SMP note: Close has to be last thing done (open file descriptors work
  1755. as locks on driver_data[] entries) */
  1756. driver_select(prt, (int)(long)fd, ERL_DRV_USE, 0); /* close(fd); */
  1757. if (ofd >= 0) {
  1758. driver_select(prt, (int)(long)ofd, ERL_DRV_USE, 0); /* close(ofd); */
  1759. }
  1760. }
  1761. static void outputv(ErlDrvData e, ErlIOVec* ev)
  1762. {
  1763. int fd = (int)(long)e;
  1764. int ix = driver_data[fd].port_num;
  1765. int pb = driver_data[fd].packet_bytes;
  1766. int ofd = driver_data[fd].ofd;
  1767. int n;
  1768. int sz;
  1769. char lb[4];
  1770. char* lbp;
  1771. int len = ev->size;
  1772. /* (len > ((unsigned long)-1 >> (4-pb)*8)) */
  1773. if (((pb == 2) && (len > 0xffff)) || (pb == 1 && len > 0xff)) {
  1774. driver_failure_posix(ix, EINVAL);
  1775. return; /* -1; */
  1776. }
  1777. put_int32(len, lb);
  1778. lbp = lb + (4-pb);
  1779. ev->iov[0].iov_base = lbp;
  1780. ev->iov[0].iov_len = pb;
  1781. ev->size += pb;
  1782. if ((sz = driver_sizeq(ix)) > 0) {
  1783. driver_enqv(ix, ev, 0);
  1784. if (sz + ev->size >= (1 << 13))
  1785. set_busy_port(ix, 1);
  1786. }
  1787. else {
  1788. int vsize = ev->vsize > MAX_VSIZE ? MAX_VSIZE : ev->vsize;
  1789. n = writev(ofd, (const void *) (ev->iov), vsize);
  1790. if (n == ev->size)
  1791. return; /* 0;*/
  1792. if (n < 0) {
  1793. if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
  1794. driver_failure_posix(ix, errno);
  1795. return; /* -1;*/
  1796. }
  1797. n = 0;
  1798. }
  1799. driver_enqv(ix, ev, n); /* n is the skip value */
  1800. driver_select(ix, ofd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
  1801. }
  1802. /* return 0;*/
  1803. }
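/*
 * Sketch (hypothetical helper, not used by the driver) of the {packet, N}
 * header construction done in outputv() above and in output() below:
 * put_int32() stores the length big-endian in lb[0..3], and only the last
 * pb bytes are transmitted, i.e. lb + (4 - pb). For pb == 2 and len == 5
 * the bytes preceding the payload are 0x00 0x05.
 */
#if 0
static void example_packet_header(int pb, int len,
                                  char *hdr /* at least pb bytes */)
{
    char lb[4];
    put_int32(len, lb);                 /* big-endian length in lb[0..3] */
    sys_memcpy(hdr, lb + (4 - pb), pb); /* keep only the low pb bytes */
}
#endif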
  1804. static void output(ErlDrvData e, char* buf, int len)
  1805. {
  1806. int fd = (int)(long)e;
  1807. int ix = driver_data[fd].port_num;
  1808. int pb = driver_data[fd].packet_bytes;
  1809. int ofd = driver_data[fd].ofd;
  1810. int n;
  1811. int sz;
  1812. char lb[4];
  1813. char* lbp;
  1814. struct iovec iv[2];
  1815. /* (len > ((unsigned long)-1 >> (4-pb)*8)) */
  1816. if (((pb == 2) && (len > 0xffff)) || (pb == 1 && len > 0xff)) {
  1817. driver_failure_posix(ix, EINVAL);
  1818. return; /* -1; */
  1819. }
  1820. put_int32(len, lb);
  1821. lbp = lb + (4-pb);
  1822. if ((sz = driver_sizeq(ix)) > 0) {
  1823. driver_enq(ix, lbp, pb);
  1824. driver_enq(ix, buf, len);
  1825. if (sz + len + pb >= (1 << 13))
  1826. set_busy_port(ix, 1);
  1827. }
  1828. else {
  1829. iv[0].iov_base = lbp;
  1830. iv[0].iov_len = pb; /* should work for pb=0 */
  1831. iv[1].iov_base = buf;
  1832. iv[1].iov_len = len;
  1833. n = writev(ofd, iv, 2);
  1834. if (n == pb+len)
  1835. return; /* 0; */
  1836. if (n < 0) {
  1837. if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
  1838. driver_failure_posix(ix, errno);
  1839. return; /* -1; */
  1840. }
  1841. n = 0;
  1842. }
  1843. if (n < pb) {
  1844. driver_enq(ix, lbp+n, pb-n);
  1845. driver_enq(ix, buf, len);
  1846. }
  1847. else {
  1848. n -= pb;
  1849. driver_enq(ix, buf+n, len-n);
  1850. }
  1851. driver_select(ix, ofd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
  1852. }
  1853. return; /* 0; */
  1854. }
  1855. static int port_inp_failure(int port_num, int ready_fd, int res)
  1856. /* Result: 0 (eof) or -1 (error) */
  1857. {
  1858. int err = errno;
  1859. ASSERT(res <= 0);
  1860. (void) driver_select(port_num, ready_fd, ERL_DRV_READ|ERL_DRV_WRITE, 0);
  1861. clear_fd_data(ready_fd);
  1862. if (res == 0) {
  1863. if (driver_data[ready_fd].report_exit) {
  1864. CHLD_STAT_LOCK;
  1865. if (driver_data[ready_fd].alive) {
  1866. /*
  1867. * We have eof and want to report exit status, but the process
  1868. * hasn't exited yet. When it does report_exit_status() will
  1869. * driver_select() this fd which will make sure that we get
  1870. * back here with driver_data[ready_fd].alive == 0 and
  1871. * driver_data[ready_fd].status set.
  1872. */
  1873. CHLD_STAT_UNLOCK;
  1874. return 0;
  1875. }
  1876. else {
  1877. int status = driver_data[ready_fd].status;
  1878. CHLD_STAT_UNLOCK;
  1879. /* We need not be prepared for stopped/continued processes. */
  1880. if (WIFSIGNALED(status))
  1881. status = 128 + WTERMSIG(status);
  1882. else
  1883. status = WEXITSTATUS(status);
  1884. driver_report_exit(driver_data[ready_fd].port_num, status);
  1885. }
  1886. }
  1887. driver_failure_eof(port_num);
  1888. } else {
  1889. driver_failure_posix(port_num, err);
  1890. }
  1891. return 0;
  1892. }
  1893. /* fd is the drv_data that is returned from the */
  1894. /* initial start routine */
  1895. /* ready_fd is the descriptor that is ready to read */
  1896. static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
  1897. {
  1898. int fd = (int)(long)e;
  1899. int port_num;
  1900. int packet_bytes;
  1901. int res;
  1902. Uint h;
  1903. port_num = driver_data[fd].port_num;
  1904. packet_bytes = driver_data[fd].packet_bytes;
  1905. if (packet_bytes == 0) {
  1906. byte *read_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_READ_BUF,
  1907. ERTS_SYS_READ_BUF_SZ);
  1908. res = read(ready_fd, read_buf, ERTS_SYS_READ_BUF_SZ);
  1909. if (res < 0) {
  1910. if ((errno != EINTR) && (errno != ERRNO_BLOCK))
  1911. port_inp_failure(port_num, ready_fd, res);
  1912. }
  1913. else if (res == 0)
  1914. port_inp_failure(port_num, ready_fd, res);
  1915. else
  1916. driver_output(port_num, (char*) read_buf, res);
  1917. erts_free(ERTS_ALC_T_SYS_READ_BUF, (void *) read_buf);
  1918. }
  1919. else if (fd_data[ready_fd].remain > 0) { /* We try to read the remainder */
  1920. /* space is allocated in buf */
  1921. res = read(ready_fd, fd_data[ready_fd].cpos,
  1922. fd_data[ready_fd].remain);
  1923. if (res < 0) {
  1924. if ((errno != EINTR) && (errno != ERRNO_BLOCK))
  1925. port_inp_failure(port_num, ready_fd, res);
  1926. }
  1927. else if (res == 0) {
  1928. port_inp_failure(port_num, ready_fd, res);
  1929. }
  1930. else if (res == fd_data[ready_fd].remain) { /* we're done */
  1931. driver_output(port_num, fd_data[ready_fd].buf,
  1932. fd_data[ready_fd].sz);
  1933. clear_fd_data(ready_fd);
  1934. }
  1935. else { /* if (res < fd_data[ready_fd].remain) */
  1936. fd_data[ready_fd].cpos += res;
  1937. fd_data[ready_fd].remain -= res;
  1938. }
  1939. }
  1940. else if (fd_data[ready_fd].remain == 0) { /* clean fd */
  1941. byte *read_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_READ_BUF,
  1942. ERTS_SYS_READ_BUF_SZ);
  1943. /* We make one read attempt and see what happens */
  1944. res = read(ready_fd, read_buf, ERTS_SYS_READ_BUF_SZ);
  1945. if (res < 0) {
  1946. if ((errno != EINTR) && (errno != ERRNO_BLOCK))
  1947. port_inp_failure(port_num, ready_fd, res);
  1948. }
  1949. else if (res == 0) { /* eof */
  1950. port_inp_failure(port_num, ready_fd, res);
  1951. }
  1952. else if (res < packet_bytes - fd_data[ready_fd].psz) {
  1953. memcpy(fd_data[ready_fd].pbuf+fd_data[ready_fd].psz,
  1954. read_buf, res);
  1955. fd_data[ready_fd].psz += res;
  1956. }
  1957. else { /* if (res >= packet_bytes) */
  1958. unsigned char* cpos = read_buf;
  1959. int bytes_left = res;
  1960. while (1) {
  1961. int psz = fd_data[ready_fd].psz;
  1962. char* pbp = fd_data[ready_fd].pbuf + psz;
  1963. while(bytes_left && (psz < packet_bytes)) {
  1964. *pbp++ = *cpos++;
  1965. bytes_left--;
  1966. psz++;
  1967. }
  1968. if (psz < packet_bytes) {
  1969. fd_data[ready_fd].psz = psz;
  1970. break;
  1971. }
  1972. fd_data[ready_fd].psz = 0;
  1973. switch (packet_bytes) {
  1974. case 1: h = get_int8(fd_data[ready_fd].pbuf); break;
  1975. case 2: h = get_int16(fd_data[ready_fd].pbuf); break;
  1976. case 4: h = get_int32(fd_data[ready_fd].pbuf); break;
  1977. default: ASSERT(0); return; /* -1; */
  1978. }
  1979. if (h <= (bytes_left)) {
  1980. driver_output(port_num, (char*) cpos, h);
  1981. cpos += h;
  1982. bytes_left -= h;
  1983. continue;
  1984. }
  1985. else { /* The last message we got was split */
  1986. char *buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
  1987. if (!buf) {
  1988. errno = ENOMEM;
  1989. port_inp_failure(port_num, ready_fd, -1);
  1990. }
  1991. else {
  1992. erts_smp_atomic_add(&sys_misc_mem_sz, h);
  1993. sys_memcpy(buf, cpos, bytes_left);
  1994. fd_data[ready_fd].buf = buf;
  1995. fd_data[ready_fd].sz = h;
  1996. fd_data[ready_fd].remain = h - bytes_left;
  1997. fd_data[ready_fd].cpos = buf + bytes_left;
  1998. }
  1999. break;
  2000. }
  2001. }
  2002. }
  2003. erts_free(ERTS_ALC_T_SYS_READ_BUF, (void *) read_buf);
  2004. }
  2005. }
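/*
 * Sketch (hypothetical, not used by the driver) of the decoding direction
 * that ready_input() above performs incrementally across read() calls:
 * a complete {packet, 2} frame in a contiguous buffer is a 2-byte
 * big-endian length followed by that many payload bytes.
 */
#if 0
static int example_decode_packet2(const unsigned char *buf, int buflen,
                                  const unsigned char **payload, int *plen)
{
    Uint h;
    if (buflen < 2)
        return 0;                 /* header not complete yet */
    h = get_int16(buf);           /* big-endian payload length */
    if ((Uint)(buflen - 2) < h)
        return 0;                 /* payload still incomplete */
    *payload = buf + 2;
    *plen = (int) h;
    return 1;                     /* one complete frame available */
}
#endif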
  2006. /* fd is the drv_data that is returned from the */
  2007. /* initial start routine */
  2008. /* ready_fd is the descriptor that is ready to read */
  2009. static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd)
  2010. {
  2011. int fd = (int)(long)e;
  2012. int ix = driver_data[fd].port_num;
  2013. int n;
  2014. struct iovec* iv;
  2015. int vsize;
  2016. if ((iv = (struct iovec*) driver_peekq(ix, &vsize)) == NULL) {
  2017. driver_select(ix, ready_fd, ERL_DRV_WRITE, 0);
  2018. return; /* 0; */
  2019. }
  2020. vsize = vsize > MAX_VSIZE ? MAX_VSIZE : vsize;
  2021. if ((n = writev(ready_fd, iv, vsize)) > 0) {
  2022. if (driver_deq(ix, n) == 0)
  2023. set_busy_port(ix, 0);
  2024. }
  2025. else if (n < 0) {
  2026. if (errno == ERRNO_BLOCK || errno == EINTR)
  2027. return; /* 0; */
  2028. else {
  2029. int res = errno;
  2030. driver_select(ix, ready_fd, ERL_DRV_WRITE, 0);
  2031. driver_failure_posix(ix, res);
  2032. return; /* -1; */
  2033. }
  2034. }
  2035. return; /* 0; */
  2036. }
  2037. static void stop_select(ErlDrvEvent fd, void* _)
  2038. {
  2039. close((int)fd);
  2040. }
  2041. /*
2042. ** Async operation support
  2043. */
  2044. #if defined(USE_THREADS) && !defined(ERTS_SMP)
  2045. static void
  2046. sys_async_ready_failed(int fd, int r, int err)
  2047. {
  2048. char buf[120];
  2049. sprintf(buf, "sys_async_ready(): Fatal error: fd=%d, r=%d, errno=%d\n",
  2050. fd, r, err);
  2051. erts_silence_warn_unused_result(write(2, buf, strlen(buf)));
  2052. abort();
  2053. }
  2054. /* called from threads !! */
  2055. void sys_async_ready(int fd)
  2056. {
  2057. int r;
  2058. while (1) {
  2059. r = write(fd, "0", 1); /* signal main thread fd MUST be async_fd[1] */
  2060. if (r == 1) {
  2061. DEBUGF(("sys_async_ready(): r = 1\r\n"));
  2062. break;
  2063. }
  2064. if (r < 0 && errno == EINTR) {
  2065. DEBUGF(("sys_async_ready(): r = %d\r\n", r));
  2066. continue;
  2067. }
  2068. sys_async_ready_failed(fd, r, errno);
  2069. }
  2070. }
  2071. static int async_drv_init(void)
  2072. {
  2073. async_fd[0] = -1;
  2074. async_fd[1] = -1;
  2075. return 0;
  2076. }
  2077. static ErlDrvData async_drv_start(ErlDrvPort port_num,
  2078. char* name, SysDriverOpts* opts)
  2079. {
  2080. if (async_fd[0] != -1)
  2081. return ERL_DRV_ERROR_GENERAL;
  2082. if (pipe(async_fd) < 0)
  2083. return ERL_DRV_ERROR_GENERAL;
  2084. DEBUGF(("async_drv_start: %d\r\n", port_num));
  2085. SET_NONBLOCKING(async_fd[0]);
  2086. driver_select(port_num, async_fd[0], ERL_DRV_READ, 1);
  2087. if (init_async(async_fd[1]) < 0)
  2088. return ERL_DRV_ERROR_GENERAL;
  2089. return (ErlDrvData)port_num;
  2090. }
  2091. static void async_drv_stop(ErlDrvData e)
  2092. {
  2093. int port_num = (int)(long)e;
  2094. DEBUGF(("async_drv_stop: %d\r\n", port_num));
  2095. exit_async();
  2096. driver_select(port_num, async_fd[0], ERL_DRV_READ, 0);
  2097. close(async_fd[0]);
  2098. close(async_fd[1]);
  2099. async_fd[0] = async_fd[1] = -1;
  2100. }
  2101. static void async_drv_input(ErlDrvData e, ErlDrvEvent fd)
  2102. {
2103. char buf[32];
  2104. DEBUGF(("async_drv_input\r\n"));
  2105. while (read((int) fd, (void *) buf, 32) > 0); /* fd MUST be async_fd[0] */
  2106. check_async_ready(); /* invoke all async_ready */
  2107. }
  2108. #endif
  2109. void erts_do_break_handling(void)
  2110. {
  2111. struct termios temp_mode;
  2112. int saved = 0;
  2113. /*
2114. * Most functions called by do_break() are intentionally not thread safe;
  2115. * therefore, make sure that all threads but this one are blocked before
  2116. * proceeding!
  2117. */
  2118. erts_smp_block_system(0);
  2119. /*
  2120. * NOTE: since we allow gc we are not allowed to lock
  2121. * (any) process main locks while blocking system...
  2122. */
  2123. /* during break we revert to initial settings */
  2124. /* this is done differently for oldshell */
  2125. if (using_oldshell && !replace_intr) {
  2126. SET_BLOCKING(1);
  2127. }
  2128. else if (isatty(0)) {
  2129. tcgetattr(0,&temp_mode);
  2130. tcsetattr(0,TCSANOW,&initial_tty_mode);
  2131. saved = 1;
  2132. }
  2133. /* call the break handling function, reset the flag */
  2134. do_break();
  2135. ERTS_UNSET_BREAK_REQUESTED;
  2136. fflush(stdout);
  2137. /* after break we go back to saved settings */
  2138. if (using_oldshell && !replace_intr) {
  2139. SET_NONBLOCKING(1);
  2140. }
  2141. else if (saved) {
  2142. tcsetattr(0,TCSANOW,&temp_mode);
  2143. }
  2144. erts_smp_release_system();
  2145. }
2146. /* Fills in the system's representation of the jam/beam process identifier.
2147. ** The Pid is put in STRING representation in the supplied buffer;
2148. ** no interpretation of it should be done by the rest of the
2149. ** emulator. The buffer should be at least 21 bytes long.
  2150. */
  2151. void sys_get_pid(char *buffer){
  2152. pid_t p = getpid();
  2153. /* Assume the pid is scalar and can rest in an unsigned long... */
  2154. sprintf(buffer,"%lu",(unsigned long) p);
  2155. }
  2156. int
  2157. erts_sys_putenv(char *buffer, int sep_ix)
  2158. {
  2159. int res;
  2160. char *env;
  2161. #ifdef HAVE_COPYING_PUTENV
  2162. env = buffer;
  2163. #else
  2164. Uint sz = strlen(buffer)+1;
  2165. env = erts_alloc(ERTS_ALC_T_PUTENV_STR, sz);
  2166. erts_smp_atomic_add(&sys_misc_mem_sz, sz);
  2167. strcpy(env,buffer);
  2168. #endif
  2169. erts_smp_rwmtx_rwlock(&environ_rwmtx);
  2170. res = putenv(env);
  2171. erts_smp_rwmtx_rwunlock(&environ_rwmtx);
  2172. return res;
  2173. }
  2174. int
  2175. erts_sys_getenv(char *key, char *value, size_t *size)
  2176. {
  2177. char *orig_value;
  2178. int res;
  2179. erts_smp_rwmtx_rlock(&environ_rwmtx);
  2180. orig_value = getenv(key);
  2181. if (!orig_value)
  2182. res = -1;
  2183. else {
  2184. size_t len = sys_strlen(orig_value);
  2185. if (len >= *size) {
  2186. *size = len + 1;
  2187. res = 1;
  2188. }
  2189. else {
  2190. *size = len;
  2191. sys_memcpy((void *) value, (void *) orig_value, len+1);
  2192. res = 0;
  2193. }
  2194. }
  2195. erts_smp_rwmtx_runlock(&environ_rwmtx);
  2196. return res;
  2197. }
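/*
 * Sketch (hypothetical caller) of the erts_sys_getenv() contract above:
 * a negative result means the variable is unset; 1 means the supplied
 * buffer was too small and *size now holds the required size (including
 * the terminating NUL); 0 means value and *size have been filled in.
 * "ERL_EXAMPLE_VAR" is just an illustrative name.
 */
#if 0
static void example_getenv_usage(void)
{
    char buf[16];
    size_t sz = sizeof(buf);
    int res = erts_sys_getenv("ERL_EXAMPLE_VAR", buf, &sz);
    if (res == 0) {
        /* buf holds the value, sz its length */
    } else if (res == 1) {
        /* retry with a buffer of at least sz bytes */
    } else {
        /* variable not set */
    }
}
#endif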
  2198. void
  2199. sys_init_io(void)
  2200. {
  2201. fd_data = (struct fd_data *)
  2202. erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
  2203. erts_smp_atomic_add(&sys_misc_mem_sz,
  2204. max_files * sizeof(struct fd_data));
  2205. #ifdef USE_THREADS
  2206. #ifdef ERTS_SMP
  2207. if (init_async(-1) < 0)
  2208. erl_exit(1, "Failed to initialize async-threads\n");
  2209. #else
  2210. {
2211. /* This is special stuff: starting a driver from the
2212. * system routines, but it is a nice way of handling things
2213. * the Erlang way.
2214. */
  2215. SysDriverOpts dopts;
  2216. int ret;
  2217. sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
  2218. add_driver_entry(&async_driver_entry);
  2219. ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
  2220. DEBUGF(("open_driver = %d\n", ret));
  2221. if (ret < 0)
  2222. erl_exit(1, "Failed to open async driver\n");
  2223. erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
  2224. }
  2225. #endif
  2226. #endif
  2227. }
  2228. #if (0) /* unused? */
  2229. static int write_fill(fd, buf, len)
  2230. int fd, len;
  2231. char *buf;
  2232. {
  2233. int i, done = 0;
  2234. do {
  2235. if ((i = write(fd, buf+done, len-done)) < 0) {
  2236. if (errno != EINTR)
  2237. return (i);
  2238. i = 0;
  2239. }
  2240. done += i;
  2241. } while (done < len);
  2242. return (len);
  2243. }
  2244. #endif
  2245. extern const char pre_loaded_code[];
  2246. extern Preload pre_loaded[];
  2247. void erts_sys_alloc_init(void)
  2248. {
  2249. elib_ensure_initialized();
  2250. }
  2251. void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
  2252. {
  2253. void *res = malloc((size_t) sz);
  2254. #if HAVE_ERTS_MSEG
  2255. if (!res) {
  2256. erts_mseg_clear_cache();
  2257. return malloc((size_t) sz);
  2258. }
  2259. #endif
  2260. return res;
  2261. }
  2262. void *erts_sys_realloc(ErtsAlcType_t t, void *x, void *p, Uint sz)
  2263. {
  2264. void *res = realloc(p, (size_t) sz);
  2265. #if HAVE_ERTS_MSEG
  2266. if (!res) {
  2267. erts_mseg_clear_cache();
  2268. return realloc(p, (size_t) sz);
  2269. }
  2270. #endif
  2271. return res;
  2272. }
  2273. void erts_sys_free(ErtsAlcType_t t, void *x, void *p)
  2274. {
  2275. free(p);
  2276. }
  2277. /* Return a pointer to a vector of names of preloaded modules */
  2278. Preload*
  2279. sys_preloaded(void)
  2280. {
  2281. return pre_loaded;
  2282. }
  2283. /* Return a pointer to preloaded code for module "module" */
  2284. unsigned char*
  2285. sys_preload_begin(Preload* p)
  2286. {
  2287. return p->code;
  2288. }
  2289. /* Clean up if allocated */
  2290. void sys_preload_end(Preload* p)
  2291. {
  2292. /* Nothing */
  2293. }
  2294. /* Read a key from console (?) */
  2295. int sys_get_key(fd)
  2296. int fd;
  2297. {
  2298. int c;
  2299. unsigned char rbuf[64];
  2300. fflush(stdout); /* Flush query ??? */
  2301. if ((c = read(fd,rbuf,64)) <= 0) {
  2302. return c;
  2303. }
  2304. return rbuf[0];
  2305. }
  2306. #ifdef DEBUG
  2307. extern int erts_initialized;
  2308. void
  2309. erl_assert_error(char* expr, char* file, int line)
  2310. {
  2311. fflush(stdout);
  2312. fprintf(stderr, "Assertion failed: %s in %s, line %d\n",
  2313. expr, file, line);
  2314. fflush(stderr);
  2315. #if !defined(ERTS_SMP) && 0
2316. /* Writing a crashdump from a failed assertion when smp support
2317. * is enabled is almost guaranteed to deadlock, so don't even bother.
  2318. *
  2319. * It could maybe be useful (but I'm not convinced) to write the
  2320. * crashdump if smp support is disabled...
  2321. */
  2322. if (erts_initialized)
  2323. erl_crash_dump(file, line, "Assertion failed: %s\n", expr);
  2324. #endif
  2325. abort();
  2326. }
  2327. void
  2328. erl_debug(char* fmt, ...)
  2329. {
  2330. char sbuf[1024]; /* Temporary buffer. */
  2331. va_list va;
  2332. if (debug_log) {
  2333. va_start(va, fmt);
  2334. vsprintf(sbuf, fmt, va);
  2335. va_end(va);
  2336. fprintf(stderr, "%s", sbuf);
  2337. }
  2338. }
  2339. #endif /* DEBUG */
  2340. static ERTS_INLINE void
  2341. report_exit_status(ErtsSysReportExit *rep, int status)
  2342. {
  2343. Port *pp;
  2344. #ifdef ERTS_SMP
  2345. CHLD_STAT_UNLOCK;
  2346. #endif
  2347. pp = erts_id2port_sflgs(rep->port,
  2348. NULL,
  2349. 0,
  2350. ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
  2351. #ifdef ERTS_SMP
  2352. CHLD_STAT_LOCK;
  2353. #endif
  2354. if (pp) {
  2355. if (rep->ifd >= 0) {
  2356. driver_data[rep->ifd].alive = 0;
  2357. driver_data[rep->ifd].status = status;
  2358. (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
  2359. rep->ifd,
  2360. (ERL_DRV_READ|ERL_DRV_USE),
  2361. 1);
  2362. }
  2363. if (rep->ofd >= 0) {
  2364. driver_data[rep->ofd].alive = 0;
  2365. driver_data[rep->ofd].status = status;
  2366. (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
  2367. rep->ofd,
  2368. (ERL_DRV_WRITE|ERL_DRV_USE),
  2369. 1);
  2370. }
  2371. erts_port_release(pp);
  2372. }
  2373. erts_free(ERTS_ALC_T_PRT_REP_EXIT, rep);
  2374. }
  2375. #if !CHLDWTHR /* ---------------------------------------------------------- */
  2376. #define ERTS_REPORT_EXIT_STATUS report_exit_status
  2377. static int check_children(void)
  2378. {
  2379. int res = 0;
  2380. int pid;
  2381. int status;
  2382. #ifndef ERTS_SMP
  2383. if (children_died)
  2384. #endif
  2385. {
  2386. sys_sigblock(SIGCHLD);
  2387. CHLD_STAT_LOCK;
  2388. while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
  2389. note_child_death(pid, status);
  2390. #ifndef ERTS_SMP
  2391. children_died = 0;
  2392. #endif
  2393. CHLD_STAT_UNLOCK;
  2394. sys_sigrelease(SIGCHLD);
  2395. res = 1;
  2396. }
  2397. return res;
  2398. }
  2399. #ifdef ERTS_SMP
  2400. void
  2401. erts_check_children(void)
  2402. {
  2403. (void) check_children();
  2404. }
  2405. #endif
  2406. #elif CHLDWTHR && defined(ERTS_SMP) /* ------------------------------------- */
  2407. #define ERTS_REPORT_EXIT_STATUS report_exit_status
  2408. #define check_children() (0)
  2409. #else /* CHLDWTHR && !defined(ERTS_SMP) ------------------------------------ */
  2410. #define ERTS_REPORT_EXIT_STATUS initiate_report_exit_status
  2411. static ERTS_INLINE void
  2412. initiate_report_exit_status(ErtsSysReportExit *rep, int status)
  2413. {
  2414. rep->next = report_exit_transit_list;
  2415. rep->status = status;
  2416. report_exit_transit_list = rep;
  2417. /*
  2418. * We need the scheduler thread to call check_children().
  2419. * If the scheduler thread is sleeping in a poll with a
  2420. * timeout, we need to wake the scheduler thread. We use the
  2421. * functionality of the async driver to do this, instead of
  2422. * implementing yet another driver doing the same thing. A
  2423. * little bit ugly, but it works...
  2424. */
  2425. sys_async_ready(async_fd[1]);
  2426. }
  2427. static int check_children(void)
  2428. {
  2429. int res;
  2430. ErtsSysReportExit *rep;
  2431. CHLD_STAT_LOCK;
  2432. rep = report_exit_transit_list;
  2433. res = rep != NULL;
  2434. while (rep) {
  2435. ErtsSysReportExit *curr_rep = rep;
  2436. rep = rep->next;
  2437. report_exit_status(curr_rep, curr_rep->status);
  2438. }
  2439. report_exit_transit_list = NULL;
  2440. CHLD_STAT_UNLOCK;
  2441. return res;
  2442. }
  2443. #endif /* ------------------------------------------------------------------ */
  2444. static void note_child_death(int pid, int status)
  2445. {
  2446. ErtsSysReportExit **repp = &report_exit_list;
  2447. ErtsSysReportExit *rep = report_exit_list;
  2448. while (rep) {
  2449. if (pid == rep->pid) {
  2450. *repp = rep->next;
  2451. ERTS_REPORT_EXIT_STATUS(rep, status);
  2452. break;
  2453. }
  2454. repp = &rep->next;
  2455. rep = rep->next;
  2456. }
  2457. }
  2458. #if CHLDWTHR
  2459. static void *
  2460. child_waiter(void *unused)
  2461. {
  2462. int pid;
  2463. int status;
  2464. #ifdef ERTS_ENABLE_LOCK_CHECK
  2465. erts_lc_set_thread_name("child waiter");
  2466. #endif
  2467. while(1) {
  2468. #ifdef DEBUG
  2469. int waitpid_errno;
  2470. #endif
  2471. pid = waitpid(-1, &status, 0);
  2472. #ifdef DEBUG
  2473. waitpid_errno = errno;
  2474. #endif
  2475. CHLD_STAT_LOCK;
  2476. if (pid < 0) {
  2477. ASSERT(waitpid_errno == ECHILD);
  2478. }
  2479. else {
  2480. children_alive--;
  2481. ASSERT(children_alive >= 0);
  2482. note_child_death(pid, status);
  2483. }
  2484. while (!children_alive)
  2485. CHLD_STAT_WAIT; /* Wait for children to wait on... :) */
  2486. CHLD_STAT_UNLOCK;
  2487. }
  2488. return NULL;
  2489. }
  2490. #endif
  2491. /*
  2492. * Called from schedule() when it runs out of runnable processes,
  2493. * or when Erlang code has performed INPUT_REDUCTIONS reduction
  2494. * steps. runnable == 0 iff there are no runnable Erlang processes.
  2495. */
  2496. void
  2497. erl_sys_schedule(int runnable)
  2498. {
  2499. #ifdef ERTS_SMP
  2500. ERTS_CHK_IO(!runnable);
  2501. ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
  2502. #else
  2503. ERTS_CHK_IO_INTR(0);
  2504. if (runnable) {
  2505. ERTS_CHK_IO(0); /* Poll for I/O */
  2506. check_async_ready(); /* Check async completions */
  2507. } else {
  2508. int wait_for_io = !check_async_ready();
  2509. if (wait_for_io)
  2510. wait_for_io = !check_children();
  2511. ERTS_CHK_IO(wait_for_io);
  2512. }
  2513. (void) check_children();
  2514. #endif
  2515. }
  2516. #ifdef ERTS_SMP
  2517. static erts_smp_tid_t sig_dispatcher_tid;
  2518. static void
  2519. smp_sig_notify(char c)
  2520. {
  2521. int res;
  2522. do {
  2523. /* write() is async-signal safe (according to posix) */
  2524. res = write(sig_notify_fds[1], &c, 1);
  2525. } while (res < 0 && errno == EINTR);
  2526. if (res != 1) {
  2527. char msg[] =
  2528. "smp_sig_notify(): Failed to notify signal-dispatcher thread "
  2529. "about received signal";
  2530. erts_silence_warn_unused_result(write(2, msg, sizeof(msg)));
  2531. abort();
  2532. }
  2533. }
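/*
 * Sketch (hypothetical; the actual signal handlers are installed elsewhere
 * in this file) of how a handler is expected to use smp_sig_notify(): it
 * does nothing but the async-signal-safe write() performed by
 * smp_sig_notify(), and leaves the real work to the dispatcher thread
 * below. The character must be one handled in
 * signal_dispatcher_thread_func() ('I' for SIGINT, 'Q' for SIGQUIT, ...).
 */
#if 0
static void example_sigint_handler(int signum)
{
    (void) signum;
    smp_sig_notify('I');
}
#endif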
  2534. static void *
  2535. signal_dispatcher_thread_func(void *unused)
  2536. {
  2537. int initialized = 0;
  2538. #if !CHLDWTHR
  2539. int notify_check_children = 0;
  2540. #endif
  2541. #ifdef ERTS_ENABLE_LOCK_CHECK
  2542. erts_lc_set_thread_name("signal_dispatcher");
  2543. #endif
  2544. while (1) {
  2545. char buf[32];
  2546. int res, i;
  2547. /* Block on read() waiting for a signal notification to arrive... */
  2548. res = read(sig_notify_fds[0], (void *) &buf[0], 32);
  2549. if (res < 0) {
  2550. if (errno == EINTR)
  2551. continue;
  2552. erl_exit(ERTS_ABORT_EXIT,
  2553. "signal-dispatcher thread got unexpected error: %s (%d)\n",
  2554. erl_errno_id(errno),
  2555. errno);
  2556. }
  2557. for (i = 0; i < res; i++) {
  2558. /*
  2559. * NOTE 1: The signal dispatcher thread should not do work
  2560. * that takes a substantial amount of time (except
  2561. * perhaps in test and debug builds). It needs to
  2562. * be responsive, i.e, it should only dispatch work
  2563. * to other threads.
  2564. *
  2565. * NOTE 2: The signal dispatcher thread is not a blockable
  2566. * thread (i.e., it hasn't called
  2567. * erts_register_blockable_thread()). This is
  2568. * intentional. We want to be able to interrupt
  2569. * writing of a crash dump by hitting C-c twice.
  2570. * Since it isn't a blockable thread it is important
  2571. * that it doesn't change the state of any data that
  2572. * a blocking thread expects to have exclusive access
  2573. * to (unless the signal dispatcher itself explicitly
  2574. * is blocking all blockable threads).
  2575. */
  2576. switch (buf[i]) {
  2577. case 0: /* Emulator initialized */
  2578. initialized = 1;
  2579. #if !CHLDWTHR
  2580. if (!notify_check_children)
  2581. #endif
  2582. break;
  2583. #if !CHLDWTHR
  2584. case 'C': /* SIGCHLD */
  2585. if (initialized)
  2586. erts_smp_notify_check_children_needed();
  2587. else
  2588. notify_check_children = 1;
  2589. break;
  2590. #endif
  2591. case 'I': /* SIGINT */
  2592. break_requested();
  2593. break;
  2594. case 'Q': /* SIGQUIT */
  2595. quit_requested();
  2596. break;
  2597. case '1': /* SIGUSR1 */
  2598. sigusr1_exit();
  2599. break;
  2600. #ifdef QUANTIFY
  2601. case '2': /* SIGUSR2 */
  2602. quantify_save_data(); /* Might take a substantial amount of
  2603. time, but this is a test/debug
  2604. build */
  2605. break;
  2606. #endif
  2607. default:
  2608. erl_exit(ERTS_ABORT_EXIT,
  2609. "signal-dispatcher thread received unknown "
  2610. "signal notification: '%c'\n",
  2611. buf[i]);
  2612. }
  2613. }
  2614. ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
  2615. }
  2616. return NULL;
  2617. }
  2618. static void
  2619. init_smp_sig_notify(void)
  2620. {
  2621. erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
  2622. thr_opts.detached = 1;
  2623. if (pipe(sig_notify_fds) < 0) {
  2624. erl_exit(ERTS_ABORT_EXIT,
  2625. "Failed to create signal-dispatcher pipe: %s (%d)\n",
  2626. erl_errno_id(errno),
  2627. errno);
  2628. }
  2629. /* Start signal handler thread */
  2630. erts_smp_thr_create(&sig_dispatcher_tid,
  2631. signal_dispatcher_thread_func,
  2632. NULL,
  2633. &thr_opts);
  2634. }
  2635. void
  2636. erts_sys_main_thread(void)
  2637. {
  2638. erts_thread_disable_fpe();
  2639. /* Become signal receiver thread... */
  2640. #ifdef ERTS_ENABLE_LOCK_CHECK
  2641. erts_lc_set_thread_name("signal_receiver");
  2642. #endif
  2643. smp_sig_notify(0); /* Notify initialized */
  2644. while (1) {
  2645. /* Wait for a signal to arrive... */
  2646. #ifdef DEBUG
  2647. int res =
  2648. #else
  2649. (void)
  2650. #endif
  2651. select(0, NULL, NULL, NULL, NULL);
  2652. ASSERT(res < 0);
  2653. ASSERT(errno == EINTR);
  2654. }
  2655. }
  2656. #endif /* ERTS_SMP */
  2657. #ifdef ERTS_ENABLE_KERNEL_POLL /* get_value() is currently only used when
  2658. kernel-poll is enabled */
2659. /* get_value() marks a handled argument by
2660. putting NULL in argv */
  2661. static char *
  2662. get_value(char* rest, char** argv, int* ip)
  2663. {
  2664. char *param = argv[*ip]+1;
  2665. argv[*ip] = NULL;
  2666. if (*rest == '\0') {
  2667. char *next = argv[*ip + 1];
  2668. if (next[0] == '-'
  2669. && next[1] == '-'
  2670. && next[2] == '\0') {
  2671. erts_fprintf(stderr, "bad \"%s\" value: \n", param);
  2672. erts_usage();
  2673. }
  2674. (*ip)++;
  2675. argv[*ip] = NULL;
  2676. return next;
  2677. }
  2678. return rest;
  2679. }
  2680. #endif /* ERTS_ENABLE_KERNEL_POLL */
  2681. void
  2682. erl_sys_args(int* argc, char** argv)
  2683. {
  2684. int i, j;
  2685. i = 1;
  2686. ASSERT(argc && argv);
  2687. while (i < *argc) {
  2688. if(argv[i][0] == '-') {
  2689. switch (argv[i][1]) {
  2690. #ifdef ERTS_ENABLE_KERNEL_POLL
  2691. case 'K': {
  2692. char *arg = get_value(argv[i] + 2, argv, &i);
  2693. if (strcmp("true", arg) == 0) {
  2694. erts_use_kernel_poll = 1;
  2695. }
  2696. else if (strcmp("false", arg) == 0) {
  2697. erts_use_kernel_poll = 0;
  2698. }
  2699. else {
  2700. erts_fprintf(stderr, "bad \"K\" value: %s\n", arg);
  2701. erts_usage();
  2702. }
  2703. break;
  2704. }
  2705. #endif
  2706. case '-':
  2707. goto done_parsing;
  2708. default:
  2709. break;
  2710. }
  2711. }
  2712. i++;
  2713. }
  2714. done_parsing:
  2715. #ifdef ERTS_ENABLE_KERNEL_POLL
  2716. if (erts_use_kernel_poll) {
  2717. char no_kp[10];
  2718. size_t no_kp_sz = sizeof(no_kp);
  2719. int res = erts_sys_getenv("ERL_NO_KERNEL_POLL", no_kp, &no_kp_sz);
  2720. if (res > 0
  2721. || (res == 0
  2722. && sys_strcmp("false", no_kp) != 0
  2723. && sys_strcmp("FALSE", no_kp) != 0)) {
  2724. erts_use_kernel_poll = 0;
  2725. }
  2726. }
  2727. #endif
  2728. init_check_io();
  2729. #ifdef ERTS_SMP
  2730. init_smp_sig_notify();
  2731. #endif
  2732. /* Handled arguments have been marked with NULL. Slide arguments
  2733. not handled towards the beginning of argv. */
  2734. for (i = 0, j = 0; i < *argc; i++) {
  2735. if (argv[i])
  2736. argv[j++] = argv[i];
  2737. }
  2738. *argc = j;
  2739. }
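/*
 * Sketch (hypothetical helper, restating the decision made above) of the
 * effective kernel-poll setting: the "-K true|false" emulator flag sets
 * erts_use_kernel_poll, and the ERL_NO_KERNEL_POLL environment variable
 * vetoes it unless its value is "false" or "FALSE" (an unset variable
 * changes nothing).
 */
#if 0
static int example_kernel_poll_enabled(int k_flag_true, const char *no_kp)
{
    if (!k_flag_true)
        return 0;                          /* -K false (or default) */
    if (no_kp != NULL
        && sys_strcmp("false", no_kp) != 0
        && sys_strcmp("FALSE", no_kp) != 0)
        return 0;                          /* vetoed by the environment */
    return 1;
}
#endif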
  2740. #ifdef ERTS_TIMER_THREAD
  2741. /*
  2742. * Interruptible-wait facility: low-level synchronisation state
  2743. * and methods that are implementation dependent.
  2744. *
  2745. * Constraint: Every implementation must define 'struct erts_iwait'
  2746. * with a field 'erts_smp_atomic_t state;'.
  2747. */
  2748. /* values for struct erts_iwait's state field */
  2749. #define IWAIT_WAITING 0
  2750. #define IWAIT_AWAKE 1
  2751. #define IWAIT_INTERRUPT 2
  2752. #if 0 /* XXX: needs feature test in erts/configure.in */
  2753. /*
  2754. * This is an implementation of the interruptible wait facility on
  2755. * top of Linux-specific futexes.
  2756. */
  2757. #include <asm/unistd.h>
  2758. #define FUTEX_WAIT 0
  2759. #define FUTEX_WAKE 1
  2760. static int sys_futex(void *futex, int op, int val, const struct timespec *timeout)
  2761. {
  2762. return syscall(__NR_futex, futex, op, val, timeout);
  2763. }
  2764. struct erts_iwait {
  2765. erts_smp_atomic_t state; /* &state.counter is our futex */
  2766. };
  2767. static void iwait_lowlevel_init(struct erts_iwait *iwait) { /* empty */ }
  2768. static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
  2769. {
  2770. struct timespec timeout;
  2771. int res;
  2772. timeout.tv_sec = delay->tv_sec;
  2773. timeout.tv_nsec = delay->tv_usec * 1000;
  2774. res = sys_futex((void*)&iwait->state.counter, FUTEX_WAIT, IWAIT_WAITING, &timeout);
  2775. if (res < 0 && errno != ETIMEDOUT && errno != EWOULDBLOCK && errno != EINTR)
  2776. perror("FUTEX_WAIT");
  2777. }
  2778. static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
  2779. {
  2780. int res = sys_futex((void*)&iwait->state.counter, FUTEX_WAKE, 1, NULL);
  2781. if (res < 0)
  2782. perror("FUTEX_WAKE");
  2783. }
  2784. #else /* using poll() or select() */
  2785. /*
  2786. * This is an implementation of the interruptible wait facility on
  2787. * top of pipe(), poll() or select(), read(), and write().
  2788. */
  2789. struct erts_iwait {
  2790. erts_smp_atomic_t state;
  2791. int read_fd; /* wait polls and reads this fd */
  2792. int write_fd; /* interrupt writes this fd */
  2793. };
  2794. static void iwait_lowlevel_init(struct erts_iwait *iwait)
  2795. {
  2796. int fds[2];
  2797. if (pipe(fds) < 0) {
  2798. perror("pipe()");
  2799. exit(1);
  2800. }
  2801. iwait->read_fd = fds[0];
  2802. iwait->write_fd = fds[1];
  2803. }
  2804. #if defined(ERTS_USE_POLL)
  2805. #include <sys/poll.h>
  2806. #define PERROR_POLL "poll()"
  2807. static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
  2808. {
  2809. struct pollfd pollfd;
  2810. int timeout;
  2811. pollfd.fd = read_fd;
  2812. pollfd.events = POLLIN;
  2813. pollfd.revents = 0;
  2814. timeout = delay->tv_sec * 1000 + delay->tv_usec / 1000;
  2815. return poll(&pollfd, 1, timeout);
  2816. }
  2817. #else /* !ERTS_USE_POLL */
  2818. #include <sys/select.h>
  2819. #define PERROR_POLL "select()"
  2820. static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
  2821. {
  2822. fd_set readfds;
  2823. FD_ZERO(&readfds);
  2824. FD_SET(read_fd, &readfds);
  2825. return select(read_fd + 1, &readfds, NULL, NULL, delay);
  2826. }
  2827. #endif /* !ERTS_USE_POLL */
  2828. static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
  2829. {
  2830. int res;
  2831. char buf[64];
  2832. res = iwait_lowlevel_poll(iwait->read_fd, delay);
  2833. if (res > 0)
  2834. (void)read(iwait->read_fd, buf, sizeof buf);
  2835. else if (res < 0 && errno != EINTR)
  2836. perror(PERROR_POLL);
  2837. }
  2838. static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
  2839. {
  2840. int res = write(iwait->write_fd, "!", 1);
  2841. if (res < 0)
  2842. perror("write()");
  2843. }
  2844. #endif /* using poll() or select() */
  2845. #if 0 /* not using poll() or select() */
  2846. /*
  2847. * This is an implementation of the interruptible wait facility on
  2848. * top of pthread_cond_timedwait(). This has two problems:
  2849. * 1. pthread_cond_timedwait() requires an absolute time point,
  2850. * so the relative delay must be converted to absolute time.
  2851. * Worse, this breaks if the machine's time is adjusted while
  2852. * we're preparing to wait.
  2853. * 2. Each cond operation requires additional mutex lock/unlock operations.
  2854. *
  2855. * Problem 2 is probably not too bad on Linux (they'll just become
  2856. * relatively cheap futex operations), but problem 1 is the real killer.
  2857. * Only use this implementation if no better alternatives are available!
  2858. */
  2859. struct erts_iwait {
  2860. erts_smp_atomic_t state;
  2861. pthread_cond_t cond;
  2862. pthread_mutex_t mutex;
  2863. };
  2864. static void iwait_lowlevel_init(struct erts_iwait *iwait)
  2865. {
  2866. iwait->cond = (pthread_cond_t) PTHREAD_COND_INITIALIZER;
  2867. iwait->mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
  2868. }
  2869. static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
  2870. {
  2871. struct timeval tmp;
  2872. struct timespec timeout;
  2873. /* Due to pthread_cond_timedwait()'s use of absolute
  2874. time, this must be the real gettimeofday(), _not_
  2875. the "smoothed" one beam/erl_time_sup.c implements. */
  2876. gettimeofday(&tmp, NULL);
  2877. tmp.tv_sec += delay->tv_sec;
  2878. tmp.tv_usec += delay->tv_usec;
  2879. if (tmp.tv_usec >= 1000*1000) {
  2880. tmp.tv_usec -= 1000*1000;
  2881. tmp.tv_sec += 1;
  2882. }
  2883. timeout.tv_sec = tmp.tv_sec;
  2884. timeout.tv_nsec = tmp.tv_usec * 1000;
  2885. pthread_mutex_lock(&iwait->mutex);
  2886. pthread_cond_timedwait(&iwait->cond, &iwait->mutex, &timeout);
  2887. pthread_mutex_unlock(&iwait->mutex);
  2888. }
  2889. static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
  2890. {
  2891. pthread_mutex_lock(&iwait->mutex);
  2892. pthread_cond_signal(&iwait->cond);
  2893. pthread_mutex_unlock(&iwait->mutex);
  2894. }
  2895. #endif /* not using POLL */
  2896. /*
  2897. * Interruptible-wait facility. This is just a wrapper around the
  2898. * low-level synchronisation code, where we maintain our logical
  2899. * state in order to suppress some state transitions.
  2900. */
  2901. struct erts_iwait *erts_iwait_init(void)
  2902. {
  2903. struct erts_iwait *iwait = malloc(sizeof *iwait);
  2904. if (!iwait) {
  2905. perror("malloc");
  2906. exit(1);
  2907. }
  2908. iwait_lowlevel_init(iwait);
  2909. erts_smp_atomic_init(&iwait->state, IWAIT_AWAKE);
  2910. return iwait;
  2911. }
  2912. void erts_iwait_wait(struct erts_iwait *iwait, struct timeval *delay)
  2913. {
  2914. if (erts_smp_atomic_xchg(&iwait->state, IWAIT_WAITING) != IWAIT_INTERRUPT)
  2915. iwait_lowlevel_wait(iwait, delay);
  2916. erts_smp_atomic_set(&iwait->state, IWAIT_AWAKE);
  2917. }
  2918. void erts_iwait_interrupt(struct erts_iwait *iwait)
  2919. {
  2920. if (erts_smp_atomic_xchg(&iwait->state, IWAIT_INTERRUPT) == IWAIT_WAITING)
  2921. iwait_lowlevel_interrupt(iwait);
  2922. }
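/*
 * Sketch (hypothetical caller) of the interruptible-wait facility above:
 * one thread sleeps with a relative timeout while another thread may cut
 * the sleep short with erts_iwait_interrupt(). The IWAIT_* state exchange
 * in erts_iwait_wait()/erts_iwait_interrupt() makes an interrupt that
 * arrives just before the wait turn the wait into a no-op instead of
 * being lost.
 */
#if 0
static void example_iwait_usage(struct erts_iwait *iwait)
{
    struct timeval delay;
    delay.tv_sec = 1;            /* sleep at most one second */
    delay.tv_usec = 0;
    erts_iwait_wait(iwait, &delay);
    /* ... recompute timers, then wait again ... */
}
/* From some other thread: erts_iwait_interrupt(iwait); */
#endif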
  2923. #endif /* ERTS_TIMER_THREAD */