PageRenderTime 65ms CodeModel.GetById 15ms RepoModel.GetById 1ms app.codeStats 0ms

/erts/emulator/beam/erl_bif_info.c

https://github.com/Bwooce/otp
C | 3952 lines | 3363 code | 379 blank | 210 comment | 924 complexity | 65089e3b1aa62b7d4b0936b17d0b4c49 MD5 | raw file
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-2-Clause
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1999-2010. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include "sys.h"
  23. #include "erl_vm.h"
  24. #include "global.h"
  25. #include "erl_process.h"
  26. #include "erl_nmgc.h"
  27. #include "error.h"
  28. #include "erl_driver.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "erl_version.h"
  32. #include "erl_db_util.h"
  33. #include "erl_message.h"
  34. #include "erl_binary.h"
  35. #include "erl_db.h"
  36. #include "erl_instrument.h"
  37. #include "dist.h"
  38. #include "erl_gc.h"
  39. #include "erl_cpu_topology.h"
  40. #ifdef HIPE
  41. #include "hipe_arch.h"
  42. #endif
  43. #ifdef ERTS_ENABLE_LOCK_COUNT
  44. #include "erl_lock_count.h"
  45. #endif
  46. #ifdef VALGRIND
  47. #include <valgrind/valgrind.h>
  48. #include <valgrind/memcheck.h>
  49. #endif
/* Declare a local Eterm AM_<S> holding the atom for the literal token S. */
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)

/* Keep erts_system_version as a global variable for easy access from a core */
/*
 * printf-style template for the emulator banner.  The %bpu/%d/%s holes
 * present depend on the same compile-time switches that are tested in
 * erts_print_system_version() below, which supplies the matching args.
 */
static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
				     " (erts-" ERLANG_VERSION ")"
#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
				     " [no-c-stack-objects]"
#endif
#ifndef OTP_RELEASE
				     " [source]"
#endif
#ifdef ARCH_64
#if HALFWORD_HEAP
				     " [64-bit halfword]"
#else
				     " [64-bit]"
#endif
#endif
#ifdef ERTS_SMP
				     " [smp:%bpu:%bpu]"
#endif
				     " [rq:%bpu]"
#ifdef USE_THREADS
				     " [async-threads:%d]"
#endif
#ifdef HIPE
				     " [hipe]"
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
				     " [kernel-poll:%s]"
#endif
#ifdef HYBRID
				     " [hybrid heap]"
#endif
#ifdef INCREMENTAL
				     " [incremental GC]"
#endif
#ifdef ET_DEBUG
#if ET_DEBUG
				     " [type-assertions]"
#endif
#endif
#ifdef DEBUG
				     " [debug-compiled]"
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
				     " [lock-checking]"
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
				     " [lock-counting]"
#endif
#ifdef PURIFY
				     " [purify-compiled]"
#endif
#ifdef VALGRIND
				     " [valgrind-compiled]"
#endif
				     "\n");
/* Number of elements in a statically-sized array. */
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))

#if defined(HAVE_SOLARIS_SPARC_PERFMON)
/* ioctl request codes for the Solaris/SPARC performance-counter driver. */
# include <sys/ioccom.h>
# define PERFMON_SETPCR _IOW('P', 1, unsigned long long)
# define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
#endif
  113. static Eterm
  114. bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
  115. {
  116. struct erl_off_heap_header* ohh;
  117. Eterm res = NIL;
  118. Eterm tuple;
  119. for (ohh = oh->first; ohh; ohh = ohh->next) {
  120. if (ohh->thing_word == HEADER_PROC_BIN) {
  121. ProcBin* pb = (ProcBin*) ohh;
  122. Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
  123. Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
  124. if (szp)
  125. *szp += 4+2;
  126. if (hpp) {
  127. Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
  128. tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
  129. res = CONS(*hpp + 4, tuple, res);
  130. *hpp += 4+2;
  131. }
  132. }
  133. }
  134. return res;
  135. }
  136. /*
  137. make_monitor_list:
  138. returns a list of records..
  139. -record(erl_monitor, {
  140. type, % MON_ORIGIN or MON_TARGET (1 or 3)
  141. ref,
  142. pid, % Process or nodename
  143. name % registered name or []
  144. }).
  145. */
  146. static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
  147. {
  148. Uint *psz = vpsz;
  149. *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
  150. *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
  151. *psz += 8; /* CONS + 5-tuple */
  152. }
/* State threaded through erts_doforall_monitors() while building the
 * erl_monitor record list: destination process, current heap pointer,
 * accumulated result list and the record-tag atom. */
typedef struct {
    Process *p;
    Eterm *hp;
    Eterm res;
    Eterm tag;
} MonListContext;
  159. static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
  160. {
  161. MonListContext *pmlc = vpmlc;
  162. Eterm tup;
  163. Eterm r = (IS_CONST(mon->ref)
  164. ? mon->ref
  165. : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
  166. Eterm p = (IS_CONST(mon->pid)
  167. ? mon->pid
  168. : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
  169. tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
  170. pmlc->hp += 6;
  171. pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
  172. pmlc->hp += 2;
  173. }
  174. static Eterm
  175. make_monitor_list(Process *p, ErtsMonitor *root)
  176. {
  177. DECL_AM(erl_monitor);
  178. Uint sz = 0;
  179. MonListContext mlc;
  180. erts_doforall_monitors(root, &do_calc_mon_size, &sz);
  181. if (sz == 0) {
  182. return NIL;
  183. }
  184. mlc.p = p;
  185. mlc.hp = HAlloc(p,sz);
  186. mlc.res = NIL;
  187. mlc.tag = AM_erl_monitor;
  188. erts_doforall_monitors(root, &do_make_one_mon_element, &mlc);
  189. return mlc.res;
  190. }
  191. /*
  192. make_link_list:
  193. returns a list of records..
  194. -record(erl_link, {
  195. type, % LINK_NODE or LINK_PID (1 or 3)
  196. pid, % Process or nodename
  197. targets % List of erl_link's or nil
  198. }).
  199. */
  200. static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
  201. {
  202. Uint *psz = vpsz;
  203. *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
  204. if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
  205. /* Node links use this pointer as ref counter... */
  206. erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
  207. }
  208. *psz += 7; /* CONS + 4-tuple */
  209. }
/* State threaded through erts_doforall_links() while building the
 * erl_link record list; same shape as MonListContext above. */
typedef struct {
    Process *p;
    Eterm *hp;
    Eterm res;
    Eterm tag;
} LnkListContext;
/* Build one {Tag, Type, Pid, Targets} erl_link record for 'lnk' and prepend
 * it to the result list in the LnkListContext.  For non-node links the
 * target sub-links are rendered recursively into their own sub-list. */
static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
{
    LnkListContext *pllc = vpllc;
    Eterm tup;
    Eterm old_res, targets = NIL;
    Eterm p = (IS_CONST(lnk->pid)
	       ? lnk->pid
	       : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
    if (lnk->type == LINK_NODE) {
	/* For node links the root field is a reference count, not a tree. */
	targets = make_small(ERTS_LINK_REFC(lnk));
    } else if (ERTS_LINK_ROOT(lnk) != NULL) {
	/* Swap out the accumulator so the recursion builds the sub-list,
	 * then restore the outer list. */
	old_res = pllc->res;
	pllc->res = NIL;
	erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_make_one_lnk_element, vpllc);
	targets = pllc->res;
	pllc->res = old_res;
    }
    tup = TUPLE4(pllc->hp, pllc->tag, make_small(lnk->type), p, targets);
    pllc->hp += 5;			/* 4-tuple */
    pllc->res = CONS(pllc->hp, tup, pllc->res);
    pllc->hp += 2;			/* cons cell */
}
  238. static Eterm
  239. make_link_list(Process *p, ErtsLink *root, Eterm tail)
  240. {
  241. DECL_AM(erl_link);
  242. Uint sz = 0;
  243. LnkListContext llc;
  244. erts_doforall_links(root, &do_calc_lnk_size, &sz);
  245. if (sz == 0) {
  246. return tail;
  247. }
  248. llc.p = p;
  249. llc.hp = HAlloc(p,sz);
  250. llc.res = tail;
  251. llc.tag = AM_erl_link;
  252. erts_doforall_links(root, &do_make_one_lnk_element, &llc);
  253. return llc.res;
  254. }
/* Print the emulator banner to 'to'/'arg', supplying the arguments for the
 * format holes that the compile-time configuration left in the
 * erts_system_version template above.  Returns erts_print()'s result.
 * Note: c_p is not used by this function. */
int
erts_print_system_version(int to, void *arg, Process *c_p)
{
#ifdef ERTS_SMP
    Uint total, online, active;
    (void) erts_schedulers_state(&total, &online, &active, 0);
#endif
    return erts_print(to, arg, erts_system_version
#ifdef ERTS_SMP
		      , total, online, erts_no_run_queues
#else
		      , 1		/* single run queue when non-SMP */
#endif
#ifdef USE_THREADS
		      , erts_async_max_threads
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
		      , erts_use_kernel_poll ? "true" : "false"
#endif
	);
}
/* One collected monitor/link entry: the monitored entity (pid or name)
 * and, for by-name monitors, the node the name lives on. */
typedef struct {
    Eterm entity;
    Eterm node;
} MonitorInfo;

/* Growable array of MonitorInfo entries plus the heap size (in words,
 * kept in 'sz') needed to render them as an Erlang term later on. */
typedef struct {
    MonitorInfo *mi;
    Uint mi_i;
    Uint mi_max;
    int sz;
} MonitorInfoCollection;

#define INIT_MONITOR_INFOS(MIC) do {		\
    (MIC).mi = NULL;				\
    (MIC).mi_i = (MIC).mi_max = 0;		\
    (MIC).sz = 0;				\
} while(0)

/* Capacity growth step for EXTEND_MONITOR_INFOS. */
#define MI_INC 50

/* Ensure there is room for one more entry, growing by MI_INC when full. */
#define EXTEND_MONITOR_INFOS(MICP)					\
do {									\
    if ((MICP)->mi_i >= (MICP)->mi_max) {				\
	(MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP,		\
						(MICP)->mi,		\
						((MICP)->mi_max+MI_INC)	\
						* sizeof(MonitorInfo))	\
		      : erts_alloc(ERTS_ALC_T_TMP,			\
				   MI_INC*sizeof(MonitorInfo)));	\
	(MICP)->mi_max += MI_INC;					\
    }									\
} while (0)

/* Free the entry array (no-op if nothing was ever collected). */
#define DESTROY_MONITOR_INFOS(MIC)			\
do {							\
    if ((MIC).mi != NULL) {				\
	erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi);	\
    }							\
} while (0)
  310. static void collect_one_link(ErtsLink *lnk, void *vmicp)
  311. {
  312. MonitorInfoCollection *micp = vmicp;
  313. EXTEND_MONITOR_INFOS(micp);
  314. if (!(lnk->type == LINK_PID)) {
  315. return;
  316. }
  317. micp->mi[micp->mi_i].entity = lnk->pid;
  318. micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
  319. micp->mi_i++;
  320. }
/* Collect one monitor that the inspected process has set on another entity
 * (MON_ORIGIN entries only).  The entity/node pair is filled so the caller
 * can later render either a {Name, Node} 2-tuple or a plain pid. */
static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
{
    MonitorInfoCollection *micp = vmicp;

    if (mon->type != MON_ORIGIN) {
	return;
    }
    EXTEND_MONITOR_INFOS(micp);
    if (is_atom(mon->pid)) { /* external by name */
	micp->mi[micp->mi_i].entity = mon->name;
	micp->mi[micp->mi_i].node = mon->pid;
	micp->sz += 3; /* need one 2-tuple */
    } else if (is_external_pid(mon->pid)) { /* external by pid */
	micp->mi[micp->mi_i].entity = mon->pid;
	micp->mi[micp->mi_i].node = NIL;
	micp->sz += NC_HEAP_SIZE(mon->pid);
    } else if (!is_nil(mon->name)) { /* internal by name */
	micp->mi[micp->mi_i].entity = mon->name;
	micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
	micp->sz += 3; /* need one 2-tuple */
    } else { /* internal by pid */
	micp->mi[micp->mi_i].entity = mon->pid;
	micp->mi[micp->mi_i].node = NIL;
	/* no additional heap space needed */
    }
    micp->mi_i++;
    micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
}
  348. static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
  349. {
  350. MonitorInfoCollection *micp = vmicp;
  351. if (mon->type != MON_TARGET) {
  352. return;
  353. }
  354. EXTEND_MONITOR_INFOS(micp);
  355. micp->mi[micp->mi_i].node = NIL;
  356. micp->mi[micp->mi_i].entity = mon->pid;
  357. micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
  358. micp->mi_i++;
  359. }
/* Growable array of suspend-monitor pointers collected while walking a
 * process's suspend monitors.  Carries the collecting process and its held
 * locks (needed for the erts_pid2proc() liveness check) plus the heap size
 * 'sz' (in words) required to render the collected entries. */
typedef struct {
    Process *c_p;
    ErtsProcLocks c_p_locks;
    ErtsSuspendMonitor **smi;
    Uint smi_i;
    Uint smi_max;
    int sz;
} ErtsSuspendMonitorInfoCollection;

#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do {	\
    (SMIC).c_p = (CP);						\
    (SMIC).c_p_locks = (CPL);					\
    (SMIC).smi = NULL;						\
    (SMIC).smi_i = (SMIC).smi_max = 0;				\
    (SMIC).sz = 0;						\
} while(0)

/* Capacity growth step for ERTS_EXTEND_SUSPEND_MONITOR_INFOS. */
#define ERTS_SMI_INC 50

/* Ensure room for one more pointer, growing by ERTS_SMI_INC when full. */
#define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP)			\
do {									\
    if ((SMICP)->smi_i >= (SMICP)->smi_max) {				\
	(SMICP)->smi = ((SMICP)->smi					\
			? erts_realloc(ERTS_ALC_T_TMP,			\
				       (SMICP)->smi,			\
				       ((SMICP)->smi_max		\
					+ ERTS_SMI_INC)			\
				       * sizeof(ErtsSuspendMonitor *))	\
			: erts_alloc(ERTS_ALC_T_TMP,			\
				     ERTS_SMI_INC			\
				     * sizeof(ErtsSuspendMonitor *)));	\
	(SMICP)->smi_max += ERTS_SMI_INC;				\
    }									\
} while (0)

/* Free the pointer array (no-op if nothing was ever collected). */
#define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC)		\
do {								\
    if ((SMIC).smi != NULL) {					\
	erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi);		\
    }								\
} while (0)
/* Collect one suspend monitor whose suspendee is still alive.  Any pending
 * count is folded into the active count when the monitor is already active,
 * and heap space is reserved for bignums in case the counts do not fit in
 * a small integer. */
static void
collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
{
    ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
    /* Lock set 0: we only use the lookup as a liveness check. */
    Process *suspendee = erts_pid2proc(smicp->c_p,
				       smicp->c_p_locks,
				       smon->pid,
				       0);
    if (suspendee) { /* suspendee is alive */
	Sint a, p;
	if (smon->active) {
	    smon->active += smon->pending;
	    smon->pending = 0;
	}
	/* Exactly one of active/pending is non-zero after the fold. */
	ASSERT((smon->active && !smon->pending)
	       || (smon->pending && !smon->active));

	ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
	smicp->smi[smicp->smi_i] = smon;
	smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;

	a = (Sint) smon->active;	/* quiet compiler warnings */
	p = (Sint) smon->pending;	/* on 64-bit machines */
	if (!IS_SSMALL(a))
	    smicp->sz += BIG_UINT_HEAP_SIZE;
	if (!IS_SSMALL(p))
	    smicp->sz += BIG_UINT_HEAP_SIZE;
	smicp->smi_i++;
    }
}
  425. static void one_link_size(ErtsLink *lnk, void *vpu)
  426. {
  427. Uint *pu = vpu;
  428. *pu += ERTS_LINK_SIZE*sizeof(Uint);
  429. if(!IS_CONST(lnk->pid))
  430. *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
  431. if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
  432. erts_doforall_links(ERTS_LINK_ROOT(lnk),&one_link_size,vpu);
  433. }
  434. }
  435. static void one_mon_size(ErtsMonitor *mon, void *vpu)
  436. {
  437. Uint *pu = vpu;
  438. *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
  439. if(!IS_CONST(mon->pid))
  440. *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
  441. if(!IS_CONST(mon->ref))
  442. *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
  443. }
/*
 * process_info/[1,2]
 */

/* Failure reasons reported via *fail_type by process_info_list(). */
#define ERTS_PI_FAIL_TYPE_BADARG 0
#define ERTS_PI_FAIL_TYPE_YIELD 1
#define ERTS_PI_FAIL_TYPE_AWAIT_EXIT 2
  450. static ERTS_INLINE ErtsProcLocks
  451. pi_locks(Eterm info)
  452. {
  453. switch (info) {
  454. case am_status:
  455. case am_priority:
  456. return ERTS_PROC_LOCK_STATUS;
  457. case am_links:
  458. case am_monitors:
  459. case am_monitored_by:
  460. case am_suspending:
  461. return ERTS_PROC_LOCK_LINK;
  462. case am_messages:
  463. case am_message_queue_len:
  464. case am_total_heap_size:
  465. return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
  466. case am_memory:
  467. return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_MSGQ;
  468. default:
  469. return ERTS_PROC_LOCK_MAIN;
  470. }
  471. }
/*
 * All valid process_info arguments.
 */
/* NOTE: the order here defines the index used by pi_ix2arg()/pi_arg2ix();
 * the DEBUG check in process_info_init() verifies they stay in sync. */
static Eterm pi_args[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_messages,
    am_message_queue_len,
    am_links,
    am_monitors,
    am_monitored_by,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_heap_size,
    am_stack_size,
    am_memory,
    am_garbage_collection,
    am_group_leader,
    am_reductions,
    am_priority,
    am_trace,
    am_binary,
    am_sequential_trace_token,
    am_catchlevel,
    am_backtrace,
    am_last_calls,
    am_total_heap_size,
    am_suspending,
    am_min_heap_size,
    am_min_bin_vheap_size,
#ifdef HYBRID
    am_message_binary
#endif
};

/* Number of valid process_info item keys. */
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
  510. static ERTS_INLINE Eterm
  511. pi_ix2arg(int ix)
  512. {
  513. if (ix < 0 || ERTS_PI_ARGS <= ix)
  514. return am_undefined;
  515. return pi_args[ix];
  516. }
/* Inverse of pi_ix2arg(): map a process_info key atom to its pi_args[]
 * index, or -1 for an unknown key.  Must stay in sync with pi_args[]
 * (checked by the DEBUG assertion in process_info_init()). */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name:			return 0;
    case am_current_function:			return 1;
    case am_initial_call:			return 2;
    case am_status:				return 3;
    case am_messages:				return 4;
    case am_message_queue_len:			return 5;
    case am_links:				return 6;
    case am_monitors:				return 7;
    case am_monitored_by:			return 8;
    case am_dictionary:				return 9;
    case am_trap_exit:				return 10;
    case am_error_handler:			return 11;
    case am_heap_size:				return 12;
    case am_stack_size:				return 13;
    case am_memory:				return 14;
    case am_garbage_collection:			return 15;
    case am_group_leader:			return 16;
    case am_reductions:				return 17;
    case am_priority:				return 18;
    case am_trace:				return 19;
    case am_binary:				return 20;
    case am_sequential_trace_token:		return 21;
    case am_catchlevel:				return 22;
    case am_backtrace:				return 23;
    case am_last_calls:				return 24;
    case am_total_heap_size:			return 25;
    case am_suspending:				return 26;
    case am_min_heap_size:			return 27;
    case am_min_bin_vheap_size:			return 28;
#ifdef HYBRID
    case am_message_binary:			return 29;
#endif
    default:					return -1;
    }
}
/* Keys returned by process_info/1, in the order they appear in its result. */
static Eterm pi_1_keys[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_message_queue_len,
    am_messages,
    am_links,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_priority,
    am_group_leader,
    am_total_heap_size,
    am_heap_size,
    am_stack_size,
    am_reductions,
    am_garbage_collection,
    am_suspending
};

#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))

/* Constant Erlang list of the keys above; built once by process_info_init(). */
static Eterm pi_1_keys_list;

#if HEAP_ON_C_STACK
/* Static backing store for pi_1_keys_list: 2 heap words per cons cell. */
static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
#endif
  581. static void
  582. process_info_init(void)
  583. {
  584. #if HEAP_ON_C_STACK
  585. Eterm *hp = &pi_1_keys_list_heap[0];
  586. #else
  587. Eterm *hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM,sizeof(Eterm)*2*ERTS_PI_1_NO_OF_KEYS);
  588. #endif
  589. int i;
  590. pi_1_keys_list = NIL;
  591. for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
  592. pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
  593. hp += 2;
  594. }
  595. #ifdef DEBUG
  596. { /* Make sure the process_info argument mappings are consistent */
  597. int ix;
  598. for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
  599. ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
  600. }
  601. }
  602. #endif
  603. }
/* Look up the target process for process_info, taking 'info_locks' on it.
 * May return NULL (no such process) or ERTS_PROC_LOCK_BUSY (caller should
 * yield) in the SMP case. */
static ERTS_INLINE Process *
pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
{
#ifdef ERTS_SMP
    /*
     * If the main lock is needed, we use erts_pid2proc_not_running()
     * instead of erts_pid2proc() for two reasons:
     * * Current function of pid and possibly other information will
     *   have been updated so that process_info() is consistent with an
     *   info-request/info-response signal model.
     * * We avoid blocking the whole scheduler executing the
     *   process that is calling process_info() for a long time
     *   which will happen if pid is currently running.
     *   The caller of process_info() may have to yield if pid
     *   is currently running.
     */
    if (info_locks & ERTS_PROC_LOCK_MAIN)
	return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
					 pid, info_locks);
    else
#endif
	return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
			     pid, info_locks);
}
/* Fetch the value of a single process_info item from rp; defined later in
 * this file.  'always_wrap' controls registered_name's special case. */
BIF_RETTYPE
process_info_aux(Process *BIF_P,
		 Process *rp,
		 Eterm rpid,
		 Eterm item,
		 int always_wrap);

/* Initial (stack) capacity and growth increment for the result-index
 * buffer used by process_info_list(). */
#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
/*
 * Fetch the items whose keys appear in 'list' for process 'pid', returning
 * them as an Erlang list in the same order as the requested keys.
 * Returns am_undefined if the process does not exist, and THE_NON_VALUE
 * with *fail_type set (BADARG / YIELD / AWAIT_EXIT) on failure.
 */
static Eterm
process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
		  int *fail_type)
{
    int want_messages = 0;
    int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
    int *res_elem_ix = &def_res_elem_ix_buf[0];
    int res_elem_ix_ix = -1;
    int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
    Eterm part_res[ERTS_PI_ARGS];
    Eterm res, arg;
    Uint *hp, *hp_end;
    ErtsProcLocks locks = (ErtsProcLocks) 0;
    int res_len, ix;
    Process *rp = NULL;

    *fail_type = ERTS_PI_FAIL_TYPE_BADARG;

    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
	part_res[ix] = THE_NON_VALUE;

    ASSERT(is_list(list));

    /* First pass over the key list: validate each key, accumulate the
     * union of the locks needed, and record the key indices (growing the
     * index buffer off-stack if the list is longer than ERTS_PI_ARGS). */
    while (is_list(list)) {
	Eterm* consp = list_val(list);

	arg = CAR(consp);
	ix = pi_arg2ix(arg);
	if (ix < 0) {
	    res = THE_NON_VALUE;
	    goto done;
	}
	if (arg == am_messages)
	    want_messages = 1;
	locks |= pi_locks(arg);
	res_elem_ix_ix++;
	if (res_elem_ix_ix >= res_elem_ix_sz) {
	    if (res_elem_ix != &def_res_elem_ix_buf[0])
		res_elem_ix =
		    erts_realloc(ERTS_ALC_T_TMP,
				 res_elem_ix,
				 sizeof(int)*(res_elem_ix_sz
					      += ERTS_PI_RES_ELEM_IX_BUF_INC));
	    else {
		/* Move off the stack buffer on first overflow. */
		int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
		int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
						  sizeof(int)*new_res_elem_ix_sz);
		sys_memcpy((void *) new_res_elem_ix,
			   (void *) res_elem_ix,
			   sizeof(int)*res_elem_ix_sz);
		res_elem_ix = new_res_elem_ix;
		res_elem_ix_sz = new_res_elem_ix_sz;
	    }
	}
	res_elem_ix[res_elem_ix_ix] = ix;
	list = CDR(consp);
    }
    if (is_not_nil(list)) {
	/* Improper list. */
	res = THE_NON_VALUE;
	goto done;
    }

    res_len = res_elem_ix_ix+1;
    ASSERT(res_len > 0);

    /* Lock the target with the accumulated lock set (plus STATUS, which
     * is needed for the pending-exit check below). */
    rp = pi_pid2proc(c_p, pid, locks|ERTS_PROC_LOCK_STATUS);
    if (!rp) {
	res = am_undefined;
	goto done;
    }
    else if (rp == ERTS_PROC_LOCK_BUSY) {
	/* Target is currently running; caller must yield and retry. */
	rp = NULL;
	res = THE_NON_VALUE;
	*fail_type = ERTS_PI_FAIL_TYPE_YIELD;
	goto done;
    }
    else if (c_p != rp && ERTS_PROC_PENDING_EXIT(rp)) {
	/* Target is about to exit; caller must await the exit. */
	locks |= ERTS_PROC_LOCK_STATUS;
	res = THE_NON_VALUE;
	*fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
	goto done;
    }
    else if (!(locks & ERTS_PROC_LOCK_STATUS)) {
	/* STATUS was only needed for the checks above; drop it. */
	erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
    }

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (want_messages) {
	ix = pi_arg2ix(am_messages);
	ASSERT(part_res[ix] == THE_NON_VALUE);
	part_res[ix] = process_info_aux(c_p, rp, pid, am_messages, always_wrap);
	ASSERT(part_res[ix] != THE_NON_VALUE);
    }

    /* Fetch the remaining items (duplicated keys share one fetch). */
    for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
	ix = res_elem_ix[res_elem_ix_ix];
	if (part_res[ix] == THE_NON_VALUE) {
	    arg = pi_ix2arg(ix);
	    part_res[ix] = process_info_aux(c_p, rp, pid, arg, always_wrap);
	    ASSERT(part_res[ix] != THE_NON_VALUE);
	}
    }

    hp = HAlloc(c_p, res_len*2);
    hp_end = hp + res_len*2;
    res = NIL;

    /* Cons back-to-front so the result preserves the request order. */
    for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
	ix = res_elem_ix[res_elem_ix_ix];
	ASSERT(part_res[ix] != THE_NON_VALUE);
	/*
	 * If we should ignore the value of registered_name,
	 * its value is nil. For more info, see comment in the
	 * beginning of process_info_aux().
	 */
	if (is_nil(part_res[ix])) {
	    ASSERT(!always_wrap);
	    ASSERT(pi_ix2arg(ix) == am_registered_name);
	}
	else {
	    res = CONS(hp, part_res[ix], res);
	    hp += 2;
	}
    }

    if (!always_wrap) {
	/* Skipped registered_name entries leave unused heap; give it back. */
	HRelease(c_p, hp_end, hp);
    }

 done:

    if (c_p == rp)
	locks &= ~ERTS_PROC_LOCK_MAIN;	/* caller keeps its own main lock */
    if (locks && rp)
	erts_smp_proc_unlock(rp, locks);

    if (res_elem_ix != &def_res_elem_ix_buf[0])
	erts_free(ERTS_ALC_T_TMP, res_elem_ix);

    return res;
}
/* process_info/1: return the default key set (pi_1_keys_list) for a pid.
 * Traps (yields or awaits a pending exit) via the fail_type protocol. */
BIF_RETTYPE process_info_1(BIF_ALIST_1)
{
    Eterm res;
    int fail_type;

    /* An external pid carrying this node's dist entry is reported dead. */
    if (is_external_pid(BIF_ARG_1)
	&& external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
	BIF_RET(am_undefined);

    if (is_not_internal_pid(BIF_ARG_1)
	|| internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
	BIF_ERROR(BIF_P, BADARG);
    }

    res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
    if (is_non_value(res)) {
	switch (fail_type) {
	case ERTS_PI_FAIL_TYPE_BADARG:
	    BIF_ERROR(BIF_P, BADARG);
	case ERTS_PI_FAIL_TYPE_YIELD:
	    /* Target was running; reschedule this BIF call. */
	    ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
	case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
	    /* Target has a pending exit; wait for it to complete. */
	    ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
	default:
	    erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
	}
    }

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
/* process_info/2: fetch a single item, or a list of items, for a process. */
BIF_RETTYPE process_info_2(BIF_ALIST_2)
{
    Eterm res;
    Process *rp;
    Eterm pid = BIF_ARG_1;
    ErtsProcLocks info_locks;
    int fail_type;

    /* An external pid carrying this node's dist entry is reported dead. */
    if (is_external_pid(pid)
	&& external_pid_dist_entry(pid) == erts_this_dist_entry)
	BIF_RET(am_undefined);

    if (is_not_internal_pid(pid)
	|| internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
	BIF_ERROR(BIF_P, BADARG);
    }

    if (is_nil(BIF_ARG_2))
	BIF_RET(NIL);

    if (is_list(BIF_ARG_2)) {
	/* List form: delegate to process_info_list() (always_wrap = 1). */
	res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
	if (is_non_value(res)) {
	    switch (fail_type) {
	    case ERTS_PI_FAIL_TYPE_BADARG:
		BIF_ERROR(BIF_P, BADARG);
	    case ERTS_PI_FAIL_TYPE_YIELD:
		ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
				BIF_ARG_1, BIF_ARG_2);
	    case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
		ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
	    default:
		erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
			 __FILE__, __LINE__);
	    }
	}
	ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
	BIF_RET(res);
    }

    /* Single-item form: validate the key and take only the locks it needs. */
    if (pi_arg2ix(BIF_ARG_2) < 0)
	BIF_ERROR(BIF_P, BADARG);

    info_locks = pi_locks(BIF_ARG_2);

    rp = pi_pid2proc(BIF_P, pid, info_locks|ERTS_PROC_LOCK_STATUS);
    if (!rp)
	res = am_undefined;
    else if (rp == ERTS_PROC_LOCK_BUSY)
	/* Target is currently running; reschedule this BIF call. */
	ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
			BIF_ARG_1, BIF_ARG_2);
    else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
	/* Target has a pending exit; drop locks and await it. */
	erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
	ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
    }
    else {
	/* STATUS was only needed for the pending-exit check. */
	if (!(info_locks & ERTS_PROC_LOCK_STATUS))
	    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
	res = process_info_aux(BIF_P, rp, pid, BIF_ARG_2, 0);
    }
    ASSERT(is_value(res));

#ifdef ERTS_SMP
    if (BIF_P == rp)
	info_locks &= ~ERTS_PROC_LOCK_MAIN;	/* keep our own main lock */
    if (rp && info_locks)
	erts_smp_proc_unlock(rp, info_locks);
#endif

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
  857. Eterm
  858. process_info_aux(Process *BIF_P,
  859. Process *rp,
  860. Eterm rpid,
  861. Eterm item,
  862. int always_wrap)
  863. {
  864. Eterm *hp;
  865. Eterm res = NIL;
  866. ASSERT(rp);
  867. /*
  868. * Q: Why this always_wrap argument?
  869. *
  870. * A: registered_name is strange. If process has no registered name,
  871. * process_info(Pid, registered_name) returns [], and
  872. * the result of process_info(Pid) has no {registered_name, Name}
  873. * tuple in the resulting list. This is inconsistent with all other
  874. * options, but we do not dare to change it.
  875. *
  876. * When process_info/2 is called with a list as second argument,
  877. * registered_name behaves as it should, i.e. a
  878. * {registered_name, []} will appear in the resulting list.
  879. *
  880. * If always_wrap != 0, process_info_aux() always wrap the result
  881. * in a key two tuple.
  882. */
  883. switch (item) {
  884. case am_registered_name:
  885. if (rp->reg != NULL) {
  886. hp = HAlloc(BIF_P, 3);
  887. res = rp->reg->name;
  888. } else {
  889. if (always_wrap) {
  890. hp = HAlloc(BIF_P, 3);
  891. res = NIL;
  892. }
  893. else {
  894. return NIL;
  895. }
  896. }
  897. break;
  898. case am_current_function:
  899. if (rp->current == NULL) {
  900. rp->current = find_function_from_pc(rp->i);
  901. }
  902. if (rp->current == NULL) {
  903. hp = HAlloc(BIF_P, 3);
  904. res = am_undefined;
  905. } else {
  906. BeamInstr* current;
  907. if (rp->current[0] == am_erlang &&
  908. rp->current[1] == am_process_info &&
  909. (rp->current[2] == 1 || rp->current[2] == 2) &&
  910. (current = find_function_from_pc(rp->cp)) != NULL) {
  911. /*
  912. * The current function is erlang:process_info/2,
  913. * which is not the answer that the application want.
  914. * We will use the function pointed into by rp->cp
  915. * instead.
  916. */
  917. rp->current = current;
  918. }
  919. hp = HAlloc(BIF_P, 3+4);
  920. res = TUPLE3(hp, rp->current[0],
  921. rp->current[1], make_small(rp->current[2]));
  922. hp += 4;
  923. }
  924. break;
  925. case am_initial_call:
  926. hp = HAlloc(BIF_P, 3+4);
  927. res = TUPLE3(hp,
  928. rp->initial[INITIAL_MOD],
  929. rp->initial[INITIAL_FUN],
  930. make_small(rp->initial[INITIAL_ARI]));
  931. hp += 4;
  932. break;
  933. case am_status:
  934. res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
  935. ASSERT(res != am_undefined);
  936. hp = HAlloc(BIF_P, 3);
  937. break;
  938. case am_messages: {
  939. ErlMessage* mp;
  940. int n;
  941. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  942. n = rp->msg.len;
  943. if (n == 0 || rp->trace_flags & F_SENSITIVE) {
  944. hp = HAlloc(BIF_P, 3);
  945. } else {
  946. int remove_bad_messages = 0;
  947. struct {
  948. Uint copy_struct_size;
  949. ErlMessage* msgp;
  950. } *mq = erts_alloc(ERTS_ALC_T_TMP, n*sizeof(*mq));
  951. Sint i = 0;
  952. Uint heap_need = 3;
  953. Eterm *hp_end;
  954. for (mp = rp->msg.first; mp; mp = mp->next) {
  955. heap_need += 2;
  956. mq[i].msgp = mp;
  957. if (rp != BIF_P) {
  958. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  959. if (is_value(msg)) {
  960. mq[i].copy_struct_size = (is_immed(msg)
  961. #ifdef HYBRID
  962. || NO_COPY(msg)
  963. #endif
  964. ? 0
  965. : size_object(msg));
  966. }
  967. else if (mq[i].msgp->data.attached) {
  968. mq[i].copy_struct_size
  969. = erts_msg_attached_data_size(mq[i].msgp);
  970. }
  971. else {
  972. /* Bad distribution message; ignore */
  973. remove_bad_messages = 1;
  974. mq[i].copy_struct_size = 0;
  975. }
  976. heap_need += mq[i].copy_struct_size;
  977. }
  978. else {
  979. mq[i].copy_struct_size = 0;
  980. if (mp->data.attached)
  981. heap_need += erts_msg_attached_data_size(mp);
  982. }
  983. i++;
  984. }
  985. hp = HAlloc(BIF_P, heap_need);
  986. hp_end = hp + heap_need;
  987. ASSERT(i == n);
  988. for (i--; i >= 0; i--) {
  989. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  990. if (rp != BIF_P) {
  991. if (is_value(msg)) {
  992. if (mq[i].copy_struct_size)
  993. msg = copy_struct(msg,
  994. mq[i].copy_struct_size,
  995. &hp,
  996. &MSO(BIF_P));
  997. }
  998. else if (mq[i].msgp->data.attached) {
  999. ErlHeapFragment *hfp;
  1000. /*
  1001. * Decode it into a message buffer and attach it
  1002. * to the message instead of the attached external
  1003. * term.
  1004. *
  1005. * Note that we may not pass a process pointer
  1006. * to erts_msg_distext2heap(), since it would then
  1007. * try to alter locks on that process.
  1008. */
  1009. msg = erts_msg_distext2heap(
  1010. NULL, NULL, &hfp, &ERL_MESSAGE_TOKEN(mq[i].msgp),
  1011. mq[i].msgp->data.dist_ext);
  1012. ERL_MESSAGE_TERM(mq[i].msgp) = msg;
  1013. mq[i].msgp->data.heap_frag = hfp;
  1014. if (is_non_value(msg)) {
  1015. ASSERT(!mq[i].msgp->data.heap_frag);
  1016. /* Bad distribution message; ignore */
  1017. remove_bad_messages = 1;
  1018. continue;
  1019. }
  1020. else {
  1021. /* Make our copy of the message */
  1022. ASSERT(size_object(msg) == hfp->used_size);
  1023. msg = copy_struct(msg,
  1024. hfp->used_size,
  1025. &hp,
  1026. &MSO(BIF_P));
  1027. }
  1028. }
  1029. else {
  1030. /* Bad distribution message; ignore */
  1031. remove_bad_messages = 1;
  1032. continue;
  1033. }
  1034. }
  1035. else {
  1036. if (mq[i].msgp->data.attached) {
  1037. /* Decode it on the heap */
  1038. erts_move_msg_attached_data_to_heap(&hp,
  1039. &MSO(BIF_P),
  1040. mq[i].msgp);
  1041. msg = ERL_MESSAGE_TERM(mq[i].msgp);
  1042. ASSERT(!mq[i].msgp->data.attached);
  1043. if (is_non_value(msg)) {
  1044. /* Bad distribution message; ignore */
  1045. remove_bad_messages = 1;
  1046. continue;
  1047. }
  1048. }
  1049. }
  1050. res = CONS(hp, msg, res);
  1051. hp += 2;
  1052. }
  1053. HRelease(BIF_P, hp_end, hp+3);
  1054. erts_free(ERTS_ALC_T_TMP, mq);
  1055. if (remove_bad_messages) {
  1056. ErlMessage **mpp;
  1057. /*
  1058. * We need to remove bad distribution messages from
  1059. * the queue, so that the value returned for
  1060. * 'message_queue_len' is consistent with the value
  1061. * returned for 'messages'.
  1062. */
  1063. mpp = &rp->msg.first;
  1064. mp = rp->msg.first;
  1065. while (mp) {
  1066. if (is_value(ERL_MESSAGE_TERM(mp))) {
  1067. mpp = &mp->next;
  1068. mp = mp->next;
  1069. }
  1070. else {
  1071. ErlMessage* bad_mp = mp;
  1072. ASSERT(!mp->data.attached);
  1073. if (rp->msg.save == &mp->next)
  1074. rp->msg.save = mpp;
  1075. if (rp->msg.last == &mp->next)
  1076. rp->msg.last = mpp;
  1077. *mpp = mp->next;
  1078. mp = mp->next;
  1079. rp->msg.len--;
  1080. free_message(bad_mp);
  1081. }
  1082. }
  1083. }
  1084. }
  1085. break;
  1086. }
  1087. case am_message_queue_len:
  1088. hp = HAlloc(BIF_P, 3);
  1089. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1090. res = make_small(rp->msg.len);
  1091. break;
  1092. case am_links: {
  1093. MonitorInfoCollection mic;
  1094. int i;
  1095. Eterm item;
  1096. INIT_MONITOR_INFOS(mic);
  1097. erts_doforall_links(rp->nlinks,&collect_one_link,&mic);
  1098. hp = HAlloc(BIF_P, 3 + mic.sz);
  1099. res = NIL;
  1100. for (i = 0; i < mic.mi_i; i++) {
  1101. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1102. res = CONS(hp, item, res);
  1103. hp += 2;
  1104. }
  1105. DESTROY_MONITOR_INFOS(mic);
  1106. break;
  1107. }
  1108. case am_monitors: {
  1109. MonitorInfoCollection mic;
  1110. int i;
  1111. INIT_MONITOR_INFOS(mic);
  1112. erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic);
  1113. hp = HAlloc(BIF_P, 3 + mic.sz);
  1114. res = NIL;
  1115. for (i = 0; i < mic.mi_i; i++) {
  1116. if (is_atom(mic.mi[i].entity)) {
  1117. /* Monitor by name.
  1118. * Build {process, {Name, Node}} and cons it.
  1119. */
  1120. Eterm t1, t2;
  1121. t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
  1122. hp += 3;
  1123. t2 = TUPLE2(hp, am_process, t1);
  1124. hp += 3;
  1125. res = CONS(hp, t2, res);
  1126. hp += 2;
  1127. }
  1128. else {
  1129. /* Monitor by pid. Build {process, Pid} and cons it. */
  1130. Eterm t;
  1131. Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1132. t = TUPLE2(hp, am_process, pid);
  1133. hp += 3;
  1134. res = CONS(hp, t, res);
  1135. hp += 2;
  1136. }
  1137. }
  1138. DESTROY_MONITOR_INFOS(mic);
  1139. break;
  1140. }
  1141. case am_monitored_by: {
  1142. MonitorInfoCollection mic;
  1143. int i;
  1144. Eterm item;
  1145. INIT_MONITOR_INFOS(mic);
  1146. erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic);
  1147. hp = HAlloc(BIF_P, 3 + mic.sz);
  1148. res = NIL;
  1149. for (i = 0; i < mic.mi_i; ++i) {
  1150. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1151. res = CONS(hp, item, res);
  1152. hp += 2;
  1153. }
  1154. DESTROY_MONITOR_INFOS(mic);
  1155. break;
  1156. }
  1157. case am_suspending: {
  1158. ErtsSuspendMonitorInfoCollection smic;
  1159. int i;
  1160. Eterm item;
  1161. #ifdef DEBUG
  1162. Eterm *hp_end;
  1163. #endif
  1164. ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
  1165. BIF_P,
  1166. (BIF_P == rp
  1167. ? ERTS_PROC_LOCK_MAIN
  1168. : 0) | ERTS_PROC_LOCK_LINK);
  1169. erts_doforall_suspend_monitors(rp->suspend_monitors,
  1170. &collect_one_suspend_monitor,
  1171. &smic);
  1172. hp = HAlloc(BIF_P, 3 + smic.sz);
  1173. #ifdef DEBUG
  1174. hp_end = hp + smic.sz;
  1175. #endif
  1176. res = NIL;
  1177. for (i = 0; i < smic.smi_i; i++) {
  1178. Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
  1179. Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
  1180. Eterm active;
  1181. Eterm pending;
  1182. if (IS_SSMALL(a))
  1183. active = make_small(a);
  1184. else {
  1185. active = small_to_big(a, hp);
  1186. hp += BIG_UINT_HEAP_SIZE;
  1187. }
  1188. if (IS_SSMALL(p))
  1189. pending = make_small(p);
  1190. else {
  1191. pending = small_to_big(p, hp);
  1192. hp += BIG_UINT_HEAP_SIZE;
  1193. }
  1194. item = TUPLE3(hp, smic.smi[i]->pid, active, pending);
  1195. hp += 4;
  1196. res = CONS(hp, item, res);
  1197. hp += 2;
  1198. }
  1199. ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
  1200. ASSERT(hp == hp_end);
  1201. break;
  1202. }
  1203. case am_dictionary:
  1204. if (rp->trace_flags & F_SENSITIVE) {
  1205. res = NIL;
  1206. } else {
  1207. res = erts_dictionary_copy(BIF_P, rp->dictionary);
  1208. }
  1209. hp = HAlloc(BIF_P, 3);
  1210. break;
  1211. case am_trap_exit:
  1212. hp = HAlloc(BIF_P, 3);
  1213. if (rp->flags & F_TRAPEXIT)
  1214. res = am_true;
  1215. else
  1216. res = am_false;
  1217. break;
  1218. case am_error_handler:
  1219. hp = HAlloc(BIF_P, 3);
  1220. res = erts_proc_get_error_handler(BIF_P);
  1221. break;
  1222. case am_heap_size: {
  1223. Uint hsz = 3;
  1224. (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
  1225. hp = HAlloc(BIF_P, hsz);
  1226. res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
  1227. break;
  1228. }
  1229. case am_fullsweep_after: {
  1230. Uint hsz = 3;
  1231. (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
  1232. hp = HAlloc(BIF_P, hsz);
  1233. res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
  1234. break;
  1235. }
  1236. case am_min_heap_size: {
  1237. Uint hsz = 3;
  1238. (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
  1239. hp = HAlloc(BIF_P, hsz);
  1240. res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
  1241. break;
  1242. }
  1243. case am_min_bin_vheap_size: {
  1244. Uint hsz = 3;
  1245. (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
  1246. hp = HAlloc(BIF_P, hsz);
  1247. res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
  1248. break;
  1249. }
  1250. case am_total_heap_size: {
  1251. ErlMessage *mp;
  1252. Uint total_heap_size;
  1253. Uint hsz = 3;
  1254. total_heap_size = rp->heap_sz;
  1255. if (rp->old_hend && rp->old_heap)
  1256. total_heap_size += rp->old_hend - rp->old_heap;
  1257. total_heap_size += rp->mbuf_sz;
  1258. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1259. for (mp = rp->msg.first; mp; mp = mp->next)
  1260. if (mp->data.attached)
  1261. total_heap_size += erts_msg_attached_data_size(mp);
  1262. (void) erts_bld_uint(NULL, &hsz, total_heap_size);
  1263. hp = HAlloc(BIF_P, hsz);
  1264. res = erts_bld_uint(&hp, NULL, total_heap_size);
  1265. break;
  1266. }
  1267. case am_stack_size: {
  1268. Uint stack_size = STACK_START(rp) - rp->stop;
  1269. Uint hsz = 3;
  1270. (void) erts_bld_uint(NULL, &hsz, stack_size);
  1271. hp = HAlloc(BIF_P, hsz);
  1272. res = erts_bld_uint(&hp, NULL, stack_size);
  1273. break;
  1274. }
  1275. case am_memory: { /* Memory consumed in bytes */
  1276. ErlMessage *mp;
  1277. Uint size = 0;
  1278. Uint hsz = 3;
  1279. struct saved_calls *scb;
  1280. size += sizeof(Process);
  1281. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1282. erts_doforall_links(rp->nlinks, &one_link_size, &size);
  1283. erts_doforall_monitors(rp->monitors, &one_mon_size, &size);
  1284. size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
  1285. if (rp->old_hend && rp->old_heap)
  1286. size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
  1287. size += rp->msg.len * sizeof(ErlMessage);
  1288. for (mp = rp->msg.first; mp; mp = mp->next)
  1289. if (mp->data.attached)
  1290. size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
  1291. if (rp->arg_reg != rp->def_arg_reg) {
  1292. size += rp->arity * sizeof(rp->arg_reg[0]);
  1293. }
  1294. if (rp->psd)
  1295. size += sizeof(ErtsPSD);
  1296. scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
  1297. if (scb) {
  1298. size += (sizeof(struct saved_calls)
  1299. + (scb->len-1) * sizeof(scb->ct[0]));
  1300. }
  1301. size += erts_dicts_mem_size(rp);
  1302. (void) erts_bld_uint(NULL, &hsz, size);
  1303. hp = HAlloc(BIF_P, hsz);
  1304. res = erts_bld_uint(&hp, NULL, size);
  1305. break;
  1306. }
  1307. case am_garbage_collection: {
  1308. DECL_AM(minor_gcs);
  1309. Eterm t;
  1310. hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3); /* last "3" is for outside tuple */
  1311. t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
  1312. res = CONS(hp, t, NIL); hp += 2;
  1313. t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
  1314. res = CONS(hp, t, res); hp += 2;
  1315. t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
  1316. res = CONS(hp, t, res); hp += 2;
  1317. t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
  1318. res = CONS(hp, t, res); hp += 2;
  1319. break;
  1320. }
  1321. case am_group_leader: {
  1322. int sz = NC_HEAP_SIZE(rp->group_leader);
  1323. hp = HAlloc(BIF_P, 3 + sz);
  1324. res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
  1325. break;
  1326. }
  1327. case am_reductions: {
  1328. Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
  1329. Uint hsz = 3;
  1330. (void) erts_bld_uint(NULL, &hsz, reds);
  1331. hp = HAlloc(BIF_P, hsz);
  1332. res = erts_bld_uint(&hp, NULL, reds);
  1333. break;
  1334. }
  1335. case am_priority:
  1336. hp = HAlloc(BIF_P, 3);
  1337. res = erts_get_process_priority(rp);
  1338. break;
  1339. case am_trace:
  1340. hp = HAlloc(BIF_P, 3);
  1341. res = make_small(rp->trace_flags & TRACEE_FLAGS);
  1342. break;
  1343. case am_binary: {
  1344. Uint sz = 3;
  1345. (void) bld_bin_list(NULL, &sz, &MSO(rp));
  1346. hp = HAlloc(BIF_P, sz);
  1347. res = bld_bin_list(&hp, NULL, &MSO(rp));
  1348. break;
  1349. }
  1350. #ifdef HYBRID
  1351. case am_message_binary: {
  1352. Uint sz = 3;
  1353. (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso);
  1354. hp = HAlloc(BIF_P, sz);
  1355. res = bld_bin_list(&hp, NULL, erts_global_offheap.mso);
  1356. break;
  1357. }
  1358. #endif
  1359. case am_sequential_trace_token:
  1360. res = copy_object(rp->seq_trace_token, BIF_P);
  1361. hp = HAlloc(BIF_P, 3);
  1362. break;
  1363. case am_catchlevel:
  1364. hp = HAlloc(BIF_P, 3);
  1365. res = make_small(catchlevel(BIF_P));
  1366. break;
  1367. case am_backtrace: {
  1368. erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
  1369. erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
  1370. res = new_binary(BIF_P, (byte *) dsbufp->str, (int) dsbufp->str_len);
  1371. erts_destroy_tmp_dsbuf(dsbufp);
  1372. hp = HAlloc(BIF_P, 3);
  1373. break;
  1374. }
  1375. case am_last_calls: {
  1376. struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P);
  1377. if (!scb) {
  1378. hp = HAlloc(BIF_P, 3);
  1379. res = am_false;
  1380. } else {
  1381. /*
  1382. * One cons cell and a 3-struct, and a 2-tuple.
  1383. * Might be less than that, if there are sends, receives or timeouts,
  1384. * so we must do a HRelease() to avoid creating holes.
  1385. */
  1386. Uint needed = scb->n*(2+4) + 3;
  1387. Eterm* limit;
  1388. Eterm term, list;
  1389. int i, j;
  1390. hp = HAlloc(BIF_P, needed);
  1391. limit = hp + needed;
  1392. list = NIL;
  1393. for (i = 0; i < scb->n; i++) {
  1394. j = scb->cur - i - 1;
  1395. if (j < 0)
  1396. j += scb->len;
  1397. if (scb->ct[j] == &exp_send)
  1398. term = am_send;
  1399. else if (scb->ct[j] == &exp_receive)
  1400. term = am_receive;
  1401. else if (scb->ct[j] == &exp_timeout)
  1402. term = am_timeout;
  1403. else {
  1404. term = TUPLE3(hp,
  1405. scb->ct[j]->code[0],
  1406. scb->ct[j]->code[1],
  1407. make_small(scb->ct[j]->code[2]));
  1408. hp += 4;
  1409. }
  1410. list = CONS(hp, term, list);
  1411. hp += 2;
  1412. }
  1413. res = list;
  1414. res = TUPLE2(hp, item, res);
  1415. hp += 3;
  1416. HRelease(BIF_P,limit,hp);
  1417. return res;
  1418. }
  1419. break;
  1420. }
  1421. default:
  1422. return THE_NON_VALUE; /* will produce badarg */
  1423. }
  1424. return TUPLE2(hp, item, res);
  1425. }
  1426. #undef MI_INC
  1427. #if defined(VALGRIND)
  1428. static int check_if_xml(void)
  1429. {
  1430. char buf[1];
  1431. size_t bufsz = sizeof(buf);
  1432. return erts_sys_getenv("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
  1433. }
  1434. #else
  1435. #define check_if_xml() 0
  1436. #endif
/*
 * This function takes care of calls to erlang:system_info/1 when the argument
 * is a tuple, e.g. {allocator, A}, {wordsize, internal}, {cpu_topology, How}.
 *
 * Returns the requested info term, prepares a trap (cpu_topology
 * formatting), or falls through to a badarg error for unknown selectors
 * or wrong tuple arities.
 */
static BIF_RETTYPE
info_1_tuple(Process* BIF_P,	/* Pointer to current process. */
	     Eterm* tp,		/* Pointer to first element in tuple */
	     int arity)		/* Arity of tuple (untagged). */
{
    Eterm ret;
    Eterm sel;

    sel = *tp++;  /* First element selects the info category; tp now
		   * points at the remaining element(s). */

    if (sel == am_allocator_sizes && arity == 2) {
	return erts_allocator_info_term(BIF_P, *tp, 1);
    } else if (sel == am_wordsize && arity == 2) {
	if (tp[0] == am_internal) {
	    /* Size of a heap word (Eterm). */
	    return make_small(sizeof(Eterm));
	}
	if (tp[0] == am_external) {
	    /* Size of a machine word (UWord). */
	    return make_small(sizeof(UWord));
	}
	goto badarg;
    } else if (sel == am_allocated) {
	if (arity == 2) {
	    /* {allocated, FileName}: dump the instrumented memory map. */
	    Eterm res = THE_NON_VALUE;
	    char *buf;
	    int len = is_string(*tp);
	    if (len <= 0)
		return res;  /* not a proper string; THE_NON_VALUE signals
			      * badarg to the caller */
	    buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
	    if (intlist_to_buf(*tp, buf, len) != len)
		erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
	    buf[len] = '\0';
	    res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    if (is_non_value(res))
		goto badarg;
	    return res;
	}
	else if (arity == 3 && tp[0] == am_status) {
	    /* {allocated, status, What}: What is either an atom selector
	     * or a file name (string) to dump statistics to. */
	    if (is_atom(tp[1]))
		return erts_instr_get_stat(BIF_P, tp[1], 1);
	    else {
		Eterm res = THE_NON_VALUE;
		char *buf;
		int len = is_string(tp[1]);
		if (len <= 0)
		    return res;
		buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
		if (intlist_to_buf(tp[1], buf, len) != len)
		    erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
		buf[len] = '\0';
		res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		if (is_non_value(res))
		    goto badarg;
		return res;
	    }
	}
	else
	    goto badarg;
    } else if (sel == am_allocator && arity == 2) {
	return erts_allocator_info_term(BIF_P, *tp, 0);
    } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
	/* Raw (unformatted) topology term; mainly for debugging. */
	return erts_get_cpu_topology_term(BIF_P, *tp);
    } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
	Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
	if (res == THE_NON_VALUE)
	    goto badarg;
	/* Formatting is done in Erlang code; trap out with the raw term. */
	ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
	return ret;
#if defined(PURIFY) || defined(VALGRIND)
    } else if (ERTS_IS_ATOM_STR("error_checker", sel)
#if defined(PURIFY)
	       || sel == am_purify
#elif defined(VALGRIND)
	       || ERTS_IS_ATOM_STR("valgrind", sel)
#endif
	) {
	/* Interface to the external memory-error checker the emulator
	 * was built to run under (Purify or Valgrind). */
	if (*tp == am_memory) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
#elif defined(VALGRIND)
	    VALGRIND_DO_LEAK_CHECK;
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_fd) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
#elif defined(VALGRIND)
	    /* Not present in valgrind... */
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_running) {
#if defined(PURIFY)
	    BIF_RET(purify_is_running() ? am_true : am_false);
#elif defined(VALGRIND)
	    BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
#endif
	} else if (is_list(*tp)) {
	    /* An iolist: print it into the error checker's log. */
#if defined(PURIFY)
#define ERTS_ERROR_CHECKER_PRINTF purify_printf
#elif defined(VALGRIND)
#define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#endif
	    int buf_size = 8*1024; /* Try with 8KB first */
	    char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
	    int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
	    if (r < 0) {
		/* Did not fit; measure the iolist and retry with an
		 * exactly sized buffer. */
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		buf_size = io_list_len(*tp);
		if (buf_size < 0)
		    goto badarg;
		buf_size++;
		buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
		r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
		ASSERT(r == buf_size - 1);
	    }
	    /* r is the number of unused bytes, so buf_size - 1 - r is
	     * the length actually written; terminate there. */
	    buf[buf_size - 1 - r] = '\0';
	    if (check_if_xml()) {
		ERTS_ERROR_CHECKER_PRINTF("<erlang_info_log>"
					  "%s</erlang_info_log>\n", buf);
	    } else {
		ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
	    }
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
	}
#endif
#ifdef QUANTIFY
    } else if (sel == am_quantify) {
	/* Control the Quantify profiler the emulator was built under. */
	if (*tp == am_clear) {
	    quantify_clear_data();
	    BIF_RET(am_true);
	} else if (*tp == am_start) {
	    quantify_start_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_stop) {
	    quantify_stop_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_running) {
	    BIF_RET(quantify_is_running() ? am_true : am_false);
	}
#endif
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
    } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
	/* Program the UltraSPARC performance control register via the
	 * Solaris /dev/perfmon driver; am_false on any OS-level failure. */
	unsigned long long tmp;
	int fd;
	int rc;
	if (arity != 2 || !is_small(*tp)) {
	    goto badarg;
	}
	tmp = signed_val(*tp);
	if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
	    BIF_RET(am_false);
	}
	rc = ioctl(fd, PERFMON_SETPCR, &tmp);
	close(fd);
	if (rc < 0) {
	    BIF_RET(am_false);
	}
	BIF_RET(am_true);
#endif
    }

 badarg:
    ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
    return ret;
}
  1606. #define INFO_DSBUF_INC_SZ 256
  1607. static erts_dsprintf_buf_t *
  1608. grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1609. {
  1610. size_t size;
  1611. size_t free_size = dsbufp->size - dsbufp->str_len;
  1612. ASSERT(dsbufp);
  1613. if (need <= free_size)
  1614. return dsbufp;
  1615. size = need - free_size + INFO_DSBUF_INC_SZ;
  1616. size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
  1617. size += dsbufp->size;
  1618. ASSERT(dsbufp->str_len + need <= size);
  1619. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
  1620. (void *) dsbufp->str,
  1621. size);
  1622. dsbufp->size = size;
  1623. return dsbufp;
  1624. }
  1625. static erts_dsprintf_buf_t *
  1626. erts_create_info_dsbuf(Uint size)
  1627. {
  1628. Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
  1629. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
  1630. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
  1631. sizeof(erts_dsprintf_buf_t));
  1632. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  1633. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
  1634. dsbufp->str[0] = '\0';
  1635. dsbufp->size = init_size;
  1636. return dsbufp;
  1637. }
  1638. static void
  1639. erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
  1640. {
  1641. if (dsbufp->str)
  1642. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
  1643. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
  1644. }
/*
 * Build a term describing the C compiler the emulator was built with:
 *   {gnuc, Major}                          (gcc, version macros missing)
 *   {gnuc, {Major, Minor}}                 (gcc, no patchlevel macro)
 *   {gnuc, {Major, Minor, PatchLevel}}     (modern gcc)
 *   {msc, _MSC_VER}                        (Microsoft C)
 *   {undefined, undefined}                 (anything else)
 *
 * Uses the usual two-pass builder convention: called once with szp to
 * compute the heap need, then with hpp to actually build the term.
 *
 * NOTE: the #if blocks below splice fragments of a *single* call
 * expression together; the unbalanced-looking parentheses match up
 * across the preprocessor branches.
 */
static Eterm
c_compiler_used(Eterm **hpp, Uint *szp)
{
#if defined(__GNUC__)
#  if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#    define ERTS_GNUC_VSN_NUMS 3
#  elif defined(__GNUC_MINOR__)
#    define ERTS_GNUC_VSN_NUMS 2
#  else
#    define ERTS_GNUC_VSN_NUMS 1
#  endif
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "gnuc"),
#if ERTS_GNUC_VSN_NUMS > 1
			  /* open inner version tuple (closed below) */
			  erts_bld_tuple(hpp,
					 szp,
					 ERTS_GNUC_VSN_NUMS,
#endif
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC__)
#ifdef __GNUC_MINOR__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_MINOR__)
#ifdef __GNUC_PATCHLEVEL__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_PATCHLEVEL__)
#endif
#endif
#if ERTS_GNUC_VSN_NUMS > 1
			      )	/* close inner version tuple */
#endif
	);
#elif defined(_MSC_VER)
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "msc"),
			  erts_bld_uint(hpp, szp, (Uint) _MSC_VER));
#else
    /* Compiler not recognized at build time. */
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  am_undefined,
			  am_undefined);
#endif
}
  1695. static int is_snif_term(Eterm module_atom) {
  1696. int i;
  1697. Atom *a = atom_tab(atom_val(module_atom));
  1698. char *aname = (char *) a->name;
  1699. /* if a->name has a '.' then the bif (snif) is bogus i.e a package */
  1700. for (i = 0; i < a->len; i++) {
  1701. if (aname[i] == '.')
  1702. return 0;
  1703. }
  1704. return 1;
  1705. }
  1706. static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
  1707. Eterm tup;
  1708. tup = erts_bld_tuple(hpp, szp, 3, bif_table[ix].module, bif_table[ix].name, make_small(bif_table[ix].arity));
  1709. res = erts_bld_cons( hpp, szp, tup, res);
  1710. return res;
  1711. }
  1712. static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
  1713. int i;
  1714. for (i = 0; i < BIF_SIZE; i++) {
  1715. if (is_snif_term(bif_table[i].module)) {
  1716. res = build_snif_term(hpp, szp, i, res);
  1717. }
  1718. }
  1719. return res;
  1720. }
  1721. BIF_RETTYPE system_info_1(BIF_ALIST_1)
  1722. {
  1723. Eterm res;
  1724. Eterm* hp;
  1725. Eterm val;
  1726. int i;
  1727. if (is_tuple(BIF_ARG_1)) {
  1728. Eterm* tp = tuple_val(BIF_ARG_1);
  1729. Uint arity = *tp++;
  1730. return info_1_tuple(BIF_P, tp, arityval(arity));
  1731. } else if (BIF_ARG_1 == am_scheduler_id) {
  1732. #ifdef ERTS_SMP
  1733. ASSERT(BIF_P->scheduler_data);
  1734. BIF_RET(make_small(BIF_P->scheduler_data->no));
  1735. #else
  1736. BIF_RET(make_small(1));
  1737. #endif
  1738. } else if (BIF_ARG_1 == am_compat_rel) {
  1739. ASSERT(erts_compat_rel > 0);
  1740. BIF_RET(make_small(erts_compat_rel));
  1741. } else if (BIF_ARG_1 == am_multi_scheduling) {
  1742. #ifndef ERTS_SMP
  1743. BIF_RET(am_disabled);
  1744. #else
  1745. if (erts_no_schedulers == 1)
  1746. BIF_RET(am_disabled);
  1747. else {
  1748. BIF_RET(erts_is_multi_scheduling_blocked()
  1749. ? am_blocked
  1750. : am_enabled);
  1751. }
  1752. #endif
  1753. } else if (BIF_ARG_1 == am_build_type) {
  1754. #if defined(DEBUG)
  1755. ERTS_DECL_AM(debug);
  1756. BIF_RET(AM_debug);
  1757. #elif defined(PURIFY)
  1758. ERTS_DECL_AM(purify);
  1759. BIF_RET(AM_purify);
  1760. #elif defined(QUANTIFY)
  1761. ERTS_DECL_AM(quantify);
  1762. BIF_RET(AM_quantify);
  1763. #elif defined(PURECOV)
  1764. ERTS_DECL_AM(purecov);
  1765. BIF_RET(AM_purecov);
  1766. #elif defined(ERTS_GCOV)
  1767. ERTS_DECL_AM(gcov);
  1768. BIF_RET(AM_gcov);
  1769. #elif defined(VALGRIND)
  1770. ERTS_DECL_AM(valgrind);
  1771. BIF_RET(AM_valgrind);
  1772. #elif defined(GPROF)
  1773. ERTS_DECL_AM(gprof);
  1774. BIF_RET(AM_gprof);
  1775. #elif defined(ERTS_ENABLE_LOCK_COUNT)
  1776. ERTS_DECL_AM(lcnt);
  1777. BIF_RET(AM_lcnt);
  1778. #else
  1779. BIF_RET(am_opt);
  1780. #endif
  1781. BIF_RET(res);
  1782. } else if (BIF_ARG_1 == am_allocated_areas) {
  1783. res = erts_allocated_areas(NULL, NULL, BIF_P);
  1784. BIF_RET(res);
  1785. } else if (BIF_ARG_1 == am_allocated) {
  1786. BIF_RET(erts_instr_get_memory_map(BIF_P));
  1787. } else if (BIF_ARG_1 == am_hipe_architecture) {
  1788. #if defined(HIPE)
  1789. BIF_RET(hipe_arch_name);
  1790. #else
  1791. BIF_RET(am_undefined);
  1792. #endif
  1793. } else if (BIF_ARG_1 == am_trace_control_word) {
  1794. BIF_RET(db_get_trace_control_word_0(BIF_P));
  1795. } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
  1796. BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
  1797. } else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
  1798. BIF_RET((erts_ets_always_compress) ? am_true : am_false);
  1799. } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
  1800. Uint size = 0;
  1801. Uint *szp;
  1802. szp = &size;
  1803. build_snifs_term(NULL, szp, NIL);
  1804. hp = HAlloc(BIF_P, size);
  1805. res = build_snifs_term(&hp, NULL, NIL);
  1806. BIF_RET(res);
  1807. } else if (BIF_ARG_1 == am_sequential_tracer) {
  1808. val = erts_get_system_seq_tracer();
  1809. ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false)
  1810. hp = HAlloc(BIF_P, 3);
  1811. res = TUPLE2(hp, am_sequential_tracer, val);
  1812. BIF_RET(res);
  1813. } else if (BIF_ARG_1 == am_garbage_collection){
  1814. Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
  1815. Eterm tup;
  1816. hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2);
  1817. tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
  1818. res = CONS(hp, tup, NIL); hp += 2;
  1819. tup = TUPLE2(hp, am_min_heap_size, make_small(H_MIN_SIZE)); hp += 3;
  1820. res = CONS(hp, tup, res); hp += 2;
  1821. tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
  1822. res = CONS(hp, tup, res); hp += 2;
  1823. BIF_RET(res);
  1824. } else if (BIF_ARG_1 == am_fullsweep_after){
  1825. Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
  1826. hp = HAlloc(BIF_P, 3);
  1827. res = TUPLE2(hp, am_fullsweep_after, make_small(val));
  1828. BIF_RET(res);
  1829. } else if (BIF_ARG_1 == am_min_heap_size) {
  1830. hp = HAlloc(BIF_P, 3);
  1831. res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
  1832. BIF_RET(res);
  1833. } else if (BIF_ARG_1 == am_min_bin_vheap_size) {
  1834. hp = HAlloc(BIF_P, 3);
  1835. res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
  1836. BIF_RET(res);
  1837. } else if (BIF_ARG_1 == am_process_count) {
  1838. BIF_RET(make_small(erts_process_count()));
  1839. } else if (BIF_ARG_1 == am_process_limit) {
  1840. BIF_RET(make_small(erts_max_processes));
  1841. } else if (BIF_ARG_1 == am_info
  1842. || BIF_ARG_1 == am_procs
  1843. || BIF_ARG_1 == am_loaded
  1844. || BIF_ARG_1 == am_dist) {
  1845. erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
  1846. /* Need to be the only thread running... */
  1847. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  1848. erts_smp_block_system(0);
  1849. if (BIF_ARG_1 == am_info)
  1850. info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1851. else if (BIF_ARG_1 == am_procs)
  1852. process_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1853. else if (BIF_ARG_1 == am_loaded)
  1854. loaded(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1855. else
  1856. distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1857. erts_smp_release_system();
  1858. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  1859. ASSERT(dsbufp && dsbufp->str);
  1860. res = new_binary(BIF_P, (byte *) dsbufp->str, (int) dsbufp->str_len);
  1861. erts_destroy_info_dsbuf(dsbufp);
  1862. BIF_RET(res);
  1863. } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
  1864. DistEntry *dep;
  1865. i = 0;
  1866. /* Need to be the only thread running... */
  1867. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  1868. erts_smp_block_system(0);
  1869. for (dep = erts_visible_dist_entries; dep; dep = dep->next)
  1870. ++i;
  1871. for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
  1872. ++i;
  1873. hp = HAlloc(BIF_P,i*(3+2));
  1874. res = NIL;
  1875. for (dep = erts_hidden_dist_entries; dep; dep = dep->next) {
  1876. Eterm tpl;
  1877. ASSERT(is_immed(dep->cid));
  1878. tpl = TUPLE2(hp, dep->sysname, dep->cid);
  1879. hp +=3;
  1880. res = CONS(hp, tpl, res);
  1881. hp += 2;
  1882. }
  1883. for (dep = erts_visible_dist_entries; dep; dep = dep->next) {
  1884. Eterm tpl;
  1885. ASSERT(is_immed(dep->cid));
  1886. tpl = TUPLE2(hp, dep->sysname, dep->cid);
  1887. hp +=3;
  1888. res = CONS(hp, tpl, res);
  1889. hp += 2;
  1890. }
  1891. erts_smp_release_system();
  1892. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  1893. BIF_RET(res);
  1894. } else if (BIF_ARG_1 == am_system_version) {
  1895. erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
  1896. erts_print_system_version(ERTS_PRINT_DSBUF, (void *) dsbufp, BIF_P);
  1897. hp = HAlloc(BIF_P, dsbufp->str_len*2);
  1898. res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
  1899. erts_destroy_tmp_dsbuf(dsbufp);
  1900. BIF_RET(res);
  1901. } else if (BIF_ARG_1 == am_system_architecture) {
  1902. hp = HAlloc(BIF_P, 2*(sizeof(ERLANG_ARCHITECTURE)-1));
  1903. BIF_RET(buf_to_intlist(&hp,
  1904. ERLANG_ARCHITECTURE,
  1905. sizeof(ERLANG_ARCHITECTURE)-1,
  1906. NIL));
  1907. }
  1908. else if (BIF_ARG_1 == am_memory_types) {
  1909. return erts_instr_get_type_info(BIF_P);
  1910. }
  1911. else if (BIF_ARG_1 == am_os_type) {
  1912. Eterm type = am_atom_put(os_type, strlen(os_type));
  1913. Eterm flav, tup;
  1914. char *buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
  1915. os_flavor(buf, 1024);
  1916. flav = am_atom_put(buf, strlen(buf));
  1917. hp = HAlloc(BIF_P, 3);
  1918. tup = TUPLE2(hp, type, flav);
  1919. erts_free(ERTS_ALC_T_TMP, (void *) buf);
  1920. BIF_RET(tup);
  1921. }
  1922. else if (BIF_ARG_1 == am_allocator) {
  1923. BIF_RET(erts_allocator_options((void *) BIF_P));
  1924. }
  1925. else if (BIF_ARG_1 == am_thread_pool_size) {
  1926. #ifdef USE_THREADS
  1927. extern int erts_async_max_threads;
  1928. #endif
  1929. int n;
  1930. #ifdef USE_THREADS
  1931. n = erts_async_max_threads;
  1932. #else
  1933. n = 0;
  1934. #endif
  1935. BIF_RET(make_small(n));
  1936. }
  1937. else if (BIF_ARG_1 == am_alloc_util_allocators) {
  1938. BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
  1939. }
  1940. else if (BIF_ARG_1 == am_elib_malloc) {
  1941. /* To be removed in R15 */
  1942. BIF_RET(am_false);
  1943. }
  1944. else if (BIF_ARG_1 == am_os_version) {
  1945. int major, minor, build;
  1946. Eterm tup;
  1947. os_version(&major, &minor, &build);
  1948. hp = HAlloc(BIF_P, 4);
  1949. tup = TUPLE3(hp,
  1950. make_small(major),
  1951. make_small(minor),
  1952. make_small(build));
  1953. BIF_RET(tup);
  1954. }
  1955. else if (BIF_ARG_1 == am_version) {
  1956. int n = strlen(ERLANG_VERSION);
  1957. hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
  1958. BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
  1959. }
  1960. else if (BIF_ARG_1 == am_machine) {
  1961. int n = strlen(EMULATOR);
  1962. hp = HAlloc(BIF_P, n*2);
  1963. BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
  1964. }
  1965. else if (BIF_ARG_1 == am_garbage_collection) {
  1966. BIF_RET(am_generational);
  1967. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  1968. } else if (BIF_ARG_1 == am_instruction_counts) {
  1969. #ifdef DEBUG
  1970. Eterm *endp;
  1971. #endif
  1972. Eterm *hp, **hpp;
  1973. Uint hsz, *hszp;
  1974. int i;
  1975. hpp = NULL;
  1976. hsz = 0;
  1977. hszp = &hsz;
  1978. bld_instruction_counts:
  1979. res = NIL;
  1980. for (i = num_instructions-1; i >= 0; i--) {
  1981. res = erts_bld_cons(hpp, hszp,
  1982. erts_bld_tuple(hpp, hszp, 2,
  1983. am_atom_put(opc[i].name,
  1984. strlen(opc[i].name)),
  1985. erts_bld_uint(hpp, hszp,
  1986. opc[i].count)),
  1987. res);
  1988. }
  1989. if (!hpp) {
  1990. hp = HAlloc(BIF_P, hsz);
  1991. hpp = &hp;
  1992. #ifdef DEBUG
  1993. endp = hp + hsz;
  1994. #endif
  1995. hszp = NULL;
  1996. goto bld_instruction_counts;
  1997. }
  1998. #ifdef DEBUG
  1999. ASSERT(endp == hp);
  2000. #endif
  2001. BIF_RET(res);
  2002. #endif /* #ifndef ERTS_SMP */
  2003. } else if (BIF_ARG_1 == am_wordsize) {
  2004. return make_small(sizeof(Eterm));
  2005. } else if (BIF_ARG_1 == am_endian) {
  2006. #if defined(WORDS_BIGENDIAN)
  2007. return am_big;
  2008. #else
  2009. return am_little;
  2010. #endif
  2011. } else if (BIF_ARG_1 == am_heap_sizes) {
  2012. return erts_heap_sizes(BIF_P);
  2013. } else if (BIF_ARG_1 == am_global_heaps_size) {
  2014. #ifdef HYBRID
  2015. Uint hsz = 0;
  2016. Uint sz = 0;
  2017. sz += global_heap_sz;
  2018. #ifdef INCREMENTAL
  2019. /* The size of the old generation is a bit hard to define here...
  2020. * The amount of live data in the last collection perhaps..? */
  2021. sz = 0;
  2022. #else
  2023. if (global_old_hend && global_old_heap)
  2024. sz += global_old_hend - global_old_heap;
  2025. #endif
  2026. sz *= sizeof(Eterm);
  2027. (void) erts_bld_uint(NULL, &hsz, sz);
  2028. hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
  2029. res = erts_bld_uint(&hp, NULL, sz);
  2030. #else
  2031. res = make_small(0);
  2032. #endif
  2033. return res;
  2034. } else if (BIF_ARG_1 == am_heap_type) {
  2035. #if defined(HYBRID)
  2036. return am_hybrid;
  2037. #else
  2038. return am_private;
  2039. #endif
  2040. } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
  2041. res = erts_get_cpu_topology_term(BIF_P, am_used);
  2042. BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
  2043. } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
  2044. if (erts_update_cpu_info()) {
  2045. ERTS_DECL_AM(changed);
  2046. BIF_RET(AM_changed);
  2047. }
  2048. else {
  2049. ERTS_DECL_AM(unchanged);
  2050. BIF_RET(AM_unchanged);
  2051. }
  2052. #if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
  2053. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
  2054. register unsigned high asm("%l0");
  2055. register unsigned low asm("%l1");
  2056. hp = HAlloc(BIF_P, 5);
  2057. asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
  2058. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2059. : "=r" (high), "=r" (low));
  2060. res = TUPLE4(hp, make_small(high >> 16),
  2061. make_small(high & 0xFFFF),
  2062. make_small(low >> 16),
  2063. make_small(low & 0xFFFF));
  2064. BIF_RET(res);
  2065. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick2", BIF_ARG_1)) {
  2066. register unsigned high asm("%l0");
  2067. register unsigned low asm("%l1");
  2068. asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
  2069. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2070. : "=r" (high), "=r" (low));
  2071. hp = HAlloc(BIF_P, 5);
  2072. res = TUPLE4(hp, make_small(high >> 16),
  2073. make_small(high & 0xFFFF),
  2074. make_small(low >> 16),
  2075. make_small(low & 0xFFFF));
  2076. BIF_RET(res);
  2077. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic1", BIF_ARG_1)) {
  2078. register unsigned high asm("%l0");
  2079. register unsigned low asm("%l1");
  2080. hp = HAlloc(BIF_P, 5);
  2081. asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
  2082. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2083. : "=r" (high), "=r" (low));
  2084. res = TUPLE4(hp, make_small(high >> 16),
  2085. make_small(high & 0xFFFF),
  2086. make_small(low >> 16),
  2087. make_small(low & 0xFFFF));
  2088. BIF_RET(res);
  2089. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic2", BIF_ARG_1)) {
  2090. register unsigned high asm("%l0");
  2091. register unsigned low asm("%l1");
  2092. asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
  2093. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2094. : "=r" (high), "=r" (low));
  2095. hp = HAlloc(BIF_P, 5);
  2096. res = TUPLE4(hp, make_small(high >> 16),
  2097. make_small(high & 0xFFFF),
  2098. make_small(low >> 16),
  2099. make_small(low & 0xFFFF));
  2100. BIF_RET(res);
  2101. #endif
  2102. } else if (BIF_ARG_1 == am_threads) {
  2103. #ifdef USE_THREADS
  2104. return am_true;
  2105. #else
  2106. return am_false;
  2107. #endif
  2108. } else if (BIF_ARG_1 == am_creation) {
  2109. return make_small(erts_this_node->creation);
  2110. } else if (BIF_ARG_1 == am_break_ignored) {
  2111. extern int ignore_break;
  2112. if (ignore_break)
  2113. return am_true;
  2114. else
  2115. return am_false;
  2116. }
  2117. /* Arguments that are unusual follow ... */
  2118. else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
  2119. int no;
  2120. erts_get_logical_processors(&no, NULL, NULL);
  2121. if (no > 0)
  2122. BIF_RET(make_small((Uint) no));
  2123. else {
  2124. DECL_AM(unknown);
  2125. BIF_RET(AM_unknown);
  2126. }
  2127. }
  2128. else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
  2129. int no;
  2130. erts_get_logical_processors(NULL, &no, NULL);
  2131. if (no > 0)
  2132. BIF_RET(make_small((Uint) no));
  2133. else {
  2134. DECL_AM(unknown);
  2135. BIF_RET(AM_unknown);
  2136. }
  2137. }
  2138. else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
  2139. int no;
  2140. erts_get_logical_processors(NULL, NULL, &no);
  2141. if (no > 0)
  2142. BIF_RET(make_small((Uint) no));
  2143. else {
  2144. DECL_AM(unknown);
  2145. BIF_RET(AM_unknown);
  2146. }
  2147. } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
  2148. int n = sizeof(ERLANG_OTP_RELEASE)-1;
  2149. hp = HAlloc(BIF_P, 2*n);
  2150. BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_RELEASE, n, NIL));
  2151. } else if (ERTS_IS_ATOM_STR("driver_version", BIF_ARG_1)) {
  2152. char buf[42];
  2153. int n = erts_snprintf(buf, 42, "%d.%d",
  2154. ERL_DRV_EXTENDED_MAJOR_VERSION,
  2155. ERL_DRV_EXTENDED_MINOR_VERSION);
  2156. hp = HAlloc(BIF_P, 2*n);
  2157. BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
  2158. } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
  2159. #ifdef ERTS_SMP
  2160. BIF_RET(am_true);
  2161. #else
  2162. BIF_RET(am_false);
  2163. #endif
  2164. } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
  2165. BIF_RET(erts_bound_schedulers_term(BIF_P));
  2166. } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
  2167. BIF_RET(erts_get_schedulers_binds(BIF_P));
  2168. } else if (ERTS_IS_ATOM_STR("constant_pool_support", BIF_ARG_1)) {
  2169. BIF_RET(am_true);
  2170. } else if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)
  2171. || ERTS_IS_ATOM_STR("schedulers_total", BIF_ARG_1)) {
  2172. res = make_small(erts_no_schedulers);
  2173. BIF_RET(res);
  2174. } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
  2175. #ifndef ERTS_SMP
  2176. Eterm *hp = HAlloc(BIF_P, 4);
  2177. res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
  2178. BIF_RET(res);
  2179. #else
  2180. Uint total, online, active;
  2181. switch (erts_schedulers_state(&total,
  2182. &online,
  2183. &active,
  2184. 1)) {
  2185. case ERTS_SCHDLR_SSPND_DONE: {
  2186. Eterm *hp = HAlloc(BIF_P, 4);
  2187. res = TUPLE3(hp,
  2188. make_small(total),
  2189. make_small(online),
  2190. make_small(active));
  2191. BIF_RET(res);
  2192. }
  2193. case ERTS_SCHDLR_SSPND_YIELD_RESTART:
  2194. ERTS_VBUMP_ALL_REDS(BIF_P);
  2195. BIF_TRAP1(bif_export[BIF_system_info_1],
  2196. BIF_P, BIF_ARG_1);
  2197. default:
  2198. ASSERT(0);
  2199. BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
  2200. }
  2201. #endif
  2202. } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
  2203. #ifndef ERTS_SMP
  2204. BIF_RET(make_small(1));
  2205. #else
  2206. Uint total, online, active;
  2207. switch (erts_schedulers_state(&total, &online, &active, 1)) {
  2208. case ERTS_SCHDLR_SSPND_DONE:
  2209. BIF_RET(make_small(online));
  2210. case ERTS_SCHDLR_SSPND_YIELD_RESTART:
  2211. ERTS_VBUMP_ALL_REDS(BIF_P);
  2212. BIF_TRAP1(bif_export[BIF_system_info_1],
  2213. BIF_P, BIF_ARG_1);
  2214. default:
  2215. ASSERT(0);
  2216. BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
  2217. }
  2218. #endif
  2219. } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
  2220. #ifndef ERTS_SMP
  2221. BIF_RET(make_small(1));
  2222. #else
  2223. Uint total, online, active;
  2224. switch (erts_schedulers_state(&total, &online, &active, 1)) {
  2225. case ERTS_SCHDLR_SSPND_DONE:
  2226. BIF_RET(make_small(active));
  2227. case ERTS_SCHDLR_SSPND_YIELD_RESTART:
  2228. ERTS_VBUMP_ALL_REDS(BIF_P);
  2229. BIF_TRAP1(bif_export[BIF_system_info_1],
  2230. BIF_P, BIF_ARG_1);
  2231. default:
  2232. ASSERT(0);
  2233. BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
  2234. }
  2235. #endif
  2236. } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
  2237. res = make_small(erts_no_run_queues);
  2238. BIF_RET(res);
  2239. } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
  2240. Eterm *hp = NULL;
  2241. Uint sz = 0;
  2242. (void) c_compiler_used(NULL, &sz);
  2243. if (sz)
  2244. hp = HAlloc(BIF_P, sz);
  2245. BIF_RET(c_compiler_used(&hp, NULL));
  2246. } else if (ERTS_IS_ATOM_STR("stop_memory_trace", BIF_ARG_1)) {
  2247. erts_mtrace_stop();
  2248. BIF_RET(am_true);
  2249. } else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
  2250. BIF_RET(make_small(CONTEXT_REDS));
  2251. } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
  2252. #ifdef ERTS_ENABLE_KERNEL_POLL
  2253. BIF_RET(erts_use_kernel_poll ? am_true : am_false);
  2254. #else
  2255. BIF_RET(am_false);
  2256. #endif
  2257. } else if (ERTS_IS_ATOM_STR("lock_checking", BIF_ARG_1)) {
  2258. #ifdef ERTS_ENABLE_LOCK_CHECK
  2259. BIF_RET(am_true);
  2260. #else
  2261. BIF_RET(am_false);
  2262. #endif
  2263. } else if (ERTS_IS_ATOM_STR("lock_counting", BIF_ARG_1)) {
  2264. #ifdef ERTS_ENABLE_LOCK_COUNT
  2265. BIF_RET(am_true);
  2266. #else
  2267. BIF_RET(am_false);
  2268. #endif
  2269. } else if (ERTS_IS_ATOM_STR("debug_compiled", BIF_ARG_1)) {
  2270. #ifdef DEBUG
  2271. BIF_RET(am_true);
  2272. #else
  2273. BIF_RET(am_false);
  2274. #endif
  2275. } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
  2276. BIF_RET(erts_check_io_info(BIF_P));
  2277. } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
  2278. #ifndef ERTS_SMP
  2279. BIF_RET(NIL);
  2280. #else
  2281. if (erts_no_schedulers == 1)
  2282. BIF_RET(NIL);
  2283. else
  2284. BIF_RET(erts_multi_scheduling_blockers(BIF_P));
  2285. #endif
  2286. } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
  2287. BIF_RET(ERTS_USE_MODIFIED_TIMING()
  2288. ? make_small(erts_modified_timing_level)
  2289. : am_undefined);
  2290. } else if (ERTS_IS_ATOM_STR("port_tasks", BIF_ARG_1)) {
  2291. BIF_RET(am_true);
  2292. } else if (ERTS_IS_ATOM_STR("io_thread", BIF_ARG_1)) {
  2293. BIF_RET(am_false);
  2294. } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
  2295. BIF_RET(erts_sched_stat_term(BIF_P, 0));
  2296. } else if (ERTS_IS_ATOM_STR("total_scheduling_statistics", BIF_ARG_1)) {
  2297. BIF_RET(erts_sched_stat_term(BIF_P, 1));
  2298. } else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
  2299. BIF_RET(erts_nif_taints(BIF_P));
  2300. } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
  2301. BIF_RET(erts_get_reader_groups_map(BIF_P));
  2302. } else if (ERTS_IS_ATOM_STR("dist_buf_busy_limit", BIF_ARG_1)) {
  2303. Uint hsz = 0;
  2304. (void) erts_bld_uint(NULL, &hsz, erts_dist_buf_busy_limit);
  2305. hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
  2306. res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
  2307. BIF_RET(res);
  2308. }
  2309. BIF_ERROR(BIF_P, BADARG);
  2310. }
/*
 * erlang:port_info/1: collect the standard set of port_info/2 items for
 * a port and return them as a property list (list of 2-tuples).
 * Returns am_undefined if the port does not exist, THE_NON_VALUE on error.
 */
Eterm
port_info_1(Process* p, Eterm pid)
{
    /* Items always present in the result; fetched in this order, the
     * final list is then built in reverse so it comes out in order. */
    static Eterm keys[] = {
	am_name,
	am_links,
	am_id,
	am_connected,
	am_input,
	am_output
    };
    Eterm items[ASIZE(keys)];
    Eterm result = NIL;
    Eterm reg_name;
    Eterm* hp;
    Uint need;
    int i;

    /*
     * Collect all information about the port.
     */

    for (i = 0; i < ASIZE(keys); i++) {
	Eterm item;

	item = port_info_2(p, pid, keys[i]);
	if (is_non_value(item)) {
	    /* port_info_2 raised an error; propagate the non-value. */
	    return THE_NON_VALUE;
	}
	if (item == am_undefined) {
	    /* Port is gone. */
	    return am_undefined;
	}
	items[i] = item;
    }
    /* registered_name is only included when the port actually has one:
     * port_info_2 returns a tuple in that case and NIL otherwise, so
     * the is_tuple() tests below act as the "has a name" check. */
    reg_name = port_info_2(p, pid, am_registered_name);

    /*
     * Build the resulting list.
     */

    need = 2*ASIZE(keys);	/* one cons cell (2 words) per item */
    if (is_tuple(reg_name)) {
	need += 2;
    }
    hp = HAlloc(p, need);
    for (i = ASIZE(keys) - 1; i >= 0; i--) {
	result = CONS(hp, items[i], result);
	hp += 2;
    }
    if (is_tuple(reg_name)) {
	result = CONS(hp, reg_name, result);
    }

    return result;
}
  2360. /**********************************************************************/
  2361. /* Return information on ports */
  2362. /* Info:
  2363. ** id Port index
  2364. ** connected (Pid)
  2365. ** links List of pids
  2366. ** name String
  2367. ** input Number of bytes input from port program
  2368. ** output Number of bytes output to the port program
  2369. */
  2370. BIF_RETTYPE port_info_2(BIF_ALIST_2)
  2371. {
  2372. BIF_RETTYPE ret;
  2373. Eterm portid = BIF_ARG_1;
  2374. Port *prt;
  2375. Eterm item = BIF_ARG_2;
  2376. Eterm res;
  2377. Eterm* hp;
  2378. int count;
  2379. if (is_internal_port(portid))
  2380. prt = erts_id2port(portid, BIF_P, ERTS_PROC_LOCK_MAIN);
  2381. else if (is_atom(portid))
  2382. erts_whereis_name(BIF_P, ERTS_PROC_LOCK_MAIN,
  2383. portid, NULL, 0, 0, &prt);
  2384. else if (is_external_port(portid)
  2385. && external_port_dist_entry(portid) == erts_this_dist_entry)
  2386. BIF_RET(am_undefined);
  2387. else {
  2388. BIF_ERROR(BIF_P, BADARG);
  2389. }
  2390. if (!prt) {
  2391. BIF_RET(am_undefined);
  2392. }
  2393. if (item == am_id) {
  2394. hp = HAlloc(BIF_P, 3);
  2395. res = make_small(internal_port_number(portid));
  2396. }
  2397. else if (item == am_links) {
  2398. MonitorInfoCollection mic;
  2399. int i;
  2400. Eterm item;
  2401. INIT_MONITOR_INFOS(mic);
  2402. erts_doforall_links(prt->nlinks, &collect_one_link, &mic);
  2403. hp = HAlloc(BIF_P, 3 + mic.sz);
  2404. res = NIL;
  2405. for (i = 0; i < mic.mi_i; i++) {
  2406. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  2407. res = CONS(hp, item, res);
  2408. hp += 2;
  2409. }
  2410. DESTROY_MONITOR_INFOS(mic);
  2411. }
  2412. else if (item == am_monitors) {
  2413. MonitorInfoCollection mic;
  2414. int i;
  2415. Eterm item;
  2416. INIT_MONITOR_INFOS(mic);
  2417. erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic);
  2418. hp = HAlloc(BIF_P, 3 + mic.sz);
  2419. res = NIL;
  2420. for (i = 0; i < mic.mi_i; i++) {
  2421. Eterm t;
  2422. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  2423. t = TUPLE2(hp, am_process, item);
  2424. hp += 3;
  2425. res = CONS(hp, t, res);
  2426. hp += 2;
  2427. }
  2428. DESTROY_MONITOR_INFOS(mic);
  2429. }
  2430. else if (item == am_name) {
  2431. count = sys_strlen(prt->name);
  2432. hp = HAlloc(BIF_P, 3 + 2*count);
  2433. res = buf_to_intlist(&hp, prt->name, count, NIL);
  2434. }
  2435. else if (item == am_connected) {
  2436. hp = HAlloc(BIF_P, 3);
  2437. res = prt->connected; /* internal pid */
  2438. }
  2439. else if (item == am_input) {
  2440. Uint hsz = 3;
  2441. Uint n = prt->bytes_in;
  2442. (void) erts_bld_uint(NULL, &hsz, n);
  2443. hp = HAlloc(BIF_P, hsz);
  2444. res = erts_bld_uint(&hp, NULL, n);
  2445. }
  2446. else if (item == am_output) {
  2447. Uint hsz = 3;
  2448. Uint n = prt->bytes_out;
  2449. (void) erts_bld_uint(NULL, &hsz, n);
  2450. hp = HAlloc(BIF_P, hsz);
  2451. res = erts_bld_uint(&hp, NULL, n);
  2452. }
  2453. else if (item == am_registered_name) {
  2454. RegProc *reg;
  2455. reg = prt->reg;
  2456. if (reg == NULL) {
  2457. ERTS_BIF_PREP_RET(ret, NIL);
  2458. goto done;
  2459. } else {
  2460. hp = HAlloc(BIF_P, 3);
  2461. res = reg->name;
  2462. }
  2463. }
  2464. else if (item == am_memory) {
  2465. /* All memory consumed in bytes (the Port struct should not be
  2466. included though).
  2467. */
  2468. Uint hsz = 3;
  2469. Uint size = 0;
  2470. ErlHeapFragment* bp;
  2471. hp = HAlloc(BIF_P, 3);
  2472. erts_doforall_links(prt->nlinks, &one_link_size, &size);
  2473. for (bp = prt->bp; bp; bp = bp->next)
  2474. size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm);
  2475. if (prt->linebuf)
  2476. size += sizeof(LineBuf) + prt->linebuf->ovsiz;
  2477. /* ... */
  2478. /* All memory allocated by the driver should be included, but it is
  2479. hard to retrieve... */
  2480. (void) erts_bld_uint(NULL, &hsz, size);
  2481. hp = HAlloc(BIF_P, hsz);
  2482. res = erts_bld_uint(&hp, NULL, size);
  2483. }
  2484. else if (item == am_queue_size) {
  2485. Uint ioq_size = erts_port_ioq_size(prt);
  2486. Uint hsz = 3;
  2487. (void) erts_bld_uint(NULL, &hsz, ioq_size);
  2488. hp = HAlloc(BIF_P, hsz);
  2489. res = erts_bld_uint(&hp, NULL, ioq_size);
  2490. }
  2491. else if (ERTS_IS_ATOM_STR("locking", item)) {
  2492. hp = HAlloc(BIF_P, 3);
  2493. #ifndef ERTS_SMP
  2494. res = am_false;
  2495. #else
  2496. if (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
  2497. DECL_AM(port_level);
  2498. ASSERT(prt->drv_ptr->flags
  2499. & ERL_DRV_FLAG_USE_PORT_LOCKING);
  2500. res = AM_port_level;
  2501. }
  2502. else {
  2503. DECL_AM(driver_level);
  2504. ASSERT(!(prt->drv_ptr->flags
  2505. & ERL_DRV_FLAG_USE_PORT_LOCKING));
  2506. res = AM_driver_level;
  2507. }
  2508. #endif
  2509. }
  2510. else {
  2511. ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
  2512. goto done;
  2513. }
  2514. ERTS_BIF_PREP_RET(ret, TUPLE2(hp, item, res));
  2515. done:
  2516. erts_smp_port_unlock(prt);
  2517. return ret;
  2518. }
/*
 * erlang:fun_info/2: return {What, Value} for a fun (local closure) or
 * an export (external fun erlang:F/A); badarg for anything else or for
 * an unknown item.
 */
Eterm
fun_info_2(Process* p, Eterm fun, Eterm what)
{
    Eterm* hp;
    Eterm val;

    if (is_fun(fun)) {
	/* Local fun: data split between the on-heap ErlFunThing and the
	 * shared ErlFunEntry (funp->fe). */
	ErlFunThing* funp = (ErlFunThing *) fun_val(fun);

	switch (what) {
	case am_type:
	    hp = HAlloc(p, 3);
	    val = am_local;
	    break;
	case am_pid:
	    /* Process that created the fun. */
	    hp = HAlloc(p, 3);
	    val = funp->creator;
	    break;
	case am_module:
	    hp = HAlloc(p, 3);
	    val = funp->fe->module;
	    break;
	case am_new_index:
	    hp = HAlloc(p, 3);
	    val = make_small(funp->fe->index);
	    break;
	case am_new_uniq:
	    /* 16-byte uniq as a binary; note new_binary() allocates
	     * before the tuple's HAlloc, matching the other cases'
	     * allocate-then-read order. */
	    val = new_binary(p, funp->fe->uniq, 16);
	    hp = HAlloc(p, 3);
	    break;
	case am_index:
	    /* Pre-R8 style index/uniq kept for compatibility. */
	    hp = HAlloc(p, 3);
	    val = make_small(funp->fe->old_index);
	    break;
	case am_uniq:
	    hp = HAlloc(p, 3);
	    val = make_small(funp->fe->old_uniq);
	    break;
	case am_env:
	    {
		/* Free variables captured by the closure, as a list. */
		Uint num_free = funp->num_free;
		int i;

		hp = HAlloc(p, 3 + 2*num_free);
		val = NIL;
		for (i = num_free-1; i >= 0; i--) {
		    val = CONS(hp, funp->env[i], val);
		    hp += 2;
		}
	    }
	    break;
	case am_refc:
	    /* Reference count of the shared fun entry. */
	    val = erts_make_integer(erts_smp_atomic_read(&funp->fe->refc), p);
	    hp = HAlloc(p, 3);
	    break;
	case am_arity:
	    hp = HAlloc(p, 3);
	    val = make_small(funp->arity);
	    break;
	case am_name:
	    /* NOTE(review): reads the word at address[-2] of the fun's
	     * code — presumably the generated fun name placed there by
	     * the loader; confirm against the BEAM code layout. */
	    hp = HAlloc(p, 3);
	    val = funp->fe->address[-2];
	    break;
	default:
	    goto error;
	}
    } else if (is_export(fun)) {
	/* External fun (fun M:F/A): most closure-specific items are
	 * am_undefined by definition. */
	Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
	switch (what) {
	case am_type:
	    hp = HAlloc(p, 3);
	    val = am_external;
	    break;
	case am_pid:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_module:
	    /* code[0] = module, code[1] = function, code[2] = arity. */
	    hp = HAlloc(p, 3);
	    val = exp->code[0];
	    break;
	case am_new_index:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_new_uniq:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_index:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_uniq:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_env:
	    /* External funs capture no environment. */
	    hp = HAlloc(p, 3);
	    val = NIL;
	    break;
	case am_refc:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_arity:
	    hp = HAlloc(p, 3);
	    val = make_small(exp->code[2]);
	    break;
	case am_name:
	    hp = HAlloc(p, 3);
	    val = exp->code[1];
	    break;
	default:
	    goto error;
	}
    } else {
    error:
	BIF_ERROR(p, BADARG);
    }
    return TUPLE2(hp, what, val);
}
/*
 * erlang:is_process_alive/1: true if the (local) pid refers to a live
 * process. External pids from this node's own dist entry (an old
 * incarnation) yield false; other external pids are a badarg.
 */
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
    if(is_internal_pid(BIF_ARG_1)) {
	Process *rp;

	/* The calling process is trivially alive. */
	if (BIF_ARG_1 == BIF_P->id)
	    BIF_RET(am_true);

	/* Pid index outside the process table can never be valid. */
	if(internal_pid_index(BIF_ARG_1) >= erts_max_processes)
	    BIF_ERROR(BIF_P, BADARG);

	rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
			   BIF_ARG_1, ERTS_PROC_LOCK_STATUS);
	if (!rp) {
	    BIF_RET(am_false);
	}
	else {
	    /* If the process has a pending exit, wait (trap) until the
	     * exit has completed and then return false, so that callers
	     * never observe a "dying" process as alive. */
	    int have_pending_exit = ERTS_PROC_PENDING_EXIT(rp);
	    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
	    if (have_pending_exit)
		ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false);
	    else
		BIF_RET(am_true);
	}
    }
    else if(is_external_pid(BIF_ARG_1)) {
	if(external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
	    BIF_RET(am_false); /* A pid from an old incarnation of this node */
	else
	    BIF_ERROR(BIF_P, BADARG);
    }
    else {
	BIF_ERROR(BIF_P, BADARG);
    }
}
/*
 * erlang:process_display/2: dump a stack backtrace of the given process
 * to stderr. Only the 'backtrace' option is supported.
 */
BIF_RETTYPE process_display_2(BIF_ALIST_2)
{
    Process *rp;

    if (BIF_ARG_2 != am_backtrace)
	BIF_ERROR(BIF_P, BADARG);

    /* Lock the target with all locks so the stack is stable while
     * dumping. */
    rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
			     BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
    if(!rp) {
	BIF_ERROR(BIF_P, BADARG);
    }
    /* Lookup may return a busy marker instead of blocking; yield and
     * retry the whole BIF in that case. */
    if (rp == ERTS_PROC_LOCK_BUSY)
	ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P,
			BIF_ARG_1, BIF_ARG_2);
    /* If the target is about to exit, wait for the exit to complete and
     * then re-apply erlang:process_display/2 (which will then badarg). */
    if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
	Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
	erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL);
	ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
				    BIF_ARG_1,
				    am_erlang,
				    am_process_display,
				    args,
				    2);
    }
    erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
#ifdef ERTS_SMP
    /* Keep our own MAIN lock if we dumped ourselves; release everything
     * otherwise. */
    erts_smp_proc_unlock(rp, (BIF_P == rp
			      ? ERTS_PROC_LOCKS_ALL_MINOR
			      : ERTS_PROC_LOCKS_ALL));
#endif
    BIF_RET(am_true);
}
  2701. /* this is a general call which return some possibly useful information */
/*
 * erlang:statistics/1: emulator-wide statistics. Each branch builds its
 * own result term; heap sizes for big numbers are computed with a
 * size-pass of erts_bld_uint() before the single HAlloc.
 */
BIF_RETTYPE statistics_1(BIF_ALIST_1)
{
    Eterm res;
    Eterm* hp;

    if (BIF_ARG_1 == am_context_switches) {
	/* {ContextSwitches, 0} — second element is always zero. */
	Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
	hp = HAlloc(BIF_P, 3);
	res = TUPLE2(hp, cs, SMALL_ZERO);
	BIF_RET(res);
    } else if (BIF_ARG_1 == am_garbage_collection) {
	/* {NumberOfGCs, WordsReclaimed, 0} */
	Uint hsz = 4;	/* 3-tuple header + 3 elements */
	ErtsGCInfo gc_info;
	Eterm gcs;
	Eterm recl;
	erts_gc_info(&gc_info);
	(void) erts_bld_uint(NULL, &hsz, gc_info.garbage_collections);
	(void) erts_bld_uint(NULL, &hsz, gc_info.reclaimed);
	hp = HAlloc(BIF_P, hsz);
	gcs = erts_bld_uint(&hp, NULL, gc_info.garbage_collections);
	recl = erts_bld_uint(&hp, NULL, gc_info.reclaimed);
	res = TUPLE3(hp, gcs, recl, SMALL_ZERO);
	BIF_RET(res);
    } else if (BIF_ARG_1 == am_reductions) {
	/* {TotalReductions, ReductionsSinceLastCall} */
	Uint reds;
	Uint diff;
	Uint hsz = 3;
	Eterm b1, b2;

	erts_get_total_reductions(&reds, &diff);
	(void) erts_bld_uint(NULL, &hsz, reds);
	(void) erts_bld_uint(NULL, &hsz, diff);
	hp = HAlloc(BIF_P, hsz);
	b1 = erts_bld_uint(&hp, NULL, reds);
	b2 = erts_bld_uint(&hp, NULL, diff);
	res = TUPLE2(hp, b1, b2);
	BIF_RET(res);
    } else if (BIF_ARG_1 == am_exact_reductions) {
	/* Same as 'reductions' but includes the current process'
	 * not-yet-accumulated reductions. */
	Uint reds;
	Uint diff;
	Uint hsz = 3;
	Eterm b1, b2;

	erts_get_exact_total_reductions(BIF_P, &reds, &diff);
	(void) erts_bld_uint(NULL, &hsz, reds);
	(void) erts_bld_uint(NULL, &hsz, diff);
	hp = HAlloc(BIF_P, hsz);
	b1 = erts_bld_uint(&hp, NULL, reds);
	b2 = erts_bld_uint(&hp, NULL, diff);
	res = TUPLE2(hp, b1, b2);
	BIF_RET(res);
    } else if (BIF_ARG_1 == am_runtime) {
	/* {TotalRunTime, TimeSinceLastCall} (milliseconds). */
	unsigned long u1, u2, dummy;
	Eterm b1, b2;
	elapsed_time_both(&u1,&dummy,&u2,&dummy);
	b1 = erts_make_integer(u1,BIF_P);
	b2 = erts_make_integer(u2,BIF_P);
	hp = HAlloc(BIF_P,3);
	res = TUPLE2(hp, b1, b2);
	BIF_RET(res);
    } else if (BIF_ARG_1 ==  am_run_queue) {
	/* Total length of all run queues. */
	res = erts_run_queues_len(NULL);
	BIF_RET(make_small(res));
    } else if (BIF_ARG_1 == am_wall_clock) {
	/* {TotalWallclockTime, WallclockSinceLastCall}. */
	UWord w1, w2;
	Eterm b1, b2;
	wall_clock_elapsed_time_both(&w1, &w2);
	b1 = erts_make_integer((Uint) w1,BIF_P);
	b2 = erts_make_integer((Uint) w2,BIF_P);
	hp = HAlloc(BIF_P,3);
	res = TUPLE2(hp, b1, b2);
	BIF_RET(res);
    } else if (BIF_ARG_1 == am_io) {
	/* {{input, BytesIn}, {output, BytesOut}} — hsz starts at 9 for
	 * the three 2-tuples (3 words each). */
	Eterm r1, r2;
	Eterm in, out;
	Uint hsz = 9;
	Uint bytes_in = (Uint) erts_smp_atomic_read(&erts_bytes_in);
	Uint bytes_out = (Uint) erts_smp_atomic_read(&erts_bytes_out);

	(void) erts_bld_uint(NULL, &hsz, bytes_in);
	(void) erts_bld_uint(NULL, &hsz, bytes_out);
	hp = HAlloc(BIF_P, hsz);
	in = erts_bld_uint(&hp, NULL, bytes_in);
	out = erts_bld_uint(&hp, NULL, bytes_out);

	r1 = TUPLE2(hp,  am_input, in);
	hp += 3;
	r2 = TUPLE2(hp, am_output, out);
	hp += 3;
	BIF_RET(TUPLE2(hp, r1, r2));
    }
    else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
	/* Per-run-queue lengths as a tuple. Classic two-pass build: the
	 * first pass (hpp == NULL) only accumulates the heap size, the
	 * second pass writes the terms into the allocated heap. */
	Eterm res, *hp, **hpp;
	Uint sz, *szp;
	int no_qs = erts_no_run_queues;
	Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
	(void) erts_run_queues_len(qszs);
	sz = 0;
	szp = &sz;
	hpp = NULL;
	while (1) {
	    int i;
	    for (i = 0; i < no_qs; i++)
		qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
	    res = erts_bld_tuplev(hpp, szp, no_qs, &qszs[no_qs]);
	    if (hpp) {
		erts_free(ERTS_ALC_T_TMP, qszs);
		BIF_RET(res);
	    }
	    hp = HAlloc(BIF_P, sz);
	    szp = NULL;
	    hpp = &hp;
	}
    }
    BIF_ERROR(BIF_P, BADARG);
}
  2813. BIF_RETTYPE memory_0(BIF_ALIST_0)
  2814. {
  2815. BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
  2816. switch (res) {
  2817. case am_badarg: BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); /* never... */
  2818. case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
  2819. default: BIF_RET(res);
  2820. }
  2821. }
  2822. BIF_RETTYPE memory_1(BIF_ALIST_1)
  2823. {
  2824. BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, BIF_ARG_1);
  2825. switch (res) {
  2826. case am_badarg: BIF_ERROR(BIF_P, BADARG);
  2827. case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
  2828. default: BIF_RET(res);
  2829. }
  2830. }
/*
 * erlang:error_logger_warning_map/0: return the atom stored in
 * erts_error_logger_warnings (how warnings are mapped by the error
 * logger; presumably set from the +W emulator flag — verify at the
 * definition site).
 */
BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
{
    BIF_RET(erts_error_logger_warnings);
}
  2835. static erts_smp_atomic_t available_internal_state;
  2836. BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
  2837. {
  2838. /*
  2839. * NOTE: Only supposed to be used for testing, and debugging.
  2840. */
  2841. if (!erts_smp_atomic_read(&available_internal_state)) {
  2842. BIF_ERROR(BIF_P, EXC_UNDEF);
  2843. }
  2844. if (is_atom(BIF_ARG_1)) {
  2845. if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
  2846. /* Used by (emulator) */
  2847. BIF_RET(make_small((Uint) ERTS_BIF_REDS_LEFT(BIF_P)));
  2848. }
  2849. else if (ERTS_IS_ATOM_STR("node_and_dist_references", BIF_ARG_1)) {
  2850. /* Used by node_container_SUITE (emulator) */
  2851. Eterm res = erts_get_node_and_dist_references(BIF_P);
  2852. BIF_RET(res);
  2853. }
  2854. else if (ERTS_IS_ATOM_STR("monitoring_nodes", BIF_ARG_1)) {
  2855. BIF_RET(erts_processes_monitoring_nodes(BIF_P));
  2856. }
  2857. else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
  2858. || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
  2859. /* Used by node_container_SUITE (emulator) */
  2860. Eterm res;
  2861. if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
  2862. res = erts_test_next_pid(0, 0);
  2863. else {
  2864. res = erts_test_next_port(0, 0);
  2865. }
  2866. if (res < 0)
  2867. BIF_RET(am_false);
  2868. BIF_RET(erts_make_integer(res, BIF_P));
  2869. }
  2870. else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
  2871. /* Used by ets_SUITE (stdlib) */
  2872. size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
  2873. BIF_RET(make_small((Uint) words));
  2874. }
  2875. else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
  2876. /* Used by (emulator) */
  2877. int res;
  2878. #ifdef HAVE_ERTS_CHECK_IO_DEBUG
  2879. erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
  2880. res = erts_check_io_debug();
  2881. erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
  2882. #else
  2883. res = 0;
  2884. #endif
  2885. ASSERT(res >= 0);
  2886. BIF_RET(erts_make_integer((Uint) res, BIF_P));
  2887. }
  2888. else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
  2889. /* Used by process_SUITE (emulator) */
  2890. int i;
  2891. Eterm res = NIL;
  2892. Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
  2893. for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
  2894. res = CONS(hp, pi_args[i], res);
  2895. hp += 2;
  2896. }
  2897. BIF_RET(res);
  2898. }
  2899. else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
  2900. /* Used by process_SUITE (emulator) */
  2901. BIF_RET(erts_debug_processes(BIF_P));
  2902. }
  2903. else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
  2904. /* Used by process_SUITE (emulator) */
  2905. BIF_RET(erts_debug_processes_bif_info(BIF_P));
  2906. }
  2907. else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
  2908. /* Used by distribution_SUITE (emulator) */
  2909. BIF_RET(make_small((Uint) erts_debug_max_atom_out_cache_index()));
  2910. }
  2911. else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
  2912. Uint n;
  2913. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  2914. n = erts_debug_nbalance();
  2915. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  2916. BIF_RET(erts_make_integer(n, BIF_P));
  2917. }
  2918. else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
  2919. BIF_RET(am_true);
  2920. }
  2921. else if (ERTS_IS_ATOM_STR("force_heap_frags", BIF_ARG_1)) {
  2922. #ifdef FORCE_HEAP_FRAGS
  2923. BIF_RET(am_true);
  2924. #else
  2925. BIF_RET(am_false);
  2926. #endif
  2927. }
  2928. }
  2929. else if (is_tuple(BIF_ARG_1)) {
  2930. Eterm* tp = tuple_val(BIF_ARG_1);
  2931. switch (arityval(tp[0])) {
  2932. case 2: {
  2933. if (ERTS_IS_ATOM_STR("process_status", tp[1])) {
  2934. /* Used by timer process_SUITE, timer_bif_SUITE, and
  2935. node_container_SUITE (emulator) */
  2936. if (is_internal_pid(tp[2])) {
  2937. BIF_RET(erts_process_status(BIF_P,
  2938. ERTS_PROC_LOCK_MAIN,
  2939. NULL,
  2940. tp[2]));
  2941. }
  2942. }
  2943. else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
  2944. /* Used by erl_link_SUITE (emulator) */
  2945. if(is_internal_pid(tp[2])) {
  2946. Eterm res;
  2947. Process *p;
  2948. p = erts_pid2proc(BIF_P,
  2949. ERTS_PROC_LOCK_MAIN,
  2950. tp[2],
  2951. ERTS_PROC_LOCK_LINK);
  2952. if (!p) {
  2953. ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
  2954. BIF_RET(am_undefined);
  2955. }
  2956. res = make_link_list(BIF_P, p->nlinks, NIL);
  2957. erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
  2958. BIF_RET(res);
  2959. }
  2960. else if(is_internal_port(tp[2])) {
  2961. Eterm res;
  2962. Port *p = erts_id2port(tp[2], BIF_P, ERTS_PROC_LOCK_MAIN);
  2963. if(!p)
  2964. BIF_RET(am_undefined);
  2965. res = make_link_list(BIF_P, p->nlinks, NIL);
  2966. erts_smp_port_unlock(p);
  2967. BIF_RET(res);
  2968. }
  2969. else if(is_node_name_atom(tp[2])) {
  2970. DistEntry *dep = erts_find_dist_entry(tp[2]);
  2971. if(dep) {
  2972. Eterm subres;
  2973. erts_smp_de_links_lock(dep);
  2974. subres = make_link_list(BIF_P, dep->nlinks, NIL);
  2975. subres = make_link_list(BIF_P, dep->node_links, subres);
  2976. erts_smp_de_links_unlock(dep);
  2977. erts_deref_dist_entry(dep);
  2978. BIF_RET(subres);
  2979. } else {
  2980. BIF_RET(am_undefined);
  2981. }
  2982. }
  2983. }
  2984. else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
  2985. /* Used by erl_link_SUITE (emulator) */
  2986. if(is_internal_pid(tp[2])) {
  2987. Process *p;
  2988. Eterm res;
  2989. p = erts_pid2proc(BIF_P,
  2990. ERTS_PROC_LOCK_MAIN,
  2991. tp[2],
  2992. ERTS_PROC_LOCK_LINK);
  2993. if (!p) {
  2994. ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
  2995. BIF_RET(am_undefined);
  2996. }
  2997. res = make_monitor_list(BIF_P, p->monitors);
  2998. erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
  2999. BIF_RET(res);
  3000. } else if(is_node_name_atom(tp[2])) {
  3001. DistEntry *dep = erts_find_dist_entry(tp[2]);
  3002. if(dep) {
  3003. Eterm ml;
  3004. erts_smp_de_links_lock(dep);
  3005. ml = make_monitor_list(BIF_P, dep->monitors);
  3006. erts_smp_de_links_unlock(dep);
  3007. erts_deref_dist_entry(dep);
  3008. BIF_RET(ml);
  3009. } else {
  3010. BIF_RET(am_undefined);
  3011. }
  3012. }
  3013. }
  3014. else if (ERTS_IS_ATOM_STR("channel_number", tp[1])) {
  3015. Eterm res;
  3016. DistEntry *dep = erts_find_dist_entry(tp[2]);
  3017. if (!dep)
  3018. res = am_undefined;
  3019. else {
  3020. Uint cno = dist_entry_channel_no(dep);
  3021. res = make_small(cno);
  3022. erts_deref_dist_entry(dep);
  3023. }
  3024. BIF_RET(res);
  3025. }
  3026. else if (ERTS_IS_ATOM_STR("have_pending_exit", tp[1])) {
  3027. Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
  3028. tp[2], ERTS_PROC_LOCK_STATUS);
  3029. if (!rp) {
  3030. BIF_RET(am_undefined);
  3031. }
  3032. else {
  3033. Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? am_true : am_false;
  3034. erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
  3035. BIF_RET(res);
  3036. }
  3037. }
  3038. else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
  3039. Eterm bin = tp[2];
  3040. if (is_binary(bin)) {
  3041. Eterm real_bin = bin;
  3042. Eterm res = am_true;
  3043. ErlSubBin* sb = (ErlSubBin *) binary_val(real_bin);
  3044. if (sb->thing_word == HEADER_SUB_BIN) {
  3045. real_bin = sb->orig;
  3046. }
  3047. if (*binary_val(real_bin) == HEADER_PROC_BIN) {
  3048. ProcBin* pb;
  3049. Binary* val;
  3050. Eterm SzTerm;
  3051. Uint hsz = 3 + 5;
  3052. Eterm* hp;
  3053. DECL_AM(refc_binary);
  3054. pb = (ProcBin *) binary_val(real_bin);
  3055. val = pb->val;
  3056. (void) erts_bld_uint(NULL, &hsz, pb->size);
  3057. (void) erts_bld_uint(NULL, &hsz, val->orig_size);
  3058. hp = HAlloc(BIF_P, hsz);
  3059. /* Info about the Binary* object */
  3060. SzTerm = erts_bld_uint(&hp, NULL, val->orig_size);
  3061. res = TUPLE2(hp, am_binary, SzTerm);
  3062. hp += 3;
  3063. /* Info about the ProcBin* object */
  3064. SzTerm = erts_bld_uint(&hp, NULL, pb->size);
  3065. res = TUPLE4(hp, AM_refc_binary, SzTerm,
  3066. res, make_small(pb->flags));
  3067. } else { /* heap binary */
  3068. DECL_AM(heap_binary);
  3069. res = AM_heap_binary;
  3070. }
  3071. BIF_RET(res);
  3072. }
  3073. }
  3074. else if (ERTS_IS_ATOM_STR("term_to_binary_no_funs", tp[1])) {
  3075. Uint dflags = (DFLAG_EXTENDED_REFERENCES |
  3076. DFLAG_EXTENDED_PIDS_PORTS |
  3077. DFLAG_BIT_BINARIES);
  3078. BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
  3079. }
  3080. else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) {
  3081. Eterm res = am_undefined;
  3082. DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
  3083. if (dep) {
  3084. erts_smp_de_rlock(dep);
  3085. if (is_internal_port(dep->cid))
  3086. res = dep->cid;
  3087. erts_smp_de_runlock(dep);
  3088. erts_deref_dist_entry(dep);
  3089. }
  3090. BIF_RET(res);
  3091. }
  3092. else if (ERTS_IS_ATOM_STR("atom_out_cache_index", tp[1])) {
  3093. /* Used by distribution_SUITE (emulator) */
  3094. if (is_atom(tp[2])) {
  3095. BIF_RET(make_small(
  3096. (Uint)
  3097. erts_debug_atom_to_out_cache_index(tp[2])));
  3098. }
  3099. }
  3100. else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
  3101. return erts_fake_scheduler_bindings(BIF_P, tp[2]);
  3102. }
  3103. else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
  3104. Sint groups;
  3105. if (is_not_small(tp[2]))
  3106. BIF_ERROR(BIF_P, BADARG);
  3107. groups = signed_val(tp[2]);
  3108. if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
  3109. BIF_ERROR(BIF_P, BADARG);
  3110. BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
  3111. }
  3112. break;
  3113. }
  3114. default:
  3115. break;
  3116. }
  3117. }
  3118. BIF_ERROR(BIF_P, BADARG);
  3119. }
/* Toggled by the "hipe_test_reschedule_suspend" operation below: set when
 * the calling process is suspended, flipped back by the rescheduled call. */
static erts_smp_atomic_t hipe_test_reschedule_flag;

/*
 * erts_debug:set_internal_state/2
 *
 * Poke emulator-internal state from test suites. BIF_ARG_1 selects the
 * operation and BIF_ARG_2 carries its operand. Apart from the
 * "available_internal_state" switch itself, every operation fails with
 * 'undef' until access has been enabled with
 *   erts_debug:set_internal_state(available_internal_state, true).
 * Unrecognized operations or malformed operands fail with badarg.
 *
 * NOTE: Only supposed to be used for testing, and debugging.
 */
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     */
    if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
        && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
        /* Enable/disable access to the remaining operations; returns the
         * previous setting. Enabling is reported via the warning logger so
         * that use outside OTP test suites is visible. */
        erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
        erts_aint_t prev_on = erts_smp_atomic_xchg(&available_internal_state, on);
        if (on) {
            erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
            erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
            if (erts_is_alive)
                erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
            erts_dsprintf(dsbufp,
                          "enabled access to the emulator internal state.\n");
            erts_dsprintf(dsbufp,
                          "NOTE: This is an erts internal test feature and "
                          "should *only* be used by OTP test-suites.\n");
            erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
        }
        BIF_RET(prev_on ? am_true : am_false);
    }

    /* Everything below requires access to have been enabled first. */
    if (!erts_smp_atomic_read(&available_internal_state)) {
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {
        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Set the calling process' remaining reduction count, clamped
             * to [0, CONTEXT_REDS]. With a saved-calls buffer active,
             * fcalls counts relative to -CONTEXT_REDS instead of 0. */
            Sint reds;
            if (term_to_Sint(BIF_ARG_2, &reds) != 0) {
                if (0 <= reds && reds <= CONTEXT_REDS) {
                    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P))
                        BIF_P->fcalls = reds;
                    else
                        BIF_P->fcalls = reds - CONTEXT_REDS;
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("sleep", BIF_ARG_1)) {
            /* Sleep for BIF_ARG_2 milliseconds; "block" additionally blocks
             * the whole system (erts_smp_block_system) while sleeping. The
             * main process lock is released around the sleep. */
            int block = ERTS_IS_ATOM_STR("block", BIF_ARG_1);
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    if (block)
                        erts_smp_block_system(0);
                    while (erts_milli_sleep((long) ms) != 0);
                    if (block)
                        erts_smp_release_system();
                    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block_scheduler", BIF_ARG_1)) {
            /* Keep the current scheduler thread busy sleeping for
             * BIF_ARG_2 ms; only the main process lock is released, the
             * system is NOT blocked. */
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    while (erts_milli_sleep((long) ms) != 0);
                    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            /* Force the number used for the next created pid/port. */
            Uint next;
            if (term_to_Uint(BIF_ARG_2, &next) != 0) {
                /* NOTE(review): res is an Eterm (unsigned), so the
                 * "res < 0" failure check below can never be true; it
                 * looks like res should be a signed type matching
                 * erts_test_next_pid()'s return value -- confirm. */
                Eterm res;
                if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                    res = erts_test_next_pid(1, next);
                else {
                    res = erts_test_next_port(1, next);
                }
                if (res < 0)
                    BIF_RET(am_false);
                BIF_RET(erts_make_integer(res, BIF_P));
            }
        }
        else if (ERTS_IS_ATOM_STR("force_gc", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */
            /* Mark the process identified by BIF_ARG_2 for forced garbage
             * collection. */
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
            if (!rp) {
                BIF_RET(am_false);
            }
            else {
                FLAGS(rp) |= F_FORCE_GC;
                /* When targeting ourselves we already hold (and must keep)
                 * our own main lock. */
                if (BIF_P != rp)
                    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("send_fake_exit_signal", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */

            /* Testcases depend on the exit being received via
               a pending exit when the receiver is the same as
               the caller. */
            /* BIF_ARG_2 = {From, To, Reason}; To must be a local pid.
             * Returns 'dead', 'message', 'unaffected' or 'exit' depending
             * on erts_send_exit_signal()'s result code. */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tp = tuple_val(BIF_ARG_2);
                if (arityval(tp[0]) == 3
                    && (is_pid(tp[1]) || is_port(tp[1]))
                    && is_internal_pid(tp[2])) {
                    int xres;
                    ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
                    Process *rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
                                                    tp[2], rp_locks,
                                                    ERTS_P2P_FLG_SMP_INC_REFC);
                    if (!rp) {
                        DECL_AM(dead);
                        BIF_RET(AM_dead);
                    }

#ifdef ERTS_SMP
                    if (BIF_P == rp)
                        rp_locks |= ERTS_PROC_LOCK_MAIN;
#endif
                    xres = erts_send_exit_signal(NULL, /* NULL in order to
                                                          force a pending exit
                                                          when we send to our
                                                          selves. */
                                                 tp[1],
                                                 rp,
                                                 &rp_locks,
                                                 tp[3],
                                                 NIL,
                                                 NULL,
                                                 0);
#ifdef ERTS_SMP
                    /* Keep holding our own main lock across the unlock
                     * below. */
                    if (BIF_P == rp)
                        rp_locks &= ~ERTS_PROC_LOCK_MAIN;
#endif
                    erts_smp_proc_unlock(rp, rp_locks);
                    erts_smp_proc_dec_refc(rp);
                    if (xres > 1) {
                        DECL_AM(message);
                        BIF_RET(AM_message);
                    }
                    else if (xres == 0) {
                        DECL_AM(unaffected);
                        BIF_RET(AM_unaffected);
                    }
                    else {
                        DECL_AM(exit);
                        BIF_RET(AM_exit);
                    }
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            /* BIF_ARG_2 = {Name, Cnt}; semantics implemented by
             * erts_ets_colliding_names(). */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);
                Uint cnt;
                if (arityval(tpl[0]) == 2 && is_atom(tpl[1]) &&
                    term_to_Uint(tpl[2], &cnt)) {
                    BIF_RET(erts_ets_colliding_names(BIF_P,tpl[1],cnt));
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("binary_loop_limit", BIF_ARG_1)) {
            /* Used by binary_module_SUITE (stdlib) */
            /* Set the binary module's loop (trap) limit; the atom
             * 'default' restores the built-in value. The setter's return
             * value is handed back to the caller. */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_binary_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_binary_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
            /* Used by re_SUITE (stdlib) */
            /* Same as above, for the re (regex) engine. */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_re_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_re_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("unicode_loop_limit", BIF_ARG_1)) {
            /* Used by unicode_SUITE (stdlib) */
            /* Same as above, for the unicode conversion routines. */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_unicode_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_unicode_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
            /* Used by hipe test suites */
            /* First call (flag unset, arg /= false): set the flag, suspend
             * the caller and yield back into this BIF. The rescheduled
             * call sees the flag set, skips the branch, and toggles the
             * flag off again. */
            erts_aint_t flag = erts_smp_atomic_read(&hipe_test_reschedule_flag);
            if (!flag && BIF_ARG_2 != am_false) {
                erts_smp_atomic_set(&hipe_test_reschedule_flag, 1);
                erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
                ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
                                BIF_P, BIF_ARG_1, BIF_ARG_2);
            }
            erts_smp_atomic_set(&hipe_test_reschedule_flag, !flag);
            BIF_RET(NIL);
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
            /* Used by hipe test suites */
            /* Resume a process previously suspended with
             * "hipe_test_reschedule_suspend"; true if it was found. */
            Eterm res = am_false;
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_STATUS);
            if (rp) {
                erts_resume(rp, ERTS_PROC_LOCK_STATUS);
                res = am_true;
                erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("test_long_gc_sleep", BIF_ARG_1)) {
            /* Set the artificial GC sleep time (global test knob). */
            if (term_to_Uint(BIF_ARG_2, &erts_test_long_gc_sleep) > 0)
                BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
            /* Abort the emulator, printing BIF_ARG_2 as the reason. */
            erl_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
            /* Take down the distribution connection to the node named by
             * BIF_ARG_2; false if no such connection exists. */
            DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
            if (!dep)
                BIF_RET(am_false);
            else {
                Uint32 con_id;
                erts_smp_de_rlock(dep);
                con_id = dep->connection_id;
                erts_smp_de_runlock(dep);
                erts_kill_dist_connection(dep, con_id);
                erts_deref_dist_entry(dep);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
#ifdef ERTS_SMP
            /* Toggle the "process not running" optimization while the
             * system is blocked; returns the previous setting. */
            int old_use_opt, use_opt;
            switch (BIF_ARG_2) {
            case am_true:
                use_opt = 1;
                break;
            case am_false:
                use_opt = 0;
                break;
            default:
                BIF_ERROR(BIF_P, BADARG);
            }

            erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            erts_smp_block_system(0);
            old_use_opt = !erts_disable_proc_not_running_opt;
            erts_disable_proc_not_running_opt = !use_opt;
            erts_smp_release_system();
            erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
            BIF_RET(old_use_opt ? am_true : am_false);
#else
            BIF_ERROR(BIF_P, EXC_NOTSUP);
#endif
        }
    }

    BIF_ERROR(BIF_P, BADARG);
}
  3389. #ifdef ERTS_ENABLE_LOCK_COUNT
  3390. static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
  3391. Uint tries = 0, colls = 0;
  3392. unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
  3393. unsigned int line = 0;
  3394. Eterm af, uil;
  3395. Eterm uit, uic;
  3396. Eterm uits, uitns, uitn;
  3397. Eterm tt, tstat, tloc, t;
  3398. /* term:
  3399. * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
  3400. */
  3401. tries = (Uint) ethr_atomic_read(&stats->tries);
  3402. colls = (Uint) ethr_atomic_read(&stats->colls);
  3403. line = stats->line;
  3404. timer_s = stats->timer.s;
  3405. timer_ns = stats->timer.ns;
  3406. timer_n = stats->timer_n;
  3407. af = am_atom_put(stats->file, strlen(stats->file));
  3408. uil = erts_bld_uint( hpp, szp, line);
  3409. tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
  3410. uit = erts_bld_uint( hpp, szp, tries);
  3411. uic = erts_bld_uint( hpp, szp, colls);
  3412. uits = erts_bld_uint( hpp, szp, timer_s);
  3413. uitns = erts_bld_uint( hpp, szp, timer_ns);
  3414. uitn = erts_bld_uint( hpp, szp, timer_n);
  3415. tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
  3416. tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
  3417. t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
  3418. res = erts_bld_cons( hpp, szp, t, res);
  3419. return res;
  3420. }
  3421. static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
  3422. Eterm name, type, id, stats = NIL, t;
  3423. Process *proc = NULL;
  3424. char *ltype;
  3425. int i;
  3426. /* term:
  3427. * [{name, id, type, stats()}]
  3428. */
  3429. ASSERT(lock->name);
  3430. ltype = erts_lcnt_lock_type(lock->flag);
  3431. ASSERT(ltype);
  3432. type = am_atom_put(ltype, strlen(ltype));
  3433. name = am_atom_put(lock->name, strlen(lock->name));
  3434. if (lock->flag & ERTS_LCNT_LT_ALLOC) {
  3435. /* use allocator types names as id's for allocator locks */
  3436. ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
  3437. id = am_atom_put(ltype, strlen(ltype));
  3438. } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
  3439. /* use registered names as id's for process locks if available */
  3440. proc = erts_pid2proc_unlocked(lock->id);
  3441. if (proc && proc->reg) {
  3442. id = proc->reg->name;
  3443. } else {
  3444. /* otherwise use process id */
  3445. id = lock->id;
  3446. }
  3447. } else {
  3448. id = lock->id;
  3449. }
  3450. for (i = 0; i < lock->n_stats; i++) {
  3451. stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
  3452. }
  3453. t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
  3454. res = erts_bld_cons( hpp, szp, t, res);
  3455. return res;
  3456. }
  3457. static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
  3458. Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
  3459. erts_lcnt_lock_t *lock = NULL;
  3460. char *str_duration = "duration";
  3461. char *str_locks = "locks";
  3462. /* term:
  3463. * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
  3464. */
  3465. /* duration tuple */
  3466. dts = erts_bld_uint( hpp, szp, data->duration.s);
  3467. dtns = erts_bld_uint( hpp, szp, data->duration.ns);
  3468. tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
  3469. adur = am_atom_put(str_duration, strlen(str_duration));
  3470. tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
  3471. /* lock tuple */
  3472. aloc = am_atom_put(str_locks, strlen(str_locks));
  3473. for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
  3474. lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
  3475. }
  3476. for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
  3477. lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
  3478. }
  3479. tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
  3480. res = erts_bld_cons( hpp, szp, tloc, res);
  3481. res = erts_bld_cons( hpp, szp, tdur, res);
  3482. return res;
  3483. }
  3484. #endif
  3485. BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
  3486. {
  3487. #ifdef ERTS_ENABLE_LOCK_COUNT
  3488. Eterm res = NIL;
  3489. #endif
  3490. if (BIF_ARG_1 == am_enabled) {
  3491. #ifdef ERTS_ENABLE_LOCK_COUNT
  3492. BIF_RET(am_true);
  3493. #else
  3494. BIF_RET(am_false);
  3495. #endif
  3496. }
  3497. #ifdef ERTS_ENABLE_LOCK_COUNT
  3498. else if (BIF_ARG_1 == am_info) {
  3499. erts_lcnt_data_t *data;
  3500. Uint hsize = 0;
  3501. Uint *szp;
  3502. Eterm* hp;
  3503. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3504. erts_smp_block_system(0);
  3505. erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
  3506. data = erts_lcnt_get_data();
  3507. /* calculate size */
  3508. szp = &hsize;
  3509. lcnt_build_result_term(NULL, szp, data, NIL);
  3510. /* alloc and build */
  3511. hp = HAlloc(BIF_P, hsize);
  3512. res = lcnt_build_result_term(&hp, NULL, data, res);
  3513. erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
  3514. erts_smp_release_system();
  3515. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3516. BIF_RET(res);
  3517. } else if (BIF_ARG_1 == am_clear) {
  3518. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3519. erts_smp_block_system(0);
  3520. erts_lcnt_clear_counters();
  3521. erts_smp_release_system();
  3522. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3523. BIF_RET(am_ok);
  3524. } else if (is_tuple(BIF_ARG_1)) {
  3525. Eterm* tp = tuple_val(BIF_ARG_1);
  3526. switch (arityval(tp[0])) {
  3527. case 2:
  3528. if (ERTS_IS_ATOM_STR("copy_save", tp[1])) {
  3529. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3530. erts_smp_block_system(0);
  3531. if (tp[2] == am_true) {
  3532. res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
  3533. } else if (tp[2] == am_false) {
  3534. res = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
  3535. } else {
  3536. erts_smp_release_system();
  3537. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3538. BIF_ERROR(BIF_P, BADARG);
  3539. }
  3540. erts_smp_release_system();
  3541. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3542. BIF_RET(res);
  3543. } else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
  3544. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3545. erts_smp_block_system(0);
  3546. if (tp[2] == am_true) {
  3547. res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
  3548. } else if (tp[2] == am_false) {
  3549. res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
  3550. } else {
  3551. erts_smp_release_system();
  3552. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3553. BIF_ERROR(BIF_P, BADARG);
  3554. }
  3555. erts_smp_release_system();
  3556. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3557. BIF_RET(res);
  3558. }
  3559. break;
  3560. default:
  3561. break;
  3562. }
  3563. }
  3564. #endif
  3565. BIF_ERROR(BIF_P, BADARG);
  3566. }
/* One-time initialization for this module; called during emulator start-up. */
void
erts_bif_info_init(void)
{
    /* Internal-state access stays disabled until explicitly enabled via
     * erts_debug:set_internal_state(available_internal_state, true). */
    erts_smp_atomic_init(&available_internal_state, 0);
    erts_smp_atomic_init(&hipe_test_reschedule_flag, 0);
    process_info_init();
}