PageRenderTime 59ms CodeModel.GetById 24ms RepoModel.GetById 1ms app.codeStats 0ms

/erts/emulator/beam/erl_bif_info.c

https://github.com/Bwooce/otp
C | 3952 lines | 3363 code | 379 blank | 210 comment | 924 complexity | 65089e3b1aa62b7d4b0936b17d0b4c49 MD5 | raw file
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-2-Clause

Large files are truncated, but you can click here to view the full file

  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1999-2010. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include "sys.h"
  23. #include "erl_vm.h"
  24. #include "global.h"
  25. #include "erl_process.h"
  26. #include "erl_nmgc.h"
  27. #include "error.h"
  28. #include "erl_driver.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "erl_version.h"
  32. #include "erl_db_util.h"
  33. #include "erl_message.h"
  34. #include "erl_binary.h"
  35. #include "erl_db.h"
  36. #include "erl_instrument.h"
  37. #include "dist.h"
  38. #include "erl_gc.h"
  39. #include "erl_cpu_topology.h"
  40. #ifdef HIPE
  41. #include "hipe_arch.h"
  42. #endif
  43. #ifdef ERTS_ENABLE_LOCK_COUNT
  44. #include "erl_lock_count.h"
  45. #endif
  46. #ifdef VALGRIND
  47. #include <valgrind/valgrind.h>
  48. #include <valgrind/memcheck.h>
  49. #endif
/* Declare and initialize a local Eterm variable AM_<S> holding the atom "S". */
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)

/* Keep erts_system_version as a global variable for easy access from a core */
/* The %bpu/%d/%s placeholders are filled in by erts_print_system_version()
 * below; which placeholders are present depends on the build configuration. */
static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
				     " (erts-" ERLANG_VERSION ")"
#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
				     " [no-c-stack-objects]"
#endif
#ifndef OTP_RELEASE
				     " [source]"
#endif
#ifdef ARCH_64
#if HALFWORD_HEAP
				     " [64-bit halfword]"
#else
				     " [64-bit]"
#endif
#endif
#ifdef ERTS_SMP
				     " [smp:%bpu:%bpu]"
#endif
				     " [rq:%bpu]"
#ifdef USE_THREADS
				     " [async-threads:%d]"
#endif
#ifdef HIPE
				     " [hipe]"
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
				     " [kernel-poll:%s]"
#endif
#ifdef HYBRID
				     " [hybrid heap]"
#endif
#ifdef INCREMENTAL
				     " [incremental GC]"
#endif
#ifdef ET_DEBUG
#if ET_DEBUG
				     " [type-assertions]"
#endif
#endif
#ifdef DEBUG
				     " [debug-compiled]"
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
				     " [lock-checking]"
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
				     " [lock-counting]"
#endif
#ifdef PURIFY
				     " [purify-compiled]"
#endif
#ifdef VALGRIND
				     " [valgrind-compiled]"
#endif
				     "\n");

/* Number of elements in a (true) array. */
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))

#if defined(HAVE_SOLARIS_SPARC_PERFMON)
# include <sys/ioccom.h>
# define PERFMON_SETPCR			_IOW('P', 1, unsigned long long)
# define PERFMON_GETPCR			_IOR('P', 2, unsigned long long)
#endif
/*
 * Build an Erlang list with one {BinaryPointer, OrigSize, RefCount}
 * 3-tuple per ProcBin found in the given off-heap list.
 *
 * Follows the usual two-pass erts_bld_* convention: when szp is
 * non-NULL the required heap size is accumulated into *szp; when hpp
 * is non-NULL the term is built at *hpp (which is advanced).
 */
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
    struct erl_off_heap_header* ohh;
    Eterm res = NIL;
    Eterm tuple;

    for (ohh = oh->first; ohh; ohh = ohh->next) {
	if (ohh->thing_word == HEADER_PROC_BIN) {
	    ProcBin* pb = (ProcBin*) ohh;
	    Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
	    Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);

	    if (szp)
		*szp += 4+2;	/* 3-tuple (4 words) + cons cell (2 words) */
	    if (hpp) {
		Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
		tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
		/* cons cell lives right after the tuple's 4 words */
		res = CONS(*hpp + 4, tuple, res);
		*hpp += 4+2;
	    }
	}
    }
    return res;
}
  136. /*
  137. make_monitor_list:
  138. returns a list of records..
  139. -record(erl_monitor, {
  140. type, % MON_ORIGIN or MON_TARGET (1 or 3)
  141. ref,
  142. pid, % Process or nodename
  143. name % registered name or []
  144. }).
  145. */
  146. static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
  147. {
  148. Uint *psz = vpsz;
  149. *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
  150. *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
  151. *psz += 8; /* CONS + 5-tuple */
  152. }
/* State threaded through do_make_one_mon_element() by make_monitor_list(). */
typedef struct {
    Process *p;   /* process owning the heap we build on */
    Eterm *hp;    /* current heap position */
    Eterm res;    /* result list built so far */
    Eterm tag;    /* record tag (AM_erl_monitor) */
} MonListContext;

/*
 * Prepend one #erl_monitor{} 5-tuple {Tag, Type, Ref, Pid, Name} to the
 * result list, copying non-constant ref/pid terms onto the heap first.
 */
static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
    MonListContext *pmlc = vpmlc;
    Eterm tup;
    Eterm r = (IS_CONST(mon->ref)
	       ? mon->ref
	       : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
    Eterm p = (IS_CONST(mon->pid)
	       ? mon->pid
	       : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
    tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
    pmlc->hp += 6;	/* 5-tuple: header + 5 elements */
    pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
    pmlc->hp += 2;	/* cons cell */
}
  174. static Eterm
  175. make_monitor_list(Process *p, ErtsMonitor *root)
  176. {
  177. DECL_AM(erl_monitor);
  178. Uint sz = 0;
  179. MonListContext mlc;
  180. erts_doforall_monitors(root, &do_calc_mon_size, &sz);
  181. if (sz == 0) {
  182. return NIL;
  183. }
  184. mlc.p = p;
  185. mlc.hp = HAlloc(p,sz);
  186. mlc.res = NIL;
  187. mlc.tag = AM_erl_monitor;
  188. erts_doforall_monitors(root, &do_make_one_mon_element, &mlc);
  189. return mlc.res;
  190. }
  191. /*
  192. make_link_list:
  193. returns a list of records..
  194. -record(erl_link, {
  195. type, % LINK_NODE or LINK_PID (1 or 3)
  196. pid, % Process or nodename
  197. targets % List of erl_link's or nil
  198. }).
  199. */
/*
 * Accumulate into *vpsz the heap words needed to report one link:
 * a cons cell plus a 4-tuple, any non-constant pid copy, and
 * (recursively) the sub-links of non-node links.
 */
static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
{
    Uint *psz = vpsz;
    *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
    if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
	/* Node links use this pointer as ref counter... */
	erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
    }
    *psz += 7; /* CONS + 4-tuple */
}
/* State threaded through do_make_one_lnk_element() by make_link_list(). */
typedef struct {
    Process *p;   /* process owning the heap we build on */
    Eterm *hp;    /* current heap position */
    Eterm res;    /* result list built so far */
    Eterm tag;    /* record tag (AM_erl_link) */
} LnkListContext;

/*
 * Prepend one #erl_link{} 4-tuple {Tag, Type, Pid, Targets} to the
 * result list. For node links, Targets is the reference count; for
 * other links it is the recursively built list of sub-links (NIL when
 * there are none).
 */
static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
{
    LnkListContext *pllc = vpllc;
    Eterm tup;
    Eterm old_res, targets = NIL;
    Eterm p = (IS_CONST(lnk->pid)
	       ? lnk->pid
	       : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
    if (lnk->type == LINK_NODE) {
	targets = make_small(ERTS_LINK_REFC(lnk));
    } else if (ERTS_LINK_ROOT(lnk) != NULL) {
	/* Build the sub-link list with a temporarily empty result list,
	 * then restore the outer one. */
	old_res = pllc->res;
	pllc->res = NIL;
	erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_make_one_lnk_element, vpllc);
	targets = pllc->res;
	pllc->res = old_res;
    }
    tup = TUPLE4(pllc->hp, pllc->tag, make_small(lnk->type), p, targets);
    pllc->hp += 5;	/* 4-tuple: header + 4 elements */
    pllc->res = CONS(pllc->hp, tup, pllc->res);
    pllc->hp += 2;	/* cons cell */
}
  238. static Eterm
  239. make_link_list(Process *p, ErtsLink *root, Eterm tail)
  240. {
  241. DECL_AM(erl_link);
  242. Uint sz = 0;
  243. LnkListContext llc;
  244. erts_doforall_links(root, &do_calc_lnk_size, &sz);
  245. if (sz == 0) {
  246. return tail;
  247. }
  248. llc.p = p;
  249. llc.hp = HAlloc(p,sz);
  250. llc.res = tail;
  251. llc.tag = AM_erl_link;
  252. erts_doforall_links(root, &do_make_one_lnk_element, &llc);
  253. return llc.res;
  254. }
/*
 * Print the erts_system_version banner to 'to'/'arg', supplying the
 * runtime values (scheduler counts, run queues, async threads,
 * kernel-poll state) for the placeholders the build put into the
 * version string. 'c_p' is currently unused.
 */
int
erts_print_system_version(int to, void *arg, Process *c_p)
{
#ifdef ERTS_SMP
    Uint total, online, active;
    (void) erts_schedulers_state(&total, &online, &active, 0);
#endif
    return erts_print(to, arg, erts_system_version
#ifdef ERTS_SMP
		      , total, online, erts_no_run_queues
#else
		      , 1	/* single run queue in non-SMP builds */
#endif
#ifdef USE_THREADS
		      , erts_async_max_threads
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
		      , erts_use_kernel_poll ? "true" : "false"
#endif
	);
}
/* One collected entry: the monitored/linked entity (pid or registered
 * name) and, for by-name monitors, the node it lives on. */
typedef struct {
    Eterm entity;
    Eterm node;
} MonitorInfo;

/* Growable array of MonitorInfo entries plus the heap size (in words)
 * needed to build the final result term. */
typedef struct {
    MonitorInfo *mi;	/* entry array (ERTS_ALC_T_TMP) */
    Uint mi_i;		/* next free index */
    Uint mi_max;	/* allocated capacity */
    int sz;		/* heap words needed for the result */
} MonitorInfoCollection;

/* Initialize an empty collection (no array allocated yet). */
#define INIT_MONITOR_INFOS(MIC) do {		\
    (MIC).mi = NULL;				\
    (MIC).mi_i = (MIC).mi_max = 0;		\
    (MIC).sz = 0;				\
} while(0)

/* Growth step for the entry array. */
#define MI_INC 50

/* Ensure room for one more entry, growing mi by MI_INC entries when full. */
#define EXTEND_MONITOR_INFOS(MICP)					\
do {									\
    if ((MICP)->mi_i >= (MICP)->mi_max) {				\
	(MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP,		\
						(MICP)->mi,		\
						((MICP)->mi_max+MI_INC)	\
						* sizeof(MonitorInfo))	\
		      : erts_alloc(ERTS_ALC_T_TMP,			\
				   MI_INC*sizeof(MonitorInfo)));	\
	(MICP)->mi_max += MI_INC;					\
    }									\
} while (0)

/* Free the entry array, if one was ever allocated. */
#define DESTROY_MONITOR_INFOS(MIC)			\
do {							\
    if ((MIC).mi != NULL) {				\
	erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi);	\
    }							\
} while (0)
  310. static void collect_one_link(ErtsLink *lnk, void *vmicp)
  311. {
  312. MonitorInfoCollection *micp = vmicp;
  313. EXTEND_MONITOR_INFOS(micp);
  314. if (!(lnk->type == LINK_PID)) {
  315. return;
  316. }
  317. micp->mi[micp->mi_i].entity = lnk->pid;
  318. micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
  319. micp->mi_i++;
  320. }
/*
 * Collector callback for process_info(Pid, monitors): record one entry
 * per MON_ORIGIN monitor, together with the heap space needed to
 * report it. The shape of the entry depends on whether the monitor is
 * by name or by pid, local or remote.
 */
static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
{
    MonitorInfoCollection *micp = vmicp;

    if (mon->type != MON_ORIGIN) {
	return;
    }
    EXTEND_MONITOR_INFOS(micp);
    if (is_atom(mon->pid)) { /* external by name */
	micp->mi[micp->mi_i].entity = mon->name;
	micp->mi[micp->mi_i].node = mon->pid; /* pid field holds the node name here */
	micp->sz += 3; /* need one 2-tuple */
    } else if (is_external_pid(mon->pid)) { /* external by pid */
	micp->mi[micp->mi_i].entity = mon->pid;
	micp->mi[micp->mi_i].node = NIL;
	micp->sz += NC_HEAP_SIZE(mon->pid);
    } else if (!is_nil(mon->name)) { /* internal by name */
	micp->mi[micp->mi_i].entity = mon->name;
	micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
	micp->sz += 3; /* need one 2-tuple */
    } else { /* internal by pid */
	micp->mi[micp->mi_i].entity = mon->pid;
	micp->mi[micp->mi_i].node = NIL;
	/* no additional heap space needed */
    }
    micp->mi_i++;
    micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
}
  348. static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
  349. {
  350. MonitorInfoCollection *micp = vmicp;
  351. if (mon->type != MON_TARGET) {
  352. return;
  353. }
  354. EXTEND_MONITOR_INFOS(micp);
  355. micp->mi[micp->mi_i].node = NIL;
  356. micp->mi[micp->mi_i].entity = mon->pid;
  357. micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
  358. micp->mi_i++;
  359. }
/* State for collecting suspend monitors (process_info(Pid, suspending)). */
typedef struct {
    Process *c_p;		/* calling process */
    ErtsProcLocks c_p_locks;	/* locks held on c_p during suspendee lookups */
    ErtsSuspendMonitor **smi;	/* growable array of collected monitors */
    Uint smi_i;			/* next free index */
    Uint smi_max;		/* allocated capacity */
    int sz;			/* heap words needed for the result */
} ErtsSuspendMonitorInfoCollection;

/* Initialize an empty collection bound to calling process CP / locks CPL. */
#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do {	\
    (SMIC).c_p = (CP);						\
    (SMIC).c_p_locks = (CPL);					\
    (SMIC).smi = NULL;						\
    (SMIC).smi_i = (SMIC).smi_max = 0;				\
    (SMIC).sz = 0;						\
} while(0)

/* Growth step for the smi array. */
#define ERTS_SMI_INC 50

/* Ensure room for one more entry, growing smi by ERTS_SMI_INC when full. */
#define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP)			\
do {									\
    if ((SMICP)->smi_i >= (SMICP)->smi_max) {				\
	(SMICP)->smi = ((SMICP)->smi					\
			? erts_realloc(ERTS_ALC_T_TMP,			\
				       (SMICP)->smi,			\
				       ((SMICP)->smi_max		\
					+ ERTS_SMI_INC)			\
				       * sizeof(ErtsSuspendMonitor *))	\
			: erts_alloc(ERTS_ALC_T_TMP,			\
				     ERTS_SMI_INC			\
				     * sizeof(ErtsSuspendMonitor *)));	\
	(SMICP)->smi_max += ERTS_SMI_INC;				\
    }									\
} while (0)

/* Free the smi array, if one was ever allocated. */
#define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC)		\
do {								\
    if ((SMIC).smi != NULL) {					\
	erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi);		\
    }								\
} while (0)
/*
 * Collector callback for process_info(Pid, suspending): record each
 * suspend monitor whose suspendee is still alive, accounting for the
 * heap space its report needs. Note: folds pending requests into the
 * active count of an already-active monitor as a side effect.
 */
static void
collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
{
    ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
    Process *suspendee = erts_pid2proc(smicp->c_p,
				       smicp->c_p_locks,
				       smon->pid,
				       0);
    if (suspendee) { /* suspendee is alive */
	Sint a, p;
	if (smon->active) {
	    smon->active += smon->pending;
	    smon->pending = 0;
	}
	/* Exactly one of the two counters is non-zero at this point. */
	ASSERT((smon->active && !smon->pending)
	       || (smon->pending && !smon->active));
	ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);

	smicp->smi[smicp->smi_i] = smon;
	smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;

	a = (Sint) smon->active;	/* quiet compiler warnings */
	p = (Sint) smon->pending;	/* on 64-bit machines		*/

	/* Counts too big for an immediate small need bignum heap space. */
	if (!IS_SSMALL(a))
	    smicp->sz += BIG_UINT_HEAP_SIZE;
	if (!IS_SSMALL(p))
	    smicp->sz += BIG_UINT_HEAP_SIZE;
	smicp->smi_i++;
    }
}
  425. static void one_link_size(ErtsLink *lnk, void *vpu)
  426. {
  427. Uint *pu = vpu;
  428. *pu += ERTS_LINK_SIZE*sizeof(Uint);
  429. if(!IS_CONST(lnk->pid))
  430. *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
  431. if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
  432. erts_doforall_links(ERTS_LINK_ROOT(lnk),&one_link_size,vpu);
  433. }
  434. }
  435. static void one_mon_size(ErtsMonitor *mon, void *vpu)
  436. {
  437. Uint *pu = vpu;
  438. *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
  439. if(!IS_CONST(mon->pid))
  440. *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
  441. if(!IS_CONST(mon->ref))
  442. *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
  443. }
/*
 * process_info/[1,2]
 */

/* Failure reasons reported via the *fail_type out-parameter of
 * process_info_list(). */
#define ERTS_PI_FAIL_TYPE_BADARG 0	/* invalid item (list): raise badarg */
#define ERTS_PI_FAIL_TYPE_YIELD 1	/* target busy: yield and retry */
#define ERTS_PI_FAIL_TYPE_AWAIT_EXIT 2	/* target exiting: trap until it is gone */
  450. static ERTS_INLINE ErtsProcLocks
  451. pi_locks(Eterm info)
  452. {
  453. switch (info) {
  454. case am_status:
  455. case am_priority:
  456. return ERTS_PROC_LOCK_STATUS;
  457. case am_links:
  458. case am_monitors:
  459. case am_monitored_by:
  460. case am_suspending:
  461. return ERTS_PROC_LOCK_LINK;
  462. case am_messages:
  463. case am_message_queue_len:
  464. case am_total_heap_size:
  465. return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
  466. case am_memory:
  467. return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_MSGQ;
  468. default:
  469. return ERTS_PROC_LOCK_MAIN;
  470. }
  471. }
/*
 * All valid process_info arguments. The order of this table must match
 * the index mapping in pi_arg2ix() (verified by an ASSERT in
 * process_info_init() in debug builds).
 */
static Eterm pi_args[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_messages,
    am_message_queue_len,
    am_links,
    am_monitors,
    am_monitored_by,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_heap_size,
    am_stack_size,
    am_memory,
    am_garbage_collection,
    am_group_leader,
    am_reductions,
    am_priority,
    am_trace,
    am_binary,
    am_sequential_trace_token,
    am_catchlevel,
    am_backtrace,
    am_last_calls,
    am_total_heap_size,
    am_suspending,
    am_min_heap_size,
    am_min_bin_vheap_size,
#ifdef HYBRID
    am_message_binary
#endif
};

/* Number of entries in pi_args[]. */
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
  510. static ERTS_INLINE Eterm
  511. pi_ix2arg(int ix)
  512. {
  513. if (ix < 0 || ERTS_PI_ARGS <= ix)
  514. return am_undefined;
  515. return pi_args[ix];
  516. }
/*
 * Map a process_info item atom to its index in pi_args[], or -1 for an
 * invalid item. Must stay in sync with the order of pi_args[] (checked
 * by an ASSERT in process_info_init() in debug builds).
 */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name:			return 0;
    case am_current_function:			return 1;
    case am_initial_call:			return 2;
    case am_status:				return 3;
    case am_messages:				return 4;
    case am_message_queue_len:			return 5;
    case am_links:				return 6;
    case am_monitors:				return 7;
    case am_monitored_by:			return 8;
    case am_dictionary:				return 9;
    case am_trap_exit:				return 10;
    case am_error_handler:			return 11;
    case am_heap_size:				return 12;
    case am_stack_size:				return 13;
    case am_memory:				return 14;
    case am_garbage_collection:			return 15;
    case am_group_leader:			return 16;
    case am_reductions:				return 17;
    case am_priority:				return 18;
    case am_trace:				return 19;
    case am_binary:				return 20;
    case am_sequential_trace_token:		return 21;
    case am_catchlevel:				return 22;
    case am_backtrace:				return 23;
    case am_last_calls:				return 24;
    case am_total_heap_size:			return 25;
    case am_suspending:				return 26;
    case am_min_heap_size:			return 27;
    case am_min_bin_vheap_size:			return 28;
#ifdef HYBRID
    case am_message_binary:			return 29;
#endif
    default:					return -1;
    }
}
/* Items reported by process_info/1, in result-list order. */
static Eterm pi_1_keys[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_message_queue_len,
    am_messages,
    am_links,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_priority,
    am_group_leader,
    am_total_heap_size,
    am_heap_size,
    am_stack_size,
    am_reductions,
    am_garbage_collection,
    am_suspending
};

/* Number of entries in pi_1_keys[]. */
#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))

/* Constant list of the keys above; built once by process_info_init(). */
static Eterm pi_1_keys_list;
#if HEAP_ON_C_STACK
/* Static storage for the list's cons cells (used instead of erts_alloc()). */
static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
#endif
/*
 * One-time initialization: build the constant key list used by
 * process_info/1 (consing pi_1_keys in reverse so the list comes out
 * in table order) and, in debug builds, verify that pi_args[] and
 * pi_arg2ix() agree.
 */
static void
process_info_init(void)
{
#if HEAP_ON_C_STACK
    Eterm *hp = &pi_1_keys_list_heap[0];
#else
    Eterm *hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM,sizeof(Eterm)*2*ERTS_PI_1_NO_OF_KEYS);
#endif
    int i;

    pi_1_keys_list = NIL;
    for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
	pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
	hp += 2;
    }
#ifdef DEBUG
    { /* Make sure the process_info argument mappings are consistent */
	int ix;
	for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
	    ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
	}
    }
#endif
}
/*
 * Look up and lock the target process of a process_info request.
 * Returns NULL when pid does not refer to a live local process; under
 * SMP it may also return ERTS_PROC_LOCK_BUSY, meaning the caller
 * should yield and retry.
 */
static ERTS_INLINE Process *
pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
{
#ifdef ERTS_SMP
    /*
     * If the main lock is needed, we use erts_pid2proc_not_running()
     * instead of erts_pid2proc() for two reasons:
     * * Current function of pid and possibly other information will
     *   have been updated so that process_info() is consistent with an
     *   info-request/info-response signal model.
     * * We avoid blocking the whole scheduler executing the
     *   process that is calling process_info() for a long time
     *   which will happen if pid is currently running.
     * The caller of process_info() may have to yield if pid
     * is currently running.
     */
    if (info_locks & ERTS_PROC_LOCK_MAIN)
	return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
					 pid, info_locks);
    else
#endif
	return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
			     pid, info_locks);
}
/* Retrieve a single process_info item for rp; defined later in this file. */
BIF_RETTYPE
process_info_aux(Process *BIF_P,
		 Process *rp,
		 Eterm rpid,
		 Eterm item,
		 int always_wrap);

/* Growth step and initial (stack buffer) size for the item-index
 * buffer used by process_info_list(). */
#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
/*
 * Core of process_info/1 and of process_info/2 with an item list.
 *
 * Walks 'list' collecting the pi_args[] index of each requested item
 * while accumulating the union of process locks needed, then locks the
 * target once, evaluates every item via process_info_aux(), and conses
 * the per-item results into a list in the order the items were given.
 *
 * Returns the result list, am_undefined if pid is not alive, or
 * THE_NON_VALUE with *fail_type set to an ERTS_PI_FAIL_TYPE_* code
 * (badarg for an invalid item/improper list, yield if the target was
 * busy, await-exit if the target has a pending exit).
 */
static Eterm
process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
		  int *fail_type)
{
    int want_messages = 0;
    int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
    int *res_elem_ix = &def_res_elem_ix_buf[0];
    int res_elem_ix_ix = -1;
    int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
    Eterm part_res[ERTS_PI_ARGS];
    Eterm res, arg;
    Uint *hp, *hp_end;
    ErtsProcLocks locks = (ErtsProcLocks) 0;
    int res_len, ix;
    Process *rp = NULL;

    *fail_type = ERTS_PI_FAIL_TYPE_BADARG;

    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
	part_res[ix] = THE_NON_VALUE;

    ASSERT(is_list(list));

    /* Pass 1: validate the items and record their indices. */
    while (is_list(list)) {
	Eterm* consp = list_val(list);

	arg = CAR(consp);
	ix = pi_arg2ix(arg);
	if (ix < 0) {
	    res = THE_NON_VALUE;	/* invalid item: badarg */
	    goto done;
	}
	if (arg == am_messages)
	    want_messages = 1;
	locks |= pi_locks(arg);
	res_elem_ix_ix++;
	if (res_elem_ix_ix >= res_elem_ix_sz) {
	    /* Outgrew the buffer: grow the temporary allocation, or
	     * switch from the stack buffer to a temporary allocation. */
	    if (res_elem_ix != &def_res_elem_ix_buf[0])
		res_elem_ix =
		    erts_realloc(ERTS_ALC_T_TMP,
				 res_elem_ix,
				 sizeof(int)*(res_elem_ix_sz
					      += ERTS_PI_RES_ELEM_IX_BUF_INC));
	    else {
		int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
		int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
						  sizeof(int)*new_res_elem_ix_sz);
		sys_memcpy((void *) new_res_elem_ix,
			   (void *) res_elem_ix,
			   sizeof(int)*res_elem_ix_sz);
		res_elem_ix = new_res_elem_ix;
		res_elem_ix_sz = new_res_elem_ix_sz;
	    }
	}
	res_elem_ix[res_elem_ix_ix] = ix;
	list = CDR(consp);
    }
    if (is_not_nil(list)) {
	res = THE_NON_VALUE;	/* improper list: badarg */
	goto done;
    }

    res_len = res_elem_ix_ix+1;

    ASSERT(res_len > 0);

    rp = pi_pid2proc(c_p, pid, locks|ERTS_PROC_LOCK_STATUS);
    if (!rp) {
	res = am_undefined;
	goto done;
    }
    else if (rp == ERTS_PROC_LOCK_BUSY) {
	rp = NULL;
	res = THE_NON_VALUE;
	*fail_type = ERTS_PI_FAIL_TYPE_YIELD;
	goto done;
    }
    else if (c_p != rp && ERTS_PROC_PENDING_EXIT(rp)) {
	/* Keep the status lock in 'locks' so it gets released below. */
	locks |= ERTS_PROC_LOCK_STATUS;
	res = THE_NON_VALUE;
	*fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
	goto done;
    }
    else if (!(locks & ERTS_PROC_LOCK_STATUS)) {
	/* Status lock only needed for the lookup itself. */
	erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
    }

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (want_messages) {
	ix = pi_arg2ix(am_messages);
	ASSERT(part_res[ix] == THE_NON_VALUE);
	part_res[ix] = process_info_aux(c_p, rp, pid, am_messages, always_wrap);
	ASSERT(part_res[ix] != THE_NON_VALUE);
    }

    /* Pass 2: evaluate each remaining item once; duplicate items in
     * the request share their part_res[] entry. */
    for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
	ix = res_elem_ix[res_elem_ix_ix];
	if (part_res[ix] == THE_NON_VALUE) {
	    arg = pi_ix2arg(ix);
	    part_res[ix] = process_info_aux(c_p, rp, pid, arg, always_wrap);
	    ASSERT(part_res[ix] != THE_NON_VALUE);
	}
    }

    hp = HAlloc(c_p, res_len*2);
    hp_end = hp + res_len*2;
    res = NIL;

    /* Cons back-to-front so the result comes out in request order. */
    for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
	ix = res_elem_ix[res_elem_ix_ix];
	ASSERT(part_res[ix] != THE_NON_VALUE);
	/*
	 * If we should ignore the value of registered_name,
	 * its value is nil. For more info, see comment in the
	 * beginning of process_info_aux().
	 */
	if (is_nil(part_res[ix])) {
	    ASSERT(!always_wrap);
	    ASSERT(pi_ix2arg(ix) == am_registered_name);
	}
	else {
	    res = CONS(hp, part_res[ix], res);
	    hp += 2;
	}
    }

    if (!always_wrap) {
	/* Some cells may have been skipped; return the unused heap. */
	HRelease(c_p, hp_end, hp);
    }

 done:

    if (c_p == rp)
	locks &= ~ERTS_PROC_LOCK_MAIN;	/* never unlock our own main lock */
    if (locks && rp)
	erts_smp_proc_unlock(rp, locks);

    if (res_elem_ix != &def_res_elem_ix_buf[0])
	erts_free(ERTS_ALC_T_TMP, res_elem_ix);

    return res;
}
/*
 * process_info/1 BIF: return the default item set (pi_1_keys_list) for
 * the given pid. Answers 'undefined' for a dead process, raises badarg
 * for a non-(local-)pid argument, and may yield or trap when the
 * target is busy or exiting.
 */
BIF_RETTYPE process_info_1(BIF_ALIST_1)
{
    Eterm res;
    int fail_type;

    /* An external pid carrying this node's dist entry is answered with
     * 'undefined' without any process lookup. */
    if (is_external_pid(BIF_ARG_1)
	&& external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
	BIF_RET(am_undefined);

    if (is_not_internal_pid(BIF_ARG_1)
	|| internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
	BIF_ERROR(BIF_P, BADARG);
    }

    res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
    if (is_non_value(res)) {
	switch (fail_type) {
	case ERTS_PI_FAIL_TYPE_BADARG:
	    BIF_ERROR(BIF_P, BADARG);
	case ERTS_PI_FAIL_TYPE_YIELD:
	    /* Target was busy: reschedule this call. */
	    ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
	case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
	    /* Target is exiting: wait for it, then return 'undefined'. */
	    ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
	default:
	    erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
	}
    }

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
/*
 * process_info/2 BIF: return the requested item (or list of items) for
 * the given pid. Answers 'undefined' for a dead process, raises badarg
 * for invalid arguments, and may yield or trap when the target is busy
 * or exiting.
 */
BIF_RETTYPE process_info_2(BIF_ALIST_2)
{
    Eterm res;
    Process *rp;
    Eterm pid = BIF_ARG_1;
    ErtsProcLocks info_locks;
    int fail_type;

    /* An external pid carrying this node's dist entry is answered with
     * 'undefined' without any process lookup. */
    if (is_external_pid(pid)
	&& external_pid_dist_entry(pid) == erts_this_dist_entry)
	BIF_RET(am_undefined);

    if (is_not_internal_pid(pid)
	|| internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
	BIF_ERROR(BIF_P, BADARG);
    }

    if (is_nil(BIF_ARG_2))
	BIF_RET(NIL);

    if (is_list(BIF_ARG_2)) {
	/* List form: delegate to process_info_list() (always_wrap=1,
	 * so even registered_name comes back as a 2-tuple). */
	res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
	if (is_non_value(res)) {
	    switch (fail_type) {
	    case ERTS_PI_FAIL_TYPE_BADARG:
		BIF_ERROR(BIF_P, BADARG);
	    case ERTS_PI_FAIL_TYPE_YIELD:
		ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
				BIF_ARG_1, BIF_ARG_2);
	    case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
		ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
	    default:
		erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
			 __FILE__, __LINE__);
	    }
	}
	ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
	BIF_RET(res);
    }

    /* Single-item form. */
    if (pi_arg2ix(BIF_ARG_2) < 0)
	BIF_ERROR(BIF_P, BADARG);

    info_locks = pi_locks(BIF_ARG_2);

    rp = pi_pid2proc(BIF_P, pid, info_locks|ERTS_PROC_LOCK_STATUS);
    if (!rp)
	res = am_undefined;
    else if (rp == ERTS_PROC_LOCK_BUSY)
	/* Target is running; yield and retry the whole call. */
	ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
			BIF_ARG_1, BIF_ARG_2);
    else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
	/* Target is exiting: drop our locks and trap until it is gone. */
	erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
	ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
    }
    else {
	/* Status lock was only needed for the lookup itself. */
	if (!(info_locks & ERTS_PROC_LOCK_STATUS))
	    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
	res = process_info_aux(BIF_P, rp, pid, BIF_ARG_2, 0);
    }
    ASSERT(is_value(res));

#ifdef ERTS_SMP
    if (BIF_P == rp)
	info_locks &= ~ERTS_PROC_LOCK_MAIN;	/* never unlock our own main lock */
    if (rp && info_locks)
	erts_smp_proc_unlock(rp, info_locks);
#endif

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
  857. Eterm
  858. process_info_aux(Process *BIF_P,
  859. Process *rp,
  860. Eterm rpid,
  861. Eterm item,
  862. int always_wrap)
  863. {
  864. Eterm *hp;
  865. Eterm res = NIL;
  866. ASSERT(rp);
  867. /*
  868. * Q: Why this always_wrap argument?
  869. *
  870. * A: registered_name is strange. If process has no registered name,
  871. * process_info(Pid, registered_name) returns [], and
  872. * the result of process_info(Pid) has no {registered_name, Name}
  873. * tuple in the resulting list. This is inconsistent with all other
  874. * options, but we do not dare to change it.
  875. *
  876. * When process_info/2 is called with a list as second argument,
  877. * registered_name behaves as it should, i.e. a
  878. * {registered_name, []} will appear in the resulting list.
  879. *
  880. * If always_wrap != 0, process_info_aux() always wrap the result
  881. * in a key two tuple.
  882. */
  883. switch (item) {
  884. case am_registered_name:
  885. if (rp->reg != NULL) {
  886. hp = HAlloc(BIF_P, 3);
  887. res = rp->reg->name;
  888. } else {
  889. if (always_wrap) {
  890. hp = HAlloc(BIF_P, 3);
  891. res = NIL;
  892. }
  893. else {
  894. return NIL;
  895. }
  896. }
  897. break;
  898. case am_current_function:
  899. if (rp->current == NULL) {
  900. rp->current = find_function_from_pc(rp->i);
  901. }
  902. if (rp->current == NULL) {
  903. hp = HAlloc(BIF_P, 3);
  904. res = am_undefined;
  905. } else {
  906. BeamInstr* current;
  907. if (rp->current[0] == am_erlang &&
  908. rp->current[1] == am_process_info &&
  909. (rp->current[2] == 1 || rp->current[2] == 2) &&
  910. (current = find_function_from_pc(rp->cp)) != NULL) {
  911. /*
  912. * The current function is erlang:process_info/2,
  913. * which is not the answer that the application want.
  914. * We will use the function pointed into by rp->cp
  915. * instead.
  916. */
  917. rp->current = current;
  918. }
  919. hp = HAlloc(BIF_P, 3+4);
  920. res = TUPLE3(hp, rp->current[0],
  921. rp->current[1], make_small(rp->current[2]));
  922. hp += 4;
  923. }
  924. break;
  925. case am_initial_call:
  926. hp = HAlloc(BIF_P, 3+4);
  927. res = TUPLE3(hp,
  928. rp->initial[INITIAL_MOD],
  929. rp->initial[INITIAL_FUN],
  930. make_small(rp->initial[INITIAL_ARI]));
  931. hp += 4;
  932. break;
  933. case am_status:
  934. res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
  935. ASSERT(res != am_undefined);
  936. hp = HAlloc(BIF_P, 3);
  937. break;
  938. case am_messages: {
  939. ErlMessage* mp;
  940. int n;
  941. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  942. n = rp->msg.len;
  943. if (n == 0 || rp->trace_flags & F_SENSITIVE) {
  944. hp = HAlloc(BIF_P, 3);
  945. } else {
  946. int remove_bad_messages = 0;
  947. struct {
  948. Uint copy_struct_size;
  949. ErlMessage* msgp;
  950. } *mq = erts_alloc(ERTS_ALC_T_TMP, n*sizeof(*mq));
  951. Sint i = 0;
  952. Uint heap_need = 3;
  953. Eterm *hp_end;
  954. for (mp = rp->msg.first; mp; mp = mp->next) {
  955. heap_need += 2;
  956. mq[i].msgp = mp;
  957. if (rp != BIF_P) {
  958. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  959. if (is_value(msg)) {
  960. mq[i].copy_struct_size = (is_immed(msg)
  961. #ifdef HYBRID
  962. || NO_COPY(msg)
  963. #endif
  964. ? 0
  965. : size_object(msg));
  966. }
  967. else if (mq[i].msgp->data.attached) {
  968. mq[i].copy_struct_size
  969. = erts_msg_attached_data_size(mq[i].msgp);
  970. }
  971. else {
  972. /* Bad distribution message; ignore */
  973. remove_bad_messages = 1;
  974. mq[i].copy_struct_size = 0;
  975. }
  976. heap_need += mq[i].copy_struct_size;
  977. }
  978. else {
  979. mq[i].copy_struct_size = 0;
  980. if (mp->data.attached)
  981. heap_need += erts_msg_attached_data_size(mp);
  982. }
  983. i++;
  984. }
  985. hp = HAlloc(BIF_P, heap_need);
  986. hp_end = hp + heap_need;
  987. ASSERT(i == n);
  988. for (i--; i >= 0; i--) {
  989. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  990. if (rp != BIF_P) {
  991. if (is_value(msg)) {
  992. if (mq[i].copy_struct_size)
  993. msg = copy_struct(msg,
  994. mq[i].copy_struct_size,
  995. &hp,
  996. &MSO(BIF_P));
  997. }
  998. else if (mq[i].msgp->data.attached) {
  999. ErlHeapFragment *hfp;
  1000. /*
  1001. * Decode it into a message buffer and attach it
  1002. * to the message instead of the attached external
  1003. * term.
  1004. *
  1005. * Note that we may not pass a process pointer
  1006. * to erts_msg_distext2heap(), since it would then
  1007. * try to alter locks on that process.
  1008. */
  1009. msg = erts_msg_distext2heap(
  1010. NULL, NULL, &hfp, &ERL_MESSAGE_TOKEN(mq[i].msgp),
  1011. mq[i].msgp->data.dist_ext);
  1012. ERL_MESSAGE_TERM(mq[i].msgp) = msg;
  1013. mq[i].msgp->data.heap_frag = hfp;
  1014. if (is_non_value(msg)) {
  1015. ASSERT(!mq[i].msgp->data.heap_frag);
  1016. /* Bad distribution message; ignore */
  1017. remove_bad_messages = 1;
  1018. continue;
  1019. }
  1020. else {
  1021. /* Make our copy of the message */
  1022. ASSERT(size_object(msg) == hfp->used_size);
  1023. msg = copy_struct(msg,
  1024. hfp->used_size,
  1025. &hp,
  1026. &MSO(BIF_P));
  1027. }
  1028. }
  1029. else {
  1030. /* Bad distribution message; ignore */
  1031. remove_bad_messages = 1;
  1032. continue;
  1033. }
  1034. }
  1035. else {
  1036. if (mq[i].msgp->data.attached) {
  1037. /* Decode it on the heap */
  1038. erts_move_msg_attached_data_to_heap(&hp,
  1039. &MSO(BIF_P),
  1040. mq[i].msgp);
  1041. msg = ERL_MESSAGE_TERM(mq[i].msgp);
  1042. ASSERT(!mq[i].msgp->data.attached);
  1043. if (is_non_value(msg)) {
  1044. /* Bad distribution message; ignore */
  1045. remove_bad_messages = 1;
  1046. continue;
  1047. }
  1048. }
  1049. }
  1050. res = CONS(hp, msg, res);
  1051. hp += 2;
  1052. }
  1053. HRelease(BIF_P, hp_end, hp+3);
  1054. erts_free(ERTS_ALC_T_TMP, mq);
  1055. if (remove_bad_messages) {
  1056. ErlMessage **mpp;
  1057. /*
  1058. * We need to remove bad distribution messages from
  1059. * the queue, so that the value returned for
  1060. * 'message_queue_len' is consistent with the value
  1061. * returned for 'messages'.
  1062. */
  1063. mpp = &rp->msg.first;
  1064. mp = rp->msg.first;
  1065. while (mp) {
  1066. if (is_value(ERL_MESSAGE_TERM(mp))) {
  1067. mpp = &mp->next;
  1068. mp = mp->next;
  1069. }
  1070. else {
  1071. ErlMessage* bad_mp = mp;
  1072. ASSERT(!mp->data.attached);
  1073. if (rp->msg.save == &mp->next)
  1074. rp->msg.save = mpp;
  1075. if (rp->msg.last == &mp->next)
  1076. rp->msg.last = mpp;
  1077. *mpp = mp->next;
  1078. mp = mp->next;
  1079. rp->msg.len--;
  1080. free_message(bad_mp);
  1081. }
  1082. }
  1083. }
  1084. }
  1085. break;
  1086. }
  1087. case am_message_queue_len:
  1088. hp = HAlloc(BIF_P, 3);
  1089. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1090. res = make_small(rp->msg.len);
  1091. break;
  1092. case am_links: {
  1093. MonitorInfoCollection mic;
  1094. int i;
  1095. Eterm item;
  1096. INIT_MONITOR_INFOS(mic);
  1097. erts_doforall_links(rp->nlinks,&collect_one_link,&mic);
  1098. hp = HAlloc(BIF_P, 3 + mic.sz);
  1099. res = NIL;
  1100. for (i = 0; i < mic.mi_i; i++) {
  1101. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1102. res = CONS(hp, item, res);
  1103. hp += 2;
  1104. }
  1105. DESTROY_MONITOR_INFOS(mic);
  1106. break;
  1107. }
  1108. case am_monitors: {
  1109. MonitorInfoCollection mic;
  1110. int i;
  1111. INIT_MONITOR_INFOS(mic);
  1112. erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic);
  1113. hp = HAlloc(BIF_P, 3 + mic.sz);
  1114. res = NIL;
  1115. for (i = 0; i < mic.mi_i; i++) {
  1116. if (is_atom(mic.mi[i].entity)) {
  1117. /* Monitor by name.
  1118. * Build {process, {Name, Node}} and cons it.
  1119. */
  1120. Eterm t1, t2;
  1121. t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
  1122. hp += 3;
  1123. t2 = TUPLE2(hp, am_process, t1);
  1124. hp += 3;
  1125. res = CONS(hp, t2, res);
  1126. hp += 2;
  1127. }
  1128. else {
  1129. /* Monitor by pid. Build {process, Pid} and cons it. */
  1130. Eterm t;
  1131. Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1132. t = TUPLE2(hp, am_process, pid);
  1133. hp += 3;
  1134. res = CONS(hp, t, res);
  1135. hp += 2;
  1136. }
  1137. }
  1138. DESTROY_MONITOR_INFOS(mic);
  1139. break;
  1140. }
  1141. case am_monitored_by: {
  1142. MonitorInfoCollection mic;
  1143. int i;
  1144. Eterm item;
  1145. INIT_MONITOR_INFOS(mic);
  1146. erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic);
  1147. hp = HAlloc(BIF_P, 3 + mic.sz);
  1148. res = NIL;
  1149. for (i = 0; i < mic.mi_i; ++i) {
  1150. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1151. res = CONS(hp, item, res);
  1152. hp += 2;
  1153. }
  1154. DESTROY_MONITOR_INFOS(mic);
  1155. break;
  1156. }
  1157. case am_suspending: {
  1158. ErtsSuspendMonitorInfoCollection smic;
  1159. int i;
  1160. Eterm item;
  1161. #ifdef DEBUG
  1162. Eterm *hp_end;
  1163. #endif
  1164. ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
  1165. BIF_P,
  1166. (BIF_P == rp
  1167. ? ERTS_PROC_LOCK_MAIN
  1168. : 0) | ERTS_PROC_LOCK_LINK);
  1169. erts_doforall_suspend_monitors(rp->suspend_monitors,
  1170. &collect_one_suspend_monitor,
  1171. &smic);
  1172. hp = HAlloc(BIF_P, 3 + smic.sz);
  1173. #ifdef DEBUG
  1174. hp_end = hp + smic.sz;
  1175. #endif
  1176. res = NIL;
  1177. for (i = 0; i < smic.smi_i; i++) {
  1178. Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
  1179. Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
  1180. Eterm active;
  1181. Eterm pending;
  1182. if (IS_SSMALL(a))
  1183. active = make_small(a);
  1184. else {
  1185. active = small_to_big(a, hp);
  1186. hp += BIG_UINT_HEAP_SIZE;
  1187. }
  1188. if (IS_SSMALL(p))
  1189. pending = make_small(p);
  1190. else {
  1191. pending = small_to_big(p, hp);
  1192. hp += BIG_UINT_HEAP_SIZE;
  1193. }
  1194. item = TUPLE3(hp, smic.smi[i]->pid, active, pending);
  1195. hp += 4;
  1196. res = CONS(hp, item, res);
  1197. hp += 2;
  1198. }
  1199. ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
  1200. ASSERT(hp == hp_end);
  1201. break;
  1202. }
  1203. case am_dictionary:
  1204. if (rp->trace_flags & F_SENSITIVE) {
  1205. res = NIL;
  1206. } else {
  1207. res = erts_dictionary_copy(BIF_P, rp->dictionary);
  1208. }
  1209. hp = HAlloc(BIF_P, 3);
  1210. break;
  1211. case am_trap_exit:
  1212. hp = HAlloc(BIF_P, 3);
  1213. if (rp->flags & F_TRAPEXIT)
  1214. res = am_true;
  1215. else
  1216. res = am_false;
  1217. break;
  1218. case am_error_handler:
  1219. hp = HAlloc(BIF_P, 3);
  1220. res = erts_proc_get_error_handler(BIF_P);
  1221. break;
  1222. case am_heap_size: {
  1223. Uint hsz = 3;
  1224. (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
  1225. hp = HAlloc(BIF_P, hsz);
  1226. res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
  1227. break;
  1228. }
  1229. case am_fullsweep_after: {
  1230. Uint hsz = 3;
  1231. (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
  1232. hp = HAlloc(BIF_P, hsz);
  1233. res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
  1234. break;
  1235. }
  1236. case am_min_heap_size: {
  1237. Uint hsz = 3;
  1238. (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
  1239. hp = HAlloc(BIF_P, hsz);
  1240. res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
  1241. break;
  1242. }
  1243. case am_min_bin_vheap_size: {
  1244. Uint hsz = 3;
  1245. (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
  1246. hp = HAlloc(BIF_P, hsz);
  1247. res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
  1248. break;
  1249. }
  1250. case am_total_heap_size: {
  1251. ErlMessage *mp;
  1252. Uint total_heap_size;
  1253. Uint hsz = 3;
  1254. total_heap_size = rp->heap_sz;
  1255. if (rp->old_hend && rp->old_heap)
  1256. total_heap_size += rp->old_hend - rp->old_heap;
  1257. total_heap_size += rp->mbuf_sz;
  1258. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1259. for (mp = rp->msg.first; mp; mp = mp->next)
  1260. if (mp->data.attached)
  1261. total_heap_size += erts_msg_attached_data_size(mp);
  1262. (void) erts_bld_uint(NULL, &hsz, total_heap_size);
  1263. hp = HAlloc(BIF_P, hsz);
  1264. res = erts_bld_uint(&hp, NULL, total_heap_size);
  1265. break;
  1266. }
  1267. case am_stack_size: {
  1268. Uint stack_size = STACK_START(rp) - rp->stop;
  1269. Uint hsz = 3;
  1270. (void) erts_bld_uint(NULL, &hsz, stack_size);
  1271. hp = HAlloc(BIF_P, hsz);
  1272. res = erts_bld_uint(&hp, NULL, stack_size);
  1273. break;
  1274. }
  1275. case am_memory: { /* Memory consumed in bytes */
  1276. ErlMessage *mp;
  1277. Uint size = 0;
  1278. Uint hsz = 3;
  1279. struct saved_calls *scb;
  1280. size += sizeof(Process);
  1281. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1282. erts_doforall_links(rp->nlinks, &one_link_size, &size);
  1283. erts_doforall_monitors(rp->monitors, &one_mon_size, &size);
  1284. size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
  1285. if (rp->old_hend && rp->old_heap)
  1286. size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
  1287. size += rp->msg.len * sizeof(ErlMessage);
  1288. for (mp = rp->msg.first; mp; mp = mp->next)
  1289. if (mp->data.attached)
  1290. size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
  1291. if (rp->arg_reg != rp->def_arg_reg) {
  1292. size += rp->arity * sizeof(rp->arg_reg[0]);
  1293. }
  1294. if (rp->psd)
  1295. size += sizeof(ErtsPSD);
  1296. scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
  1297. if (scb) {
  1298. size += (sizeof(struct saved_calls)
  1299. + (scb->len-1) * sizeof(scb->ct[0]));
  1300. }
  1301. size += erts_dicts_mem_size(rp);
  1302. (void) erts_bld_uint(NULL, &hsz, size);
  1303. hp = HAlloc(BIF_P, hsz);
  1304. res = erts_bld_uint(&hp, NULL, size);
  1305. break;
  1306. }
  1307. case am_garbage_collection: {
  1308. DECL_AM(minor_gcs);
  1309. Eterm t;
  1310. hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3); /* last "3" is for outside tuple */
  1311. t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
  1312. res = CONS(hp, t, NIL); hp += 2;
  1313. t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
  1314. res = CONS(hp, t, res); hp += 2;
  1315. t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
  1316. res = CONS(hp, t, res); hp += 2;
  1317. t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
  1318. res = CONS(hp, t, res); hp += 2;
  1319. break;
  1320. }
  1321. case am_group_leader: {
  1322. int sz = NC_HEAP_SIZE(rp->group_leader);
  1323. hp = HAlloc(BIF_P, 3 + sz);
  1324. res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
  1325. break;
  1326. }
  1327. case am_reductions: {
  1328. Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
  1329. Uint hsz = 3;
  1330. (void) erts_bld_uint(NULL, &hsz, reds);
  1331. hp = HAlloc(BIF_P, hsz);
  1332. res = erts_bld_uint(&hp, NULL, reds);
  1333. break;
  1334. }
  1335. case am_priority:
  1336. hp = HAlloc(BIF_P, 3);
  1337. res = erts_get_process_priority(rp);
  1338. break;
  1339. case am_trace:
  1340. hp = HAlloc(BIF_P, 3);
  1341. res = make_small(rp->trace_flags & TRACEE_FLAGS);
  1342. break;
  1343. case am_binary: {
  1344. Uint sz = 3;
  1345. (void) bld_bin_list(NULL, &sz, &MSO(rp));
  1346. hp = HAlloc(BIF_P, sz);
  1347. res = bld_bin_list(&hp, NULL, &MSO(rp));
  1348. break;
  1349. }
  1350. #ifdef HYBRID
  1351. case am_message_binary: {
  1352. Uint sz = 3;
  1353. (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso);
  1354. hp = HAlloc(BIF_P, sz);
  1355. res = bld_bin_list(&hp, NULL, erts_global_offheap.mso);
  1356. break;
  1357. }
  1358. #endif
  1359. case am_sequential_trace_token:
  1360. res = copy_object(rp->seq_trace_token, BIF_P);
  1361. hp = HAlloc(BIF_P, 3);
  1362. break;
  1363. case am_catchlevel:
  1364. hp = HAlloc(BIF_P, 3);
  1365. res = make_small(catchlevel(BIF_P));
  1366. break;
  1367. case am_backtrace: {
  1368. erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
  1369. erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
  1370. res = new_binary(BIF_P, (byte *) dsbufp->str, (int) dsbufp->str_len);
  1371. erts_destroy_tmp_dsbuf(dsbufp);
  1372. hp = HAlloc(BIF_P, 3);
  1373. break;
  1374. }
  1375. case am_last_calls: {
  1376. struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P);
  1377. if (!scb) {
  1378. hp = HAlloc(BIF_P, 3);
  1379. res = am_false;
  1380. } else {
  1381. /*
  1382. * One cons cell and a 3-struct, and a 2-tuple.
  1383. * Might be less than that, if there are sends, receives or timeouts,
  1384. * so we must do a HRelease() to avoid creating holes.
  1385. */
  1386. Uint needed = scb->n*(2+4) + 3;
  1387. Eterm* limit;
  1388. Eterm term, list;
  1389. int i, j;
  1390. hp = HAlloc(BIF_P, needed);
  1391. limit = hp + needed;
  1392. list = NIL;
  1393. for (i = 0; i < scb->n; i++) {
  1394. j = scb->cur - i - 1;
  1395. if (j < 0)
  1396. j += scb->len;
  1397. if (scb->ct[j] == &exp_send)
  1398. term = am_send;
  1399. else if (scb->ct[j] == &exp_receive)
  1400. term = am_receive;
  1401. else if (scb->ct[j] == &exp_timeout)
  1402. term = am_timeout;
  1403. else {
  1404. term = TUPLE3(hp,
  1405. scb->ct[j]->code[0],
  1406. scb->ct[j]->code[1],
  1407. make_small(scb->ct[j]->code[2]));
  1408. hp += 4;
  1409. }
  1410. list = CONS(hp, term, list);
  1411. hp += 2;
  1412. }
  1413. res = list;
  1414. res = TUPLE2(hp, item, res);
  1415. hp += 3;
  1416. HRelease(BIF_P,limit,hp);
  1417. return res;
  1418. }
  1419. break;
  1420. }
  1421. default:
  1422. return THE_NON_VALUE; /* will produce badarg */
  1423. }
  1424. return TUPLE2(hp, item, res);
  1425. }
  1426. #undef MI_INC
#if defined(VALGRIND)
/*
 * Return non-zero iff the VALGRIND_LOG_XML environment variable is set,
 * in which case info-log output is wrapped in <erlang_info_log> XML tags
 * (see info_1_tuple()).  Only existence of the variable matters, not its
 * value.
 */
static int check_if_xml(void)
{
    char buf[1];
    size_t bufsz = sizeof(buf);
    /* NOTE(review): presumably erts_sys_getenv() returns a value >= 0
     * (possibly indicating "buffer too small") whenever the variable
     * exists, and a negative value when it does not — confirm against
     * the sys implementation. */
    return erts_sys_getenv("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
}
#else
/* Without valgrind there is never an XML log. */
#define check_if_xml() 0
#endif
/*
 * This function takes care of calls to erlang:system_info/1 when the argument
 * is a tuple.  The first tuple element selects the kind of information and
 * the remaining element(s) act as arguments to that selector.  Unknown
 * selectors (and selectors with the wrong arity) fall through to 'badarg'.
 */
static BIF_RETTYPE
info_1_tuple(Process* BIF_P,	/* Pointer to current process. */
	     Eterm* tp,		/* Pointer to first element in tuple */
	     int arity)		/* Arity of tuple (untagged). */
{
    Eterm ret;
    Eterm sel;

    sel = *tp++;	/* tp now points at the selector's argument(s) */

    if (sel == am_allocator_sizes && arity == 2) {
	/* {allocator_sizes, Alloc} */
	return erts_allocator_info_term(BIF_P, *tp, 1);
    } else if (sel == am_wordsize && arity == 2) {
	/* {wordsize, internal | external} */
	if (tp[0] == am_internal) {
	    return make_small(sizeof(Eterm));
	}
	if (tp[0] == am_external) {
	    return make_small(sizeof(UWord));
	}
	goto badarg;
    } else if (sel == am_allocated) {
	if (arity == 2) {
	    /* {allocated, FileName}: dump the instrument memory map to file.
	     * The file name is given as a list of characters (a string). */
	    Eterm res = THE_NON_VALUE;
	    char *buf;
	    int len = is_string(*tp);
	    if (len <= 0)
		return res;	/* non-string argument -> badarg in caller */
	    buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
	    if (intlist_to_buf(*tp, buf, len) != len)
		erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
	    buf[len] = '\0';
	    res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    if (is_non_value(res))
		goto badarg;
	    return res;
	}
	else if (arity == 3 && tp[0] == am_status) {
	    /* {allocated, status, What | FileName} */
	    if (is_atom(tp[1]))
		return erts_instr_get_stat(BIF_P, tp[1], 1);
	    else {
		/* File-name variant: dump the statistics to file instead. */
		Eterm res = THE_NON_VALUE;
		char *buf;
		int len = is_string(tp[1]);
		if (len <= 0)
		    return res;
		buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
		if (intlist_to_buf(tp[1], buf, len) != len)
		    erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
		buf[len] = '\0';
		res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		if (is_non_value(res))
		    goto badarg;
		return res;
	    }
	}
	else
	    goto badarg;
    } else if (sel == am_allocator && arity == 2) {
	/* {allocator, Alloc} */
	return erts_allocator_info_term(BIF_P, *tp, 0);
    } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
	/* Raw (unformatted) CPU topology term. */
	return erts_get_cpu_topology_term(BIF_P, *tp);
    } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
	Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
	if (res == THE_NON_VALUE)
	    goto badarg;
	/* Formatting of the topology is done in Erlang code via a trap. */
	ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
	return ret;
#if defined(PURIFY) || defined(VALGRIND)
    } else if (ERTS_IS_ATOM_STR("error_checker", sel)
#if defined(PURIFY)
	       || sel == am_purify
#elif defined(VALGRIND)
	       || ERTS_IS_ATOM_STR("valgrind", sel)
#endif
	) {
	/* Interface to the external error checker (purify or valgrind). */
	if (*tp == am_memory) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
#elif defined(VALGRIND)
	    VALGRIND_DO_LEAK_CHECK;
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_fd) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
#elif defined(VALGRIND)
	    /* Not present in valgrind... */
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_running) {
#if defined(PURIFY)
	    BIF_RET(purify_is_running() ? am_true : am_false);
#elif defined(VALGRIND)
	    BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
#endif
	} else if (is_list(*tp)) {
	    /* An iolist argument is logged through the error checker. */
#if defined(PURIFY)
#define ERTS_ERROR_CHECKER_PRINTF purify_printf
#elif defined(VALGRIND)
#define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#endif
	    int buf_size = 8*1024; /* Try with 8KB first */
	    char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
	    int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
	    if (r < 0) {
		/* Buffer too small; measure the iolist and retry with an
		 * exactly-sized buffer. */
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		buf_size = io_list_len(*tp);
		if (buf_size < 0)
		    goto badarg;
		buf_size++;
		buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
		r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
		ASSERT(r == buf_size - 1);
	    }
	    /* r is the number of bytes left unused; terminate the string. */
	    buf[buf_size - 1 - r] = '\0';
	    if (check_if_xml()) {
		ERTS_ERROR_CHECKER_PRINTF("<erlang_info_log>"
					  "%s</erlang_info_log>\n", buf);
	    } else {
		ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
	    }
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
	}
#endif
#ifdef QUANTIFY
    } else if (sel == am_quantify) {
	/* Control the quantify profiler. */
	if (*tp == am_clear) {
	    quantify_clear_data();
	    BIF_RET(am_true);
	} else if (*tp == am_start) {
	    quantify_start_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_stop) {
	    quantify_stop_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_running) {
	    BIF_RET(quantify_is_running() ? am_true : am_false);
	}
#endif
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
    } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
	/* Program the UltraSPARC performance control register via the
	 * Solaris perfmon driver.  Failures are reported as 'false'. */
	unsigned long long tmp;
	int fd;
	int rc;
	if (arity != 2 || !is_small(*tp)) {
	    goto badarg;
	}
	tmp = signed_val(*tp);
	if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
	    BIF_RET(am_false);
	}
	rc = ioctl(fd, PERFMON_SETPCR, &tmp);
	close(fd);
	if (rc < 0) {
	    BIF_RET(am_false);
	}
	BIF_RET(am_true);
#endif
    }

 badarg:
    ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);

    return ret;
}
  1606. #define INFO_DSBUF_INC_SZ 256
  1607. static erts_dsprintf_buf_t *
  1608. grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1609. {
  1610. size_t size;
  1611. size_t free_size = dsbufp->size - dsbufp->str_len;
  1612. ASSERT(dsbufp);
  1613. if (need <= free_size)
  1614. return dsbufp;
  1615. size = need - free_size + INFO_DSBUF_INC_SZ;
  1616. size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
  1617. size += dsbufp->size;
  1618. ASSERT(dsbufp->str_len + need <= size);
  1619. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
  1620. (void *) dsbufp->str,
  1621. size);
  1622. dsbufp->size = size;
  1623. return dsbufp;
  1624. }
  1625. static erts_dsprintf_buf_t *
  1626. erts_create_info_dsbuf(Uint size)
  1627. {
  1628. Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
  1629. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
  1630. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
  1631. sizeof(erts_dsprintf_buf_t));
  1632. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  1633. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
  1634. dsbufp->str[0] = '\0';
  1635. dsbufp->size = init_size;
  1636. return dsbufp;
  1637. }
  1638. static void
  1639. erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
  1640. {
  1641. if (dsbufp->str)
  1642. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
  1643. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
  1644. }
/*
 * Build a term describing the C compiler the emulator was built with:
 * {gnuc, Version} for gcc, {msc, _MSC_VER} for Microsoft C, or
 * {undefined, undefined} otherwise.  For gcc, Version is a tuple of the
 * available version components when there is more than one, or a plain
 * integer when only the major version is known.
 *
 * Uses the standard two-pass erts_bld_* protocol: a sizing pass with
 * hpp == NULL accumulating into *szp, then a build pass with a real heap
 * pointer.
 */
static Eterm
c_compiler_used(Eterm **hpp, Uint *szp)
{
#if defined(__GNUC__)
#  if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#    define ERTS_GNUC_VSN_NUMS 3
#  elif defined(__GNUC_MINOR__)
#    define ERTS_GNUC_VSN_NUMS 2
#  else
#    define ERTS_GNUC_VSN_NUMS 1
#  endif
    /* The nested #if's below splice an optional inner version tuple into
     * the argument list of the outer erts_bld_tuple() call; keep the
     * parenthesization in sync with ERTS_GNUC_VSN_NUMS. */
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "gnuc"),
#if ERTS_GNUC_VSN_NUMS > 1
			  erts_bld_tuple(hpp,
					 szp,
					 ERTS_GNUC_VSN_NUMS,
#endif
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC__)
#ifdef __GNUC_MINOR__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_MINOR__)
#ifdef __GNUC_PATCHLEVEL__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_PATCHLEVEL__)
#endif
#endif
#if ERTS_GNUC_VSN_NUMS > 1
					 )
#endif
	);
#elif defined(_MSC_VER)
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "msc"),
			  erts_bld_uint(hpp, szp, (Uint) _MSC_VER));
#else
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  am_undefined,
			  am_undefined);
#endif
}
  1695. static int is_snif_term(Eterm module_atom) {
  1696. int i;
  1697. Atom *a = atom_tab(atom_val(module_atom));
  1698. char *aname = (char *) a->name;
  1699. /* if a->name has a '.' then the bif (snif) is bogus i.e a package */
  1700. for (i = 0; i < a->len; i++) {
  1701. if (aname[i] == '.')
  1702. return 0;
  1703. }
  1704. return 1;
  1705. }
  1706. static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
  1707. Eterm tup;
  1708. tup = erts_bld_tuple(hpp, szp, 3, bif_table[ix].module, bif_table[ix].name, make_small(bif_table[ix].arity));
  1709. res = erts_bld_cons( hpp, szp, tup, res);
  1710. return res;
  1711. }
  1712. static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
  1713. int i;
  1714. for (i = 0; i < BIF_SIZE; i++) {
  1715. if (is_snif_term(bif_table[i].module)) {
  1716. res = build_snif_term(hpp, szp, i, res);
  1717. }
  1718. }
  1719. return res;
  1720. }
  1721. BIF_RETTYPE system_info_1(BIF_ALIST_1)
  1722. {
  1723. Eterm res;
  1724. Eterm* hp;
  1725. Eterm val;
  1726. int i;
  1727. if (is_tuple(BIF_ARG_1)) {
  1728. Eterm* tp = tuple_val(BIF_ARG_1);
  1729. Uint arity = *tp++;
  1730. return info_1_tuple(BIF_P, tp, arityval(arity));
  1731. } else if (BIF_ARG_1 == am_scheduler_id) {
  1732. #ifdef ERTS_SMP
  1733. ASSERT(BIF_P->scheduler_data);
  1734. BIF_RET(make_small(BIF_P->scheduler_data->no));
  1735. #else
  1736. BIF_RET(make_small(1));
  1737. #endif
  1738. } else if (BIF_ARG_1 == am_compat_rel) {
  1739. ASSERT(erts_compat_rel > 0);
  1740. BIF_RET(make_small(erts_compat_rel));
  1741. } else if (BIF_ARG_1 == am_multi_scheduling) {
  1742. #ifndef ERTS_SMP
  1743. BIF_RET(am_disabled);
  1744. #else
  1745. if (erts_no_schedulers == 1)
  1746. BIF_RET(am_disabled);
  1747. else {
  1748. BIF_RET(erts_is_multi_scheduling_blocked()
  1749. ? am_blocked
  1750. : am_enabled);
  1751. }
  1752. #endif
  1753. } else if (BIF_ARG_1 == am_build_type) {
  1754. #if defined(DEBUG)
  1755. ERTS_DECL_AM(debug);
  1756. BIF_RET(AM_debug);
  1757. #elif defined(PURIFY)
  1758. ERTS_DECL_AM(purify);
  1759. BIF_RET(AM_purify);
  1760. #elif defined(QUANTIFY)
  1761. ERTS_DECL_AM(quantify);
  1762. BIF_RET(AM_quantify);
  1763. #elif defined(PURECOV)
  1764. ERTS_DECL_AM(purecov);
  1765. BIF_RET(AM_purecov);
  1766. #elif defined(ERTS_GCOV)
  1767. ERTS_DECL_AM(gcov);
  1768. BIF_RET(AM_gcov);
  1769. #elif defined(VALGRIND)
  1770. ERTS_DECL_AM(valgrind);
  1771. BIF_RET(AM_valgrind);
  1772. #elif defined(GPROF)
  1773. ERTS_DECL_AM(gprof);
  1774. BIF_RET(AM_gprof);
  1775. #elif defined(ERTS_ENABLE_LOCK_COUNT)
  1776. ERTS_DECL_AM(lcnt);
  1777. BIF_RET(AM_lcnt);
  1778. #else
  1779. BIF_RET(am_opt);
  1780. #endif
  1781. BIF_RET(res);
  1782. } else if (BIF_ARG_1 == am_allocated_areas) {
  1783. res = erts_allocated_areas(NULL, NULL, BIF_P);
  1784. BIF_RET(res);
  1785. } else if (BIF_ARG_1 == am_allocated) {
  1786. BIF_RET(erts_instr_get_memory_map(BIF_P));
  1787. } else if (BIF_ARG_1 == am_hipe_architecture) {
  1788. #if defined(HIPE)
  1789. BIF_RET(hipe_arch_name);
  1790. #else
  1791. BIF_RET(am_undefined);
  1792. #endif
  1793. } else if (BIF_ARG_1 == am_trace_control_word) {
  1794. BIF_RET(db_get_trace_control_word_0(BIF_P));
  1795. } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
  1796. BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
  1797. } else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
  1798. BIF_RET((erts_ets_always_compress) ? am_true : am_false);
  1799. } else if (ERTS_IS_ATOM_STR("s…

Large files files are truncated, but you can click here to view the full file