PageRenderTime 78ms CodeModel.GetById 19ms RepoModel.GetById 1ms app.codeStats 1ms

/erts/emulator/beam/erl_bif_info.c

https://github.com/notarf/otp
C | 4174 lines | 3567 code | 392 blank | 215 comment | 952 complexity | bde47a34cfe099565509320ecd3c1aef MD5 | raw file
Possible License(s): BSD-2-Clause

Large files are truncated; click here to view the full file.

  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1999-2011. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include "sys.h"
  23. #include "erl_vm.h"
  24. #include "global.h"
  25. #include "erl_process.h"
  26. #include "erl_nmgc.h"
  27. #include "error.h"
  28. #include "erl_driver.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "erl_version.h"
  32. #include "erl_db_util.h"
  33. #include "erl_message.h"
  34. #include "erl_binary.h"
  35. #include "erl_db.h"
  36. #include "erl_instrument.h"
  37. #include "dist.h"
  38. #include "erl_gc.h"
  39. #include "erl_cpu_topology.h"
  40. #include "erl_async.h"
  41. #include "erl_thr_progress.h"
  42. #ifdef HIPE
  43. #include "hipe_arch.h"
  44. #endif
  45. #ifdef ERTS_ENABLE_LOCK_COUNT
  46. #include "erl_lock_count.h"
  47. #endif
  48. #ifdef VALGRIND
  49. #include <valgrind/valgrind.h>
  50. #include <valgrind/memcheck.h>
  51. #endif
/* Trap exports used to continue allocator-info gathering across reductions. */
static Export* alloc_info_trap = NULL;
static Export* alloc_sizes_trap = NULL;

static Export *gather_sched_wall_time_res_trap;

/* Declare a local Eterm AM_<S> bound to the atom whose name is the literal S. */
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/* Keep erts_system_version as a global variable for easy access from a core.
 * The string is assembled at compile time from the release/version macros
 * plus one bracketed tag per enabled build-time feature; the %beu/%d/%s
 * placeholders are filled in at runtime by erts_print_system_version(). */
static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
                                     " (erts-" ERLANG_VERSION ")"
#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
                                     " [no-c-stack-objects]"
#endif
#ifndef OTP_RELEASE
                                     " [source]"
#endif
#ifdef ARCH_64
#if HALFWORD_HEAP
                                     " [64-bit halfword]"
#else
                                     " [64-bit]"
#endif
#endif
#ifdef ERTS_SMP
                                     " [smp:%beu:%beu]" /* schedulers total:online */
#endif
#ifdef USE_THREADS
                                     " [async-threads:%d]"
#endif
#ifdef HIPE
                                     " [hipe]"
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
                                     " [kernel-poll:%s]" /* "true"/"false" */
#endif
#ifdef HYBRID
                                     " [hybrid heap]"
#endif
#ifdef INCREMENTAL
                                     " [incremental GC]"
#endif
#ifdef ET_DEBUG
#if ET_DEBUG
                                     " [type-assertions]"
#endif
#endif
#ifdef DEBUG
                                     " [debug-compiled]"
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
                                     " [lock-checking]"
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
                                     " [lock-counting]"
#endif
#ifdef PURIFY
                                     " [purify-compiled]"
#endif
#ifdef VALGRIND
                                     " [valgrind-compiled]"
#endif
                                     "\n");
/* Number of elements in a (true, non-decayed) array. */
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))

#if defined(HAVE_SOLARIS_SPARC_PERFMON)
# include <sys/ioccom.h>
# define PERFMON_SETPCR _IOW('P', 1, unsigned long long)
# define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
#endif

/* Cached, pre-built {OsType,OsFlavor} and {Major,Minor,Build} tuples */
static Eterm os_type_tuple;
static Eterm os_version_tuple;
/* Forward declarations for helpers defined later in this file. */
static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item);
static Eterm
current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
/*
 * Build a list of {BinaryAddress, OrigSize, RefCount} 3-tuples, one per
 * off-heap ProcBin in 'oh'.  Standard two-pass ERTS build helper: with
 * szp != NULL only the required heap size is accumulated into *szp; with
 * hpp != NULL the terms are built at *hpp, which is advanced past the
 * words used (4 per 3-tuple + 2 per cons cell).
 */
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
    struct erl_off_heap_header* ohh;
    Eterm res = NIL;
    Eterm tuple;

    for (ohh = oh->first; ohh; ohh = ohh->next) {
        if (ohh->thing_word == HEADER_PROC_BIN) {
            ProcBin* pb = (ProcBin*) ohh;
            Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
            Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);

            if (szp)
                *szp += 4+2; /* 3-tuple (4 words) + cons cell (2 words) */
            if (hpp) {
                Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc);
                tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
                /* Cons cell is laid out right after the 4-word tuple. */
                res = CONS(*hpp + 4, tuple, res);
                *hpp += 4+2;
            }
        }
    }
    return res;
}
  147. /*
  148. make_monitor_list:
  149. returns a list of records..
  150. -record(erl_monitor, {
  151. type, % MON_ORIGIN or MON_TARGET (1 or 3)
  152. ref,
  153. pid, % Process or nodename
  154. name % registered name or []
  155. }).
  156. */
  157. static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
  158. {
  159. Uint *psz = vpsz;
  160. *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
  161. *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
  162. *psz += 8; /* CONS + 5-tuple */
  163. }
/* State threaded through do_make_one_mon_element() while building the
 * monitor list: owning process, current heap pointer, accumulated result
 * list and the record tag atom (AM_erl_monitor). */
typedef struct {
    Process *p;
    Eterm *hp;
    Eterm res;
    Eterm tag;
} MonListContext;
/*
 * Build one {Tag, Type, Ref, Pid, Name} 5-tuple for 'mon' and cons it onto
 * pmlc->res.  Heap space was pre-computed by do_calc_mon_size(), so pmlc->hp
 * is simply advanced: 6 words for the tuple, then 2 for the cons cell.
 */
static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
    MonListContext *pmlc = vpmlc;
    Eterm tup;
    /* Immediate (constant) refs/pids are used as-is; others are copied
     * onto the heap via STORE_NC, which advances pmlc->hp. */
    Eterm r = (IS_CONST(mon->ref)
               ? mon->ref
               : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
    Eterm p = (IS_CONST(mon->pid)
               ? mon->pid
               : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
    tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
    pmlc->hp += 6;
    pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
    pmlc->hp += 2;
}
  185. static Eterm
  186. make_monitor_list(Process *p, ErtsMonitor *root)
  187. {
  188. DECL_AM(erl_monitor);
  189. Uint sz = 0;
  190. MonListContext mlc;
  191. erts_doforall_monitors(root, &do_calc_mon_size, &sz);
  192. if (sz == 0) {
  193. return NIL;
  194. }
  195. mlc.p = p;
  196. mlc.hp = HAlloc(p,sz);
  197. mlc.res = NIL;
  198. mlc.tag = AM_erl_monitor;
  199. erts_doforall_monitors(root, &do_make_one_mon_element, &mlc);
  200. return mlc.res;
  201. }
  202. /*
  203. make_link_list:
  204. returns a list of records..
  205. -record(erl_link, {
  206. type, % LINK_NODE or LINK_PID (1 or 3)
  207. pid, % Process or nodename
  208. targets % List of erl_link's or nil
  209. }).
  210. */
  211. static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
  212. {
  213. Uint *psz = vpsz;
  214. *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
  215. if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
  216. /* Node links use this pointer as ref counter... */
  217. erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
  218. }
  219. *psz += 7; /* CONS + 4-tuple */
  220. }
/* State threaded through do_make_one_lnk_element() while building the
 * link list: owning process, current heap pointer, accumulated result
 * list and the record tag atom (AM_erl_link). */
typedef struct {
    Process *p;
    Eterm *hp;
    Eterm res;
    Eterm tag;
} LnkListContext;
/*
 * Build one {Tag, Type, Pid, Targets} 4-tuple for 'lnk' and cons it onto
 * pllc->res.  For LINK_NODE links Targets is the reference count; otherwise
 * it is a sub-list built recursively from ERTS_LINK_ROOT(lnk).  During the
 * recursion pllc->res is temporarily swapped out so the sub-list is built
 * into a fresh NIL-terminated list, then restored.
 */
static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
{
    LnkListContext *pllc = vpllc;
    Eterm tup;
    Eterm old_res, targets = NIL;
    Eterm p = (IS_CONST(lnk->pid)
               ? lnk->pid
               : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
    if (lnk->type == LINK_NODE) {
        /* Node links use the root pointer slot as a reference counter. */
        targets = make_small(ERTS_LINK_REFC(lnk));
    } else if (ERTS_LINK_ROOT(lnk) != NULL) {
        old_res = pllc->res;
        pllc->res = NIL;
        erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_make_one_lnk_element, vpllc);
        targets = pllc->res;
        pllc->res = old_res;
    }
    tup = TUPLE4(pllc->hp, pllc->tag, make_small(lnk->type), p, targets);
    pllc->hp += 5;
    pllc->res = CONS(pllc->hp, tup, pllc->res);
    pllc->hp += 2;
}
  249. static Eterm
  250. make_link_list(Process *p, ErtsLink *root, Eterm tail)
  251. {
  252. DECL_AM(erl_link);
  253. Uint sz = 0;
  254. LnkListContext llc;
  255. erts_doforall_links(root, &do_calc_lnk_size, &sz);
  256. if (sz == 0) {
  257. return tail;
  258. }
  259. llc.p = p;
  260. llc.hp = HAlloc(p,sz);
  261. llc.res = tail;
  262. llc.tag = AM_erl_link;
  263. erts_doforall_links(root, &do_make_one_lnk_element, &llc);
  264. return llc.res;
  265. }
/*
 * Print the system version string (erts_system_version above) to the given
 * output target, filling in the runtime placeholders: scheduler counts
 * (ERTS_SMP), async thread pool size (USE_THREADS) and kernel-poll state.
 * Returns the result of erts_print().
 */
int
erts_print_system_version(int to, void *arg, Process *c_p)
{
#ifdef ERTS_SMP
    Uint total, online, active;
    (void) erts_schedulers_state(&total, &online, &active, 0);
#endif
    /* The argument list must match the %-placeholders compiled into
     * erts_system_version for this configuration. */
    return erts_print(to, arg, erts_system_version
#ifdef ERTS_SMP
                      , total, online
#endif
#ifdef USE_THREADS
                      , erts_async_max_threads
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
                      , erts_use_kernel_poll ? "true" : "false"
#endif
        );
}
/* One collected monitor/link entry: the monitored entity (pid or name)
 * and, for by-name monitors, the node it lives on. */
typedef struct {
    Eterm entity;
    Eterm node;
} MonitorInfo;

/* Growable array of MonitorInfo entries plus the heap size (in words)
 * needed to build the corresponding result term. */
typedef struct {
    MonitorInfo *mi;  /* entry array (ERTS_ALC_T_TMP) */
    Uint mi_i;        /* next free index */
    Uint mi_max;      /* current capacity */
    int sz;           /* heap words needed for the result */
} MonitorInfoCollection;

#define INIT_MONITOR_INFOS(MIC) do {		\
    (MIC).mi = NULL;				\
    (MIC).mi_i = (MIC).mi_max = 0;		\
    (MIC).sz = 0;                               \
} while(0)

/* Growth increment for the mi array. */
#define MI_INC 50
/* Ensure there is room for at least one more entry, growing by MI_INC. */
#define EXTEND_MONITOR_INFOS(MICP)					\
do {									\
    if ((MICP)->mi_i >= (MICP)->mi_max) {				\
	(MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP,		\
						(MICP)->mi,		\
						((MICP)->mi_max+MI_INC)	\
						* sizeof(MonitorInfo))	\
		      : erts_alloc(ERTS_ALC_T_TMP,			\
				   MI_INC*sizeof(MonitorInfo)));	\
	(MICP)->mi_max += MI_INC;					\
    }									\
 } while (0)
#define DESTROY_MONITOR_INFOS(MIC)			\
do {							\
    if ((MIC).mi != NULL) {				\
	erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi);	\
    }							\
 } while (0)
  319. static void collect_one_link(ErtsLink *lnk, void *vmicp)
  320. {
  321. MonitorInfoCollection *micp = vmicp;
  322. EXTEND_MONITOR_INFOS(micp);
  323. if (!(lnk->type == LINK_PID)) {
  324. return;
  325. }
  326. micp->mi[micp->mi_i].entity = lnk->pid;
  327. micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
  328. micp->mi_i++;
  329. }
/*
 * Collector callback for the 'monitors' item: record one monitor where this
 * process is the origin (a monitor it holds on someone else).  The four
 * branches distinguish remote-by-name, remote-by-pid, local-by-name and
 * local-by-pid monitors, which need different amounts of result heap.
 */
static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
{
    MonitorInfoCollection *micp = vmicp;

    if (mon->type != MON_ORIGIN) {
        return;
    }
    EXTEND_MONITOR_INFOS(micp);
    if (is_atom(mon->pid)) { /* external by name */
        micp->mi[micp->mi_i].entity = mon->name;
        micp->mi[micp->mi_i].node = mon->pid;
        micp->sz += 3; /* need one 2-tuple */
    } else if (is_external_pid(mon->pid)) { /* external by pid */
        micp->mi[micp->mi_i].entity = mon->pid;
        micp->mi[micp->mi_i].node = NIL;
        micp->sz += NC_HEAP_SIZE(mon->pid);
    } else if (!is_nil(mon->name)) { /* internal by name */
        micp->mi[micp->mi_i].entity = mon->name;
        micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
        micp->sz += 3; /* need one 2-tuple */
    } else { /* internal by pid */
        micp->mi[micp->mi_i].entity = mon->pid;
        micp->mi[micp->mi_i].node = NIL;
        /* no additional heap space needed */
    }
    micp->mi_i++;
    micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
}
  357. static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
  358. {
  359. MonitorInfoCollection *micp = vmicp;
  360. if (mon->type != MON_TARGET) {
  361. return;
  362. }
  363. EXTEND_MONITOR_INFOS(micp);
  364. micp->mi[micp->mi_i].node = NIL;
  365. micp->mi[micp->mi_i].entity = mon->pid;
  366. micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
  367. micp->mi_i++;
  368. }
/* Growable collection of suspend-monitor pointers, gathered while the
 * caller holds the locks recorded in c_p/c_p_locks; sz accumulates the
 * heap words needed for the result term. */
typedef struct {
    Process *c_p;
    ErtsProcLocks c_p_locks;
    ErtsSuspendMonitor **smi;
    Uint smi_i;    /* next free index */
    Uint smi_max;  /* current capacity */
    int sz;        /* heap words needed for the result */
} ErtsSuspendMonitorInfoCollection;

#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do {	\
    (SMIC).c_p = (CP);						\
    (SMIC).c_p_locks = (CPL);					\
    (SMIC).smi = NULL;						\
    (SMIC).smi_i = (SMIC).smi_max = 0;				\
    (SMIC).sz = 0;						\
} while(0)

/* Growth increment for the smi array. */
#define ERTS_SMI_INC 50
/* Ensure room for at least one more entry, growing by ERTS_SMI_INC. */
#define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP)			\
do {									\
    if ((SMICP)->smi_i >= (SMICP)->smi_max) {				\
	(SMICP)->smi = ((SMICP)->smi					\
			? erts_realloc(ERTS_ALC_T_TMP,			\
				       (SMICP)->smi,			\
				       ((SMICP)->smi_max		\
					+ ERTS_SMI_INC)			\
				       * sizeof(ErtsSuspendMonitor *))	\
			: erts_alloc(ERTS_ALC_T_TMP,			\
				     ERTS_SMI_INC			\
				     * sizeof(ErtsSuspendMonitor *)));	\
	(SMICP)->smi_max += ERTS_SMI_INC;				\
    }									\
 } while (0)

#define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC)			\
do {									\
    if ((SMIC).smi != NULL) {						\
	erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi);			\
    }									\
 } while (0)
/*
 * Collector callback for the 'suspending' item: record one suspend monitor,
 * but only if the suspendee is still alive.  If any suspend is active,
 * pending suspends are folded into the active count; afterwards exactly one
 * of active/pending is non-zero (asserted below).
 */
static void
collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
{
    ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
    Process *suspendee = erts_pid2proc(smicp->c_p,
                                       smicp->c_p_locks,
                                       smon->pid,
                                       0);
    if (suspendee) { /* suspendee is alive */
        Sint a, p;
        if (smon->active) {
            smon->active += smon->pending;
            smon->pending = 0;
        }

        ASSERT((smon->active && !smon->pending)
               || (smon->pending && !smon->active));

        ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);

        smicp->smi[smicp->smi_i] = smon;
        smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;

        a = (Sint) smon->active;  /* quiet compiler warnings */
        p = (Sint) smon->pending; /* on 64-bit machines         */

        /* Counts that do not fit in a small need bignum heap space. */
        if (!IS_SSMALL(a))
            smicp->sz += BIG_UINT_HEAP_SIZE;
        if (!IS_SSMALL(p))
            smicp->sz += BIG_UINT_HEAP_SIZE;
        smicp->smi_i++;
    }
}
  434. static void one_link_size(ErtsLink *lnk, void *vpu)
  435. {
  436. Uint *pu = vpu;
  437. *pu += ERTS_LINK_SIZE*sizeof(Uint);
  438. if(!IS_CONST(lnk->pid))
  439. *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
  440. if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
  441. erts_doforall_links(ERTS_LINK_ROOT(lnk),&one_link_size,vpu);
  442. }
  443. }
  444. static void one_mon_size(ErtsMonitor *mon, void *vpu)
  445. {
  446. Uint *pu = vpu;
  447. *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
  448. if(!IS_CONST(mon->pid))
  449. *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
  450. if(!IS_CONST(mon->ref))
  451. *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
  452. }
/*
 * process_info/[1,2]
 */

/* Failure reasons reported by process_info_list() through *fail_type. */
#define ERTS_PI_FAIL_TYPE_BADARG	0 /* invalid argument: raise badarg */
#define ERTS_PI_FAIL_TYPE_YIELD		1 /* target busy: yield and retry */
#define ERTS_PI_FAIL_TYPE_AWAIT_EXIT	2 /* target exiting: trap until it has exited */
/*
 * Map a process_info item to the set of locks that must be held on the
 * *target* process while gathering that item.
 */
static ERTS_INLINE ErtsProcLocks
pi_locks(Eterm info)
{
    switch (info) {
    case am_status:
    case am_priority:
	return ERTS_PROC_LOCK_STATUS;
    case am_links:
    case am_monitors:
    case am_monitored_by:
    case am_suspending:
	return ERTS_PROC_LOCK_LINK;
    case am_messages:
    case am_message_queue_len:
    case am_total_heap_size:
	return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
    case am_memory:
	return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_MSGQ;
    default:
	return ERTS_PROC_LOCK_MAIN;
    }
}
/*
 * All valid process_info arguments.  The order here defines the index
 * mapping used by pi_ix2arg()/pi_arg2ix(); the two must stay in sync
 * (checked by the DEBUG assertion in process_info_init()).
 */
static Eterm pi_args[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_messages,
    am_message_queue_len,
    am_links,
    am_monitors,
    am_monitored_by,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_heap_size,
    am_stack_size,
    am_memory,
    am_garbage_collection,
    am_group_leader,
    am_reductions,
    am_priority,
    am_trace,
    am_binary,
    am_sequential_trace_token,
    am_catchlevel,
    am_backtrace,
    am_last_calls,
    am_total_heap_size,
    am_suspending,
    am_min_heap_size,
    am_min_bin_vheap_size,
    am_current_location,
    am_current_stacktrace,
#ifdef HYBRID
    am_message_binary
#endif
};

/* Number of valid process_info items. */
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
  521. static ERTS_INLINE Eterm
  522. pi_ix2arg(int ix)
  523. {
  524. if (ix < 0 || ERTS_PI_ARGS <= ix)
  525. return am_undefined;
  526. return pi_args[ix];
  527. }
/* Translate an item atom to its pi_args[] index, or -1 if the atom is not
 * a valid process_info item.  Must mirror the order of pi_args[] above. */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name:			return 0;
    case am_current_function:			return 1;
    case am_initial_call:			return 2;
    case am_status:				return 3;
    case am_messages:				return 4;
    case am_message_queue_len:			return 5;
    case am_links:				return 6;
    case am_monitors:				return 7;
    case am_monitored_by:			return 8;
    case am_dictionary:				return 9;
    case am_trap_exit:				return 10;
    case am_error_handler:			return 11;
    case am_heap_size:				return 12;
    case am_stack_size:				return 13;
    case am_memory:				return 14;
    case am_garbage_collection:			return 15;
    case am_group_leader:			return 16;
    case am_reductions:				return 17;
    case am_priority:				return 18;
    case am_trace:				return 19;
    case am_binary:				return 20;
    case am_sequential_trace_token:		return 21;
    case am_catchlevel:				return 22;
    case am_backtrace:				return 23;
    case am_last_calls:				return 24;
    case am_total_heap_size:			return 25;
    case am_suspending:				return 26;
    case am_min_heap_size:			return 27;
    case am_min_bin_vheap_size:			return 28;
    case am_current_location:			return 29;
    case am_current_stacktrace:			return 30;
#ifdef HYBRID
    case am_message_binary:			return 31;
#endif
    default:					return -1;
    }
}
/* Items returned by process_info/1, in result order (pi_1_keys_list is
 * built in reverse by process_info_init() below). */
static Eterm pi_1_keys[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_message_queue_len,
    am_messages,
    am_links,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_priority,
    am_group_leader,
    am_total_heap_size,
    am_heap_size,
    am_stack_size,
    am_reductions,
    am_garbage_collection,
    am_suspending
};

#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))

/* Pre-built constant key list for process_info/1; set up once at start. */
static Eterm pi_1_keys_list;
#if HEAP_ON_C_STACK
static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
#endif
/*
 * Build pi_1_keys_list (the constant key list used by process_info/1) from
 * pi_1_keys[].  The cons cells live either in a static buffer
 * (HEAP_ON_C_STACK) or in a one-off permanent allocation that is never
 * freed.
 */
static void
process_info_init(void)
{
#if HEAP_ON_C_STACK
    Eterm *hp = &pi_1_keys_list_heap[0];
#else
    Eterm *hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM,sizeof(Eterm)*2*ERTS_PI_1_NO_OF_KEYS);
#endif
    int i;

    pi_1_keys_list = NIL;
    /* Cons in reverse so the list comes out in pi_1_keys[] order. */
    for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
	pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
	hp += 2;
    }
#ifdef DEBUG
    { /* Make sure the process_info argument mappings are consistent */
	int ix;
	for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
	    ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
	}
    }
#endif
}
/*
 * Look up and lock the target process of a process_info request with
 * 'info_locks'.  Returns NULL if no such process exists; on SMP it may
 * also return ERTS_PROC_LOCK_BUSY, in which case the caller must yield
 * and retry (see the callers' handling of that value).
 */
static ERTS_INLINE Process *
pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
{
#ifdef ERTS_SMP
    /*
     * If the main lock is needed, we use erts_pid2proc_not_running()
     * instead of erts_pid2proc() for two reasons:
     * * Current function of pid and possibly other information will
     *   have been updated so that process_info() is consistent with an
     *   info-request/info-response signal model.
     * * We avoid blocking the whole scheduler executing the
     *   process that is calling process_info() for a long time
     *   which will happen if pid is currently running.
     * The caller of process_info() may have to yield if pid
     * is currently running.
     */
    if (info_locks & ERTS_PROC_LOCK_MAIN)
	return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
					 pid, info_locks);
    else
#endif
	return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
			     pid, info_locks);
}
/* Defined later in this file; produces the value for a single item. */
BIF_RETTYPE
process_info_aux(Process *BIF_P,
		 Process *rp,
		 Eterm rpid,
		 Eterm item,
		 int always_wrap);

/* Sizing of the requested-item index buffer in process_info_list():
 * a stack buffer covers ERTS_PI_ARGS entries; larger requests move to a
 * temp allocation grown in ERTS_PI_RES_ELEM_IX_BUF_INC steps. */
#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
/*
 * Gather the items in 'list' for process 'pid' and return them as a list
 * of values in request order.  Returns am_undefined if the process does
 * not exist, or THE_NON_VALUE with *fail_type set on failure (badarg,
 * yield, or await-exit).  All needed target-process locks are computed up
 * front and taken in one acquisition; each distinct item's value is then
 * produced once by process_info_aux() (duplicates reuse the cached value).
 */
static Eterm
process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
		  int *fail_type)
{
    int want_messages = 0;
    int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
    int *res_elem_ix = &def_res_elem_ix_buf[0];
    int res_elem_ix_ix = -1;
    int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
    Eterm part_res[ERTS_PI_ARGS];
    Eterm res, arg;
    Uint *hp, *hp_end;
    ErtsProcLocks locks = (ErtsProcLocks) 0;
    int res_len, ix;
    Process *rp = NULL;

    *fail_type = ERTS_PI_FAIL_TYPE_BADARG;

    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
	part_res[ix] = THE_NON_VALUE;

    ASSERT(is_list(list));

    /* Pass 1: validate the argument list, accumulate the lock set and
     * record the requested item indices in request order. */
    while (is_list(list)) {
	Eterm* consp = list_val(list);

	arg = CAR(consp);
	ix = pi_arg2ix(arg);
	if (ix < 0) {
	    res = THE_NON_VALUE;
	    goto done;
	}
	if (arg == am_messages)
	    want_messages = 1;
	locks |= pi_locks(arg);
	res_elem_ix_ix++;
	if (res_elem_ix_ix >= res_elem_ix_sz) {
	    /* Index buffer full: grow it (moving off the stack buffer
	     * the first time). */
	    if (res_elem_ix != &def_res_elem_ix_buf[0])
		res_elem_ix =
		    erts_realloc(ERTS_ALC_T_TMP,
				 res_elem_ix,
				 sizeof(int)*(res_elem_ix_sz
					      += ERTS_PI_RES_ELEM_IX_BUF_INC));
	    else {
		int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
		int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
						  sizeof(int)*new_res_elem_ix_sz);
		sys_memcpy((void *) new_res_elem_ix,
			   (void *) res_elem_ix,
			   sizeof(int)*res_elem_ix_sz);
		res_elem_ix = new_res_elem_ix;
		res_elem_ix_sz = new_res_elem_ix_sz;
	    }
	}
	res_elem_ix[res_elem_ix_ix] = ix;
	list = CDR(consp);
    }
    if (is_not_nil(list)) {
	/* Improper list tail: badarg. */
	res = THE_NON_VALUE;
	goto done;
    }

    res_len = res_elem_ix_ix+1;

    ASSERT(res_len > 0);

    rp = pi_pid2proc(c_p, pid, locks|ERTS_PROC_LOCK_STATUS);
    if (!rp) {
	res = am_undefined;
	goto done;
    }
    else if (rp == ERTS_PROC_LOCK_BUSY) {
	/* Target currently running: caller must yield and retry. */
	rp = NULL;
	res = THE_NON_VALUE;
	*fail_type = ERTS_PI_FAIL_TYPE_YIELD;
	goto done;
    }
    else if (c_p != rp && ERTS_PROC_PENDING_EXIT(rp)) {
	/* Target is about to exit: caller traps until it has exited.
	 * (Status lock is added so 'done' releases it too.) */
	locks |= ERTS_PROC_LOCK_STATUS;
	res = THE_NON_VALUE;
	*fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
	goto done;
    }
    else if (!(locks & ERTS_PROC_LOCK_STATUS)) {
	/* Status lock was only needed for the lookup itself. */
	erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
    }

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (want_messages) {
	ix = pi_arg2ix(am_messages);
	ASSERT(part_res[ix] == THE_NON_VALUE);
	part_res[ix] = process_info_aux(c_p, rp, pid, am_messages, always_wrap);
	ASSERT(part_res[ix] != THE_NON_VALUE);
    }

    /* Pass 2: compute each remaining requested item (once per item). */
    for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
	ix = res_elem_ix[res_elem_ix_ix];
	if (part_res[ix] == THE_NON_VALUE) {
	    arg = pi_ix2arg(ix);
	    part_res[ix] = process_info_aux(c_p, rp, pid, arg, always_wrap);
	    ASSERT(part_res[ix] != THE_NON_VALUE);
	}
    }

    /* Pass 3: cons the values together in reverse so the result list
     * matches the request order. */
    hp = HAlloc(c_p, res_len*2);
    hp_end = hp + res_len*2;
    res = NIL;

    for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
	ix = res_elem_ix[res_elem_ix_ix];
	ASSERT(part_res[ix] != THE_NON_VALUE);
	/*
	 * If we should ignore the value of registered_name,
	 * its value is nil. For more info, see comment in the
	 * beginning of process_info_aux().
	 */
	if (is_nil(part_res[ix])) {
	    ASSERT(!always_wrap);
	    ASSERT(pi_ix2arg(ix) == am_registered_name);
	}
	else {
	    res = CONS(hp, part_res[ix], res);
	    hp += 2;
	}
    }

    if (!always_wrap) {
	/* Some entries may have been skipped: return unused heap. */
	HRelease(c_p, hp_end, hp);
    }

done:

    if (c_p == rp)
	locks &= ~ERTS_PROC_LOCK_MAIN; /* main lock on self stays held */
    if (locks && rp)
	erts_smp_proc_unlock(rp, locks);

    if (res_elem_ix != &def_res_elem_ix_buf[0])
	erts_free(ERTS_ALC_T_TMP, res_elem_ix);

    return res;
}
/*
 * process_info/1 BIF: gathers the default key set (pi_1_keys_list) for the
 * given pid.  Returns undefined for non-existing processes, raises badarg
 * for invalid pids, and may yield or trap while the target is busy/exiting.
 */
BIF_RETTYPE process_info_1(BIF_ALIST_1)
{
    Eterm res;
    int fail_type;

    /* An external pid whose dist entry is this node cannot name a live
     * local process; NOTE(review): presumably a pid from an earlier
     * incarnation of the node — confirm. */
    if (is_external_pid(BIF_ARG_1)
	&& external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
	BIF_RET(am_undefined);

    if (is_not_internal_pid(BIF_ARG_1)
	|| internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
	BIF_ERROR(BIF_P, BADARG);
    }

    res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
    if (is_non_value(res)) {
	switch (fail_type) {
	case ERTS_PI_FAIL_TYPE_BADARG:
	    BIF_ERROR(BIF_P, BADARG);
	case ERTS_PI_FAIL_TYPE_YIELD:
	    ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
	case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
	    ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
	default:
	    erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
	}
    }

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
/*
 * process_info/2 BIF.  BIF_ARG_2 is either a list of items (delegated to
 * process_info_list()) or a single item atom (handled inline below).
 * Returns undefined for non-existing processes; may yield or trap while
 * the target is busy or exiting.
 */
BIF_RETTYPE process_info_2(BIF_ALIST_2)
{
    Eterm res;
    Process *rp;
    Eterm pid = BIF_ARG_1;
    ErtsProcLocks info_locks;
    int fail_type;

    /* An external pid whose dist entry is this node cannot name a live
     * local process; NOTE(review): presumably a pid from an earlier
     * incarnation of the node — confirm. */
    if (is_external_pid(pid)
	&& external_pid_dist_entry(pid) == erts_this_dist_entry)
	BIF_RET(am_undefined);

    if (is_not_internal_pid(pid)
	|| internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
	BIF_ERROR(BIF_P, BADARG);
    }

    if (is_nil(BIF_ARG_2))
	BIF_RET(NIL);

    if (is_list(BIF_ARG_2)) {
	res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
	if (is_non_value(res)) {
	    switch (fail_type) {
	    case ERTS_PI_FAIL_TYPE_BADARG:
		BIF_ERROR(BIF_P, BADARG);
	    case ERTS_PI_FAIL_TYPE_YIELD:
		ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
				BIF_ARG_1, BIF_ARG_2);
	    case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
		ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
	    default:
		erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
			 __FILE__, __LINE__);
	    }
	}
	ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
	BIF_RET(res);
    }

    /* Single-item request. */
    if (pi_arg2ix(BIF_ARG_2) < 0)
	BIF_ERROR(BIF_P, BADARG);

    info_locks = pi_locks(BIF_ARG_2);

    rp = pi_pid2proc(BIF_P, pid, info_locks|ERTS_PROC_LOCK_STATUS);
    if (!rp)
	res = am_undefined;
    else if (rp == ERTS_PROC_LOCK_BUSY)
	/* Target currently running: yield and retry. */
	ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
			BIF_ARG_1, BIF_ARG_2);
    else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
	/* Target is about to exit: release it and trap until it has. */
	erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
	ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
    }
    else {
	/* Status lock was only needed for the lookup itself. */
	if (!(info_locks & ERTS_PROC_LOCK_STATUS))
	    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
	res = process_info_aux(BIF_P, rp, pid, BIF_ARG_2, 0);
    }
    ASSERT(is_value(res));

#ifdef ERTS_SMP
    if (BIF_P == rp)
	info_locks &= ~ERTS_PROC_LOCK_MAIN; /* main lock on self stays held */
    if (rp && info_locks)
	erts_smp_proc_unlock(rp, info_locks);
#endif

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
  870. Eterm
  871. process_info_aux(Process *BIF_P,
  872. Process *rp,
  873. Eterm rpid,
  874. Eterm item,
  875. int always_wrap)
  876. {
  877. Eterm *hp;
  878. Eterm res = NIL;
  879. ASSERT(rp);
  880. /*
  881. * Q: Why this always_wrap argument?
  882. *
  883. * A: registered_name is strange. If process has no registered name,
  884. * process_info(Pid, registered_name) returns [], and
  885. * the result of process_info(Pid) has no {registered_name, Name}
  886. * tuple in the resulting list. This is inconsistent with all other
  887. * options, but we do not dare to change it.
  888. *
  889. * When process_info/2 is called with a list as second argument,
  890. * registered_name behaves as it should, i.e. a
  891. * {registered_name, []} will appear in the resulting list.
  892. *
  893. * If always_wrap != 0, process_info_aux() always wrap the result
  894. * in a key two tuple.
  895. */
  896. switch (item) {
  897. case am_registered_name:
  898. if (rp->reg != NULL) {
  899. hp = HAlloc(BIF_P, 3);
  900. res = rp->reg->name;
  901. } else {
  902. if (always_wrap) {
  903. hp = HAlloc(BIF_P, 3);
  904. res = NIL;
  905. }
  906. else {
  907. return NIL;
  908. }
  909. }
  910. break;
  911. case am_current_function:
  912. res = current_function(BIF_P, rp, &hp, 0);
  913. break;
  914. case am_current_location:
  915. res = current_function(BIF_P, rp, &hp, 1);
  916. break;
  917. case am_current_stacktrace:
  918. res = current_stacktrace(BIF_P, rp, &hp);
  919. break;
  920. case am_initial_call:
  921. hp = HAlloc(BIF_P, 3+4);
  922. res = TUPLE3(hp,
  923. rp->initial[INITIAL_MOD],
  924. rp->initial[INITIAL_FUN],
  925. make_small(rp->initial[INITIAL_ARI]));
  926. hp += 4;
  927. break;
  928. case am_status:
  929. res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
  930. ASSERT(res != am_undefined);
  931. hp = HAlloc(BIF_P, 3);
  932. break;
  933. case am_messages: {
  934. ErlMessage* mp;
  935. int n;
  936. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  937. n = rp->msg.len;
  938. if (n == 0 || rp->trace_flags & F_SENSITIVE) {
  939. hp = HAlloc(BIF_P, 3);
  940. } else {
  941. int remove_bad_messages = 0;
  942. struct {
  943. Uint copy_struct_size;
  944. ErlMessage* msgp;
  945. } *mq = erts_alloc(ERTS_ALC_T_TMP, n*sizeof(*mq));
  946. Sint i = 0;
  947. Uint heap_need = 3;
  948. Eterm *hp_end;
  949. for (mp = rp->msg.first; mp; mp = mp->next) {
  950. heap_need += 2;
  951. mq[i].msgp = mp;
  952. if (rp != BIF_P) {
  953. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  954. if (is_value(msg)) {
  955. mq[i].copy_struct_size = (is_immed(msg)
  956. #ifdef HYBRID
  957. || NO_COPY(msg)
  958. #endif
  959. ? 0
  960. : size_object(msg));
  961. }
  962. else if (mq[i].msgp->data.attached) {
  963. mq[i].copy_struct_size
  964. = erts_msg_attached_data_size(mq[i].msgp);
  965. }
  966. else {
  967. /* Bad distribution message; ignore */
  968. remove_bad_messages = 1;
  969. mq[i].copy_struct_size = 0;
  970. }
  971. heap_need += mq[i].copy_struct_size;
  972. }
  973. else {
  974. mq[i].copy_struct_size = 0;
  975. if (mp->data.attached)
  976. heap_need += erts_msg_attached_data_size(mp);
  977. }
  978. i++;
  979. }
  980. hp = HAlloc(BIF_P, heap_need);
  981. hp_end = hp + heap_need;
  982. ASSERT(i == n);
  983. for (i--; i >= 0; i--) {
  984. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  985. if (rp != BIF_P) {
  986. if (is_value(msg)) {
  987. if (mq[i].copy_struct_size)
  988. msg = copy_struct(msg,
  989. mq[i].copy_struct_size,
  990. &hp,
  991. &MSO(BIF_P));
  992. }
  993. else if (mq[i].msgp->data.attached) {
  994. ErlHeapFragment *hfp;
  995. /*
  996. * Decode it into a message buffer and attach it
  997. * to the message instead of the attached external
  998. * term.
  999. *
  1000. * Note that we may not pass a process pointer
  1001. * to erts_msg_distext2heap(), since it would then
  1002. * try to alter locks on that process.
  1003. */
  1004. msg = erts_msg_distext2heap(
  1005. NULL, NULL, &hfp, &ERL_MESSAGE_TOKEN(mq[i].msgp),
  1006. mq[i].msgp->data.dist_ext);
  1007. ERL_MESSAGE_TERM(mq[i].msgp) = msg;
  1008. mq[i].msgp->data.heap_frag = hfp;
  1009. if (is_non_value(msg)) {
  1010. ASSERT(!mq[i].msgp->data.heap_frag);
  1011. /* Bad distribution message; ignore */
  1012. remove_bad_messages = 1;
  1013. continue;
  1014. }
  1015. else {
  1016. /* Make our copy of the message */
  1017. ASSERT(size_object(msg) == hfp->used_size);
  1018. msg = copy_struct(msg,
  1019. hfp->used_size,
  1020. &hp,
  1021. &MSO(BIF_P));
  1022. }
  1023. }
  1024. else {
  1025. /* Bad distribution message; ignore */
  1026. remove_bad_messages = 1;
  1027. continue;
  1028. }
  1029. }
  1030. else {
  1031. if (mq[i].msgp->data.attached) {
  1032. /* Decode it on the heap */
  1033. erts_move_msg_attached_data_to_heap(&hp,
  1034. &MSO(BIF_P),
  1035. mq[i].msgp);
  1036. msg = ERL_MESSAGE_TERM(mq[i].msgp);
  1037. ASSERT(!mq[i].msgp->data.attached);
  1038. if (is_non_value(msg)) {
  1039. /* Bad distribution message; ignore */
  1040. remove_bad_messages = 1;
  1041. continue;
  1042. }
  1043. }
  1044. }
  1045. res = CONS(hp, msg, res);
  1046. hp += 2;
  1047. }
  1048. HRelease(BIF_P, hp_end, hp+3);
  1049. erts_free(ERTS_ALC_T_TMP, mq);
  1050. if (remove_bad_messages) {
  1051. ErlMessage **mpp;
  1052. /*
  1053. * We need to remove bad distribution messages from
  1054. * the queue, so that the value returned for
  1055. * 'message_queue_len' is consistent with the value
  1056. * returned for 'messages'.
  1057. */
  1058. mpp = &rp->msg.first;
  1059. mp = rp->msg.first;
  1060. while (mp) {
  1061. if (is_value(ERL_MESSAGE_TERM(mp))) {
  1062. mpp = &mp->next;
  1063. mp = mp->next;
  1064. }
  1065. else {
  1066. ErlMessage* bad_mp = mp;
  1067. ASSERT(!mp->data.attached);
  1068. if (rp->msg.save == &mp->next)
  1069. rp->msg.save = mpp;
  1070. if (rp->msg.last == &mp->next)
  1071. rp->msg.last = mpp;
  1072. *mpp = mp->next;
  1073. mp = mp->next;
  1074. rp->msg.len--;
  1075. free_message(bad_mp);
  1076. }
  1077. }
  1078. }
  1079. }
  1080. break;
  1081. }
  1082. case am_message_queue_len:
  1083. hp = HAlloc(BIF_P, 3);
  1084. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1085. res = make_small(rp->msg.len);
  1086. break;
  1087. case am_links: {
  1088. MonitorInfoCollection mic;
  1089. int i;
  1090. Eterm item;
  1091. INIT_MONITOR_INFOS(mic);
  1092. erts_doforall_links(rp->nlinks,&collect_one_link,&mic);
  1093. hp = HAlloc(BIF_P, 3 + mic.sz);
  1094. res = NIL;
  1095. for (i = 0; i < mic.mi_i; i++) {
  1096. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1097. res = CONS(hp, item, res);
  1098. hp += 2;
  1099. }
  1100. DESTROY_MONITOR_INFOS(mic);
  1101. break;
  1102. }
  1103. case am_monitors: {
  1104. MonitorInfoCollection mic;
  1105. int i;
  1106. INIT_MONITOR_INFOS(mic);
  1107. erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic);
  1108. hp = HAlloc(BIF_P, 3 + mic.sz);
  1109. res = NIL;
  1110. for (i = 0; i < mic.mi_i; i++) {
  1111. if (is_atom(mic.mi[i].entity)) {
  1112. /* Monitor by name.
  1113. * Build {process, {Name, Node}} and cons it.
  1114. */
  1115. Eterm t1, t2;
  1116. t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
  1117. hp += 3;
  1118. t2 = TUPLE2(hp, am_process, t1);
  1119. hp += 3;
  1120. res = CONS(hp, t2, res);
  1121. hp += 2;
  1122. }
  1123. else {
  1124. /* Monitor by pid. Build {process, Pid} and cons it. */
  1125. Eterm t;
  1126. Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1127. t = TUPLE2(hp, am_process, pid);
  1128. hp += 3;
  1129. res = CONS(hp, t, res);
  1130. hp += 2;
  1131. }
  1132. }
  1133. DESTROY_MONITOR_INFOS(mic);
  1134. break;
  1135. }
  1136. case am_monitored_by: {
  1137. MonitorInfoCollection mic;
  1138. int i;
  1139. Eterm item;
  1140. INIT_MONITOR_INFOS(mic);
  1141. erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic);
  1142. hp = HAlloc(BIF_P, 3 + mic.sz);
  1143. res = NIL;
  1144. for (i = 0; i < mic.mi_i; ++i) {
  1145. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1146. res = CONS(hp, item, res);
  1147. hp += 2;
  1148. }
  1149. DESTROY_MONITOR_INFOS(mic);
  1150. break;
  1151. }
  1152. case am_suspending: {
  1153. ErtsSuspendMonitorInfoCollection smic;
  1154. int i;
  1155. Eterm item;
  1156. #ifdef DEBUG
  1157. Eterm *hp_end;
  1158. #endif
  1159. ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
  1160. BIF_P,
  1161. (BIF_P == rp
  1162. ? ERTS_PROC_LOCK_MAIN
  1163. : 0) | ERTS_PROC_LOCK_LINK);
  1164. erts_doforall_suspend_monitors(rp->suspend_monitors,
  1165. &collect_one_suspend_monitor,
  1166. &smic);
  1167. hp = HAlloc(BIF_P, 3 + smic.sz);
  1168. #ifdef DEBUG
  1169. hp_end = hp + smic.sz;
  1170. #endif
  1171. res = NIL;
  1172. for (i = 0; i < smic.smi_i; i++) {
  1173. Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
  1174. Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
  1175. Eterm active;
  1176. Eterm pending;
  1177. if (IS_SSMALL(a))
  1178. active = make_small(a);
  1179. else {
  1180. active = small_to_big(a, hp);
  1181. hp += BIG_UINT_HEAP_SIZE;
  1182. }
  1183. if (IS_SSMALL(p))
  1184. pending = make_small(p);
  1185. else {
  1186. pending = small_to_big(p, hp);
  1187. hp += BIG_UINT_HEAP_SIZE;
  1188. }
  1189. item = TUPLE3(hp, smic.smi[i]->pid, active, pending);
  1190. hp += 4;
  1191. res = CONS(hp, item, res);
  1192. hp += 2;
  1193. }
  1194. ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
  1195. ASSERT(hp == hp_end);
  1196. break;
  1197. }
  1198. case am_dictionary:
  1199. if (rp->trace_flags & F_SENSITIVE) {
  1200. res = NIL;
  1201. } else {
  1202. res = erts_dictionary_copy(BIF_P, rp->dictionary);
  1203. }
  1204. hp = HAlloc(BIF_P, 3);
  1205. break;
  1206. case am_trap_exit:
  1207. hp = HAlloc(BIF_P, 3);
  1208. if (rp->flags & F_TRAPEXIT)
  1209. res = am_true;
  1210. else
  1211. res = am_false;
  1212. break;
  1213. case am_error_handler:
  1214. hp = HAlloc(BIF_P, 3);
  1215. res = erts_proc_get_error_handler(BIF_P);
  1216. break;
  1217. case am_heap_size: {
  1218. Uint hsz = 3;
  1219. (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
  1220. hp = HAlloc(BIF_P, hsz);
  1221. res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
  1222. break;
  1223. }
  1224. case am_fullsweep_after: {
  1225. Uint hsz = 3;
  1226. (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
  1227. hp = HAlloc(BIF_P, hsz);
  1228. res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
  1229. break;
  1230. }
  1231. case am_min_heap_size: {
  1232. Uint hsz = 3;
  1233. (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
  1234. hp = HAlloc(BIF_P, hsz);
  1235. res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
  1236. break;
  1237. }
  1238. case am_min_bin_vheap_size: {
  1239. Uint hsz = 3;
  1240. (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
  1241. hp = HAlloc(BIF_P, hsz);
  1242. res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
  1243. break;
  1244. }
  1245. case am_total_heap_size: {
  1246. ErlMessage *mp;
  1247. Uint total_heap_size;
  1248. Uint hsz = 3;
  1249. total_heap_size = rp->heap_sz;
  1250. if (rp->old_hend && rp->old_heap)
  1251. total_heap_size += rp->old_hend - rp->old_heap;
  1252. total_heap_size += rp->mbuf_sz;
  1253. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1254. for (mp = rp->msg.first; mp; mp = mp->next)
  1255. if (mp->data.attached)
  1256. total_heap_size += erts_msg_attached_data_size(mp);
  1257. (void) erts_bld_uint(NULL, &hsz, total_heap_size);
  1258. hp = HAlloc(BIF_P, hsz);
  1259. res = erts_bld_uint(&hp, NULL, total_heap_size);
  1260. break;
  1261. }
  1262. case am_stack_size: {
  1263. Uint stack_size = STACK_START(rp) - rp->stop;
  1264. Uint hsz = 3;
  1265. (void) erts_bld_uint(NULL, &hsz, stack_size);
  1266. hp = HAlloc(BIF_P, hsz);
  1267. res = erts_bld_uint(&hp, NULL, stack_size);
  1268. break;
  1269. }
  1270. case am_memory: { /* Memory consumed in bytes */
  1271. ErlMessage *mp;
  1272. Uint size = 0;
  1273. Uint hsz = 3;
  1274. struct saved_calls *scb;
  1275. size += sizeof(Process);
  1276. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1277. erts_doforall_links(rp->nlinks, &one_link_size, &size);
  1278. erts_doforall_monitors(rp->monitors, &one_mon_size, &size);
  1279. size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
  1280. if (rp->old_hend && rp->old_heap)
  1281. size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
  1282. size += rp->msg.len * sizeof(ErlMessage);
  1283. for (mp = rp->msg.first; mp; mp = mp->next)
  1284. if (mp->data.attached)
  1285. size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
  1286. if (rp->arg_reg != rp->def_arg_reg) {
  1287. size += rp->arity * sizeof(rp->arg_reg[0]);
  1288. }
  1289. if (rp->psd)
  1290. size += sizeof(ErtsPSD);
  1291. scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
  1292. if (scb) {
  1293. size += (sizeof(struct saved_calls)
  1294. + (scb->len-1) * sizeof(scb->ct[0]));
  1295. }
  1296. size += erts_dicts_mem_size(rp);
  1297. (void) erts_bld_uint(NULL, &hsz, size);
  1298. hp = HAlloc(BIF_P, hsz);
  1299. res = erts_bld_uint(&hp, NULL, size);
  1300. break;
  1301. }
  1302. case am_garbage_collection: {
  1303. DECL_AM(minor_gcs);
  1304. Eterm t;
  1305. hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3); /* last "3" is for outside tuple */
  1306. t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
  1307. res = CONS(hp, t, NIL); hp += 2;
  1308. t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
  1309. res = CONS(hp, t, res); hp += 2;
  1310. t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
  1311. res = CONS(hp, t, res); hp += 2;
  1312. t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
  1313. res = CONS(hp, t, res); hp += 2;
  1314. break;
  1315. }
  1316. case am_group_leader: {
  1317. int sz = NC_HEAP_SIZE(rp->group_leader);
  1318. hp = HAlloc(BIF_P, 3 + sz);
  1319. res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
  1320. break;
  1321. }
  1322. case am_reductions: {
  1323. Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
  1324. Uint hsz = 3;
  1325. (void) erts_bld_uint(NULL, &hsz, reds);
  1326. hp = HAlloc(BIF_P, hsz);
  1327. res = erts_bld_uint(&hp, NULL, reds);
  1328. break;
  1329. }
  1330. case am_priority:
  1331. hp = HAlloc(BIF_P, 3);
  1332. res = erts_get_process_priority(rp);
  1333. break;
  1334. case am_trace:
  1335. hp = HAlloc(BIF_P, 3);
  1336. res = make_small(rp->trace_flags & TRACEE_FLAGS);
  1337. break;
  1338. case am_binary: {
  1339. Uint sz = 3;
  1340. (void) bld_bin_list(NULL, &sz, &MSO(rp));
  1341. hp = HAlloc(BIF_P, sz);
  1342. res = bld_bin_list(&hp, NULL, &MSO(rp));
  1343. break;
  1344. }
  1345. #ifdef HYBRID
  1346. case am_message_binary: {
  1347. Uint sz = 3;
  1348. (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso);
  1349. hp = HAlloc(BIF_P, sz);
  1350. res = bld_bin_list(&hp, NULL, erts_global_offheap.mso);
  1351. break;
  1352. }
  1353. #endif
  1354. case am_sequential_trace_token:
  1355. res = copy_object(rp->seq_trace_token, BIF_P);
  1356. hp = HAlloc(BIF_P, 3);
  1357. break;
  1358. case am_catchlevel:
  1359. hp = HAlloc(BIF_P, 3);
  1360. res = make_small(catchlevel(BIF_P));
  1361. break;
  1362. case am_backtrace: {
  1363. erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
  1364. erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
  1365. res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
  1366. erts_destroy_tmp_dsbuf(dsbufp);
  1367. hp = HAlloc(BIF_P, 3);
  1368. break;
  1369. }
  1370. case am_last_calls: {
  1371. struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P);
  1372. if (!scb) {
  1373. hp = HAlloc(BIF_P, 3);
  1374. res = am_false;
  1375. } else {
  1376. /*
  1377. * One cons cell and a 3-struct, and a 2-tuple.
  1378. * Might be less than that, if there are sends, receives or timeouts,
  1379. * so we must do a HRelease() to avoid creating holes.
  1380. */
  1381. Uint needed = scb->n*(2+4) + 3;
  1382. Eterm* limit;
  1383. Eterm term, list;
  1384. int i, j;
  1385. hp = HAlloc(BIF_P, needed);
  1386. limit = hp + needed;
  1387. list = NIL;
  1388. for (i = 0; i < scb->n; i++) {
  1389. j = scb->cur - i - 1;
  1390. if (j < 0)
  1391. j += scb->len;
  1392. if (scb->ct[j] == &exp_send)
  1393. term = am_send;
  1394. else if (scb->ct[j] == &exp_receive)
  1395. term = am_receive;
  1396. else if (scb->ct[j] == &exp_timeout)
  1397. term = am_timeout;
  1398. else {
  1399. term = TUPLE3(hp,
  1400. scb->ct[j]->code[0],
  1401. scb->ct[j]->code[1],
  1402. make_small(scb->ct[j]->code[2]));
  1403. hp += 4;
  1404. }
  1405. list = CONS(hp, term, list);
  1406. hp += 2;
  1407. }
  1408. res = list;
  1409. res = TUPLE2(hp, item, res);
  1410. hp += 3;
  1411. HRelease(BIF_P,limit,hp);
  1412. return res;
  1413. }
  1414. break;
  1415. }
  1416. default:
  1417. return THE_NON_VALUE; /* will produce badarg */
  1418. }
  1419. return TUPLE2(hp, item, res);
  1420. }
  1421. #undef MI_INC
/*
 * Build the value part of the {current_function, {M,F,A}} result for
 * process 'rp', allocating everything on BIF_P's heap.
 *
 * BIF_P     - the calling process (all heap allocation happens here)
 * rp        - the process being inspected (may equal BIF_P)
 * hpp       - out: heap pointer positioned after the built term
 * full_info - non-zero: build an MFA item with location info via
 *             erts_build_mfa_item(); zero: build a plain {M,F,A} tuple
 *
 * Returns am_undefined when no current function can be determined.
 * Side effect: may cache the looked-up function in rp->current.
 *
 * NOTE(review): HAlloc reserves 3 extra words in every branch --
 * presumably for a wrapping 2-tuple built by the caller; confirm.
 */
static Eterm
current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
{
    Eterm* hp;
    Eterm res;
    FunctionInfo fi;

    if (rp->current == NULL) {
	erts_lookup_function_info(&fi, rp->i, full_info);
	rp->current = fi.current;
    } else if (full_info) {
	erts_lookup_function_info(&fi, rp->i, full_info);
	if (fi.current == NULL) {
	    /* Use the current function without location info */
	    erts_set_current_function(&fi, rp->current);
	}
    }

    if (BIF_P->id == rp->id) {
	FunctionInfo fi2;

	/*
	 * The current function is erlang:process_info/{1,2},
	 * which is not the answer that the application wants.
	 * We will use the function pointed into by rp->cp
	 * instead if it can be looked up.
	 */
	erts_lookup_function_info(&fi2, rp->cp, full_info);
	if (fi2.current) {
	    fi = fi2;
	    rp->current = fi2.current;
	}
    }

    /*
     * Return the result.
     */
    if (rp->current == NULL) {
	hp = HAlloc(BIF_P, 3);
	res = am_undefined;
    } else if (full_info) {
	hp = HAlloc(BIF_P, 3+fi.needed);
	hp = erts_build_mfa_item(&fi, hp, am_true, &res);
    } else {
	hp = HAlloc(BIF_P, 3+4);
	res = TUPLE3(hp, rp->current[0],
		     rp->current[1], make_small(rp->current[2]));
	hp += 4;
    }
    *hpp = hp;
    return res;
}
/*
 * Build a stack trace (list of MFA items with location info) for
 * process 'rp', allocating the resulting list on p's heap.
 *
 * At most 8 frames are collected: rp->i and rp->cp (when set) are
 * recorded first, then erts_save_stacktrace() fills in the remaining
 * frames.  Frames that cannot be resolved to a known function are
 * silently dropped from the result.
 *
 * p   - the calling process (heap allocation happens here)
 * rp  - the process being inspected
 * hpp - out: heap pointer positioned after the built list
 *
 * Returns the (possibly empty) list; s->trace[0] -- the most recent
 * frame -- ends up at the head of the list.
 */
static Eterm
current_stacktrace(Process* p, Process* rp, Eterm** hpp)
{
    Uint sz;
    struct StackTrace* s;
    int depth;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Uint heap_size;
    int i;
    Eterm* hp = *hpp;
    Eterm mfa;
    Eterm res = NIL;

    depth = 8;	/* hard-coded maximum number of frames */
    sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
    s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
    s->depth = 0;
    if (rp->i) {
	s->trace[s->depth++] = rp->i;
	depth--;
    }
    if (depth > 0 && rp->cp != 0) {
	/* cp - 1: step back into the calling function so the lookup
	 * resolves the caller -- NOTE(review): mirrors continuation-
	 * pointer handling elsewhere in the emulator; confirm. */
	s->trace[s->depth++] = rp->cp - 1;
	depth--;
    }
    erts_save_stacktrace(rp, s, depth);

    depth = s->depth;
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
					     depth*sizeof(FunctionInfo));
    /* 3 extra words -- presumably for a wrapping 2-tuple built by the
     * caller; TODO confirm against the process_info result builder. */
    heap_size = 3;
    for (i = 0; i < depth; i++) {
	erts_lookup_function_info(stkp, s->trace[i], 1);
	if (stkp->current) {
	    heap_size += stkp->needed + 2;	/* MFA item + cons cell */
	    stkp++;
	}
    }

    hp = HAlloc(p, heap_size);
    /* Cons from the last resolved frame backwards so stk[0] becomes
     * the head of the list. */
    while (stkp > stk) {
	stkp--;
	hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
	res = CONS(hp, mfa, res);
	hp += 2;
    }

    erts_free(ERTS_ALC_T_TMP, stk);
    erts_free(ERTS_ALC_T_TMP, s);
    *hpp = hp;
    return res;
}
  1519. #if defined(VALGRIND)
  1520. static int check_if_xml(void)
  1521. {
  1522. char buf[1];
  1523. size_t bufsz = sizeof(buf);
  1524. return erts_sys_getenv("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
  1525. }
  1526. #else
  1527. #define check_if_xml() 0
  1528. #endif
/*
 * This function takes care of calls to erlang:system_info/1 when the argument
 * is a tuple.
 *
 * Supported selectors (first tuple element):
 *   allocator_sizes, allocator  - trap to the allocator info BIFs, or
 *                                 (arity 3) post an async info request
 *   wordsize                    - internal (Eterm) or external (UWord) size
 *   allocated                   - instrumentation memory-map / stat dumps
 *   internal_cpu_topology, cpu_topology
 *   error_checker/purify/valgrind, quantify, ultrasparc_set_pcr
 *                               - only when built with the matching tool
 * Anything else raises badarg.
 */
static BIF_RETTYPE
info_1_tuple(Process* BIF_P,	/* Pointer to current process. */
	     Eterm* tp,		/* Pointer to first element in tuple */
	     int arity)		/* Arity of tuple (untagged). */
{
    Eterm ret;
    Eterm sel;

    sel = *tp++;	/* tp now points at the selector's argument(s) */

    if (sel == am_allocator_sizes) {
	switch (arity) {
	case 2:
	    ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
	    return ret;
	case 3:
	    if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1))
		return am_true;
	    /* fallthrough: request could not be made */
	default:
	    goto badarg;
	}
    }
    else if (sel == am_wordsize && arity == 2) {
	if (tp[0] == am_internal) {
	    return make_small(sizeof(Eterm));
	}
	if (tp[0] == am_external) {
	    return make_small(sizeof(UWord));
	}
	goto badarg;
    } else if (sel == am_allocated) {
	if (arity == 2) {
	    Eterm res = THE_NON_VALUE;
	    char *buf;
	    int len = is_string(*tp);	/* length when a proper string, else <= 0 */
	    if (len <= 0)
		return res;
	    buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
	    if (intlist_to_buf(*tp, buf, len) != len)
		erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
	    buf[len] = '\0';
	    res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    if (is_non_value(res))
		goto badarg;
	    return res;
	}
	else if (arity == 3 && tp[0] == am_status) {
	    if (is_atom(tp[1]))
		return erts_instr_get_stat(BIF_P, tp[1], 1);
	    else {
		/* tp[1] is expected to be a file name as a string */
		Eterm res = THE_NON_VALUE;
		char *buf;
		int len = is_string(tp[1]);
		if (len <= 0)
		    return res;
		buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
		if (intlist_to_buf(tp[1], buf, len) != len)
		    erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
		buf[len] = '\0';
		res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		if (is_non_value(res))
		    goto badarg;
		return res;
	    }
	}
	else
	    goto badarg;
    } else if (sel == am_allocator) {
	switch (arity) {
	case 2:
	    ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
	    return ret;
	case 3:
	    if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0))
		return am_true;
	    /* fallthrough: request could not be made */
	default:
	    goto badarg;
	}
    } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
	return erts_get_cpu_topology_term(BIF_P, *tp);
    } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
	Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
	if (res == THE_NON_VALUE)
	    goto badarg;
	ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
	return ret;
#if defined(PURIFY) || defined(VALGRIND)
    } else if (ERTS_IS_ATOM_STR("error_checker", sel)
#if defined(PURIFY)
	       || sel == am_purify
#elif defined(VALGRIND)
	       || ERTS_IS_ATOM_STR("valgrind", sel)
#endif
	) {
	if (*tp == am_memory) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
#elif defined(VALGRIND)
	    VALGRIND_DO_LEAK_CHECK;
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_fd) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
#elif defined(VALGRIND)
	    /* Not present in valgrind... */
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_running) {
#if defined(PURIFY)
	    BIF_RET(purify_is_running() ? am_true : am_false);
#elif defined(VALGRIND)
	    BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
#endif
	} else if (is_list(*tp)) {
	    /* A message (iolist) to be forwarded to the error checker's log */
#if defined(PURIFY)
#define ERTS_ERROR_CHECKER_PRINTF purify_printf
#define ERTS_ERROR_CHECKER_PRINTF_XML purify_printf
#elif defined(VALGRIND)
#define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#  ifndef HAVE_VALGRIND_PRINTF_XML
#    define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF
#  else
#    define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF_XML
#  endif
#endif
	    Uint buf_size = 8*1024; /* Try with 8KB first */
	    char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
	    int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
	    if (r < 0) {
		/* Buffer too small; size it exactly and retry */
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		if (erts_iolist_size(*tp, &buf_size)) {
		    goto badarg;
		}
		buf_size++;
		buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
		r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
		ASSERT(r == buf_size - 1);
	    }
	    /* r is the number of unused bytes left in buf, so the data
	     * ends at buf_size - 1 - r -- NOTE(review): confirm against
	     * the io_list_to_buf() contract. */
	    buf[buf_size - 1 - r] = '\0';
	    if (check_if_xml()) {
		ERTS_ERROR_CHECKER_PRINTF_XML("<erlang_info_log>"
					      "%s</erlang_info_log>\n", buf);
	    } else {
		ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
	    }
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
	}
#endif
#ifdef QUANTIFY
    } else if (sel == am_quantify) {
	if (*tp == am_clear) {
	    quantify_clear_data();
	    BIF_RET(am_true);
	} else if (*tp == am_start) {
	    quantify_start_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_stop) {
	    quantify_stop_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_running) {
	    BIF_RET(quantify_is_running() ? am_true : am_false);
	}
#endif
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
    } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
	unsigned long long tmp;
	int fd;
	int rc;
	if (arity != 2 || !is_small(*tp)) {
	    goto badarg;
	}
	tmp = signed_val(*tp);
	if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
	    BIF_RET(am_false);
	}
	rc = ioctl(fd, PERFMON_SETPCR, &tmp);
	close(fd);
	if (rc < 0) {
	    BIF_RET(am_false);
	}
	BIF_RET(am_true);
#endif
    }

    /* No selector matched (or a matched selector fell through) */
 badarg:
    ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);

    return ret;
}
  1723. #define INFO_DSBUF_INC_SZ 256
  1724. static erts_dsprintf_buf_t *
  1725. grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1726. {
  1727. size_t size;
  1728. size_t free_size = dsbufp->size - dsbufp->str_len;
  1729. ASSERT(dsbufp);
  1730. if (need <= free_size)
  1731. return dsbufp;
  1732. size = need - free_size + INFO_DSBUF_INC_SZ;
  1733. size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
  1734. size += dsbufp->size;
  1735. ASSERT(dsbufp->str_len + need <= size);
  1736. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
  1737. (void *) dsbufp->str,
  1738. size);
  1739. dsbufp->size = size;
  1740. return dsbufp;
  1741. }
  1742. static erts_dsprintf_buf_t *
  1743. erts_create_info_dsbuf(Uint size)
  1744. {
  1745. Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
  1746. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
  1747. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
  1748. sizeof(erts_dsprintf_buf_t));
  1749. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  1750. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
  1751. dsbufp->str[0] = '\0';
  1752. dsbufp->size = init_size;
  1753. return dsbufp;
  1754. }
  1755. static void
  1756. erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
  1757. {
  1758. if (dsbufp->str)
  1759. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
  1760. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
  1761. }
  1762. static Eterm
  1763. c_compiler_used(Eterm **hpp, Uint *szp)
  1764. {
  1765. #if defined(__GNUC__)
  1766. # if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
  1767. # define ERTS_GNUC_VSN_NUMS 3
  1768. # elif defined(__GNUC_MINOR__)
  1769. # define ERTS_GNUC_VSN_NUMS 2
  1770. # else
  1771. # define ERTS_GNUC_VSN_NUMS 1
  1772. # endif
  1773. return erts_bld_tuple(hpp,
  1774. szp,
  1775. 2,
  1776. erts_bld_atom(hpp, szp, "gnuc"),
  1777. #if ERTS_GNUC_VSN_NUMS > 1
  1778. erts_bld_tuple(hpp,
  1779. szp,
  1780. ERTS_GNUC_VSN_NUMS,
  1781. #endif
  1782. erts_bld_uint(hpp, szp,
  1783. (Uint) __GNUC__)
  1784. #ifdef __GNUC_MINOR__
  1785. ,
  1786. erts_bld_uint(hpp, szp,
  1787. (Uint) __GNUC_MINOR__)
  1788. #ifdef __GNUC_PATCHLEVEL__
  1789. ,
  1790. erts_bld_uint(hpp, szp,
  1791. (Uint) __GNUC_PATCHLEVEL__)
  1792. #endif
  1793. #endif
  1794. #if ERTS_GNUC_VSN_NUMS > 1
  1795. )
  1796. #endif
  1797. );
  1798. #elif defined(_MSC_VER)
  1799. return erts_bld_tuple(hpp,
  1800. szp,
  1801. 2,
  1802. erts_bld_atom(hpp, szp, "msc"),
  1803. erts_bld_uint(hpp, szp,

Large files files are truncated, but you can click here to view the full file