PageRenderTime 39ms CodeModel.GetById 13ms RepoModel.GetById 0ms app.codeStats 1ms

/erts/emulator/beam/erl_bif_info.c

https://github.com/notarf/otp
C | 4174 lines | 3567 code | 392 blank | 215 comment | 952 complexity | bde47a34cfe099565509320ecd3c1aef MD5 | raw file
Possible License(s): BSD-2-Clause
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1999-2011. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include "sys.h"
  23. #include "erl_vm.h"
  24. #include "global.h"
  25. #include "erl_process.h"
  26. #include "erl_nmgc.h"
  27. #include "error.h"
  28. #include "erl_driver.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "erl_version.h"
  32. #include "erl_db_util.h"
  33. #include "erl_message.h"
  34. #include "erl_binary.h"
  35. #include "erl_db.h"
  36. #include "erl_instrument.h"
  37. #include "dist.h"
  38. #include "erl_gc.h"
  39. #include "erl_cpu_topology.h"
  40. #include "erl_async.h"
  41. #include "erl_thr_progress.h"
  42. #ifdef HIPE
  43. #include "hipe_arch.h"
  44. #endif
  45. #ifdef ERTS_ENABLE_LOCK_COUNT
  46. #include "erl_lock_count.h"
  47. #endif
  48. #ifdef VALGRIND
  49. #include <valgrind/valgrind.h>
  50. #include <valgrind/memcheck.h>
  51. #endif
/* Trap exports used to restart allocator-info gathering BIFs in stages. */
static Export* alloc_info_trap = NULL;
static Export* alloc_sizes_trap = NULL;
static Export *gather_sched_wall_time_res_trap;

/* Declare a local variable AM_<S> bound to the atom 'S'. */
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)

/* Keep erts_system_version as a global variable for easy access from a core */
static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
                                     " (erts-" ERLANG_VERSION ")"
#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
                                     " [no-c-stack-objects]"
#endif
#ifndef OTP_RELEASE
                                     " [source]"
#endif
#ifdef ARCH_64
#if HALFWORD_HEAP
                                     " [64-bit halfword]"
#else
                                     " [64-bit]"
#endif
#endif
#ifdef ERTS_SMP
                                     " [smp:%beu:%beu]"
#endif
#ifdef USE_THREADS
                                     " [async-threads:%d]"
#endif
#ifdef HIPE
                                     " [hipe]"
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
                                     " [kernel-poll:%s]"
#endif
#ifdef HYBRID
                                     " [hybrid heap]"
#endif
#ifdef INCREMENTAL
                                     " [incremental GC]"
#endif
#ifdef ET_DEBUG
#if ET_DEBUG
                                     " [type-assertions]"
#endif
#endif
#ifdef DEBUG
                                     " [debug-compiled]"
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
                                     " [lock-checking]"
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
                                     " [lock-counting]"
#endif
#ifdef PURIFY
                                     " [purify-compiled]"
#endif
#ifdef VALGRIND
                                     " [valgrind-compiled]"
#endif
                                     "\n");

/* Number of elements in a (true) array — do not use on pointers. */
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))

#if defined(HAVE_SOLARIS_SPARC_PERFMON)
# include <sys/ioccom.h>
# define PERFMON_SETPCR _IOW('P', 1, unsigned long long)
# define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
#endif

/* Cached, pre-built {OsType,OsFlavor} and {Major,Minor,Build} tuples */
static Eterm os_type_tuple;
static Eterm os_version_tuple;

/* Forward declarations for helpers defined later in this file. */
static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item);
static Eterm
current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
/*
 * Build a list of {BinaryAddress, OrigSize, RefCount} 3-tuples, one per
 * proc-bin in the off-heap list 'oh'.  Follows the usual two-pass
 * erts_bld_* convention: with szp != NULL only the required heap size
 * is accumulated into *szp; with hpp != NULL the terms are built at
 * *hpp, which is advanced past the words consumed.
 */
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
    struct erl_off_heap_header* ohh;
    Eterm res = NIL;
    Eterm tuple;

    for (ohh = oh->first; ohh; ohh = ohh->next) {
        if (ohh->thing_word == HEADER_PROC_BIN) {
            ProcBin* pb = (ProcBin*) ohh;
            Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
            Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);

            if (szp)
                *szp += 4+2; /* 3-tuple (4 words) + cons cell (2 words) */
            if (hpp) {
                Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc);
                tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
                /* The cons cell lives right after the 4-word tuple. */
                res = CONS(*hpp + 4, tuple, res);
                *hpp += 4+2;
            }
        }
    }
    return res;
}
  147. /*
  148. make_monitor_list:
  149. returns a list of records..
  150. -record(erl_monitor, {
  151. type, % MON_ORIGIN or MON_TARGET (1 or 3)
  152. ref,
  153. pid, % Process or nodename
  154. name % registered name or []
  155. }).
  156. */
  157. static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
  158. {
  159. Uint *psz = vpsz;
  160. *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
  161. *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
  162. *psz += 8; /* CONS + 5-tuple */
  163. }
/* State threaded through do_make_one_mon_element() while building
 * the monitor record list. */
typedef struct {
    Process *p;  /* process whose heap/off-heap list receives the terms */
    Eterm *hp;   /* next free word of the pre-sized heap allocation */
    Eterm res;   /* accumulated result list */
    Eterm tag;   /* record tag atom (AM_erl_monitor) */
} MonListContext;
/*
 * Append one {Tag, Type, Ref, Pid, Name} 5-tuple to the result list in
 * the MonListContext.  Non-constant ref/pid terms are copied onto the
 * pre-allocated heap by STORE_NC, which advances pmlc->hp itself, so
 * statement order here is significant.
 */
static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
    MonListContext *pmlc = vpmlc;
    Eterm tup;
    Eterm r = (IS_CONST(mon->ref)
               ? mon->ref
               : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
    Eterm p = (IS_CONST(mon->pid)
               ? mon->pid
               : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
    tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
    pmlc->hp += 6; /* 5-tuple: header + 5 elements */
    pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
    pmlc->hp += 2; /* cons cell */
}
  185. static Eterm
  186. make_monitor_list(Process *p, ErtsMonitor *root)
  187. {
  188. DECL_AM(erl_monitor);
  189. Uint sz = 0;
  190. MonListContext mlc;
  191. erts_doforall_monitors(root, &do_calc_mon_size, &sz);
  192. if (sz == 0) {
  193. return NIL;
  194. }
  195. mlc.p = p;
  196. mlc.hp = HAlloc(p,sz);
  197. mlc.res = NIL;
  198. mlc.tag = AM_erl_monitor;
  199. erts_doforall_monitors(root, &do_make_one_mon_element, &mlc);
  200. return mlc.res;
  201. }
  202. /*
  203. make_link_list:
  204. returns a list of records..
  205. -record(erl_link, {
  206. type, % LINK_NODE or LINK_PID (1 or 3)
  207. pid, % Process or nodename
  208. targets % List of erl_link's or nil
  209. }).
  210. */
  211. static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
  212. {
  213. Uint *psz = vpsz;
  214. *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
  215. if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
  216. /* Node links use this pointer as ref counter... */
  217. erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
  218. }
  219. *psz += 7; /* CONS + 4-tuple */
  220. }
/* State threaded through do_make_one_lnk_element() while building
 * the link record list. */
typedef struct {
    Process *p;  /* process whose heap/off-heap list receives the terms */
    Eterm *hp;   /* next free word of the pre-sized heap allocation */
    Eterm res;   /* accumulated result list */
    Eterm tag;   /* record tag atom (AM_erl_link) */
} LnkListContext;
/*
 * Append one {Tag, Type, Pid, Targets} 4-tuple to the result list.
 * For node links 'Targets' is the reference count (stored in the root
 * pointer slot); for other links the sub-links are recursively rendered
 * into their own list first, by temporarily swapping out the result
 * accumulator.
 */
static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
{
    LnkListContext *pllc = vpllc;
    Eterm tup;
    Eterm old_res, targets = NIL;
    Eterm p = (IS_CONST(lnk->pid)
               ? lnk->pid
               : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
    if (lnk->type == LINK_NODE) {
        /* Node links use the root pointer as a reference counter. */
        targets = make_small(ERTS_LINK_REFC(lnk));
    } else if (ERTS_LINK_ROOT(lnk) != NULL) {
        /* Build the target sub-list with an empty accumulator, then
         * restore the outer one. */
        old_res = pllc->res;
        pllc->res = NIL;
        erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_make_one_lnk_element, vpllc);
        targets = pllc->res;
        pllc->res = old_res;
    }
    tup = TUPLE4(pllc->hp, pllc->tag, make_small(lnk->type), p, targets);
    pllc->hp += 5; /* 4-tuple: header + 4 elements */
    pllc->res = CONS(pllc->hp, tup, pllc->res);
    pllc->hp += 2; /* cons cell */
}
  249. static Eterm
  250. make_link_list(Process *p, ErtsLink *root, Eterm tail)
  251. {
  252. DECL_AM(erl_link);
  253. Uint sz = 0;
  254. LnkListContext llc;
  255. erts_doforall_links(root, &do_calc_lnk_size, &sz);
  256. if (sz == 0) {
  257. return tail;
  258. }
  259. llc.p = p;
  260. llc.hp = HAlloc(p,sz);
  261. llc.res = tail;
  262. llc.tag = AM_erl_link;
  263. erts_doforall_links(root, &do_make_one_lnk_element, &llc);
  264. return llc.res;
  265. }
/*
 * Print the system version banner to the given output target.  The
 * format string erts_system_version contains conditionally-compiled
 * %-directives; the conditional arguments below must match them in
 * number and order, under the same preprocessor conditions.
 */
int
erts_print_system_version(int to, void *arg, Process *c_p)
{
#ifdef ERTS_SMP
    Uint total, online, active;
    (void) erts_schedulers_state(&total, &online, &active, 0);
#endif
    return erts_print(to, arg, erts_system_version
#ifdef ERTS_SMP
                      , total, online
#endif
#ifdef USE_THREADS
                      , erts_async_max_threads
#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
                      , erts_use_kernel_poll ? "true" : "false"
#endif
        );
}
/* One collected monitor/link entry: the monitored entity (a pid or a
 * registered name) and, for by-name monitors, the node it refers to. */
typedef struct {
    Eterm entity;
    Eterm node;
} MonitorInfo;

/* Growable temporary array of MonitorInfo entries, plus the heap size
 * needed to render the collected entries as a term. */
typedef struct {
    MonitorInfo *mi;
    Uint mi_i;    /* number of used entries / next free slot */
    Uint mi_max;  /* allocated capacity */
    int sz;       /* heap words required for the result term */
} MonitorInfoCollection;
/* Initialize an empty MonitorInfoCollection. */
#define INIT_MONITOR_INFOS(MIC) do { \
    (MIC).mi = NULL; \
    (MIC).mi_i = (MIC).mi_max = 0; \
    (MIC).sz = 0; \
} while(0)

/* Capacity growth step for EXTEND_MONITOR_INFOS. */
#define MI_INC 50

/* Ensure room for at least one more MonitorInfo entry, growing the
 * ERTS_ALC_T_TMP allocation by MI_INC entries when full. */
#define EXTEND_MONITOR_INFOS(MICP) \
do { \
    if ((MICP)->mi_i >= (MICP)->mi_max) { \
        (MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP, \
                                                (MICP)->mi, \
                                                ((MICP)->mi_max+MI_INC) \
                                                * sizeof(MonitorInfo)) \
                      : erts_alloc(ERTS_ALC_T_TMP, \
                                   MI_INC*sizeof(MonitorInfo))); \
        (MICP)->mi_max += MI_INC; \
    } \
} while (0)

/* Free the temporary entry array, if one was ever allocated. */
#define DESTROY_MONITOR_INFOS(MIC) \
do { \
    if ((MIC).mi != NULL) { \
        erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi); \
    } \
} while (0)
  319. static void collect_one_link(ErtsLink *lnk, void *vmicp)
  320. {
  321. MonitorInfoCollection *micp = vmicp;
  322. EXTEND_MONITOR_INFOS(micp);
  323. if (!(lnk->type == LINK_PID)) {
  324. return;
  325. }
  326. micp->mi[micp->mi_i].entity = lnk->pid;
  327. micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
  328. micp->mi_i++;
  329. }
/*
 * Collect one origin (outward) monitor into the collection, classifying
 * it as external-by-name, external-by-pid, internal-by-name or
 * internal-by-pid, and accounting the heap space its rendering needs.
 * Target-side monitors are skipped.
 */
static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
{
    MonitorInfoCollection *micp = vmicp;

    if (mon->type != MON_ORIGIN) {
        return;
    }
    EXTEND_MONITOR_INFOS(micp);
    if (is_atom(mon->pid)) { /* external by name */
        micp->mi[micp->mi_i].entity = mon->name;
        micp->mi[micp->mi_i].node = mon->pid;
        micp->sz += 3; /* need one 2-tuple */
    } else if (is_external_pid(mon->pid)) { /* external by pid */
        micp->mi[micp->mi_i].entity = mon->pid;
        micp->mi[micp->mi_i].node = NIL;
        micp->sz += NC_HEAP_SIZE(mon->pid);
    } else if (!is_nil(mon->name)) { /* internal by name */
        micp->mi[micp->mi_i].entity = mon->name;
        micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
        micp->sz += 3; /* need one 2-tuple */
    } else { /* internal by pid */
        micp->mi[micp->mi_i].entity = mon->pid;
        micp->mi[micp->mi_i].node = NIL;
        /* no additional heap space needed */
    }
    micp->mi_i++;
    micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
}
  357. static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
  358. {
  359. MonitorInfoCollection *micp = vmicp;
  360. if (mon->type != MON_TARGET) {
  361. return;
  362. }
  363. EXTEND_MONITOR_INFOS(micp);
  364. micp->mi[micp->mi_i].node = NIL;
  365. micp->mi[micp->mi_i].entity = mon->pid;
  366. micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
  367. micp->mi_i++;
  368. }
/* Growable temporary array of suspend-monitor pointers collected for
 * the 'suspending' process_info item, plus heap-size accounting. */
typedef struct {
    Process *c_p;             /* calling process */
    ErtsProcLocks c_p_locks;  /* locks currently held on c_p */
    ErtsSuspendMonitor **smi;
    Uint smi_i;               /* number of used entries */
    Uint smi_max;             /* allocated capacity */
    int sz;                   /* heap words required for the result term */
} ErtsSuspendMonitorInfoCollection;
/* Initialize an empty suspend-monitor collection, remembering the
 * calling process and the locks it holds (needed by erts_pid2proc). */
#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do { \
    (SMIC).c_p = (CP); \
    (SMIC).c_p_locks = (CPL); \
    (SMIC).smi = NULL; \
    (SMIC).smi_i = (SMIC).smi_max = 0; \
    (SMIC).sz = 0; \
} while(0)

/* Capacity growth step for ERTS_EXTEND_SUSPEND_MONITOR_INFOS. */
#define ERTS_SMI_INC 50

/* Ensure room for at least one more entry, growing the ERTS_ALC_T_TMP
 * allocation by ERTS_SMI_INC entries when full. */
#define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP) \
do { \
    if ((SMICP)->smi_i >= (SMICP)->smi_max) { \
        (SMICP)->smi = ((SMICP)->smi \
                        ? erts_realloc(ERTS_ALC_T_TMP, \
                                       (SMICP)->smi, \
                                       ((SMICP)->smi_max \
                                        + ERTS_SMI_INC) \
                                       * sizeof(ErtsSuspendMonitor *)) \
                        : erts_alloc(ERTS_ALC_T_TMP, \
                                     ERTS_SMI_INC \
                                     * sizeof(ErtsSuspendMonitor *))); \
        (SMICP)->smi_max += ERTS_SMI_INC; \
    } \
} while (0)

/* Free the temporary entry array, if one was ever allocated. */
#define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC) \
do { \
    if ((SMIC).smi != NULL) { \
        erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi); \
    } \
} while (0)
/*
 * Collect one suspend monitor whose suspendee is still alive.  When the
 * suspend is active, any pending count is folded into the active count.
 * Heap space for a cons + 3-tuple is accounted, plus bignum space for
 * counts that do not fit in a small integer.
 */
static void
collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
{
    ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
    Process *suspendee = erts_pid2proc(smicp->c_p,
                                       smicp->c_p_locks,
                                       smon->pid,
                                       0);
    if (suspendee) { /* suspendee is alive */
        Sint a, p;
        if (smon->active) {
            smon->active += smon->pending;
            smon->pending = 0;
        }
        /* Exactly one of active/pending is non-zero at this point. */
        ASSERT((smon->active && !smon->pending)
               || (smon->pending && !smon->active));
        ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
        smicp->smi[smicp->smi_i] = smon;
        smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
        a = (Sint) smon->active;  /* quiet compiler warnings */
        p = (Sint) smon->pending; /* on 64-bit machines */
        if (!IS_SSMALL(a))
            smicp->sz += BIG_UINT_HEAP_SIZE;
        if (!IS_SSMALL(p))
            smicp->sz += BIG_UINT_HEAP_SIZE;
        smicp->smi_i++;
    }
}
  434. static void one_link_size(ErtsLink *lnk, void *vpu)
  435. {
  436. Uint *pu = vpu;
  437. *pu += ERTS_LINK_SIZE*sizeof(Uint);
  438. if(!IS_CONST(lnk->pid))
  439. *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
  440. if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
  441. erts_doforall_links(ERTS_LINK_ROOT(lnk),&one_link_size,vpu);
  442. }
  443. }
  444. static void one_mon_size(ErtsMonitor *mon, void *vpu)
  445. {
  446. Uint *pu = vpu;
  447. *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
  448. if(!IS_CONST(mon->pid))
  449. *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
  450. if(!IS_CONST(mon->ref))
  451. *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
  452. }
/*
 * process_info/[1,2]
 */

/* Failure reasons reported back by process_info_list(). */
#define ERTS_PI_FAIL_TYPE_BADARG 0
#define ERTS_PI_FAIL_TYPE_YIELD 1
#define ERTS_PI_FAIL_TYPE_AWAIT_EXIT 2

/* Process locks that must be held on the target process in order to
 * answer the given process_info item. */
static ERTS_INLINE ErtsProcLocks
pi_locks(Eterm info)
{
    switch (info) {
    case am_status:
    case am_priority:
        return ERTS_PROC_LOCK_STATUS;
    case am_links:
    case am_monitors:
    case am_monitored_by:
    case am_suspending:
        return ERTS_PROC_LOCK_LINK;
    case am_messages:
    case am_message_queue_len:
    case am_total_heap_size:
        return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
    case am_memory:
        return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_MSGQ;
    default:
        return ERTS_PROC_LOCK_MAIN;
    }
}
/*
 * All valid process_info arguments.  The position of each atom is its
 * item index; pi_arg2ix() below must map the atoms back to exactly
 * these positions (verified by a DEBUG assertion in process_info_init).
 */
static Eterm pi_args[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_messages,
    am_message_queue_len,
    am_links,
    am_monitors,
    am_monitored_by,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_heap_size,
    am_stack_size,
    am_memory,
    am_garbage_collection,
    am_group_leader,
    am_reductions,
    am_priority,
    am_trace,
    am_binary,
    am_sequential_trace_token,
    am_catchlevel,
    am_backtrace,
    am_last_calls,
    am_total_heap_size,
    am_suspending,
    am_min_heap_size,
    am_min_bin_vheap_size,
    am_current_location,
    am_current_stacktrace,
#ifdef HYBRID
    am_message_binary
#endif
};

/* Number of valid process_info items. */
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
  521. static ERTS_INLINE Eterm
  522. pi_ix2arg(int ix)
  523. {
  524. if (ix < 0 || ERTS_PI_ARGS <= ix)
  525. return am_undefined;
  526. return pi_args[ix];
  527. }
/* Inverse of pi_ix2arg(): map a process_info item atom to its index in
 * pi_args, or -1 for an invalid item.  Must stay in sync with pi_args
 * (checked by a DEBUG assertion in process_info_init). */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name: return 0;
    case am_current_function: return 1;
    case am_initial_call: return 2;
    case am_status: return 3;
    case am_messages: return 4;
    case am_message_queue_len: return 5;
    case am_links: return 6;
    case am_monitors: return 7;
    case am_monitored_by: return 8;
    case am_dictionary: return 9;
    case am_trap_exit: return 10;
    case am_error_handler: return 11;
    case am_heap_size: return 12;
    case am_stack_size: return 13;
    case am_memory: return 14;
    case am_garbage_collection: return 15;
    case am_group_leader: return 16;
    case am_reductions: return 17;
    case am_priority: return 18;
    case am_trace: return 19;
    case am_binary: return 20;
    case am_sequential_trace_token: return 21;
    case am_catchlevel: return 22;
    case am_backtrace: return 23;
    case am_last_calls: return 24;
    case am_total_heap_size: return 25;
    case am_suspending: return 26;
    case am_min_heap_size: return 27;
    case am_min_bin_vheap_size: return 28;
    case am_current_location: return 29;
    case am_current_stacktrace: return 30;
#ifdef HYBRID
    case am_message_binary: return 31;
#endif
    default: return -1;
    }
}
/* Items included in the result of process_info/1, in result order. */
static Eterm pi_1_keys[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_message_queue_len,
    am_messages,
    am_links,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_priority,
    am_group_leader,
    am_total_heap_size,
    am_heap_size,
    am_stack_size,
    am_reductions,
    am_garbage_collection,
    am_suspending
};

#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))

/* Constant list term of pi_1_keys; built once by process_info_init(). */
static Eterm pi_1_keys_list;
#if HEAP_ON_C_STACK
/* Static backing heap for pi_1_keys_list: 2 words per cons cell. */
static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
#endif
/*
 * Build the constant key list used by process_info/1.  The backing
 * heap is either a static array or a permanent allocation, so the
 * resulting list term stays valid for the lifetime of the VM.
 */
static void
process_info_init(void)
{
#if HEAP_ON_C_STACK
    Eterm *hp = &pi_1_keys_list_heap[0];
#else
    Eterm *hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM,sizeof(Eterm)*2*ERTS_PI_1_NO_OF_KEYS);
#endif
    int i;

    pi_1_keys_list = NIL;

    /* Cons from the last key backwards so the list ends up in
     * pi_1_keys array order. */
    for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
        pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
        hp += 2;
    }

#ifdef DEBUG
    { /* Make sure the process_info argument mappings are consistent */
        int ix;
        for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
            ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
        }
    }
#endif
}
/*
 * Look up and lock the target process of a process_info request.
 * On SMP builds this may return ERTS_PROC_LOCK_BUSY, in which case the
 * caller should yield and retry.
 */
static ERTS_INLINE Process *
pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
{
#ifdef ERTS_SMP
    /*
     * If the main lock is needed, we use erts_pid2proc_not_running()
     * instead of erts_pid2proc() for two reasons:
     * * Current function of pid and possibly other information will
     *   have been updated so that process_info() is consistent with an
     *   info-request/info-response signal model.
     * * We avoid blocking the whole scheduler executing the
     *   process that is calling process_info() for a long time
     *   which will happen if pid is currently running.
     * The caller of process_info() may have to yield if pid
     * is currently running.
     */
    if (info_locks & ERTS_PROC_LOCK_MAIN)
        return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
                                         pid, info_locks);
    else
#endif
        return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
                             pid, info_locks);
}
/* Defined later in this file; needed by process_info_list() below. */
BIF_RETTYPE
process_info_aux(Process *BIF_P,
                 Process *rp,
                 Eterm rpid,
                 Eterm item,
                 int always_wrap);

/* Growth step, and stack-allocated default size, for the buffer that
 * records requested item indices in process_info_list(). */
#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
/*
 * Evaluate a list of process_info items for process 'pid' and build
 * the result list on c_p's heap (backs both process_info/1 and the
 * list form of process_info/2).
 *
 * Returns am_undefined when the target does not exist.  Returns
 * THE_NON_VALUE on failure with *fail_type set to one of the
 * ERTS_PI_FAIL_TYPE_* reasons: bad argument, must yield (target is
 * running), or must wait for the target's pending exit to complete.
 */
static Eterm
process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
                  int *fail_type)
{
    int want_messages = 0;
    int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
    int *res_elem_ix = &def_res_elem_ix_buf[0];
    int res_elem_ix_ix = -1;
    int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
    Eterm part_res[ERTS_PI_ARGS];
    Eterm res, arg;
    Uint *hp, *hp_end;
    ErtsProcLocks locks = (ErtsProcLocks) 0;
    int res_len, ix;
    Process *rp = NULL;

    *fail_type = ERTS_PI_FAIL_TYPE_BADARG;

    /* One result slot per possible item; THE_NON_VALUE marks
     * "not yet computed". */
    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
        part_res[ix] = THE_NON_VALUE;

    ASSERT(is_list(list));

    /* Pass 1 over the request list: validate each item, accumulate the
     * process locks needed, and record item indices in request order
     * (moving to a heap buffer if the stack default overflows). */
    while (is_list(list)) {
        Eterm* consp = list_val(list);

        arg = CAR(consp);
        ix = pi_arg2ix(arg);
        if (ix < 0) {
            res = THE_NON_VALUE;
            goto done;
        }
        if (arg == am_messages)
            want_messages = 1;
        locks |= pi_locks(arg);
        res_elem_ix_ix++;
        if (res_elem_ix_ix >= res_elem_ix_sz) {
            if (res_elem_ix != &def_res_elem_ix_buf[0])
                res_elem_ix =
                    erts_realloc(ERTS_ALC_T_TMP,
                                 res_elem_ix,
                                 sizeof(int)*(res_elem_ix_sz
                                              += ERTS_PI_RES_ELEM_IX_BUF_INC));
            else {
                int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
                int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
                                                  sizeof(int)*new_res_elem_ix_sz);
                sys_memcpy((void *) new_res_elem_ix,
                           (void *) res_elem_ix,
                           sizeof(int)*res_elem_ix_sz);
                res_elem_ix = new_res_elem_ix;
                res_elem_ix_sz = new_res_elem_ix_sz;
            }
        }
        res_elem_ix[res_elem_ix_ix] = ix;
        list = CDR(consp);
    }
    if (is_not_nil(list)) {
        /* Improper list */
        res = THE_NON_VALUE;
        goto done;
    }

    res_len = res_elem_ix_ix+1;

    ASSERT(res_len > 0);

    rp = pi_pid2proc(c_p, pid, locks|ERTS_PROC_LOCK_STATUS);
    if (!rp) {
        res = am_undefined;
        goto done;
    }
    else if (rp == ERTS_PROC_LOCK_BUSY) {
        /* Target currently running; caller must yield and retry. */
        rp = NULL;
        res = THE_NON_VALUE;
        *fail_type = ERTS_PI_FAIL_TYPE_YIELD;
        goto done;
    }
    else if (c_p != rp && ERTS_PROC_PENDING_EXIT(rp)) {
        /* Target is about to exit; caller must wait for completion.
         * Keep the status lock; it is released in the cleanup below. */
        locks |= ERTS_PROC_LOCK_STATUS;
        res = THE_NON_VALUE;
        *fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
        goto done;
    }
    else if (!(locks & ERTS_PROC_LOCK_STATUS)) {
        /* Status lock was only needed for the pending-exit check. */
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
    }

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (want_messages) {
        ix = pi_arg2ix(am_messages);
        ASSERT(part_res[ix] == THE_NON_VALUE);
        part_res[ix] = process_info_aux(c_p, rp, pid, am_messages, always_wrap);
        ASSERT(part_res[ix] != THE_NON_VALUE);
    }

    /* Compute every remaining requested item, each at most once. */
    for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
        ix = res_elem_ix[res_elem_ix_ix];
        if (part_res[ix] == THE_NON_VALUE) {
            arg = pi_ix2arg(ix);
            part_res[ix] = process_info_aux(c_p, rp, pid, arg, always_wrap);
            ASSERT(part_res[ix] != THE_NON_VALUE);
        }
    }

    /* Build the result list in request order by consing backwards. */
    hp = HAlloc(c_p, res_len*2);
    hp_end = hp + res_len*2;
    res = NIL;

    for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
        ix = res_elem_ix[res_elem_ix_ix];
        ASSERT(part_res[ix] != THE_NON_VALUE);
        /*
         * If we should ignore the value of registered_name,
         * its value is nil. For more info, see comment in the
         * beginning of process_info_aux().
         */
        if (is_nil(part_res[ix])) {
            ASSERT(!always_wrap);
            ASSERT(pi_ix2arg(ix) == am_registered_name);
        }
        else {
            res = CONS(hp, part_res[ix], res);
            hp += 2;
        }
    }

    if (!always_wrap) {
        /* registered_name may have been skipped; return unused heap. */
        HRelease(c_p, hp_end, hp);
    }

 done:

    if (c_p == rp)
        locks &= ~ERTS_PROC_LOCK_MAIN;
    if (locks && rp)
        erts_smp_proc_unlock(rp, locks);

    if (res_elem_ix != &def_res_elem_ix_buf[0])
        erts_free(ERTS_ALC_T_TMP, res_elem_ix);

    return res;
}
/*
 * process_info/1 BIF: evaluates the default key set (pi_1_keys_list)
 * for the given pid.  Returns 'undefined' for dead processes; raises
 * badarg for non-local or malformed pids; may yield or trap via the
 * ERTS_BIF_* macros (which transfer control and do not return).
 */
BIF_RETTYPE process_info_1(BIF_ALIST_1)
{
    Eterm res;
    int fail_type;

    /* An external pid naming this node refers to a process from a
     * previous incarnation of the node, i.e. a dead process. */
    if (is_external_pid(BIF_ARG_1)
        && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
        BIF_RET(am_undefined);

    if (is_not_internal_pid(BIF_ARG_1)
        || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
        BIF_ERROR(BIF_P, BADARG);
    }

    res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
    if (is_non_value(res)) {
        switch (fail_type) {
        case ERTS_PI_FAIL_TYPE_BADARG:
            BIF_ERROR(BIF_P, BADARG);
        case ERTS_PI_FAIL_TYPE_YIELD:
            ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
        case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
            ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
        default:
            erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
        }
    }

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
/*
 * process_info/2 BIF.  The second argument is either a list of items
 * (delegated to process_info_list()), the empty list (returns []), or
 * a single item atom.  Returns 'undefined' for dead processes; may
 * yield or trap via the ERTS_BIF_* macros.
 */
BIF_RETTYPE process_info_2(BIF_ALIST_2)
{
    Eterm res;
    Process *rp;
    Eterm pid = BIF_ARG_1;
    ErtsProcLocks info_locks;
    int fail_type;

    /* An external pid naming this node refers to a dead process. */
    if (is_external_pid(pid)
        && external_pid_dist_entry(pid) == erts_this_dist_entry)
        BIF_RET(am_undefined);

    if (is_not_internal_pid(pid)
        || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (is_nil(BIF_ARG_2))
        BIF_RET(NIL);

    if (is_list(BIF_ARG_2)) {
        res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
        if (is_non_value(res)) {
            switch (fail_type) {
            case ERTS_PI_FAIL_TYPE_BADARG:
                BIF_ERROR(BIF_P, BADARG);
            case ERTS_PI_FAIL_TYPE_YIELD:
                ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
                                BIF_ARG_1, BIF_ARG_2);
            case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
                ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
            default:
                erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
                         __FILE__, __LINE__);
            }
        }
        ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
        BIF_RET(res);
    }

    /* Single-item request. */
    if (pi_arg2ix(BIF_ARG_2) < 0)
        BIF_ERROR(BIF_P, BADARG);

    info_locks = pi_locks(BIF_ARG_2);

    rp = pi_pid2proc(BIF_P, pid, info_locks|ERTS_PROC_LOCK_STATUS);
    if (!rp)
        res = am_undefined;
    else if (rp == ERTS_PROC_LOCK_BUSY)
        /* Target currently running; yield and retry. */
        ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
                        BIF_ARG_1, BIF_ARG_2);
    else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
        /* Target is about to exit; trap until the exit completes. */
        erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
        ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
    }
    else {
        /* Status lock was only needed for the pending-exit check. */
        if (!(info_locks & ERTS_PROC_LOCK_STATUS))
            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
        res = process_info_aux(BIF_P, rp, pid, BIF_ARG_2, 0);
    }
    ASSERT(is_value(res));

#ifdef ERTS_SMP
    if (BIF_P == rp)
        info_locks &= ~ERTS_PROC_LOCK_MAIN;
    if (rp && info_locks)
        erts_smp_proc_unlock(rp, info_locks);
#endif

    ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
    BIF_RET(res);
}
  870. Eterm
  871. process_info_aux(Process *BIF_P,
  872. Process *rp,
  873. Eterm rpid,
  874. Eterm item,
  875. int always_wrap)
  876. {
  877. Eterm *hp;
  878. Eterm res = NIL;
  879. ASSERT(rp);
  880. /*
  881. * Q: Why this always_wrap argument?
  882. *
  883. * A: registered_name is strange. If process has no registered name,
  884. * process_info(Pid, registered_name) returns [], and
  885. * the result of process_info(Pid) has no {registered_name, Name}
  886. * tuple in the resulting list. This is inconsistent with all other
  887. * options, but we do not dare to change it.
  888. *
  889. * When process_info/2 is called with a list as second argument,
  890. * registered_name behaves as it should, i.e. a
  891. * {registered_name, []} will appear in the resulting list.
  892. *
  893. * If always_wrap != 0, process_info_aux() always wrap the result
  894. * in a key two tuple.
  895. */
  896. switch (item) {
  897. case am_registered_name:
  898. if (rp->reg != NULL) {
  899. hp = HAlloc(BIF_P, 3);
  900. res = rp->reg->name;
  901. } else {
  902. if (always_wrap) {
  903. hp = HAlloc(BIF_P, 3);
  904. res = NIL;
  905. }
  906. else {
  907. return NIL;
  908. }
  909. }
  910. break;
  911. case am_current_function:
  912. res = current_function(BIF_P, rp, &hp, 0);
  913. break;
  914. case am_current_location:
  915. res = current_function(BIF_P, rp, &hp, 1);
  916. break;
  917. case am_current_stacktrace:
  918. res = current_stacktrace(BIF_P, rp, &hp);
  919. break;
  920. case am_initial_call:
  921. hp = HAlloc(BIF_P, 3+4);
  922. res = TUPLE3(hp,
  923. rp->initial[INITIAL_MOD],
  924. rp->initial[INITIAL_FUN],
  925. make_small(rp->initial[INITIAL_ARI]));
  926. hp += 4;
  927. break;
  928. case am_status:
  929. res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
  930. ASSERT(res != am_undefined);
  931. hp = HAlloc(BIF_P, 3);
  932. break;
  933. case am_messages: {
  934. ErlMessage* mp;
  935. int n;
  936. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  937. n = rp->msg.len;
  938. if (n == 0 || rp->trace_flags & F_SENSITIVE) {
  939. hp = HAlloc(BIF_P, 3);
  940. } else {
  941. int remove_bad_messages = 0;
  942. struct {
  943. Uint copy_struct_size;
  944. ErlMessage* msgp;
  945. } *mq = erts_alloc(ERTS_ALC_T_TMP, n*sizeof(*mq));
  946. Sint i = 0;
  947. Uint heap_need = 3;
  948. Eterm *hp_end;
  949. for (mp = rp->msg.first; mp; mp = mp->next) {
  950. heap_need += 2;
  951. mq[i].msgp = mp;
  952. if (rp != BIF_P) {
  953. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  954. if (is_value(msg)) {
  955. mq[i].copy_struct_size = (is_immed(msg)
  956. #ifdef HYBRID
  957. || NO_COPY(msg)
  958. #endif
  959. ? 0
  960. : size_object(msg));
  961. }
  962. else if (mq[i].msgp->data.attached) {
  963. mq[i].copy_struct_size
  964. = erts_msg_attached_data_size(mq[i].msgp);
  965. }
  966. else {
  967. /* Bad distribution message; ignore */
  968. remove_bad_messages = 1;
  969. mq[i].copy_struct_size = 0;
  970. }
  971. heap_need += mq[i].copy_struct_size;
  972. }
  973. else {
  974. mq[i].copy_struct_size = 0;
  975. if (mp->data.attached)
  976. heap_need += erts_msg_attached_data_size(mp);
  977. }
  978. i++;
  979. }
  980. hp = HAlloc(BIF_P, heap_need);
  981. hp_end = hp + heap_need;
  982. ASSERT(i == n);
  983. for (i--; i >= 0; i--) {
  984. Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
  985. if (rp != BIF_P) {
  986. if (is_value(msg)) {
  987. if (mq[i].copy_struct_size)
  988. msg = copy_struct(msg,
  989. mq[i].copy_struct_size,
  990. &hp,
  991. &MSO(BIF_P));
  992. }
  993. else if (mq[i].msgp->data.attached) {
  994. ErlHeapFragment *hfp;
  995. /*
  996. * Decode it into a message buffer and attach it
  997. * to the message instead of the attached external
  998. * term.
  999. *
  1000. * Note that we may not pass a process pointer
  1001. * to erts_msg_distext2heap(), since it would then
  1002. * try to alter locks on that process.
  1003. */
  1004. msg = erts_msg_distext2heap(
  1005. NULL, NULL, &hfp, &ERL_MESSAGE_TOKEN(mq[i].msgp),
  1006. mq[i].msgp->data.dist_ext);
  1007. ERL_MESSAGE_TERM(mq[i].msgp) = msg;
  1008. mq[i].msgp->data.heap_frag = hfp;
  1009. if (is_non_value(msg)) {
  1010. ASSERT(!mq[i].msgp->data.heap_frag);
  1011. /* Bad distribution message; ignore */
  1012. remove_bad_messages = 1;
  1013. continue;
  1014. }
  1015. else {
  1016. /* Make our copy of the message */
  1017. ASSERT(size_object(msg) == hfp->used_size);
  1018. msg = copy_struct(msg,
  1019. hfp->used_size,
  1020. &hp,
  1021. &MSO(BIF_P));
  1022. }
  1023. }
  1024. else {
  1025. /* Bad distribution message; ignore */
  1026. remove_bad_messages = 1;
  1027. continue;
  1028. }
  1029. }
  1030. else {
  1031. if (mq[i].msgp->data.attached) {
  1032. /* Decode it on the heap */
  1033. erts_move_msg_attached_data_to_heap(&hp,
  1034. &MSO(BIF_P),
  1035. mq[i].msgp);
  1036. msg = ERL_MESSAGE_TERM(mq[i].msgp);
  1037. ASSERT(!mq[i].msgp->data.attached);
  1038. if (is_non_value(msg)) {
  1039. /* Bad distribution message; ignore */
  1040. remove_bad_messages = 1;
  1041. continue;
  1042. }
  1043. }
  1044. }
  1045. res = CONS(hp, msg, res);
  1046. hp += 2;
  1047. }
  1048. HRelease(BIF_P, hp_end, hp+3);
  1049. erts_free(ERTS_ALC_T_TMP, mq);
  1050. if (remove_bad_messages) {
  1051. ErlMessage **mpp;
  1052. /*
  1053. * We need to remove bad distribution messages from
  1054. * the queue, so that the value returned for
  1055. * 'message_queue_len' is consistent with the value
  1056. * returned for 'messages'.
  1057. */
  1058. mpp = &rp->msg.first;
  1059. mp = rp->msg.first;
  1060. while (mp) {
  1061. if (is_value(ERL_MESSAGE_TERM(mp))) {
  1062. mpp = &mp->next;
  1063. mp = mp->next;
  1064. }
  1065. else {
  1066. ErlMessage* bad_mp = mp;
  1067. ASSERT(!mp->data.attached);
  1068. if (rp->msg.save == &mp->next)
  1069. rp->msg.save = mpp;
  1070. if (rp->msg.last == &mp->next)
  1071. rp->msg.last = mpp;
  1072. *mpp = mp->next;
  1073. mp = mp->next;
  1074. rp->msg.len--;
  1075. free_message(bad_mp);
  1076. }
  1077. }
  1078. }
  1079. }
  1080. break;
  1081. }
  1082. case am_message_queue_len:
  1083. hp = HAlloc(BIF_P, 3);
  1084. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1085. res = make_small(rp->msg.len);
  1086. break;
  1087. case am_links: {
  1088. MonitorInfoCollection mic;
  1089. int i;
  1090. Eterm item;
  1091. INIT_MONITOR_INFOS(mic);
  1092. erts_doforall_links(rp->nlinks,&collect_one_link,&mic);
  1093. hp = HAlloc(BIF_P, 3 + mic.sz);
  1094. res = NIL;
  1095. for (i = 0; i < mic.mi_i; i++) {
  1096. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1097. res = CONS(hp, item, res);
  1098. hp += 2;
  1099. }
  1100. DESTROY_MONITOR_INFOS(mic);
  1101. break;
  1102. }
  1103. case am_monitors: {
  1104. MonitorInfoCollection mic;
  1105. int i;
  1106. INIT_MONITOR_INFOS(mic);
  1107. erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic);
  1108. hp = HAlloc(BIF_P, 3 + mic.sz);
  1109. res = NIL;
  1110. for (i = 0; i < mic.mi_i; i++) {
  1111. if (is_atom(mic.mi[i].entity)) {
  1112. /* Monitor by name.
  1113. * Build {process, {Name, Node}} and cons it.
  1114. */
  1115. Eterm t1, t2;
  1116. t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
  1117. hp += 3;
  1118. t2 = TUPLE2(hp, am_process, t1);
  1119. hp += 3;
  1120. res = CONS(hp, t2, res);
  1121. hp += 2;
  1122. }
  1123. else {
  1124. /* Monitor by pid. Build {process, Pid} and cons it. */
  1125. Eterm t;
  1126. Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1127. t = TUPLE2(hp, am_process, pid);
  1128. hp += 3;
  1129. res = CONS(hp, t, res);
  1130. hp += 2;
  1131. }
  1132. }
  1133. DESTROY_MONITOR_INFOS(mic);
  1134. break;
  1135. }
  1136. case am_monitored_by: {
  1137. MonitorInfoCollection mic;
  1138. int i;
  1139. Eterm item;
  1140. INIT_MONITOR_INFOS(mic);
  1141. erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic);
  1142. hp = HAlloc(BIF_P, 3 + mic.sz);
  1143. res = NIL;
  1144. for (i = 0; i < mic.mi_i; ++i) {
  1145. item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
  1146. res = CONS(hp, item, res);
  1147. hp += 2;
  1148. }
  1149. DESTROY_MONITOR_INFOS(mic);
  1150. break;
  1151. }
  1152. case am_suspending: {
  1153. ErtsSuspendMonitorInfoCollection smic;
  1154. int i;
  1155. Eterm item;
  1156. #ifdef DEBUG
  1157. Eterm *hp_end;
  1158. #endif
  1159. ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
  1160. BIF_P,
  1161. (BIF_P == rp
  1162. ? ERTS_PROC_LOCK_MAIN
  1163. : 0) | ERTS_PROC_LOCK_LINK);
  1164. erts_doforall_suspend_monitors(rp->suspend_monitors,
  1165. &collect_one_suspend_monitor,
  1166. &smic);
  1167. hp = HAlloc(BIF_P, 3 + smic.sz);
  1168. #ifdef DEBUG
  1169. hp_end = hp + smic.sz;
  1170. #endif
  1171. res = NIL;
  1172. for (i = 0; i < smic.smi_i; i++) {
  1173. Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
  1174. Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
  1175. Eterm active;
  1176. Eterm pending;
  1177. if (IS_SSMALL(a))
  1178. active = make_small(a);
  1179. else {
  1180. active = small_to_big(a, hp);
  1181. hp += BIG_UINT_HEAP_SIZE;
  1182. }
  1183. if (IS_SSMALL(p))
  1184. pending = make_small(p);
  1185. else {
  1186. pending = small_to_big(p, hp);
  1187. hp += BIG_UINT_HEAP_SIZE;
  1188. }
  1189. item = TUPLE3(hp, smic.smi[i]->pid, active, pending);
  1190. hp += 4;
  1191. res = CONS(hp, item, res);
  1192. hp += 2;
  1193. }
  1194. ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
  1195. ASSERT(hp == hp_end);
  1196. break;
  1197. }
  1198. case am_dictionary:
  1199. if (rp->trace_flags & F_SENSITIVE) {
  1200. res = NIL;
  1201. } else {
  1202. res = erts_dictionary_copy(BIF_P, rp->dictionary);
  1203. }
  1204. hp = HAlloc(BIF_P, 3);
  1205. break;
  1206. case am_trap_exit:
  1207. hp = HAlloc(BIF_P, 3);
  1208. if (rp->flags & F_TRAPEXIT)
  1209. res = am_true;
  1210. else
  1211. res = am_false;
  1212. break;
  1213. case am_error_handler:
  1214. hp = HAlloc(BIF_P, 3);
  1215. res = erts_proc_get_error_handler(BIF_P);
  1216. break;
  1217. case am_heap_size: {
  1218. Uint hsz = 3;
  1219. (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
  1220. hp = HAlloc(BIF_P, hsz);
  1221. res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
  1222. break;
  1223. }
  1224. case am_fullsweep_after: {
  1225. Uint hsz = 3;
  1226. (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
  1227. hp = HAlloc(BIF_P, hsz);
  1228. res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
  1229. break;
  1230. }
  1231. case am_min_heap_size: {
  1232. Uint hsz = 3;
  1233. (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
  1234. hp = HAlloc(BIF_P, hsz);
  1235. res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
  1236. break;
  1237. }
  1238. case am_min_bin_vheap_size: {
  1239. Uint hsz = 3;
  1240. (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
  1241. hp = HAlloc(BIF_P, hsz);
  1242. res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
  1243. break;
  1244. }
  1245. case am_total_heap_size: {
  1246. ErlMessage *mp;
  1247. Uint total_heap_size;
  1248. Uint hsz = 3;
  1249. total_heap_size = rp->heap_sz;
  1250. if (rp->old_hend && rp->old_heap)
  1251. total_heap_size += rp->old_hend - rp->old_heap;
  1252. total_heap_size += rp->mbuf_sz;
  1253. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1254. for (mp = rp->msg.first; mp; mp = mp->next)
  1255. if (mp->data.attached)
  1256. total_heap_size += erts_msg_attached_data_size(mp);
  1257. (void) erts_bld_uint(NULL, &hsz, total_heap_size);
  1258. hp = HAlloc(BIF_P, hsz);
  1259. res = erts_bld_uint(&hp, NULL, total_heap_size);
  1260. break;
  1261. }
  1262. case am_stack_size: {
  1263. Uint stack_size = STACK_START(rp) - rp->stop;
  1264. Uint hsz = 3;
  1265. (void) erts_bld_uint(NULL, &hsz, stack_size);
  1266. hp = HAlloc(BIF_P, hsz);
  1267. res = erts_bld_uint(&hp, NULL, stack_size);
  1268. break;
  1269. }
  1270. case am_memory: { /* Memory consumed in bytes */
  1271. ErlMessage *mp;
  1272. Uint size = 0;
  1273. Uint hsz = 3;
  1274. struct saved_calls *scb;
  1275. size += sizeof(Process);
  1276. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
  1277. erts_doforall_links(rp->nlinks, &one_link_size, &size);
  1278. erts_doforall_monitors(rp->monitors, &one_mon_size, &size);
  1279. size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
  1280. if (rp->old_hend && rp->old_heap)
  1281. size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
  1282. size += rp->msg.len * sizeof(ErlMessage);
  1283. for (mp = rp->msg.first; mp; mp = mp->next)
  1284. if (mp->data.attached)
  1285. size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
  1286. if (rp->arg_reg != rp->def_arg_reg) {
  1287. size += rp->arity * sizeof(rp->arg_reg[0]);
  1288. }
  1289. if (rp->psd)
  1290. size += sizeof(ErtsPSD);
  1291. scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
  1292. if (scb) {
  1293. size += (sizeof(struct saved_calls)
  1294. + (scb->len-1) * sizeof(scb->ct[0]));
  1295. }
  1296. size += erts_dicts_mem_size(rp);
  1297. (void) erts_bld_uint(NULL, &hsz, size);
  1298. hp = HAlloc(BIF_P, hsz);
  1299. res = erts_bld_uint(&hp, NULL, size);
  1300. break;
  1301. }
  1302. case am_garbage_collection: {
  1303. DECL_AM(minor_gcs);
  1304. Eterm t;
  1305. hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3); /* last "3" is for outside tuple */
  1306. t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
  1307. res = CONS(hp, t, NIL); hp += 2;
  1308. t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
  1309. res = CONS(hp, t, res); hp += 2;
  1310. t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
  1311. res = CONS(hp, t, res); hp += 2;
  1312. t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
  1313. res = CONS(hp, t, res); hp += 2;
  1314. break;
  1315. }
  1316. case am_group_leader: {
  1317. int sz = NC_HEAP_SIZE(rp->group_leader);
  1318. hp = HAlloc(BIF_P, 3 + sz);
  1319. res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
  1320. break;
  1321. }
  1322. case am_reductions: {
  1323. Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
  1324. Uint hsz = 3;
  1325. (void) erts_bld_uint(NULL, &hsz, reds);
  1326. hp = HAlloc(BIF_P, hsz);
  1327. res = erts_bld_uint(&hp, NULL, reds);
  1328. break;
  1329. }
  1330. case am_priority:
  1331. hp = HAlloc(BIF_P, 3);
  1332. res = erts_get_process_priority(rp);
  1333. break;
  1334. case am_trace:
  1335. hp = HAlloc(BIF_P, 3);
  1336. res = make_small(rp->trace_flags & TRACEE_FLAGS);
  1337. break;
  1338. case am_binary: {
  1339. Uint sz = 3;
  1340. (void) bld_bin_list(NULL, &sz, &MSO(rp));
  1341. hp = HAlloc(BIF_P, sz);
  1342. res = bld_bin_list(&hp, NULL, &MSO(rp));
  1343. break;
  1344. }
  1345. #ifdef HYBRID
  1346. case am_message_binary: {
  1347. Uint sz = 3;
  1348. (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso);
  1349. hp = HAlloc(BIF_P, sz);
  1350. res = bld_bin_list(&hp, NULL, erts_global_offheap.mso);
  1351. break;
  1352. }
  1353. #endif
  1354. case am_sequential_trace_token:
  1355. res = copy_object(rp->seq_trace_token, BIF_P);
  1356. hp = HAlloc(BIF_P, 3);
  1357. break;
  1358. case am_catchlevel:
  1359. hp = HAlloc(BIF_P, 3);
  1360. res = make_small(catchlevel(BIF_P));
  1361. break;
  1362. case am_backtrace: {
  1363. erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
  1364. erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
  1365. res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
  1366. erts_destroy_tmp_dsbuf(dsbufp);
  1367. hp = HAlloc(BIF_P, 3);
  1368. break;
  1369. }
  1370. case am_last_calls: {
  1371. struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P);
  1372. if (!scb) {
  1373. hp = HAlloc(BIF_P, 3);
  1374. res = am_false;
  1375. } else {
  1376. /*
  1377. * One cons cell and a 3-struct, and a 2-tuple.
  1378. * Might be less than that, if there are sends, receives or timeouts,
  1379. * so we must do a HRelease() to avoid creating holes.
  1380. */
  1381. Uint needed = scb->n*(2+4) + 3;
  1382. Eterm* limit;
  1383. Eterm term, list;
  1384. int i, j;
  1385. hp = HAlloc(BIF_P, needed);
  1386. limit = hp + needed;
  1387. list = NIL;
  1388. for (i = 0; i < scb->n; i++) {
  1389. j = scb->cur - i - 1;
  1390. if (j < 0)
  1391. j += scb->len;
  1392. if (scb->ct[j] == &exp_send)
  1393. term = am_send;
  1394. else if (scb->ct[j] == &exp_receive)
  1395. term = am_receive;
  1396. else if (scb->ct[j] == &exp_timeout)
  1397. term = am_timeout;
  1398. else {
  1399. term = TUPLE3(hp,
  1400. scb->ct[j]->code[0],
  1401. scb->ct[j]->code[1],
  1402. make_small(scb->ct[j]->code[2]));
  1403. hp += 4;
  1404. }
  1405. list = CONS(hp, term, list);
  1406. hp += 2;
  1407. }
  1408. res = list;
  1409. res = TUPLE2(hp, item, res);
  1410. hp += 3;
  1411. HRelease(BIF_P,limit,hp);
  1412. return res;
  1413. }
  1414. break;
  1415. }
  1416. default:
  1417. return THE_NON_VALUE; /* will produce badarg */
  1418. }
  1419. return TUPLE2(hp, item, res);
  1420. }
  1421. #undef MI_INC
/*
 * Helper for process_info(Pid, current_function).
 *
 * Returns the function that 'rp' is currently executing, either as a
 * plain {M,F,A} tuple or (when full_info is nonzero) as an MFA item
 * with location information built by erts_build_mfa_item().
 *
 * Heap words are allocated on BIF_P's heap; *hpp is set to the first
 * unused word so the caller can continue building there (the extra 3
 * words allocated in each branch are presumably for the caller's outer
 * 2-tuple -- matches the other process_info items; verify in caller).
 *
 * Side effect: caches the looked-up function in rp->current.
 */
static Eterm
current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
{
    Eterm* hp;
    Eterm res;
    FunctionInfo fi;

    if (rp->current == NULL) {
	/* No cached current function; look it up from the instruction
	 * pointer and cache the result. */
	erts_lookup_function_info(&fi, rp->i, full_info);
	rp->current = fi.current;
    } else if (full_info) {
	/* Cached MFA exists, but we need location info too. */
	erts_lookup_function_info(&fi, rp->i, full_info);
	if (fi.current == NULL) {
	    /* Use the current function without location info */
	    erts_set_current_function(&fi, rp->current);
	}
    }

    if (BIF_P->id == rp->id) {
	FunctionInfo fi2;

	/*
	 * The current function is erlang:process_info/{1,2},
	 * which is not the answer that the application want.
	 * We will use the function pointed into by rp->cp
	 * instead if it can be looked up.
	 */
	erts_lookup_function_info(&fi2, rp->cp, full_info);
	if (fi2.current) {
	    fi = fi2;
	    rp->current = fi2.current;
	}
    }

    /*
     * Return the result.
     */
    if (rp->current == NULL) {
	/* Unknown current function. */
	hp = HAlloc(BIF_P, 3);
	res = am_undefined;
    } else if (full_info) {
	hp = HAlloc(BIF_P, 3+fi.needed);
	hp = erts_build_mfa_item(&fi, hp, am_true, &res);
    } else {
	/* 4 words for the {M,F,A} tuple itself. */
	hp = HAlloc(BIF_P, 3+4);
	res = TUPLE3(hp, rp->current[0],
		     rp->current[1], make_small(rp->current[2]));
	hp += 4;
    }
    *hpp = hp;
    return res;
}
/*
 * Helper for process_info(Pid, current_stacktrace).
 *
 * Builds a list of MFA items (with location info) for at most 8 stack
 * frames of 'rp': the current instruction pointer, the continuation
 * pointer, and frames saved from the stack by erts_save_stacktrace().
 * Frames whose function cannot be looked up are silently dropped.
 *
 * Heap words are allocated on p's heap; *hpp is updated to the first
 * unused word.  Temporary buffers use ERTS_ALC_T_TMP and are freed
 * before returning.
 */
static Eterm
current_stacktrace(Process* p, Process* rp, Eterm** hpp)
{
    Uint sz;
    struct StackTrace* s;
    int depth;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Uint heap_size;
    int i;
    Eterm* hp = *hpp;
    Eterm mfa;
    Eterm res = NIL;

    depth = 8;	/* maximum number of frames reported */
    sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
    s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
    s->depth = 0;
    /* Seed the trace with the instruction pointer and the continuation
     * pointer (cp - 1 points into the calling instruction), then let
     * erts_save_stacktrace() fill the remaining slots from the stack. */
    if (rp->i) {
	s->trace[s->depth++] = rp->i;
	depth--;
    }
    if (depth > 0 && rp->cp != 0) {
	s->trace[s->depth++] = rp->cp - 1;
	depth--;
    }
    erts_save_stacktrace(rp, s, depth);

    depth = s->depth;
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
					     depth*sizeof(FunctionInfo));
    /* First pass: resolve each frame and sum the heap need
     * (+2 words per cons cell; 3 extra for the caller). */
    heap_size = 3;
    for (i = 0; i < depth; i++) {
	erts_lookup_function_info(stkp, s->trace[i], 1);
	if (stkp->current) {
	    heap_size += stkp->needed + 2;
	    stkp++;
	}
    }

    hp = HAlloc(p, heap_size);
    /* Second pass: walk the resolved frames backwards so the consed
     * list ends up in top-of-stack-first order. */
    while (stkp > stk) {
	stkp--;
	hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
	res = CONS(hp, mfa, res);
	hp += 2;
    }

    erts_free(ERTS_ALC_T_TMP, stk);
    erts_free(ERTS_ALC_T_TMP, s);
    *hpp = hp;
    return res;
}
  1519. #if defined(VALGRIND)
  1520. static int check_if_xml(void)
  1521. {
  1522. char buf[1];
  1523. size_t bufsz = sizeof(buf);
  1524. return erts_sys_getenv("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
  1525. }
  1526. #else
  1527. #define check_if_xml() 0
  1528. #endif
/*
 * This function takes care of calls to erlang:system_info/1 when the argument
 * is a tuple.
 *
 * 'tp' points at the first element AFTER the selector atom is read;
 * 'arity' is the untagged arity of the whole tuple.  Unrecognized
 * selectors/arities produce a badarg error.
 */
static BIF_RETTYPE
info_1_tuple(Process* BIF_P,	/* Pointer to current process. */
	     Eterm* tp,		/* Pointer to first element in tuple */
	     int arity)		/* Arity of tuple (untagged). */
{
    Eterm ret;
    Eterm sel;

    sel = *tp++;	/* selector atom; tp now points at the arguments */

    if (sel == am_allocator_sizes) {
	switch (arity) {
	case 2:
	    ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
	    return ret;
	case 3:
	    if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1))
		return am_true;
	    /* fall through -- request was rejected */
	default:
	    goto badarg;
	}
    }
    else if (sel == am_wordsize && arity == 2) {
	if (tp[0] == am_internal) {
	    return make_small(sizeof(Eterm));
	}
	if (tp[0] == am_external) {
	    return make_small(sizeof(UWord));
	}
	goto badarg;
    } else if (sel == am_allocated) {
	if (arity == 2) {
	    /* {allocated, Filename}: dump the memory map to a file. */
	    Eterm res = THE_NON_VALUE;
	    char *buf;
	    int len = is_string(*tp);
	    if (len <= 0)
		return res;
	    buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
	    if (intlist_to_buf(*tp, buf, len) != len)
		erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
	    buf[len] = '\0';
	    res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    if (is_non_value(res))
		goto badarg;
	    return res;
	}
	else if (arity == 3 && tp[0] == am_status) {
	    if (is_atom(tp[1]))
		return erts_instr_get_stat(BIF_P, tp[1], 1);
	    else {
		/* {allocated, status, Filename}: dump stats to a file. */
		Eterm res = THE_NON_VALUE;
		char *buf;
		int len = is_string(tp[1]);
		if (len <= 0)
		    return res;
		buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
		if (intlist_to_buf(tp[1], buf, len) != len)
		    erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
		buf[len] = '\0';
		res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		if (is_non_value(res))
		    goto badarg;
		return res;
	    }
	}
	else
	    goto badarg;
    } else if (sel == am_allocator) {
	switch (arity) {
	case 2:
	    ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
	    return ret;
	case 3:
	    if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0))
		return am_true;
	    /* fall through -- request was rejected */
	default:
	    goto badarg;
	}
    } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
	return erts_get_cpu_topology_term(BIF_P, *tp);
    } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
	Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
	if (res == THE_NON_VALUE)
	    goto badarg;
	ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
	return ret;
#if defined(PURIFY) || defined(VALGRIND)
    } else if (ERTS_IS_ATOM_STR("error_checker", sel)
#if defined(PURIFY)
	       || sel == am_purify
#elif defined(VALGRIND)
	       || ERTS_IS_ATOM_STR("valgrind", sel)
#endif
	) {
	if (*tp == am_memory) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
#elif defined(VALGRIND)
	    VALGRIND_DO_LEAK_CHECK;
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_fd) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
#elif defined(VALGRIND)
	    /* Not present in valgrind... */
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_running) {
#if defined(PURIFY)
	    BIF_RET(purify_is_running() ? am_true : am_false);
#elif defined(VALGRIND)
	    BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
#endif
	} else if (is_list(*tp)) {
	    /* Forward an iolist message to the error checker's log. */
#if defined(PURIFY)
#define ERTS_ERROR_CHECKER_PRINTF purify_printf
#define ERTS_ERROR_CHECKER_PRINTF_XML purify_printf
#elif defined(VALGRIND)
#define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#  ifndef HAVE_VALGRIND_PRINTF_XML
#    define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF
#  else
#    define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF_XML
#  endif
#endif
	    Uint buf_size = 8*1024; /* Try with 8KB first */
	    char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
	    int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
	    if (r < 0) {
		/* Did not fit; retry with an exactly-sized buffer. */
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		if (erts_iolist_size(*tp, &buf_size)) {
		    goto badarg;
		}
		buf_size++;
		buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
		r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
		ASSERT(r == buf_size - 1);
	    }
	    /* NOTE(review): r appears to be the unused space left in
	     * buf, so this terminates right after the written bytes --
	     * confirm against io_list_to_buf()'s contract. */
	    buf[buf_size - 1 - r] = '\0';
	    if (check_if_xml()) {
		ERTS_ERROR_CHECKER_PRINTF_XML("<erlang_info_log>"
					      "%s</erlang_info_log>\n", buf);
	    } else {
		ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
	    }
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
	}
#endif
#ifdef QUANTIFY
    } else if (sel == am_quantify) {
	if (*tp == am_clear) {
	    quantify_clear_data();
	    BIF_RET(am_true);
	} else if (*tp == am_start) {
	    quantify_start_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_stop) {
	    quantify_stop_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_running) {
	    BIF_RET(quantify_is_running() ? am_true : am_false);
	}
#endif
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
    } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
	unsigned long long tmp;
	int fd;
	int rc;

	if (arity != 2 || !is_small(*tp)) {
	    goto badarg;
	}
	tmp = signed_val(*tp);
	if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
	    BIF_RET(am_false);
	}
	rc = ioctl(fd, PERFMON_SETPCR, &tmp);
	close(fd);
	if (rc < 0) {
	    BIF_RET(am_false);
	}
	BIF_RET(am_true);
#endif
    }

 badarg:
    ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);

    return ret;
}
  1723. #define INFO_DSBUF_INC_SZ 256
  1724. static erts_dsprintf_buf_t *
  1725. grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1726. {
  1727. size_t size;
  1728. size_t free_size = dsbufp->size - dsbufp->str_len;
  1729. ASSERT(dsbufp);
  1730. if (need <= free_size)
  1731. return dsbufp;
  1732. size = need - free_size + INFO_DSBUF_INC_SZ;
  1733. size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
  1734. size += dsbufp->size;
  1735. ASSERT(dsbufp->str_len + need <= size);
  1736. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
  1737. (void *) dsbufp->str,
  1738. size);
  1739. dsbufp->size = size;
  1740. return dsbufp;
  1741. }
  1742. static erts_dsprintf_buf_t *
  1743. erts_create_info_dsbuf(Uint size)
  1744. {
  1745. Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
  1746. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
  1747. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
  1748. sizeof(erts_dsprintf_buf_t));
  1749. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  1750. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
  1751. dsbufp->str[0] = '\0';
  1752. dsbufp->size = init_size;
  1753. return dsbufp;
  1754. }
  1755. static void
  1756. erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
  1757. {
  1758. if (dsbufp->str)
  1759. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
  1760. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
  1761. }
/*
 * Builds the term describing the C compiler used to build the emulator:
 * {gnuc, Vsn} for gcc, {msc, Vsn} for MSVC, {undefined, undefined}
 * otherwise.  For gcc, Vsn is a bare integer when only __GNUC__ is
 * known, or a tuple of 2-3 integers when minor/patchlevel are known
 * (ERTS_GNUC_VSN_NUMS counts the available components).
 *
 * Usual two-pass build convention: call with szp to sum the heap need,
 * then with hpp to build the term.
 *
 * NOTE: the parentheses of the nested erts_bld_tuple() call are
 * deliberately balanced ACROSS the #if ERTS_GNUC_VSN_NUMS blocks --
 * edit with care.
 */
static Eterm
c_compiler_used(Eterm **hpp, Uint *szp)
{

#if defined(__GNUC__)
#  if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#    define ERTS_GNUC_VSN_NUMS 3
#  elif defined(__GNUC_MINOR__)
#    define ERTS_GNUC_VSN_NUMS 2
#  else
#    define ERTS_GNUC_VSN_NUMS 1
#  endif
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "gnuc"),
#if ERTS_GNUC_VSN_NUMS > 1
			  erts_bld_tuple(hpp,
					 szp,
					 ERTS_GNUC_VSN_NUMS,
#endif
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC__)
#ifdef __GNUC_MINOR__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_MINOR__)
#ifdef __GNUC_PATCHLEVEL__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_PATCHLEVEL__)
#endif
#endif
#if ERTS_GNUC_VSN_NUMS > 1
			      )
#endif
	);
#elif defined(_MSC_VER)
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "msc"),
			  erts_bld_uint(hpp, szp, (Uint) _MSC_VER));
#else
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  am_undefined,
			  am_undefined);
#endif

}
  1812. static int is_snif_term(Eterm module_atom) {
  1813. int i;
  1814. Atom *a = atom_tab(atom_val(module_atom));
  1815. char *aname = (char *) a->name;
  1816. /* if a->name has a '.' then the bif (snif) is bogus i.e a package */
  1817. for (i = 0; i < a->len; i++) {
  1818. if (aname[i] == '.')
  1819. return 0;
  1820. }
  1821. return 1;
  1822. }
  1823. static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
  1824. Eterm tup;
  1825. tup = erts_bld_tuple(hpp, szp, 3, bif_table[ix].module, bif_table[ix].name, make_small(bif_table[ix].arity));
  1826. res = erts_bld_cons( hpp, szp, tup, res);
  1827. return res;
  1828. }
  1829. static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
  1830. int i;
  1831. for (i = 0; i < BIF_SIZE; i++) {
  1832. if (is_snif_term(bif_table[i].module)) {
  1833. res = build_snif_term(hpp, szp, i, res);
  1834. }
  1835. }
  1836. return res;
  1837. }
  1838. BIF_RETTYPE system_info_1(BIF_ALIST_1)
  1839. {
  1840. Eterm res;
  1841. Eterm* hp;
  1842. Eterm val;
  1843. int i;
  1844. if (is_tuple(BIF_ARG_1)) {
  1845. Eterm* tp = tuple_val(BIF_ARG_1);
  1846. Uint arity = *tp++;
  1847. return info_1_tuple(BIF_P, tp, arityval(arity));
  1848. } else if (BIF_ARG_1 == am_scheduler_id) {
  1849. #ifdef ERTS_SMP
  1850. ASSERT(BIF_P->scheduler_data);
  1851. BIF_RET(make_small(BIF_P->scheduler_data->no));
  1852. #else
  1853. BIF_RET(make_small(1));
  1854. #endif
  1855. } else if (BIF_ARG_1 == am_compat_rel) {
  1856. ASSERT(erts_compat_rel > 0);
  1857. BIF_RET(make_small(erts_compat_rel));
  1858. } else if (BIF_ARG_1 == am_multi_scheduling) {
  1859. #ifndef ERTS_SMP
  1860. BIF_RET(am_disabled);
  1861. #else
  1862. if (erts_no_schedulers == 1)
  1863. BIF_RET(am_disabled);
  1864. else {
  1865. BIF_RET(erts_is_multi_scheduling_blocked()
  1866. ? am_blocked
  1867. : am_enabled);
  1868. }
  1869. #endif
  1870. } else if (BIF_ARG_1 == am_build_type) {
  1871. #if defined(DEBUG)
  1872. ERTS_DECL_AM(debug);
  1873. BIF_RET(AM_debug);
  1874. #elif defined(PURIFY)
  1875. ERTS_DECL_AM(purify);
  1876. BIF_RET(AM_purify);
  1877. #elif defined(QUANTIFY)
  1878. ERTS_DECL_AM(quantify);
  1879. BIF_RET(AM_quantify);
  1880. #elif defined(PURECOV)
  1881. ERTS_DECL_AM(purecov);
  1882. BIF_RET(AM_purecov);
  1883. #elif defined(ERTS_GCOV)
  1884. ERTS_DECL_AM(gcov);
  1885. BIF_RET(AM_gcov);
  1886. #elif defined(VALGRIND)
  1887. ERTS_DECL_AM(valgrind);
  1888. BIF_RET(AM_valgrind);
  1889. #elif defined(GPROF)
  1890. ERTS_DECL_AM(gprof);
  1891. BIF_RET(AM_gprof);
  1892. #elif defined(ERTS_ENABLE_LOCK_COUNT)
  1893. ERTS_DECL_AM(lcnt);
  1894. BIF_RET(AM_lcnt);
  1895. #else
  1896. BIF_RET(am_opt);
  1897. #endif
  1898. BIF_RET(res);
  1899. } else if (BIF_ARG_1 == am_allocated_areas) {
  1900. res = erts_allocated_areas(NULL, NULL, BIF_P);
  1901. BIF_RET(res);
  1902. } else if (BIF_ARG_1 == am_allocated) {
  1903. BIF_RET(erts_instr_get_memory_map(BIF_P));
  1904. } else if (BIF_ARG_1 == am_hipe_architecture) {
  1905. #if defined(HIPE)
  1906. BIF_RET(hipe_arch_name);
  1907. #else
  1908. BIF_RET(am_undefined);
  1909. #endif
  1910. } else if (BIF_ARG_1 == am_trace_control_word) {
  1911. BIF_RET(db_get_trace_control_word(BIF_P));
  1912. } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
  1913. BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
  1914. } else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
  1915. BIF_RET((erts_ets_always_compress) ? am_true : am_false);
  1916. } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
  1917. Uint size = 0;
  1918. Uint *szp;
  1919. szp = &size;
  1920. build_snifs_term(NULL, szp, NIL);
  1921. hp = HAlloc(BIF_P, size);
  1922. res = build_snifs_term(&hp, NULL, NIL);
  1923. BIF_RET(res);
  1924. } else if (BIF_ARG_1 == am_sequential_tracer) {
  1925. val = erts_get_system_seq_tracer();
  1926. ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false)
  1927. hp = HAlloc(BIF_P, 3);
  1928. res = TUPLE2(hp, am_sequential_tracer, val);
  1929. BIF_RET(res);
  1930. } else if (BIF_ARG_1 == am_garbage_collection){
  1931. Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
  1932. Eterm tup;
  1933. hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2);
  1934. tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
  1935. res = CONS(hp, tup, NIL); hp += 2;
  1936. tup = TUPLE2(hp, am_min_heap_size, make_small(H_MIN_SIZE)); hp += 3;
  1937. res = CONS(hp, tup, res); hp += 2;
  1938. tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
  1939. res = CONS(hp, tup, res); hp += 2;
  1940. BIF_RET(res);
  1941. } else if (BIF_ARG_1 == am_fullsweep_after){
  1942. Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
  1943. hp = HAlloc(BIF_P, 3);
  1944. res = TUPLE2(hp, am_fullsweep_after, make_small(val));
  1945. BIF_RET(res);
  1946. } else if (BIF_ARG_1 == am_min_heap_size) {
  1947. hp = HAlloc(BIF_P, 3);
  1948. res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
  1949. BIF_RET(res);
  1950. } else if (BIF_ARG_1 == am_min_bin_vheap_size) {
  1951. hp = HAlloc(BIF_P, 3);
  1952. res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
  1953. BIF_RET(res);
  1954. } else if (BIF_ARG_1 == am_process_count) {
  1955. BIF_RET(make_small(erts_process_count()));
  1956. } else if (BIF_ARG_1 == am_process_limit) {
  1957. BIF_RET(make_small(erts_max_processes));
  1958. } else if (BIF_ARG_1 == am_info
  1959. || BIF_ARG_1 == am_procs
  1960. || BIF_ARG_1 == am_loaded
  1961. || BIF_ARG_1 == am_dist) {
  1962. erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
  1963. /* Need to be the only thread running... */
  1964. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  1965. erts_smp_thr_progress_block();
  1966. if (BIF_ARG_1 == am_info)
  1967. info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1968. else if (BIF_ARG_1 == am_procs)
  1969. process_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1970. else if (BIF_ARG_1 == am_loaded)
  1971. loaded(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1972. else
  1973. distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  1974. erts_smp_thr_progress_unblock();
  1975. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  1976. ASSERT(dsbufp && dsbufp->str);
  1977. res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
  1978. erts_destroy_info_dsbuf(dsbufp);
  1979. BIF_RET(res);
  1980. } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
  1981. DistEntry *dep;
  1982. i = 0;
  1983. /* Need to be the only thread running... */
  1984. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  1985. erts_smp_thr_progress_block();
  1986. for (dep = erts_visible_dist_entries; dep; dep = dep->next)
  1987. ++i;
  1988. for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
  1989. ++i;
  1990. hp = HAlloc(BIF_P,i*(3+2));
  1991. res = NIL;
  1992. for (dep = erts_hidden_dist_entries; dep; dep = dep->next) {
  1993. Eterm tpl;
  1994. ASSERT(is_immed(dep->cid));
  1995. tpl = TUPLE2(hp, dep->sysname, dep->cid);
  1996. hp +=3;
  1997. res = CONS(hp, tpl, res);
  1998. hp += 2;
  1999. }
  2000. for (dep = erts_visible_dist_entries; dep; dep = dep->next) {
  2001. Eterm tpl;
  2002. ASSERT(is_immed(dep->cid));
  2003. tpl = TUPLE2(hp, dep->sysname, dep->cid);
  2004. hp +=3;
  2005. res = CONS(hp, tpl, res);
  2006. hp += 2;
  2007. }
  2008. erts_smp_thr_progress_unblock();
  2009. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  2010. BIF_RET(res);
  2011. } else if (BIF_ARG_1 == am_system_version) {
  2012. erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
  2013. erts_print_system_version(ERTS_PRINT_DSBUF, (void *) dsbufp, BIF_P);
  2014. hp = HAlloc(BIF_P, dsbufp->str_len*2);
  2015. res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
  2016. erts_destroy_tmp_dsbuf(dsbufp);
  2017. BIF_RET(res);
  2018. } else if (BIF_ARG_1 == am_system_architecture) {
  2019. hp = HAlloc(BIF_P, 2*(sizeof(ERLANG_ARCHITECTURE)-1));
  2020. BIF_RET(buf_to_intlist(&hp,
  2021. ERLANG_ARCHITECTURE,
  2022. sizeof(ERLANG_ARCHITECTURE)-1,
  2023. NIL));
  2024. }
  2025. else if (BIF_ARG_1 == am_memory_types) {
  2026. return erts_instr_get_type_info(BIF_P);
  2027. }
  2028. else if (BIF_ARG_1 == am_os_type) {
  2029. BIF_RET(os_type_tuple);
  2030. }
  2031. else if (BIF_ARG_1 == am_allocator) {
  2032. BIF_RET(erts_allocator_options((void *) BIF_P));
  2033. }
  2034. else if (BIF_ARG_1 == am_thread_pool_size) {
  2035. #ifdef USE_THREADS
  2036. extern int erts_async_max_threads;
  2037. #endif
  2038. int n;
  2039. #ifdef USE_THREADS
  2040. n = erts_async_max_threads;
  2041. #else
  2042. n = 0;
  2043. #endif
  2044. BIF_RET(make_small(n));
  2045. }
  2046. else if (BIF_ARG_1 == am_alloc_util_allocators) {
  2047. BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
  2048. }
  2049. else if (BIF_ARG_1 == am_elib_malloc) {
  2050. /* To be removed in R15 */
  2051. BIF_RET(am_false);
  2052. }
  2053. else if (BIF_ARG_1 == am_os_version) {
  2054. BIF_RET(os_version_tuple);
  2055. }
  2056. else if (BIF_ARG_1 == am_version) {
  2057. int n = strlen(ERLANG_VERSION);
  2058. hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
  2059. BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
  2060. }
  2061. else if (BIF_ARG_1 == am_machine) {
  2062. int n = strlen(EMULATOR);
  2063. hp = HAlloc(BIF_P, n*2);
  2064. BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
  2065. }
  2066. else if (BIF_ARG_1 == am_garbage_collection) {
  2067. BIF_RET(am_generational);
  2068. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  2069. } else if (BIF_ARG_1 == am_instruction_counts) {
  2070. #ifdef DEBUG
  2071. Eterm *endp;
  2072. #endif
  2073. Eterm *hp, **hpp;
  2074. Uint hsz, *hszp;
  2075. int i;
  2076. hpp = NULL;
  2077. hsz = 0;
  2078. hszp = &hsz;
  2079. bld_instruction_counts:
  2080. res = NIL;
  2081. for (i = num_instructions-1; i >= 0; i--) {
  2082. res = erts_bld_cons(hpp, hszp,
  2083. erts_bld_tuple(hpp, hszp, 2,
  2084. am_atom_put(opc[i].name,
  2085. strlen(opc[i].name)),
  2086. erts_bld_uint(hpp, hszp,
  2087. opc[i].count)),
  2088. res);
  2089. }
  2090. if (!hpp) {
  2091. hp = HAlloc(BIF_P, hsz);
  2092. hpp = &hp;
  2093. #ifdef DEBUG
  2094. endp = hp + hsz;
  2095. #endif
  2096. hszp = NULL;
  2097. goto bld_instruction_counts;
  2098. }
  2099. #ifdef DEBUG
  2100. ASSERT(endp == hp);
  2101. #endif
  2102. BIF_RET(res);
  2103. #endif /* #ifndef ERTS_SMP */
  2104. } else if (BIF_ARG_1 == am_wordsize) {
  2105. return make_small(sizeof(Eterm));
  2106. } else if (BIF_ARG_1 == am_endian) {
  2107. #if defined(WORDS_BIGENDIAN)
  2108. return am_big;
  2109. #else
  2110. return am_little;
  2111. #endif
  2112. } else if (BIF_ARG_1 == am_heap_sizes) {
  2113. return erts_heap_sizes(BIF_P);
  2114. } else if (BIF_ARG_1 == am_global_heaps_size) {
  2115. #ifdef HYBRID
  2116. Uint hsz = 0;
  2117. Uint sz = 0;
  2118. sz += global_heap_sz;
  2119. #ifdef INCREMENTAL
  2120. /* The size of the old generation is a bit hard to define here...
  2121. * The amount of live data in the last collection perhaps..? */
  2122. sz = 0;
  2123. #else
  2124. if (global_old_hend && global_old_heap)
  2125. sz += global_old_hend - global_old_heap;
  2126. #endif
  2127. sz *= sizeof(Eterm);
  2128. (void) erts_bld_uint(NULL, &hsz, sz);
  2129. hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
  2130. res = erts_bld_uint(&hp, NULL, sz);
  2131. #else
  2132. res = make_small(0);
  2133. #endif
  2134. return res;
  2135. } else if (BIF_ARG_1 == am_heap_type) {
  2136. #if defined(HYBRID)
  2137. return am_hybrid;
  2138. #else
  2139. return am_private;
  2140. #endif
  2141. } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
  2142. res = erts_get_cpu_topology_term(BIF_P, am_used);
  2143. BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
  2144. } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
  2145. if (erts_update_cpu_info()) {
  2146. ERTS_DECL_AM(changed);
  2147. BIF_RET(AM_changed);
  2148. }
  2149. else {
  2150. ERTS_DECL_AM(unchanged);
  2151. BIF_RET(AM_unchanged);
  2152. }
  2153. #if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
  2154. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
  2155. register unsigned high asm("%l0");
  2156. register unsigned low asm("%l1");
  2157. hp = HAlloc(BIF_P, 5);
  2158. asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
  2159. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2160. : "=r" (high), "=r" (low));
  2161. res = TUPLE4(hp, make_small(high >> 16),
  2162. make_small(high & 0xFFFF),
  2163. make_small(low >> 16),
  2164. make_small(low & 0xFFFF));
  2165. BIF_RET(res);
  2166. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick2", BIF_ARG_1)) {
  2167. register unsigned high asm("%l0");
  2168. register unsigned low asm("%l1");
  2169. asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
  2170. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2171. : "=r" (high), "=r" (low));
  2172. hp = HAlloc(BIF_P, 5);
  2173. res = TUPLE4(hp, make_small(high >> 16),
  2174. make_small(high & 0xFFFF),
  2175. make_small(low >> 16),
  2176. make_small(low & 0xFFFF));
  2177. BIF_RET(res);
  2178. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic1", BIF_ARG_1)) {
  2179. register unsigned high asm("%l0");
  2180. register unsigned low asm("%l1");
  2181. hp = HAlloc(BIF_P, 5);
  2182. asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
  2183. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2184. : "=r" (high), "=r" (low));
  2185. res = TUPLE4(hp, make_small(high >> 16),
  2186. make_small(high & 0xFFFF),
  2187. make_small(low >> 16),
  2188. make_small(low & 0xFFFF));
  2189. BIF_RET(res);
  2190. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic2", BIF_ARG_1)) {
  2191. register unsigned high asm("%l0");
  2192. register unsigned low asm("%l1");
  2193. asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
  2194. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2195. : "=r" (high), "=r" (low));
  2196. hp = HAlloc(BIF_P, 5);
  2197. res = TUPLE4(hp, make_small(high >> 16),
  2198. make_small(high & 0xFFFF),
  2199. make_small(low >> 16),
  2200. make_small(low & 0xFFFF));
  2201. BIF_RET(res);
  2202. #endif
  2203. } else if (BIF_ARG_1 == am_threads) {
  2204. #ifdef USE_THREADS
  2205. return am_true;
  2206. #else
  2207. return am_false;
  2208. #endif
  2209. } else if (BIF_ARG_1 == am_creation) {
  2210. return make_small(erts_this_node->creation);
  2211. } else if (BIF_ARG_1 == am_break_ignored) {
  2212. extern int ignore_break;
  2213. if (ignore_break)
  2214. return am_true;
  2215. else
  2216. return am_false;
  2217. }
  2218. /* Arguments that are unusual follow ... */
  2219. else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
  2220. int no;
  2221. erts_get_logical_processors(&no, NULL, NULL);
  2222. if (no > 0)
  2223. BIF_RET(make_small((Uint) no));
  2224. else {
  2225. DECL_AM(unknown);
  2226. BIF_RET(AM_unknown);
  2227. }
  2228. }
  2229. else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
  2230. int no;
  2231. erts_get_logical_processors(NULL, &no, NULL);
  2232. if (no > 0)
  2233. BIF_RET(make_small((Uint) no));
  2234. else {
  2235. DECL_AM(unknown);
  2236. BIF_RET(AM_unknown);
  2237. }
  2238. }
  2239. else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
  2240. int no;
  2241. erts_get_logical_processors(NULL, NULL, &no);
  2242. if (no > 0)
  2243. BIF_RET(make_small((Uint) no));
  2244. else {
  2245. DECL_AM(unknown);
  2246. BIF_RET(AM_unknown);
  2247. }
  2248. } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
  2249. int n = sizeof(ERLANG_OTP_RELEASE)-1;
  2250. hp = HAlloc(BIF_P, 2*n);
  2251. BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_RELEASE, n, NIL));
  2252. } else if (ERTS_IS_ATOM_STR("driver_version", BIF_ARG_1)) {
  2253. char buf[42];
  2254. int n = erts_snprintf(buf, 42, "%d.%d",
  2255. ERL_DRV_EXTENDED_MAJOR_VERSION,
  2256. ERL_DRV_EXTENDED_MINOR_VERSION);
  2257. hp = HAlloc(BIF_P, 2*n);
  2258. BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
  2259. } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
  2260. #ifdef ERTS_SMP
  2261. BIF_RET(am_true);
  2262. #else
  2263. BIF_RET(am_false);
  2264. #endif
  2265. } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
  2266. BIF_RET(erts_bound_schedulers_term(BIF_P));
  2267. } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
  2268. BIF_RET(erts_get_schedulers_binds(BIF_P));
  2269. } else if (ERTS_IS_ATOM_STR("constant_pool_support", BIF_ARG_1)) {
  2270. BIF_RET(am_true);
  2271. } else if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)
  2272. || ERTS_IS_ATOM_STR("schedulers_total", BIF_ARG_1)) {
  2273. res = make_small(erts_no_schedulers);
  2274. BIF_RET(res);
  2275. } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
  2276. #ifndef ERTS_SMP
  2277. Eterm *hp = HAlloc(BIF_P, 4);
  2278. res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
  2279. BIF_RET(res);
  2280. #else
  2281. Uint total, online, active;
  2282. switch (erts_schedulers_state(&total,
  2283. &online,
  2284. &active,
  2285. 1)) {
  2286. case ERTS_SCHDLR_SSPND_DONE: {
  2287. Eterm *hp = HAlloc(BIF_P, 4);
  2288. res = TUPLE3(hp,
  2289. make_small(total),
  2290. make_small(online),
  2291. make_small(active));
  2292. BIF_RET(res);
  2293. }
  2294. case ERTS_SCHDLR_SSPND_YIELD_RESTART:
  2295. ERTS_VBUMP_ALL_REDS(BIF_P);
  2296. BIF_TRAP1(bif_export[BIF_system_info_1],
  2297. BIF_P, BIF_ARG_1);
  2298. default:
  2299. ASSERT(0);
  2300. BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
  2301. }
  2302. #endif
  2303. } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
  2304. #ifndef ERTS_SMP
  2305. BIF_RET(make_small(1));
  2306. #else
  2307. Uint total, online, active;
  2308. switch (erts_schedulers_state(&total, &online, &active, 1)) {
  2309. case ERTS_SCHDLR_SSPND_DONE:
  2310. BIF_RET(make_small(online));
  2311. case ERTS_SCHDLR_SSPND_YIELD_RESTART:
  2312. ERTS_VBUMP_ALL_REDS(BIF_P);
  2313. BIF_TRAP1(bif_export[BIF_system_info_1],
  2314. BIF_P, BIF_ARG_1);
  2315. default:
  2316. ASSERT(0);
  2317. BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
  2318. }
  2319. #endif
  2320. } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
  2321. #ifndef ERTS_SMP
  2322. BIF_RET(make_small(1));
  2323. #else
  2324. Uint total, online, active;
  2325. switch (erts_schedulers_state(&total, &online, &active, 1)) {
  2326. case ERTS_SCHDLR_SSPND_DONE:
  2327. BIF_RET(make_small(active));
  2328. case ERTS_SCHDLR_SSPND_YIELD_RESTART:
  2329. ERTS_VBUMP_ALL_REDS(BIF_P);
  2330. BIF_TRAP1(bif_export[BIF_system_info_1],
  2331. BIF_P, BIF_ARG_1);
  2332. default:
  2333. ASSERT(0);
  2334. BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
  2335. }
  2336. #endif
  2337. } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
  2338. res = make_small(erts_no_run_queues);
  2339. BIF_RET(res);
  2340. } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
  2341. Eterm *hp = NULL;
  2342. Uint sz = 0;
  2343. (void) c_compiler_used(NULL, &sz);
  2344. if (sz)
  2345. hp = HAlloc(BIF_P, sz);
  2346. BIF_RET(c_compiler_used(&hp, NULL));
  2347. } else if (ERTS_IS_ATOM_STR("stop_memory_trace", BIF_ARG_1)) {
  2348. erts_mtrace_stop();
  2349. BIF_RET(am_true);
  2350. } else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
  2351. BIF_RET(make_small(CONTEXT_REDS));
  2352. } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
  2353. #ifdef ERTS_ENABLE_KERNEL_POLL
  2354. BIF_RET(erts_use_kernel_poll ? am_true : am_false);
  2355. #else
  2356. BIF_RET(am_false);
  2357. #endif
  2358. } else if (ERTS_IS_ATOM_STR("lock_checking", BIF_ARG_1)) {
  2359. #ifdef ERTS_ENABLE_LOCK_CHECK
  2360. BIF_RET(am_true);
  2361. #else
  2362. BIF_RET(am_false);
  2363. #endif
  2364. } else if (ERTS_IS_ATOM_STR("lock_counting", BIF_ARG_1)) {
  2365. #ifdef ERTS_ENABLE_LOCK_COUNT
  2366. BIF_RET(am_true);
  2367. #else
  2368. BIF_RET(am_false);
  2369. #endif
  2370. } else if (ERTS_IS_ATOM_STR("debug_compiled", BIF_ARG_1)) {
  2371. #ifdef DEBUG
  2372. BIF_RET(am_true);
  2373. #else
  2374. BIF_RET(am_false);
  2375. #endif
  2376. } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
  2377. BIF_RET(erts_check_io_info(BIF_P));
  2378. } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
  2379. #ifndef ERTS_SMP
  2380. BIF_RET(NIL);
  2381. #else
  2382. if (erts_no_schedulers == 1)
  2383. BIF_RET(NIL);
  2384. else
  2385. BIF_RET(erts_multi_scheduling_blockers(BIF_P));
  2386. #endif
  2387. } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
  2388. BIF_RET(ERTS_USE_MODIFIED_TIMING()
  2389. ? make_small(erts_modified_timing_level)
  2390. : am_undefined);
  2391. } else if (ERTS_IS_ATOM_STR("port_tasks", BIF_ARG_1)) {
  2392. BIF_RET(am_true);
  2393. } else if (ERTS_IS_ATOM_STR("io_thread", BIF_ARG_1)) {
  2394. BIF_RET(am_false);
  2395. } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
  2396. BIF_RET(erts_sched_stat_term(BIF_P, 0));
  2397. } else if (ERTS_IS_ATOM_STR("total_scheduling_statistics", BIF_ARG_1)) {
  2398. BIF_RET(erts_sched_stat_term(BIF_P, 1));
  2399. } else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
  2400. BIF_RET(erts_nif_taints(BIF_P));
  2401. } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
  2402. BIF_RET(erts_get_reader_groups_map(BIF_P));
  2403. } else if (ERTS_IS_ATOM_STR("dist_buf_busy_limit", BIF_ARG_1)) {
  2404. Uint hsz = 0;
  2405. (void) erts_bld_uint(NULL, &hsz, erts_dist_buf_busy_limit);
  2406. hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
  2407. res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
  2408. BIF_RET(res);
  2409. } else if (ERTS_IS_ATOM_STR("print_ethread_info", BIF_ARG_1)) {
  2410. #if defined(ETHR_NATIVE_ATOMIC32_IMPL) \
  2411. || defined(ETHR_NATIVE_ATOMIC64_IMPL) \
  2412. || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
  2413. int i;
  2414. char **str;
  2415. #endif
  2416. #ifdef ETHR_NATIVE_ATOMIC32_IMPL
  2417. erts_printf("32-bit native atomics: %s\n",
  2418. ETHR_NATIVE_ATOMIC32_IMPL);
  2419. str = ethr_native_atomic32_ops();
  2420. for (i = 0; str[i]; i++)
  2421. erts_printf("ethr_native_atomic32_%s()\n", str[i]);
  2422. #endif
  2423. #ifdef ETHR_NATIVE_ATOMIC64_IMPL
  2424. erts_printf("64-bit native atomics: %s\n",
  2425. ETHR_NATIVE_ATOMIC64_IMPL);
  2426. str = ethr_native_atomic64_ops();
  2427. for (i = 0; str[i]; i++)
  2428. erts_printf("ethr_native_atomic64_%s()\n", str[i]);
  2429. #endif
  2430. #ifdef ETHR_NATIVE_DW_ATOMIC_IMPL
  2431. if (ethr_have_native_dw_atomic()) {
  2432. erts_printf("Double word native atomics: %s\n",
  2433. ETHR_NATIVE_DW_ATOMIC_IMPL);
  2434. str = ethr_native_dw_atomic_ops();
  2435. for (i = 0; str[i]; i++)
  2436. erts_printf("ethr_native_dw_atomic_%s()\n", str[i]);
  2437. str = ethr_native_su_dw_atomic_ops();
  2438. for (i = 0; str[i]; i++)
  2439. erts_printf("ethr_native_su_dw_atomic_%s()\n", str[i]);
  2440. }
  2441. #endif
  2442. #ifdef ETHR_NATIVE_SPINLOCK_IMPL
  2443. erts_printf("Native spin-locks: %s\n", ETHR_NATIVE_SPINLOCK_IMPL);
  2444. #endif
  2445. #ifdef ETHR_NATIVE_RWSPINLOCK_IMPL
  2446. erts_printf("Native rwspin-locks: %s\n", ETHR_NATIVE_RWSPINLOCK_IMPL);
  2447. #endif
  2448. #ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
  2449. erts_printf("SSE2 support: %s\n", (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
  2450. ? "yes" : "no"));
  2451. #endif
  2452. #ifdef ETHR_X86_OUT_OF_ORDER
  2453. erts_printf("x86"
  2454. #ifdef ARCH_64
  2455. "_64"
  2456. #endif
  2457. " out of order\n");
  2458. #endif
  2459. #ifdef ETHR_SPARC_TSO
  2460. erts_printf("Sparc TSO\n");
  2461. #endif
  2462. #ifdef ETHR_SPARC_PSO
  2463. erts_printf("Sparc PSO\n");
  2464. #endif
  2465. #ifdef ETHR_SPARC_RMO
  2466. erts_printf("Sparc RMO\n");
  2467. #endif
  2468. #if defined(ETHR_PPC_HAVE_LWSYNC)
  2469. erts_printf("Have lwsync instruction: yes\n");
  2470. #elif defined(ETHR_PPC_HAVE_NO_LWSYNC)
  2471. erts_printf("Have lwsync instruction: no\n");
  2472. #elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__)
  2473. erts_printf("Have lwsync instruction: %s (runtime test)\n",
  2474. ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no");
  2475. #endif
  2476. BIF_RET(am_true);
  2477. }
  2478. #ifdef ERTS_SMP
  2479. else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
  2480. erts_thr_progress_dbg_print_state();
  2481. BIF_RET(am_true);
  2482. }
  2483. #endif
  2484. BIF_ERROR(BIF_P, BADARG);
  2485. }
  2486. BIF_RETTYPE
  2487. port_info_1(BIF_ALIST_1)
  2488. {
  2489. Process* p = BIF_P;
  2490. Eterm pid = BIF_ARG_1;
  2491. static Eterm keys[] = {
  2492. am_name,
  2493. am_links,
  2494. am_id,
  2495. am_connected,
  2496. am_input,
  2497. am_output
  2498. };
  2499. Eterm items[ASIZE(keys)];
  2500. Eterm result = NIL;
  2501. Eterm reg_name;
  2502. Eterm* hp;
  2503. Uint need;
  2504. int i;
  2505. /*
  2506. * Collect all information about the port.
  2507. */
  2508. for (i = 0; i < ASIZE(keys); i++) {
  2509. Eterm item;
  2510. item = port_info(p, pid, keys[i]);
  2511. if (is_non_value(item)) {
  2512. return THE_NON_VALUE;
  2513. }
  2514. if (item == am_undefined) {
  2515. return am_undefined;
  2516. }
  2517. items[i] = item;
  2518. }
  2519. reg_name = port_info(p, pid, am_registered_name);
  2520. /*
  2521. * Build the resulting list.
  2522. */
  2523. need = 2*ASIZE(keys);
  2524. if (is_tuple(reg_name)) {
  2525. need += 2;
  2526. }
  2527. hp = HAlloc(p, need);
  2528. for (i = ASIZE(keys) - 1; i >= 0; i--) {
  2529. result = CONS(hp, items[i], result);
  2530. hp += 2;
  2531. }
  2532. if (is_tuple(reg_name)) {
  2533. result = CONS(hp, reg_name, result);
  2534. }
  2535. return result;
  2536. }
  2537. /**********************************************************************/
  2538. /* Return information on ports */
  2539. /* Info:
  2540. ** id Port index
  2541. ** connected (Pid)
  2542. ** links List of pids
  2543. ** name String
  2544. ** input Number of bytes input from port program
  2545. ** output Number of bytes output to the port program
  2546. */
  2547. BIF_RETTYPE port_info_2(BIF_ALIST_2)
  2548. {
  2549. return port_info(BIF_P, BIF_ARG_1, BIF_ARG_2);
  2550. }
  2551. static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
  2552. {
  2553. BIF_RETTYPE ret;
  2554. Port *prt;
  2555. Eterm res;
  2556. Eterm* hp;
  2557. int count;
  2558. if (is_internal_port(portid))
  2559. prt = erts_id2port(portid, p, ERTS_PROC_LOCK_MAIN);
  2560. else if (is_atom(portid))
  2561. erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
  2562. portid, NULL, 0, 0, &prt);
  2563. else if (is_external_port(portid)
  2564. && external_port_dist_entry(portid) == erts_this_dist_entry)
  2565. BIF_RET(am_undefined);
  2566. else {
  2567. BIF_ERROR(p, BADARG);
  2568. }
  2569. if (!prt) {
  2570. BIF_RET(am_undefined);
  2571. }
  2572. if (item == am_id) {
  2573. hp = HAlloc(p, 3);
  2574. res = make_small(internal_port_number(portid));
  2575. }
  2576. else if (item == am_links) {
  2577. MonitorInfoCollection mic;
  2578. int i;
  2579. Eterm item;
  2580. INIT_MONITOR_INFOS(mic);
  2581. erts_doforall_links(prt->nlinks, &collect_one_link, &mic);
  2582. hp = HAlloc(p, 3 + mic.sz);
  2583. res = NIL;
  2584. for (i = 0; i < mic.mi_i; i++) {
  2585. item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
  2586. res = CONS(hp, item, res);
  2587. hp += 2;
  2588. }
  2589. DESTROY_MONITOR_INFOS(mic);
  2590. }
  2591. else if (item == am_monitors) {
  2592. MonitorInfoCollection mic;
  2593. int i;
  2594. Eterm item;
  2595. INIT_MONITOR_INFOS(mic);
  2596. erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic);
  2597. hp = HAlloc(p, 3 + mic.sz);
  2598. res = NIL;
  2599. for (i = 0; i < mic.mi_i; i++) {
  2600. Eterm t;
  2601. item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
  2602. t = TUPLE2(hp, am_process, item);
  2603. hp += 3;
  2604. res = CONS(hp, t, res);
  2605. hp += 2;
  2606. }
  2607. DESTROY_MONITOR_INFOS(mic);
  2608. }
  2609. else if (item == am_name) {
  2610. count = sys_strlen(prt->name);
  2611. hp = HAlloc(p, 3 + 2*count);
  2612. res = buf_to_intlist(&hp, prt->name, count, NIL);
  2613. }
  2614. else if (item == am_connected) {
  2615. hp = HAlloc(p, 3);
  2616. res = prt->connected; /* internal pid */
  2617. }
  2618. else if (item == am_input) {
  2619. Uint hsz = 3;
  2620. Uint n = prt->bytes_in;
  2621. (void) erts_bld_uint(NULL, &hsz, n);
  2622. hp = HAlloc(p, hsz);
  2623. res = erts_bld_uint(&hp, NULL, n);
  2624. }
  2625. else if (item == am_output) {
  2626. Uint hsz = 3;
  2627. Uint n = prt->bytes_out;
  2628. (void) erts_bld_uint(NULL, &hsz, n);
  2629. hp = HAlloc(p, hsz);
  2630. res = erts_bld_uint(&hp, NULL, n);
  2631. }
  2632. else if (item == am_registered_name) {
  2633. RegProc *reg;
  2634. reg = prt->reg;
  2635. if (reg == NULL) {
  2636. ERTS_BIF_PREP_RET(ret, NIL);
  2637. goto done;
  2638. } else {
  2639. hp = HAlloc(p, 3);
  2640. res = reg->name;
  2641. }
  2642. }
  2643. else if (item == am_memory) {
  2644. /* All memory consumed in bytes (the Port struct should not be
  2645. included though).
  2646. */
  2647. Uint hsz = 3;
  2648. Uint size = 0;
  2649. ErlHeapFragment* bp;
  2650. hp = HAlloc(p, 3);
  2651. erts_doforall_links(prt->nlinks, &one_link_size, &size);
  2652. for (bp = prt->bp; bp; bp = bp->next)
  2653. size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm);
  2654. if (prt->linebuf)
  2655. size += sizeof(LineBuf) + prt->linebuf->ovsiz;
  2656. /* ... */
  2657. /* All memory allocated by the driver should be included, but it is
  2658. hard to retrieve... */
  2659. (void) erts_bld_uint(NULL, &hsz, size);
  2660. hp = HAlloc(p, hsz);
  2661. res = erts_bld_uint(&hp, NULL, size);
  2662. }
  2663. else if (item == am_queue_size) {
  2664. Uint ioq_size = erts_port_ioq_size(prt);
  2665. Uint hsz = 3;
  2666. (void) erts_bld_uint(NULL, &hsz, ioq_size);
  2667. hp = HAlloc(p, hsz);
  2668. res = erts_bld_uint(&hp, NULL, ioq_size);
  2669. }
  2670. else if (ERTS_IS_ATOM_STR("locking", item)) {
  2671. hp = HAlloc(p, 3);
  2672. #ifndef ERTS_SMP
  2673. res = am_false;
  2674. #else
  2675. if (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
  2676. DECL_AM(port_level);
  2677. ASSERT(prt->drv_ptr->flags
  2678. & ERL_DRV_FLAG_USE_PORT_LOCKING);
  2679. res = AM_port_level;
  2680. }
  2681. else {
  2682. DECL_AM(driver_level);
  2683. ASSERT(!(prt->drv_ptr->flags
  2684. & ERL_DRV_FLAG_USE_PORT_LOCKING));
  2685. res = AM_driver_level;
  2686. }
  2687. #endif
  2688. }
  2689. else {
  2690. ERTS_BIF_PREP_ERROR(ret, p, BADARG);
  2691. goto done;
  2692. }
  2693. ERTS_BIF_PREP_RET(ret, TUPLE2(hp, item, res));
  2694. done:
  2695. erts_smp_port_unlock(prt);
  2696. return ret;
  2697. }
/*
 * erlang:fun_info/2: return one {Item, Value} tuple of information
 * about a fun. Local (lambda) funs and external funs (fun M:F/A)
 * support the same set of items, but most items are 'undefined' for
 * external funs. Fails with badarg for unknown items or non-funs.
 */
BIF_RETTYPE
fun_info_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm fun = BIF_ARG_1;
    Eterm what = BIF_ARG_2;
    Eterm* hp;
    Eterm val;

    if (is_fun(fun)) {
	/* Local fun: values come from the fun thing itself and its
	 * shared ErlFunEntry (funp->fe). */
	ErlFunThing* funp = (ErlFunThing *) fun_val(fun);

	switch (what) {
	case am_type:
	    hp = HAlloc(p, 3);
	    val = am_local;
	    break;
	case am_pid:
	    /* Pid of the process that created the fun. */
	    hp = HAlloc(p, 3);
	    val = funp->creator;
	    break;
	case am_module:
	    hp = HAlloc(p, 3);
	    val = funp->fe->module;
	    break;
	case am_new_index:
	    hp = HAlloc(p, 3);
	    val = make_small(funp->fe->index);
	    break;
	case am_new_uniq:
	    /* new_binary() allocates on the process heap itself, so it
	     * must run before the HAlloc for the result tuple. */
	    val = new_binary(p, funp->fe->uniq, 16);
	    hp = HAlloc(p, 3);
	    break;
	case am_index:
	    hp = HAlloc(p, 3);
	    val = make_small(funp->fe->old_index);
	    break;
	case am_uniq:
	    hp = HAlloc(p, 3);
	    val = make_small(funp->fe->old_uniq);
	    break;
	case am_env:
	    {
		/* Build the list of free variables captured by the fun,
		 * consing from the last one so the list order matches
		 * the environment order. */
		Uint num_free = funp->num_free;
		int i;

		hp = HAlloc(p, 3 + 2*num_free);
		val = NIL;
		for (i = num_free-1; i >= 0; i--) {
		    val = CONS(hp, funp->env[i], val);
		    hp += 2;
		}
	    }
	    break;
	case am_refc:
	    /* erts_make_integer() may allocate on the heap, so call it
	     * before the HAlloc for the result tuple. */
	    val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p);
	    hp = HAlloc(p, 3);
	    break;
	case am_arity:
	    hp = HAlloc(p, 3);
	    val = make_small(funp->arity);
	    break;
	case am_name:
	    /* Generated function name, stored just before the fun's
	     * entry address. */
	    hp = HAlloc(p, 3);
	    val = funp->fe->address[-2];
	    break;
	default:
	    goto error;
	}
    } else if (is_export(fun)) {
	/* External fun (fun M:F/A): module/name/arity come from the
	 * export entry; everything else is undefined or trivial. */
	Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
	switch (what) {
	case am_type:
	    hp = HAlloc(p, 3);
	    val = am_external;
	    break;
	case am_pid:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_module:
	    hp = HAlloc(p, 3);
	    val = exp->code[0];
	    break;
	case am_new_index:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_new_uniq:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_index:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_uniq:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_env:
	    /* External funs capture no environment. */
	    hp = HAlloc(p, 3);
	    val = NIL;
	    break;
	case am_refc:
	    hp = HAlloc(p, 3);
	    val = am_undefined;
	    break;
	case am_arity:
	    hp = HAlloc(p, 3);
	    val = make_small(exp->code[2]);
	    break;
	case am_name:
	    hp = HAlloc(p, 3);
	    val = exp->code[1];
	    break;
	default:
	    goto error;
	}
    } else {
    error:
	BIF_ERROR(p, BADARG);
    }
    return TUPLE2(hp, what, val);
}
  2820. BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
  2821. {
  2822. if(is_internal_pid(BIF_ARG_1)) {
  2823. Process *rp;
  2824. if (BIF_ARG_1 == BIF_P->id)
  2825. BIF_RET(am_true);
  2826. if(internal_pid_index(BIF_ARG_1) >= erts_max_processes)
  2827. BIF_ERROR(BIF_P, BADARG);
  2828. rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
  2829. BIF_ARG_1, ERTS_PROC_LOCK_STATUS);
  2830. if (!rp) {
  2831. BIF_RET(am_false);
  2832. }
  2833. else {
  2834. int have_pending_exit = ERTS_PROC_PENDING_EXIT(rp);
  2835. erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
  2836. if (have_pending_exit)
  2837. ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false);
  2838. else
  2839. BIF_RET(am_true);
  2840. }
  2841. }
  2842. else if(is_external_pid(BIF_ARG_1)) {
  2843. if(external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
  2844. BIF_RET(am_false); /* A pid from an old incarnation of this node */
  2845. else
  2846. BIF_ERROR(BIF_P, BADARG);
  2847. }
  2848. else {
  2849. BIF_ERROR(BIF_P, BADARG);
  2850. }
  2851. }
/*
 * erlang:process_display/2: dump a stack backtrace of the given
 * process to stderr. Only the 'backtrace' option is supported.
 */
BIF_RETTYPE process_display_2(BIF_ALIST_2)
{
    Process *rp;

    if (BIF_ARG_2 != am_backtrace)
	BIF_ERROR(BIF_P, BADARG);

    /* Take all locks on the target so its stack is stable while we
     * dump it. */
    rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
			     BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
    if(!rp) {
	BIF_ERROR(BIF_P, BADARG);
    }
    if (rp == ERTS_PROC_LOCK_BUSY)
	/* Could not get the locks without risking deadlock: yield and
	 * retry this BIF from the start. */
	ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P,
			BIF_ARG_1, BIF_ARG_2);
    if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
	/* Target is about to exit: release it, wait for the exit to
	 * complete, then re-apply erlang:process_display/2. */
	Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
	erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL);
	ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
				    BIF_ARG_1,
				    am_erlang,
				    am_process_display,
				    args,
				    2);
    }
    erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
#ifdef ERTS_SMP
    /* Keep our own main lock when we dumped ourselves; otherwise
     * release everything we took above. */
    erts_smp_proc_unlock(rp, (BIF_P == rp
			      ? ERTS_PROC_LOCKS_ALL_MINOR
			      : ERTS_PROC_LOCKS_ALL));
#endif
    BIF_RET(am_true);
}
/* this is a general call which return some possibly useful information */
/*
 * erlang:statistics/1 implementation.
 *
 * Dispatches on the atom argument and returns the matching statistics
 * term, built on the calling process' heap.  Unknown arguments raise
 * badarg.  Counter pairs use the common "{Total, SinceLastCall}" shape;
 * several branches use the two-pass erts_bld_* pattern: first pass
 * (NULL heap pointer) computes the needed heap size, second pass builds
 * the term into the HAlloc'ed space.
 */
BIF_RETTYPE statistics_1(BIF_ALIST_1)
{
    Eterm res;
    Eterm* hp;

    if (BIF_ARG_1 == am_scheduler_wall_time) {
        /* Data is gathered asynchronously from the schedulers; trap into
         * the gathering BIF until the result is complete. */
        res = erts_sched_wall_time_request(BIF_P, 0, 0);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
    } else if (BIF_ARG_1 == am_context_switches) {
        /* {TotalContextSwitches, 0} */
        Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, cs, SMALL_ZERO);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_garbage_collection) {
        /* {NumberOfGCs, WordsReclaimed, 0} */
        Uint hsz = 4; /* room for the 3-tuple itself */
        ErtsGCInfo gc_info;
        Eterm gcs;
        Eterm recl;
        erts_gc_info(&gc_info);
        /* size pass */
        (void) erts_bld_uint(NULL, &hsz, gc_info.garbage_collections);
        (void) erts_bld_uint(NULL, &hsz, gc_info.reclaimed);
        hp = HAlloc(BIF_P, hsz);
        /* build pass */
        gcs = erts_bld_uint(&hp, NULL, gc_info.garbage_collections);
        recl = erts_bld_uint(&hp, NULL, gc_info.reclaimed);
        res = TUPLE3(hp, gcs, recl, SMALL_ZERO);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_reductions) {
        /* {TotalReductions, ReductionsSinceLastCall} */
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;
        erts_get_total_reductions(&reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_exact_reductions) {
        /* Same as 'reductions' but via the exact (more expensive) counter. */
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;
        erts_get_exact_total_reductions(BIF_P, &reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_runtime) {
        /* {TotalRunTime, TimeSinceLastCall} */
        UWord u1, u2, dummy;
        Eterm b1, b2;
        elapsed_time_both(&u1,&dummy,&u2,&dummy);
        b1 = erts_make_integer(u1,BIF_P);
        b2 = erts_make_integer(u2,BIF_P);
        hp = HAlloc(BIF_P,3);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_run_queue) {
        /* Total length over all run queues.
         * NOTE(review): the length is stuffed into a small without a
         * range check -- confirm erts_run_queues_len() cannot exceed
         * MAX_SMALL here. */
        res = erts_run_queues_len(NULL);
        BIF_RET(make_small(res));
    } else if (BIF_ARG_1 == am_wall_clock) {
        /* {TotalWallclockTime, WallclockTimeSinceLastCall} */
        UWord w1, w2;
        Eterm b1, b2;
        wall_clock_elapsed_time_both(&w1, &w2);
        b1 = erts_make_integer((Uint) w1,BIF_P);
        b2 = erts_make_integer((Uint) w2,BIF_P);
        hp = HAlloc(BIF_P,3);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_io) {
        /* {{input, BytesIn}, {output, BytesOut}} */
        Eterm r1, r2;
        Eterm in, out;
        Uint hsz = 9; /* three 2-tuples */
        Uint bytes_in = (Uint) erts_smp_atomic_read_nob(&erts_bytes_in);
        Uint bytes_out = (Uint) erts_smp_atomic_read_nob(&erts_bytes_out);
        (void) erts_bld_uint(NULL, &hsz, bytes_in);
        (void) erts_bld_uint(NULL, &hsz, bytes_out);
        hp = HAlloc(BIF_P, hsz);
        in = erts_bld_uint(&hp, NULL, bytes_in);
        out = erts_bld_uint(&hp, NULL, bytes_out);
        r1 = TUPLE2(hp, am_input, in);
        hp += 3;
        r2 = TUPLE2(hp, am_output, out);
        hp += 3;
        BIF_RET(TUPLE2(hp, r1, r2));
    }
    else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
        /* Tuple with the individual length of every run queue.
         * Classic two-iteration build: first lap (hpp == NULL) only
         * accumulates the size, second lap builds into the allocation. */
        Eterm res, *hp, **hpp;
        Uint sz, *szp;
        int no_qs = erts_no_run_queues;
        Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
        (void) erts_run_queues_len(qszs);
        sz = 0;
        szp = &sz;
        hpp = NULL;
        while (1) {
            int i;
            /* second half of qszs holds the built Eterms */
            for (i = 0; i < no_qs; i++)
                qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
            res = erts_bld_tuplev(hpp, szp, no_qs, &qszs[no_qs]);
            if (hpp) {
                erts_free(ERTS_ALC_T_TMP, qszs);
                BIF_RET(res);
            }
            hp = HAlloc(BIF_P, sz);
            szp = NULL;
            hpp = &hp;
        }
    }
    BIF_ERROR(BIF_P, BADARG);
}
/*
 * error_logger:warning_map/0 implementation: returns the global
 * warning-mapping atom (erts_error_logger_warnings, set elsewhere --
 * presumably from the +W emulator flag; confirm against erl_init).
 */
BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
{
    BIF_RET(erts_error_logger_warnings);
}
  3004. static erts_smp_atomic_t available_internal_state;
/*
 * erts_debug:get_internal_state(What) -- read-only access to emulator
 * internals for OTP test suites.  Raises EXC_UNDEF until it has been
 * enabled via erts_debug:set_internal_state(available_internal_state,
 * true); unknown requests raise badarg.
 *
 * What is either a plain atom, or a 2-tuple {Request, Argument}.
 */
BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     */
    if (!erts_smp_atomic_read_nob(&available_internal_state)) {
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {
        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Used by (emulator) */
            BIF_RET(make_small((Uint) ERTS_BIF_REDS_LEFT(BIF_P)));
        }
        else if (ERTS_IS_ATOM_STR("node_and_dist_references", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Eterm res = erts_get_node_and_dist_references(BIF_P);
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("monitoring_nodes", BIF_ARG_1)) {
            BIF_RET(erts_processes_monitoring_nodes(BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Eterm res;
            if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                res = erts_test_next_pid(0, 0);
            else {
                res = erts_test_next_port(0, 0);
            }
            /* NOTE(review): res is an Eterm, which is normally an
             * unsigned type; if so this < 0 test can never be true.
             * Confirm the return type of erts_test_next_pid/port. */
            if (res < 0)
                BIF_RET(am_false);
            BIF_RET(erts_make_integer(res, BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            /* sizeof(DbTable) rounded up to whole words */
            size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
            BIF_RET(make_small((Uint) words));
        }
        else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
            /* Used by (emulator) */
            int res;
#ifdef HAVE_ERTS_CHECK_IO_DEBUG
            /* drop the main lock while running the check */
            erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
            res = erts_check_io_debug();
            erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
#else
            res = 0;
#endif
            ASSERT(res >= 0);
            BIF_RET(erts_make_integer((Uint) res, BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            /* List of all valid process_info/2 item atoms (pi_args). */
            int i;
            Eterm res = NIL;
            Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
            for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
                res = CONS(hp, pi_args[i], res);
                hp += 2;
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            BIF_RET(erts_debug_processes(BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            BIF_RET(erts_debug_processes_bif_info(BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
            /* Used by distribution_SUITE (emulator) */
            BIF_RET(make_small((Uint) erts_debug_max_atom_out_cache_index()));
        }
        else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
            Uint n;
            erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            n = erts_debug_nbalance();
            erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
            BIF_RET(erts_make_integer(n, BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
            /* We only get here when the feature is enabled, so: true. */
            BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("force_heap_frags", BIF_ARG_1)) {
#ifdef FORCE_HEAP_FRAGS
            BIF_RET(am_true);
#else
            BIF_RET(am_false);
#endif
        }
        else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
            /* Consistent memory snapshot: block all other threads
             * while erts_memory() gathers its data. */
            Eterm res;
            erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            erts_smp_thr_progress_block();
            erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
            res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
            erts_smp_thr_progress_unblock();
            BIF_RET(res);
        }
    }
    else if (is_tuple(BIF_ARG_1)) {
        Eterm* tp = tuple_val(BIF_ARG_1);
        switch (arityval(tp[0])) {
        case 2: {
            if (ERTS_IS_ATOM_STR("process_status", tp[1])) {
                /* Used by timer process_SUITE, timer_bif_SUITE, and
                   node_container_SUITE (emulator) */
                if (is_internal_pid(tp[2])) {
                    BIF_RET(erts_process_status(BIF_P,
                                                ERTS_PROC_LOCK_MAIN,
                                                NULL,
                                                tp[2]));
                }
            }
            else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
                /* Used by erl_link_SUITE (emulator) */
                /* Accepts a pid, a port id, or a node name atom. */
                if(is_internal_pid(tp[2])) {
                    Eterm res;
                    Process *p;

                    p = erts_pid2proc(BIF_P,
                                      ERTS_PROC_LOCK_MAIN,
                                      tp[2],
                                      ERTS_PROC_LOCK_LINK);
                    if (!p) {
                        ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
                        BIF_RET(am_undefined);
                    }
                    res = make_link_list(BIF_P, p->nlinks, NIL);
                    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
                    BIF_RET(res);
                }
                else if(is_internal_port(tp[2])) {
                    Eterm res;
                    Port *p = erts_id2port(tp[2], BIF_P, ERTS_PROC_LOCK_MAIN);
                    if(!p)
                        BIF_RET(am_undefined);
                    res = make_link_list(BIF_P, p->nlinks, NIL);
                    erts_smp_port_unlock(p);
                    BIF_RET(res);
                }
                else if(is_node_name_atom(tp[2])) {
                    DistEntry *dep = erts_find_dist_entry(tp[2]);
                    if(dep) {
                        Eterm subres;
                        erts_smp_de_links_lock(dep);
                        subres = make_link_list(BIF_P, dep->nlinks, NIL);
                        subres = make_link_list(BIF_P, dep->node_links, subres);
                        erts_smp_de_links_unlock(dep);
                        erts_deref_dist_entry(dep);
                        BIF_RET(subres);
                    } else {
                        BIF_RET(am_undefined);
                    }
                }
            }
            else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
                /* Used by erl_link_SUITE (emulator) */
                if(is_internal_pid(tp[2])) {
                    Process *p;
                    Eterm res;

                    p = erts_pid2proc(BIF_P,
                                      ERTS_PROC_LOCK_MAIN,
                                      tp[2],
                                      ERTS_PROC_LOCK_LINK);
                    if (!p) {
                        ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
                        BIF_RET(am_undefined);
                    }
                    res = make_monitor_list(BIF_P, p->monitors);
                    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
                    BIF_RET(res);
                } else if(is_node_name_atom(tp[2])) {
                    DistEntry *dep = erts_find_dist_entry(tp[2]);
                    if(dep) {
                        Eterm ml;
                        erts_smp_de_links_lock(dep);
                        ml = make_monitor_list(BIF_P, dep->monitors);
                        erts_smp_de_links_unlock(dep);
                        erts_deref_dist_entry(dep);
                        BIF_RET(ml);
                    } else {
                        BIF_RET(am_undefined);
                    }
                }
            }
            else if (ERTS_IS_ATOM_STR("channel_number", tp[1])) {
                Eterm res;
                DistEntry *dep = erts_find_dist_entry(tp[2]);
                if (!dep)
                    res = am_undefined;
                else {
                    Uint cno = dist_entry_channel_no(dep);
                    res = make_small(cno);
                    erts_deref_dist_entry(dep);
                }
                BIF_RET(res);
            }
            else if (ERTS_IS_ATOM_STR("have_pending_exit", tp[1])) {
                Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                            tp[2], ERTS_PROC_LOCK_STATUS);
                if (!rp) {
                    BIF_RET(am_undefined);
                }
                else {
                    Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? am_true : am_false;
                    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
                    BIF_RET(res);
                }
            }
            else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
                Eterm bin = tp[2];
                if (is_binary(bin)) {
                    Eterm real_bin = bin;
                    Eterm res = am_true;
                    ErlSubBin* sb = (ErlSubBin *) binary_val(real_bin);

                    /* Resolve a sub binary to its underlying binary. */
                    if (sb->thing_word == HEADER_SUB_BIN) {
                        real_bin = sb->orig;
                    }
                    if (*binary_val(real_bin) == HEADER_PROC_BIN) {
                        ProcBin* pb;
                        Binary* val;
                        Eterm SzTerm;
                        Uint hsz = 3 + 5;
                        Eterm* hp;
                        DECL_AM(refc_binary);

                        pb = (ProcBin *) binary_val(real_bin);
                        val = pb->val;
                        (void) erts_bld_uint(NULL, &hsz, pb->size);
                        (void) erts_bld_uint(NULL, &hsz, val->orig_size);
                        hp = HAlloc(BIF_P, hsz);

                        /* Info about the Binary* object */
                        SzTerm = erts_bld_uint(&hp, NULL, val->orig_size);
                        res = TUPLE2(hp, am_binary, SzTerm);
                        hp += 3;

                        /* Info about the ProcBin* object */
                        SzTerm = erts_bld_uint(&hp, NULL, pb->size);
                        res = TUPLE4(hp, AM_refc_binary, SzTerm,
                                     res, make_small(pb->flags));
                    } else {	/* heap binary */
                        DECL_AM(heap_binary);
                        res = AM_heap_binary;
                    }
                    BIF_RET(res);
                }
            }
            else if (ERTS_IS_ATOM_STR("term_to_binary_no_funs", tp[1])) {
                /* Encode with a flag set that lacks fun support. */
                Uint dflags = (DFLAG_EXTENDED_REFERENCES |
                               DFLAG_EXTENDED_PIDS_PORTS |
                               DFLAG_BIT_BINARIES);
                BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
            }
            else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) {
                Eterm res = am_undefined;
                DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
                if (dep) {
                    erts_smp_de_rlock(dep);
                    if (is_internal_port(dep->cid))
                        res = dep->cid;
                    erts_smp_de_runlock(dep);
                    erts_deref_dist_entry(dep);
                }
                BIF_RET(res);
            }
            else if (ERTS_IS_ATOM_STR("atom_out_cache_index", tp[1])) {
                /* Used by distribution_SUITE (emulator) */
                if (is_atom(tp[2])) {
                    BIF_RET(make_small(
                                (Uint)
                                erts_debug_atom_to_out_cache_index(tp[2])));
                }
            }
            else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
                return erts_fake_scheduler_bindings(BIF_P, tp[2]);
            }
            else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
                Sint groups;
                if (is_not_small(tp[2]))
                    BIF_ERROR(BIF_P, BADARG);
                groups = signed_val(tp[2]);
                if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
                    BIF_ERROR(BIF_P, BADARG);
                BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
            }
            break;
        }
        default:
            break;
        }
    }
    BIF_ERROR(BIF_P, BADARG);
}
  3298. static erts_smp_atomic_t hipe_test_reschedule_flag;
/*
 * erts_debug:set_internal_state(What, Value) -- poke emulator internals
 * for OTP test suites.  Everything except the available_internal_state
 * on/off switch raises EXC_UNDEF until that switch has been turned on;
 * unknown requests raise badarg.
 */
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     */
    if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
        && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
        erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
        erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on);
        if (on) {
            /* Log loudly whenever this backdoor is enabled. */
            erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
            erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
            if (erts_is_alive)
                erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
            erts_dsprintf(dsbufp,
                          "enabled access to the emulator internal state.\n");
            erts_dsprintf(dsbufp,
                          "NOTE: This is an erts internal test feature and "
                          "should *only* be used by OTP test-suites.\n");
            erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
        }
        /* Return the previous setting. */
        BIF_RET(prev_on ? am_true : am_false);
    }

    if (!erts_smp_atomic_read_nob(&available_internal_state)) {
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {
        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Set the remaining reductions of the calling process. */
            Sint reds;
            if (term_to_Sint(BIF_ARG_2, &reds) != 0) {
                if (0 <= reds && reds <= CONTEXT_REDS) {
                    /* fcalls counts down from CONTEXT_REDS, or from 0
                     * when call saving is active. */
                    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P))
                        BIF_P->fcalls = reds;
                    else
                        BIF_P->fcalls = reds - CONTEXT_REDS;
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("sleep", BIF_ARG_1)) {
            /* Sleep for Value ms; 'block' additionally blocks thread
             * progress (i.e. all other schedulers) while sleeping. */
            int block = ERTS_IS_ATOM_STR("block", BIF_ARG_1);
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    if (block)
                        erts_smp_thr_progress_block();
                    while (erts_milli_sleep((long) ms) != 0);
                    if (block)
                        erts_smp_thr_progress_unblock();
                    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block_scheduler", BIF_ARG_1)) {
            /* Sleep while holding on to this scheduler (no progress
             * block, no main lock). */
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    while (erts_milli_sleep((long) ms) != 0);
                    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Uint next;

            if (term_to_Uint(BIF_ARG_2, &next) != 0) {
                Eterm res;

                if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                    res = erts_test_next_pid(1, next);
                else {
                    res = erts_test_next_port(1, next);
                }
                /* NOTE(review): res is an Eterm (normally unsigned), so
                 * this < 0 test may never trigger -- confirm the return
                 * type of erts_test_next_pid/port. */
                if (res < 0)
                    BIF_RET(am_false);
                BIF_RET(erts_make_integer(res, BIF_P));
            }
        }
        else if (ERTS_IS_ATOM_STR("force_gc", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
            if (!rp) {
                BIF_RET(am_false);
            }
            else {
                /* Mark the process so a GC is forced on it. */
                FLAGS(rp) |= F_FORCE_GC;
                if (BIF_P != rp)
                    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("send_fake_exit_signal", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */

            /* Testcases depend on the exit being received via
               a pending exit when the receiver is the same as
               the caller.  */
            if (is_tuple(BIF_ARG_2)) {
                /* Value = {From, To, Reason} */
                Eterm* tp = tuple_val(BIF_ARG_2);
                if (arityval(tp[0]) == 3
                    && (is_pid(tp[1]) || is_port(tp[1]))
                    && is_internal_pid(tp[2])) {
                    int xres;
                    ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
                    Process *rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
                                                    tp[2], rp_locks,
                                                    ERTS_P2P_FLG_SMP_INC_REFC);
                    if (!rp) {
                        DECL_AM(dead);
                        BIF_RET(AM_dead);
                    }

#ifdef ERTS_SMP
                    if (BIF_P == rp)
                        rp_locks |= ERTS_PROC_LOCK_MAIN;
#endif
                    xres = erts_send_exit_signal(NULL, /* NULL in order to
                                                          force a pending exit
                                                          when we send to our
                                                          selves. */
                                                 tp[1],
                                                 rp,
                                                 &rp_locks,
                                                 tp[3],
                                                 NIL,
                                                 NULL,
                                                 0);
#ifdef ERTS_SMP
                    if (BIF_P == rp)
                        rp_locks &= ~ERTS_PROC_LOCK_MAIN;
#endif
                    erts_smp_proc_unlock(rp, rp_locks);
                    erts_smp_proc_dec_refc(rp);
                    /* Map erts_send_exit_signal()'s result onto an atom. */
                    if (xres > 1) {
                        DECL_AM(message);
                        BIF_RET(AM_message);
                    }
                    else if (xres == 0) {
                        DECL_AM(unaffected);
                        BIF_RET(AM_unaffected);
                    }
                    else {
                        DECL_AM(exit);
                        BIF_RET(AM_exit);
                    }
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            if (is_tuple(BIF_ARG_2)) {
                /* Value = {Atom, Count} */
                Eterm* tpl = tuple_val(BIF_ARG_2);
                Uint cnt;
                if (arityval(tpl[0]) == 2 && is_atom(tpl[1]) &&
                    term_to_Uint(tpl[2], &cnt)) {
                    BIF_RET(erts_ets_colliding_names(BIF_P,tpl[1],cnt));
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("binary_loop_limit", BIF_ARG_1)) {
            /* Used by binary_module_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_binary_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_binary_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
            /* Used by re_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_re_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_re_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("unicode_loop_limit", BIF_ARG_1)) {
            /* Used by unicode_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_unicode_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_unicode_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
            /* Used by hipe test suites */
            erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag);
            if (!flag && BIF_ARG_2 != am_false) {
                /* First call: suspend and yield back into this BIF;
                 * the flag makes the retry fall through below. */
                erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1);
                erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
                ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
                                BIF_P, BIF_ARG_1, BIF_ARG_2);
            }
            erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
            BIF_RET(NIL);
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
            /* Used by hipe test suites */
            Eterm res = am_false;
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_STATUS);
            if (rp) {
                erts_resume(rp, ERTS_PROC_LOCK_STATUS);
                res = am_true;
                erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("test_long_gc_sleep", BIF_ARG_1)) {
            if (term_to_Uint(BIF_ARG_2, &erts_test_long_gc_sleep) > 0)
                BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
            /* Abort the emulator, printing Value; never returns. */
            erl_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
            DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
            if (!dep)
                BIF_RET(am_false);
            else {
                Uint32 con_id;
                erts_smp_de_rlock(dep);
                con_id = dep->connection_id;
                erts_smp_de_runlock(dep);
                erts_kill_dist_connection(dep, con_id);
                erts_deref_dist_entry(dep);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
#ifdef ERTS_SMP
            int old_use_opt, use_opt;
            switch (BIF_ARG_2) {
            case am_true:
                use_opt = 1;
                break;
            case am_false:
                use_opt = 0;
                break;
            default:
                BIF_ERROR(BIF_P, BADARG);
            }

            /* Flipping the flag requires all schedulers blocked. */
            erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            erts_smp_thr_progress_block();
            old_use_opt = !erts_disable_proc_not_running_opt;
            erts_disable_proc_not_running_opt = !use_opt;
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
            BIF_RET(old_use_opt ? am_true : am_false);
#else
            BIF_ERROR(BIF_P, EXC_NOTSUP);
#endif
        }
        else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
            if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
                /* Yield until outstanding deallocations have completed. */
                if (erts_debug_wait_deallocations(BIF_P)) {
                    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
                }
            }
        }
    }

    BIF_ERROR(BIF_P, BADARG);
}
  3574. #ifdef ERTS_ENABLE_LOCK_COUNT
  3575. static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
  3576. Uint tries = 0, colls = 0;
  3577. unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
  3578. unsigned int line = 0;
  3579. Eterm af, uil;
  3580. Eterm uit, uic;
  3581. Eterm uits, uitns, uitn;
  3582. Eterm tt, tstat, tloc, t;
  3583. /* term:
  3584. * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
  3585. */
  3586. tries = (Uint) ethr_atomic_read(&stats->tries);
  3587. colls = (Uint) ethr_atomic_read(&stats->colls);
  3588. line = stats->line;
  3589. timer_s = stats->timer.s;
  3590. timer_ns = stats->timer.ns;
  3591. timer_n = stats->timer_n;
  3592. af = am_atom_put(stats->file, strlen(stats->file));
  3593. uil = erts_bld_uint( hpp, szp, line);
  3594. tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
  3595. uit = erts_bld_uint( hpp, szp, tries);
  3596. uic = erts_bld_uint( hpp, szp, colls);
  3597. uits = erts_bld_uint( hpp, szp, timer_s);
  3598. uitns = erts_bld_uint( hpp, szp, timer_ns);
  3599. uitn = erts_bld_uint( hpp, szp, timer_n);
  3600. tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
  3601. tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
  3602. t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
  3603. res = erts_bld_cons( hpp, szp, t, res);
  3604. return res;
  3605. }
  3606. static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
  3607. Eterm name, type, id, stats = NIL, t;
  3608. Process *proc = NULL;
  3609. char *ltype;
  3610. int i;
  3611. /* term:
  3612. * [{name, id, type, stats()}]
  3613. */
  3614. ASSERT(lock->name);
  3615. ltype = erts_lcnt_lock_type(lock->flag);
  3616. ASSERT(ltype);
  3617. type = am_atom_put(ltype, strlen(ltype));
  3618. name = am_atom_put(lock->name, strlen(lock->name));
  3619. if (lock->flag & ERTS_LCNT_LT_ALLOC) {
  3620. /* use allocator types names as id's for allocator locks */
  3621. ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
  3622. id = am_atom_put(ltype, strlen(ltype));
  3623. } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
  3624. /* use registered names as id's for process locks if available */
  3625. proc = erts_pid2proc_unlocked(lock->id);
  3626. if (proc && proc->reg) {
  3627. id = proc->reg->name;
  3628. } else {
  3629. /* otherwise use process id */
  3630. id = lock->id;
  3631. }
  3632. } else {
  3633. id = lock->id;
  3634. }
  3635. for (i = 0; i < lock->n_stats; i++) {
  3636. stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
  3637. }
  3638. t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
  3639. res = erts_bld_cons( hpp, szp, t, res);
  3640. return res;
  3641. }
  3642. static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
  3643. Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
  3644. erts_lcnt_lock_t *lock = NULL;
  3645. char *str_duration = "duration";
  3646. char *str_locks = "locks";
  3647. /* term:
  3648. * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
  3649. */
  3650. /* duration tuple */
  3651. dts = erts_bld_uint( hpp, szp, data->duration.s);
  3652. dtns = erts_bld_uint( hpp, szp, data->duration.ns);
  3653. tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
  3654. adur = am_atom_put(str_duration, strlen(str_duration));
  3655. tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
  3656. /* lock tuple */
  3657. aloc = am_atom_put(str_locks, strlen(str_locks));
  3658. for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
  3659. lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
  3660. }
  3661. for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
  3662. lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
  3663. }
  3664. tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
  3665. res = erts_bld_cons( hpp, szp, tloc, res);
  3666. res = erts_bld_cons( hpp, szp, tdur, res);
  3667. return res;
  3668. }
  3669. #endif
  3670. BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
  3671. {
  3672. #ifdef ERTS_ENABLE_LOCK_COUNT
  3673. Eterm res = NIL;
  3674. #endif
  3675. if (BIF_ARG_1 == am_enabled) {
  3676. #ifdef ERTS_ENABLE_LOCK_COUNT
  3677. BIF_RET(am_true);
  3678. #else
  3679. BIF_RET(am_false);
  3680. #endif
  3681. }
  3682. #ifdef ERTS_ENABLE_LOCK_COUNT
  3683. else if (BIF_ARG_1 == am_info) {
  3684. erts_lcnt_data_t *data;
  3685. Uint hsize = 0;
  3686. Uint *szp;
  3687. Eterm* hp;
  3688. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3689. erts_smp_thr_progress_block();
  3690. erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
  3691. data = erts_lcnt_get_data();
  3692. /* calculate size */
  3693. szp = &hsize;
  3694. lcnt_build_result_term(NULL, szp, data, NIL);
  3695. /* alloc and build */
  3696. hp = HAlloc(BIF_P, hsize);
  3697. res = lcnt_build_result_term(&hp, NULL, data, res);
  3698. erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
  3699. erts_smp_thr_progress_unblock();
  3700. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3701. BIF_RET(res);
  3702. } else if (BIF_ARG_1 == am_clear) {
  3703. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3704. erts_smp_thr_progress_block();
  3705. erts_lcnt_clear_counters();
  3706. erts_smp_thr_progress_unblock();
  3707. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3708. BIF_RET(am_ok);
  3709. } else if (is_tuple(BIF_ARG_1)) {
  3710. Eterm* tp = tuple_val(BIF_ARG_1);
  3711. switch (arityval(tp[0])) {
  3712. case 2:
  3713. if (ERTS_IS_ATOM_STR("copy_save", tp[1])) {
  3714. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3715. erts_smp_thr_progress_block();
  3716. if (tp[2] == am_true) {
  3717. res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
  3718. } else if (tp[2] == am_false) {
  3719. res = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
  3720. } else {
  3721. erts_smp_thr_progress_unblock();
  3722. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3723. BIF_ERROR(BIF_P, BADARG);
  3724. }
  3725. erts_smp_thr_progress_unblock();
  3726. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3727. BIF_RET(res);
  3728. } else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
  3729. erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3730. erts_smp_thr_progress_block();
  3731. if (tp[2] == am_true) {
  3732. res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
  3733. } else if (tp[2] == am_false) {
  3734. res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
  3735. } else {
  3736. erts_smp_thr_progress_unblock();
  3737. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3738. BIF_ERROR(BIF_P, BADARG);
  3739. }
  3740. erts_smp_thr_progress_unblock();
  3741. erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3742. BIF_RET(res);
  3743. }
  3744. break;
  3745. default:
  3746. break;
  3747. }
  3748. }
  3749. #endif
  3750. BIF_ERROR(BIF_P, BADARG);
  3751. }
  3752. static void os_info_init(void)
  3753. {
  3754. Eterm type = am_atom_put(os_type, strlen(os_type));
  3755. Eterm flav;
  3756. int major, minor, build;
  3757. char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
  3758. Eterm* hp;
  3759. os_flavor(buf, 1024);
  3760. flav = am_atom_put(buf, strlen(buf));
  3761. erts_free(ERTS_ALC_T_TMP, (void *) buf);
  3762. hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM, (3+4)*sizeof(Eterm));
  3763. os_type_tuple = TUPLE2(hp, type, flav);
  3764. hp += 3;
  3765. os_version(&major, &minor, &build);
  3766. os_version_tuple = TUPLE3(hp,
  3767. make_small(major),
  3768. make_small(minor),
  3769. make_small(build));
  3770. }
/*
 * One-time module initialization, called at emulator start: resets the
 * debug-state flags and installs the trap exports used by the BIFs in
 * this file, then initializes the process_info and os_info subsystems.
 */
void
erts_bif_info_init(void)
{
    erts_smp_atomic_init_nob(&available_internal_state, 0);
    erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0);

    alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
    alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
    gather_sched_wall_time_res_trap
	= erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1);
    process_info_init();
    os_info_init();
}