PageRenderTime 80ms CodeModel.GetById 21ms RepoModel.GetById 0ms app.codeStats 1ms

/erts/emulator/beam/erl_bif_info.c

https://github.com/bsmr-erlang/otp
C | 5205 lines | 4420 code | 555 blank | 230 comment | 1150 complexity | dc4aed70d905f906d55343d1d7cc2755 MD5 | raw file
Possible License(s): BSD-3-Clause, LGPL-2.1, MPL-2.0-no-copyleft-exception, Apache-2.0
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1999-2018. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. #ifdef HAVE_CONFIG_H
  21. # include "config.h"
  22. #endif
  23. #define ERTS_WANT_MEM_MAPPERS
  24. #include "sys.h"
  25. #include "erl_vm.h"
  26. #include "global.h"
  27. #include "erl_process.h"
  28. #include "error.h"
  29. #include "erl_driver.h"
  30. #include "erl_nif.h"
  31. #include "bif.h"
  32. #include "big.h"
  33. #include "erl_version.h"
  34. #include "erl_compile_flags.h"
  35. #include "erl_db_util.h"
  36. #include "erl_message.h"
  37. #include "erl_binary.h"
  38. #include "erl_db.h"
  39. #include "erl_mtrace.h"
  40. #include "dist.h"
  41. #include "erl_gc.h"
  42. #include "erl_cpu_topology.h"
  43. #include "erl_async.h"
  44. #include "erl_thr_progress.h"
  45. #include "erl_bif_unique.h"
  46. #include "erl_map.h"
  47. #include "erl_check_io.h"
  48. #define ERTS_PTAB_WANT_DEBUG_FUNCS__
  49. #include "erl_ptab.h"
  50. #include "erl_time.h"
  51. #include "erl_proc_sig_queue.h"
  52. #include "erl_alloc_util.h"
  53. #ifdef HIPE
  54. #include "hipe_arch.h"
  55. #endif
  56. #ifdef ERTS_ENABLE_LOCK_COUNT
  57. #include "erl_lock_count.h"
  58. #endif
  59. #ifdef VALGRIND
  60. #include <valgrind/valgrind.h>
  61. #include <valgrind/memcheck.h>
  62. #endif
  63. static Export* alloc_info_trap = NULL;
  64. static Export* alloc_sizes_trap = NULL;
  65. static Export* gather_io_bytes_trap = NULL;
  66. static Export *gather_sched_wall_time_res_trap;
  67. static Export *gather_msacc_res_trap;
  68. static Export *gather_gc_info_res_trap;
  69. static Export *gather_system_check_res_trap;
  70. static Export *is_process_alive_trap;
  71. #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
  72. static char otp_version[] = ERLANG_OTP_VERSION;
  73. /* Keep erts_system_version as a global variable for easy access from a core */
  74. static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
  75. "%s"
  76. " [erts-" ERLANG_VERSION "]"
  77. #ifndef OTP_RELEASE
  78. #ifdef ERLANG_GIT_VERSION
  79. " [source-" ERLANG_GIT_VERSION "]"
  80. #else
  81. " [source]"
  82. #endif
  83. #endif
  84. #ifdef ARCH_64
  85. " [64-bit]"
  86. #endif
  87. " [smp:%beu:%beu]"
  88. " [ds:%beu:%beu:%beu]"
  89. #if defined(ERTS_DIRTY_SCHEDULERS_TEST)
  90. " [dirty-schedulers-TEST]"
  91. #endif
  92. " [async-threads:%d]"
  93. #ifdef HIPE
  94. " [hipe]"
  95. #endif
  96. #ifdef ET_DEBUG
  97. #if ET_DEBUG
  98. " [type-assertions]"
  99. #endif
  100. #endif
  101. #ifdef DEBUG
  102. " [debug-compiled]"
  103. #endif
  104. #ifdef ERTS_ENABLE_LOCK_CHECK
  105. " [lock-checking]"
  106. #endif
  107. #ifdef ERTS_ENABLE_LOCK_COUNT
  108. " [lock-counting]"
  109. #endif
  110. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  111. " [instruction-counting]"
  112. #endif
  113. #ifdef PURIFY
  114. " [purify-compiled]"
  115. #endif
  116. #ifdef VALGRIND
  117. " [valgrind-compiled]"
  118. #endif
  119. #ifdef ERTS_FRMPTR
  120. " [frame-pointer]"
  121. #endif
  122. #ifdef USE_LTTNG
  123. " [lttng]"
  124. #endif
  125. #ifdef USE_DTRACE
  126. " [dtrace]"
  127. #endif
  128. #ifdef USE_SYSTEMTAP
  129. " [systemtap]"
  130. #endif
  131. #ifdef SHCOPY
  132. " [sharing-preserving]"
  133. #endif
  134. "\n");
  135. #define ASIZE(a) (sizeof(a)/sizeof(a[0]))
  136. #if defined(HAVE_SOLARIS_SPARC_PERFMON)
  137. # include <sys/ioccom.h>
  138. # define PERFMON_SETPCR _IOW('P', 1, unsigned long long)
  139. # define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
  140. #endif
  141. /* Cached, pre-built {OsType,OsFlavor} and {Major,Minor,Build} tuples */
  142. static Eterm os_type_tuple;
  143. static Eterm os_version_tuple;
  144. static Eterm
  145. current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
  146. int full_info, Uint reserve_size, int flags);
  147. static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
  148. Uint reserve_size);
/*
 * Build an Erlang list of {BinaryPtrAsInteger, OrigSize, RefCount}
 * 3-tuples, one per off-heap ProcBin found in 'oh'.
 *
 * Follows the usual two-pass erts_bld_* convention: when only 'szp' is
 * non-NULL the required heap size is accumulated into *szp; when 'hpp'
 * is non-NULL the terms are actually built and *hpp is advanced.
 */
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
    struct erl_off_heap_header* ohh;
    Eterm res = NIL;
    Eterm tuple;
    for (ohh = oh->first; ohh; ohh = ohh->next) {
        if (ohh->thing_word == HEADER_PROC_BIN) {
            ProcBin* pb = (ProcBin*) ohh;
            Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
            Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
            if (szp)
                *szp += 4+2; /* 3-tuple (4 words) + cons cell (2 words) */
            if (hpp) {
                Uint refc = (Uint) erts_refc_read(&pb->val->intern.refc, 1);
                tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
                /* the cons cell is laid out right after the 4-word tuple */
                res = CONS(*hpp + 4, tuple, res);
                *hpp += 4+2;
            }
        }
    }
    return res;
}
/*
 * Build an Erlang list of {BinaryPtrAsInteger, OrigSize, RefCount}
 * 3-tuples, one per magic-reference binary found in 'oh'.
 *
 * Same two-pass size/build convention as bld_bin_list(): 'szp'
 * accumulates the heap need, 'hpp' builds the terms.
 */
static Eterm
bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
    struct erl_off_heap_header* ohh;
    Eterm res = NIL;
    Eterm tuple;
    for (ohh = oh->first; ohh; ohh = ohh->next) {
        if (is_ref_thing_header((*((Eterm *) ohh)))) {
            ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
            Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
            Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
            if (szp)
                *szp += 4+2; /* 3-tuple (4 words) + cons cell (2 words) */
            if (hpp) {
                Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
                tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
                /* the cons cell is laid out right after the 4-word tuple */
                res = CONS(*hpp + 4, tuple, res);
                *hpp += 4+2;
            }
        }
    }
    return res;
}
  195. /*
  196. make_monitor_list:
  197. returns a list of records..
  198. -record(erl_monitor, {
  199. type, % process | port | time_offset | dist_process | resource
  200. % | node | nodes | suspend
  201. dir, % origin | target
  202. ref, % reference or []
  203. pid, % Process or nodename
  204. extra % registered name, integer or []
  205. }).
  206. */
/*
 * Monitor-foreach callback: accumulate into *vpsz (a Uint*) the number
 * of heap words needed to represent one monitor as an #erl_monitor{}
 * 6-tuple plus its cons cell.  Always returns 1 (continue iteration).
 */
static int do_calc_mon_size(ErtsMonitor *mon, void *vpsz, Sint reds)
{
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);
    Uint *psz = vpsz;
    /* space for a heap-copied reference, if the ref is not an immediate */
    *psz += is_immed(mdp->ref) ? 0 : NC_HEAP_SIZE(mdp->ref);
    /* target NIF-resource monitors carry a resource ref instead of a pid */
    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
        *psz += erts_resource_ref_size(mon->other.ptr);
    else
        *psz += is_immed(mon->other.item) ? 0 : NC_HEAP_SIZE(mon->other.item);
    *psz += 9; /* CONS + 6-tuple */
    return 1;
}
  219. typedef struct {
  220. Process *p;
  221. Eterm *hp;
  222. Eterm res;
  223. Eterm tag;
  224. } MonListContext;
/*
 * Monitor-foreach callback: build one {Tag, Type, Dir, Ref, Pid, Extra}
 * 6-tuple for a monitor and cons it onto pmlc->res.  The heap space
 * was pre-computed by do_calc_mon_size(), so pmlc->hp is only advanced
 * here, never checked.  Always returns 1 (continue iteration).
 */
static int do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc, Sint reds)
{
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);
    MonListContext *pmlc = vpmlc;
    Eterm tup, t, d, r, p, x;
    /* r = monitor reference (copied to the heap if not an immediate) */
    r = is_immed(mdp->ref) ? mdp->ref : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mdp->ref);
    /* p = monitored entity: a NIF resource ref, or a pid/port/name term */
    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
        p = erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->other.ptr);
    else
        p = (is_immed(mon->other.item)
             ? mon->other.item
             : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->other.item));
    /* x = "extra": registered name, node-monitor refcount, or [] */
    if (mon->flags & ERTS_ML_FLG_NAME)
        x = ((ErtsMonitorDataExtended *) mdp)->u.name;
    else if (erts_monitor_is_target(mon))
        x = NIL;
    else if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES)
        x = make_small(((ErtsMonitorDataExtended *) mdp)->u.refc);
    else
        x = NIL;
    /* t = monitor type rendered as an atom */
    switch (mon->type) {
    case ERTS_MON_TYPE_PROC:
        t = am_process;
        break;
    case ERTS_MON_TYPE_PORT:
        t = am_port;
        break;
    case ERTS_MON_TYPE_TIME_OFFSET:
        t = am_time_offset;
        break;
    case ERTS_MON_TYPE_DIST_PROC: {
        ERTS_DECL_AM(dist_process);
        t = AM_dist_process;
        break;
    }
    case ERTS_MON_TYPE_RESOURCE: {
        ERTS_DECL_AM(resource);
        t = AM_resource;
        break;
    }
    case ERTS_MON_TYPE_NODE:
        t = am_node;
        break;
    case ERTS_MON_TYPE_NODES: {
        ERTS_DECL_AM(nodes);
        t = AM_nodes;
        break;
    }
    case ERTS_MON_TYPE_SUSPEND:
        t = am_suspend;
        break;
    default:
        ERTS_INTERNAL_ERROR("Unknown monitor type");
        t = am_error;
        break;
    }
    /* d = direction of the monitor: target or origin */
    if (erts_monitor_is_target(mon)) {
        ERTS_DECL_AM(target);
        d = AM_target;
    }
    else {
        ERTS_DECL_AM(origin);
        d = AM_origin;
    }
    tup = TUPLE6(pmlc->hp, pmlc->tag, t, d, r, p, x);
    pmlc->hp += 7; /* 6-tuple: header word + 6 elements */
    pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
    pmlc->hp += 2; /* cons cell */
    return 1;
}
  295. static Eterm
  296. make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
  297. {
  298. DECL_AM(erl_monitor);
  299. Uint sz = 0;
  300. MonListContext mlc;
  301. void (*foreach)(ErtsMonitor *,
  302. ErtsMonitorFunc,
  303. void *);
  304. foreach = tree ? erts_monitor_tree_foreach : erts_monitor_list_foreach;
  305. (*foreach)(root, do_calc_mon_size, &sz);
  306. if (sz == 0)
  307. return tail;
  308. mlc.p = p;
  309. mlc.hp = HAlloc(p,sz);
  310. mlc.res = tail;
  311. mlc.tag = AM_erl_monitor;
  312. (*foreach)(root, do_make_one_mon_element, &mlc);
  313. return mlc.res;
  314. }
  315. /*
  316. make_link_list:
  317. returns a list of records..
  318. -record(erl_link, {
  319. type, % process | port | dist_process
  320. pid, % Process or port
  321. id % (address)
  322. }).
  323. */
/*
 * Link-foreach callback: accumulate into *vpsz (a Uint*) the heap words
 * needed to represent one link as an #erl_link{} 4-tuple plus its cons
 * cell.  Always returns 1 (continue iteration).
 */
static int calc_lnk_size(ErtsLink *lnk, void *vpsz, Sint reds)
{
    Uint *psz = vpsz;
    Uint sz = 0;
    ErtsLinkData *ldp = erts_link_to_data(lnk);
    /* size of the link-data address rendered as an integer term */
    (void) erts_bld_uword(NULL, &sz, (UWord) ldp);
    *psz += sz;
    *psz += is_immed(lnk->other.item) ? 0 : size_object(lnk->other.item);
    *psz += 7; /* CONS + 4-tuple */
    return 1;
}
  335. typedef struct {
  336. Process *p;
  337. Eterm *hp;
  338. Eterm res;
  339. Eterm tag;
  340. } LnkListContext;
  341. static int make_one_lnk_element(ErtsLink *lnk, void * vpllc, Sint reds)
  342. {
  343. LnkListContext *pllc = vpllc;
  344. Eterm tup, t, pid, id;
  345. ErtsLinkData *ldp = erts_link_to_data(lnk);
  346. id = erts_bld_uword(&pllc->hp, NULL, (UWord) ldp);
  347. if (is_immed(lnk->other.item))
  348. pid = lnk->other.item;
  349. else {
  350. Uint sz = size_object(lnk->other.item);
  351. pid = copy_struct(lnk->other.item, sz, &(pllc->hp), &MSO(pllc->p));
  352. }
  353. switch (lnk->type) {
  354. case ERTS_LNK_TYPE_PROC:
  355. t = am_process;
  356. break;
  357. case ERTS_LNK_TYPE_PORT:
  358. t = am_port;
  359. break;
  360. case ERTS_LNK_TYPE_DIST_PROC: {
  361. ERTS_DECL_AM(dist_process);
  362. t = AM_dist_process;
  363. break;
  364. }
  365. default:
  366. ERTS_INTERNAL_ERROR("Unkown link type");
  367. t = am_undefined;
  368. break;
  369. }
  370. tup = TUPLE4(pllc->hp, pllc->tag, t, pid, id);
  371. pllc->hp += 5;
  372. pllc->res = CONS(pllc->hp, tup, pllc->res);
  373. pllc->hp += 2;
  374. return 1;
  375. }
  376. static Eterm
  377. make_link_list(Process *p, int tree, ErtsLink *root, Eterm tail)
  378. {
  379. DECL_AM(erl_link);
  380. Uint sz = 0;
  381. LnkListContext llc;
  382. void (*foreach)(ErtsLink *,
  383. ErtsLinkFunc,
  384. void *);
  385. foreach = tree ? erts_link_tree_foreach : erts_link_list_foreach;
  386. (*foreach)(root, calc_lnk_size, (void *) &sz);
  387. if (sz == 0) {
  388. return tail;
  389. }
  390. llc.p = p;
  391. llc.hp = HAlloc(p,sz);
  392. llc.res = tail;
  393. llc.tag = AM_erl_link;
  394. (*foreach)(root, make_one_lnk_element, (void *) &llc);
  395. return llc.res;
  396. }
/*
 * Print the system version banner (the string shown at emulator
 * start-up) to the given output callback.  Supplies the release
 * candidate suffix (if any) plus the scheduler, dirty-scheduler and
 * async-thread counts that the erts_system_version format string
 * expects.  Returns the result of erts_print().
 */
int
erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
{
    int i, rc = -1;
    char *rc_str = "";
    char rc_buf[100];
    char *ov = otp_version;
    Uint total, online, active;
    Uint dirty_cpu, dirty_cpu_onln, dirty_io;
    erts_schedulers_state(&total, &online, &active,
                          &dirty_cpu, &dirty_cpu_onln, NULL,
                          &dirty_io, NULL);
    /* Scan the OTP version string for a "-rcN" suffix; remember N. */
    for (i = 0; i < sizeof(otp_version)-4; i++) {
        if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
            rc = atoi(&ov[i+3]);
    }
    if (rc >= 0) {
        if (rc == 0)
            rc_str = " [DEVELOPMENT]"; /* -rc0 marks a development build */
        else {
            erts_snprintf(rc_buf, sizeof(rc_buf), " [RELEASE CANDIDATE %d]", rc);
            rc_str = rc_buf;
        }
    }
    return erts_print(to, arg, erts_system_version,
                      rc_str
                      , total, online
                      , dirty_cpu, dirty_cpu_onln, dirty_io
                      , erts_async_max_threads
        );
}
  428. typedef struct {
  429. /* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
  430. * {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
  431. * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
  432. * {Entity,Node} = {monitor.resource,MON_NIF_TARGET}*/
  433. union {
  434. Eterm term;
  435. ErtsResource* resource;
  436. }entity;
  437. int named;
  438. Uint16 type;
  439. Eterm node;
  440. /* pid is actual target being monitored, no matter pid/port or name */
  441. Eterm pid;
  442. } MonitorInfo;
  443. typedef struct {
  444. MonitorInfo *mi;
  445. Uint mi_i;
  446. Uint mi_max;
  447. int sz;
  448. } MonitorInfoCollection;
  449. #define INIT_MONITOR_INFOS(MIC) do { \
  450. (MIC).mi = NULL; \
  451. (MIC).mi_i = (MIC).mi_max = 0; \
  452. (MIC).sz = 0; \
  453. } while(0)
  454. #define MI_INC 50
  455. #define EXTEND_MONITOR_INFOS(MICP) \
  456. do { \
  457. if ((MICP)->mi_i >= (MICP)->mi_max) { \
  458. (MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP, \
  459. (MICP)->mi, \
  460. ((MICP)->mi_max+MI_INC) \
  461. * sizeof(MonitorInfo)) \
  462. : erts_alloc(ERTS_ALC_T_TMP, \
  463. MI_INC*sizeof(MonitorInfo))); \
  464. (MICP)->mi_max += MI_INC; \
  465. } \
  466. } while (0)
  467. #define DESTROY_MONITOR_INFOS(MIC) \
  468. do { \
  469. if ((MIC).mi != NULL) { \
  470. erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi); \
  471. } \
  472. } while (0)
  473. static int collect_one_link(ErtsLink *lnk, void *vmicp, Sint reds)
  474. {
  475. MonitorInfoCollection *micp = vmicp;
  476. EXTEND_MONITOR_INFOS(micp);
  477. micp->mi[micp->mi_i].entity.term = lnk->other.item;
  478. micp->sz += 2 + NC_HEAP_SIZE(lnk->other.item);
  479. micp->mi_i++;
  480. return 1;
  481. }
/*
 * Monitor-foreach callback: collect origin-direction monitors (process,
 * port, dist-process and time-offset types) into a
 * MonitorInfoCollection, recording entity/node/pid and accumulating the
 * heap size the result terms will need.  Other monitor types and
 * target-direction monitors are skipped.  Always returns 1 (continue).
 */
static int collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
    if (erts_monitor_is_origin(mon)) {
        MonitorInfoCollection *micp = vmicp;
        EXTEND_MONITOR_INFOS(micp);
        micp->mi[micp->mi_i].type = mon->type;
        switch (mon->type) {
        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:
        case ERTS_MON_TYPE_TIME_OFFSET:
            if (!(mon->flags & ERTS_ML_FLG_NAME)) {
                /* Monitor by pid/port: entity is the monitored id itself. */
                micp->mi[micp->mi_i].named = 0;
                micp->mi[micp->mi_i].entity.term = mon->other.item;
                micp->mi[micp->mi_i].node = NIL;
                if (is_not_atom(mon->other.item))
                    micp->sz += NC_HEAP_SIZE(mon->other.item);
            }
            else {
                /* Monitor by name: entity is the name, node is recorded too. */
                ErtsMonitorDataExtended *mdep;
                micp->mi[micp->mi_i].named = !0;
                mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
                micp->mi[micp->mi_i].entity.term = mdep->u.name;
                if (mdep->dist)
                    micp->mi[micp->mi_i].node = mdep->dist->nodename;
                else
                    micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
                micp->sz += 3; /* need one 2-tuple */
            }
            /* have always pid at hand, to assist with figuring out if its a port or
             * a process, when we monitored by name and process_info is requested.
             * See: erl_bif_info.c:process_info_aux section for am_monitors */
            micp->mi[micp->mi_i].pid = mon->other.item;
            micp->mi_i++;
            micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
            break;
        default:
            break;
        }
    }
    return 1;
}
/*
 * Monitor-foreach callback: collect target-direction monitors (i.e.
 * entities monitoring us) into a MonitorInfoCollection, accumulating
 * the heap size the result terms will need.  Handles process, port,
 * dist-process and NIF-resource monitors; other types are skipped.
 * Always returns 1 (continue iteration).
 */
static int collect_one_target_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
    MonitorInfoCollection *micp = vmicp;
    if (erts_monitor_is_target(mon)) {
        EXTEND_MONITOR_INFOS(micp);
        micp->mi[micp->mi_i].type = mon->type;
        micp->mi[micp->mi_i].named = !!(mon->flags & ERTS_ML_FLG_NAME);
        switch (mon->type) {
        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:
            micp->mi[micp->mi_i].entity.term = mon->other.item;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += NC_HEAP_SIZE(mon->other.item);
            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;
        case ERTS_MON_TYPE_RESOURCE:
            /* Monitoring entity is a NIF resource, not a pid/port. */
            micp->mi[micp->mi_i].entity.resource = mon->other.ptr;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += erts_resource_ref_size(mon->other.ptr);
            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;
        default:
            break;
        }
    }
    return 1;
}
  554. typedef struct {
  555. ErtsMonitorSuspend **smi;
  556. Uint smi_i;
  557. Uint smi_max;
  558. Uint sz;
  559. } ErtsSuspendMonitorInfoCollection;
  560. #define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do { \
  561. (SMIC).smi = NULL; \
  562. (SMIC).smi_i = (SMIC).smi_max = 0; \
  563. (SMIC).sz = 0; \
  564. } while(0)
  565. #define ERTS_SMI_INC 50
  566. #define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP) \
  567. do { \
  568. if ((SMICP)->smi_i >= (SMICP)->smi_max) { \
  569. (SMICP)->smi = ((SMICP)->smi \
  570. ? erts_realloc(ERTS_ALC_T_TMP, \
  571. (SMICP)->smi, \
  572. ((SMICP)->smi_max \
  573. + ERTS_SMI_INC) \
  574. * sizeof(ErtsMonitorSuspend *)) \
  575. : erts_alloc(ERTS_ALC_T_TMP, \
  576. ERTS_SMI_INC \
  577. * sizeof(ErtsMonitorSuspend *))); \
  578. (SMICP)->smi_max += ERTS_SMI_INC; \
  579. } \
  580. } while (0)
  581. #define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC) \
  582. do { \
  583. if ((SMIC).smi != NULL) { \
  584. erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi); \
  585. } \
  586. } while (0)
/*
 * Monitor-foreach callback: collect suspend monitors into an
 * ErtsSuspendMonitorInfoCollection, accumulating the heap size needed
 * for a cons cell and a 3-tuple per entry — plus bignum space when the
 * suspend count does not fit in a small integer.  Non-suspend monitors
 * are skipped.  Always returns 1 (continue iteration).
 */
static int
collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp, Sint reds)
{
    if (mon->type == ERTS_MON_TYPE_SUSPEND) {
        Sint count;
        erts_aint_t mstate;
        ErtsMonitorSuspend *msp;
        ErtsSuspendMonitorInfoCollection *smicp;
        msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
        smicp = vsmicp;
        ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
        smicp->smi[smicp->smi_i] = msp;
        smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
        /* the low bits of the monitor state hold the suspend count */
        mstate = erts_atomic_read_nob(&msp->state);
        count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
        if (!IS_SSMALL(count))
            smicp->sz += BIG_UINT_HEAP_SIZE; /* needs a bignum on the heap */
        smicp->smi_i++;
    }
    return 1;
}
  608. /*
  609. * process_info/[1,2]
  610. */
  611. /*
  612. * All valid process_info arguments.
  613. */
  614. #define ERTS_PI_IX_REGISTERED_NAME 0
  615. #define ERTS_PI_IX_CURRENT_FUNCTION 1
  616. #define ERTS_PI_IX_INITIAL_CALL 2
  617. #define ERTS_PI_IX_STATUS 3
  618. #define ERTS_PI_IX_MESSAGES 4
  619. #define ERTS_PI_IX_MESSAGE_QUEUE_LEN 5
  620. #define ERTS_PI_IX_LINKS 6
  621. #define ERTS_PI_IX_MONITORS 7
  622. #define ERTS_PI_IX_MONITORED_BY 8
  623. #define ERTS_PI_IX_DICTIONARY 9
  624. #define ERTS_PI_IX_TRAP_EXIT 10
  625. #define ERTS_PI_IX_ERROR_HANDLER 11
  626. #define ERTS_PI_IX_HEAP_SIZE 12
  627. #define ERTS_PI_IX_STACK_SIZE 13
  628. #define ERTS_PI_IX_MEMORY 14
  629. #define ERTS_PI_IX_GARBAGE_COLLECTION 15
  630. #define ERTS_PI_IX_GROUP_LEADER 16
  631. #define ERTS_PI_IX_REDUCTIONS 17
  632. #define ERTS_PI_IX_PRIORITY 18
  633. #define ERTS_PI_IX_TRACE 19
  634. #define ERTS_PI_IX_BINARY 20
  635. #define ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN 21
  636. #define ERTS_PI_IX_CATCHLEVEL 22
  637. #define ERTS_PI_IX_BACKTRACE 23
  638. #define ERTS_PI_IX_LAST_CALLS 24
  639. #define ERTS_PI_IX_TOTAL_HEAP_SIZE 25
  640. #define ERTS_PI_IX_SUSPENDING 26
  641. #define ERTS_PI_IX_MIN_HEAP_SIZE 27
  642. #define ERTS_PI_IX_MIN_BIN_VHEAP_SIZE 28
  643. #define ERTS_PI_IX_MAX_HEAP_SIZE 29
  644. #define ERTS_PI_IX_CURRENT_LOCATION 30
  645. #define ERTS_PI_IX_CURRENT_STACKTRACE 31
  646. #define ERTS_PI_IX_MESSAGE_QUEUE_DATA 32
  647. #define ERTS_PI_IX_GARBAGE_COLLECTION_INFO 33
  648. #define ERTS_PI_IX_MAGIC_REF 34
  649. #define ERTS_PI_IX_FULLSWEEP_AFTER 35
  650. #define ERTS_PI_FLAG_SINGELTON (1 << 0)
  651. #define ERTS_PI_FLAG_ALWAYS_WRAP (1 << 1)
  652. #define ERTS_PI_FLAG_WANT_MSGS (1 << 2)
  653. #define ERTS_PI_FLAG_NEED_MSGQ_LEN (1 << 3)
  654. #define ERTS_PI_FLAG_FORCE_SIG_SEND (1 << 4)
  655. #define ERTS_PI_FLAG_REQUEST_FOR_OTHER (1 << 5)
  656. #define ERTS_PI_UNRESERVE(RS, SZ) \
  657. (ASSERT((RS) >= (SZ)), (RS) -= (SZ))
  658. typedef struct {
  659. Eterm name;
  660. Uint reserve_size;
  661. int flags;
  662. ErtsProcLocks locks;
  663. } ErtsProcessInfoArgs;
  664. static ErtsProcessInfoArgs pi_args[] = {
  665. {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
  666. {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  667. {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
  668. {am_status, 0, 0, 0},
  669. {am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  670. {am_message_queue_len, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN, ERTS_PROC_LOCK_MAIN},
  671. {am_links, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  672. {am_monitors, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  673. {am_monitored_by, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  674. {am_dictionary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  675. {am_trap_exit, 0, 0, ERTS_PROC_LOCK_MAIN},
  676. {am_error_handler, 0, 0, ERTS_PROC_LOCK_MAIN},
  677. {am_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
  678. {am_stack_size, 0, 0, ERTS_PROC_LOCK_MAIN},
  679. {am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  680. {am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
  681. {am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
  682. {am_reductions, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  683. {am_priority, 0, 0, 0},
  684. {am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
  685. {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  686. {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
  687. {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
  688. {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  689. {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
  690. {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  691. {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
  692. {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
  693. {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
  694. {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
  695. {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  696. {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  697. {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
  698. {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
  699. {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
  700. {am_fullsweep_after, 0, 0, ERTS_PROC_LOCK_MAIN}
  701. };
  702. #define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(pi_args[0])))
  703. #ifdef DEBUG
  704. # define ERTS_PI_DEF_ARR_SZ 2
  705. #else
  706. # define ERTS_PI_DEF_ARR_SZ ERTS_PI_ARGS
  707. #endif
  708. static ERTS_INLINE Eterm
  709. pi_ix2arg(int ix)
  710. {
  711. if (ix < 0 || ERTS_PI_ARGS <= ix)
  712. return am_undefined;
  713. return pi_args[ix].name;
  714. }
  715. static ERTS_INLINE int
  716. pi_ix2flags(int ix)
  717. {
  718. if (ix < 0 || ERTS_PI_ARGS <= ix)
  719. return 0;
  720. return pi_args[ix].flags;
  721. }
  722. static ERTS_INLINE Uint
  723. pi_ix2rsz(int ix)
  724. {
  725. if (ix < 0 || ERTS_PI_ARGS <= ix)
  726. return 0;
  727. return pi_args[ix].reserve_size;
  728. }
  729. static ERTS_INLINE ErtsProcLocks
  730. pi_ix2locks(int ix)
  731. {
  732. if (ix < 0 || ERTS_PI_ARGS <= ix)
  733. return 0;
  734. return pi_args[ix].locks;
  735. }
/*
 * Map a process_info argument atom to its ERTS_PI_IX_* item index,
 * or -1 when the atom is not a valid process_info item.  The inverse
 * of pi_ix2arg(); consistency is asserted in process_info_init().
 */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name:
        return ERTS_PI_IX_REGISTERED_NAME;
    case am_current_function:
        return ERTS_PI_IX_CURRENT_FUNCTION;
    case am_initial_call:
        return ERTS_PI_IX_INITIAL_CALL;
    case am_status:
        return ERTS_PI_IX_STATUS;
    case am_messages:
        return ERTS_PI_IX_MESSAGES;
    case am_message_queue_len:
        return ERTS_PI_IX_MESSAGE_QUEUE_LEN;
    case am_links:
        return ERTS_PI_IX_LINKS;
    case am_monitors:
        return ERTS_PI_IX_MONITORS;
    case am_monitored_by:
        return ERTS_PI_IX_MONITORED_BY;
    case am_dictionary:
        return ERTS_PI_IX_DICTIONARY;
    case am_trap_exit:
        return ERTS_PI_IX_TRAP_EXIT;
    case am_error_handler:
        return ERTS_PI_IX_ERROR_HANDLER;
    case am_heap_size:
        return ERTS_PI_IX_HEAP_SIZE;
    case am_stack_size:
        return ERTS_PI_IX_STACK_SIZE;
    case am_memory:
        return ERTS_PI_IX_MEMORY;
    case am_garbage_collection:
        return ERTS_PI_IX_GARBAGE_COLLECTION;
    case am_group_leader:
        return ERTS_PI_IX_GROUP_LEADER;
    case am_reductions:
        return ERTS_PI_IX_REDUCTIONS;
    case am_priority:
        return ERTS_PI_IX_PRIORITY;
    case am_trace:
        return ERTS_PI_IX_TRACE;
    case am_binary:
        return ERTS_PI_IX_BINARY;
    case am_sequential_trace_token:
        return ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN;
    case am_catchlevel:
        return ERTS_PI_IX_CATCHLEVEL;
    case am_backtrace:
        return ERTS_PI_IX_BACKTRACE;
    case am_last_calls:
        return ERTS_PI_IX_LAST_CALLS;
    case am_total_heap_size:
        return ERTS_PI_IX_TOTAL_HEAP_SIZE;
    case am_suspending:
        return ERTS_PI_IX_SUSPENDING;
    case am_min_heap_size:
        return ERTS_PI_IX_MIN_HEAP_SIZE;
    case am_min_bin_vheap_size:
        return ERTS_PI_IX_MIN_BIN_VHEAP_SIZE;
    case am_max_heap_size:
        return ERTS_PI_IX_MAX_HEAP_SIZE;
    case am_current_location:
        return ERTS_PI_IX_CURRENT_LOCATION;
    case am_current_stacktrace:
        return ERTS_PI_IX_CURRENT_STACKTRACE;
    case am_message_queue_data:
        return ERTS_PI_IX_MESSAGE_QUEUE_DATA;
    case am_garbage_collection_info:
        return ERTS_PI_IX_GARBAGE_COLLECTION_INFO;
    case am_magic_ref:
        return ERTS_PI_IX_MAGIC_REF;
    case am_fullsweep_after:
        return ERTS_PI_IX_FULLSWEEP_AFTER;
    default:
        return -1; /* not a valid process_info item */
    }
}
  816. static Eterm pi_1_keys[] = {
  817. am_registered_name,
  818. am_current_function,
  819. am_initial_call,
  820. am_status,
  821. am_message_queue_len,
  822. am_links,
  823. am_dictionary,
  824. am_trap_exit,
  825. am_error_handler,
  826. am_priority,
  827. am_group_leader,
  828. am_total_heap_size,
  829. am_heap_size,
  830. am_stack_size,
  831. am_reductions,
  832. am_garbage_collection,
  833. am_suspending
  834. };
  835. #define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))
  836. static Eterm pi_1_keys_list;
  837. static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
  838. static void
  839. process_info_init(void)
  840. {
  841. Eterm *hp = &pi_1_keys_list_heap[0];
  842. int i;
  843. pi_1_keys_list = NIL;
  844. for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
  845. pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
  846. hp += 2;
  847. }
  848. #ifdef DEBUG
  849. { /* Make sure the process_info argument mappings are consistent */
  850. int ix;
  851. for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
  852. ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
  853. }
  854. }
  855. #endif
  856. }
  857. static BIF_RETTYPE
  858. process_info_aux(Process *c_p,
  859. ErtsHeapFactory *hfact,
  860. Process *rp,
  861. ErtsProcLocks rp_locks,
  862. int item_ix,
  863. int flags,
  864. Uint *reserve_sizep,
  865. Uint *reds);
/*
 * Build the process_info result for process 'rp' on behalf of caller
 * 'c_p'.  'item_ix'/'item_ix_len' list the requested item indices (see
 * ERTS_PI_IX_*).  With ERTS_PI_FLAG_SINGELTON set, a single bare item
 * value is returned; otherwise a list of {Item, Value} tuples (built in
 * reverse so it comes out in request order).  'reserve_size' tracks
 * heap space reserved up front; 'reds' accumulates reduction cost.
 */
Eterm
erts_process_info(Process *c_p,
                  ErtsHeapFactory *hfact,
                  Process *rp,
                  ErtsProcLocks rp_locks,
                  int *item_ix,
                  int item_ix_len,
                  int flags,
                  Uint reserve_size,
                  Uint *reds)
{
    Eterm res;
    Eterm part_res[ERTS_PI_ARGS];
    int item_ix_ix, ix;

    /* Single-item request: return the bare value, no wrapping list. */
    if (ERTS_PI_FLAG_SINGELTON & flags) {
        ASSERT(item_ix_len == 1);
        res = process_info_aux(c_p, hfact, rp, rp_locks, item_ix[0],
                               flags, &reserve_size, reds);
        return res;
    }

    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
        part_res[ix] = THE_NON_VALUE;

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (flags & ERTS_PI_FLAG_WANT_MSGS) {
        ix = pi_arg2ix(am_messages);
        ASSERT(part_res[ix] == THE_NON_VALUE);
        res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                               flags, &reserve_size, reds);
        ASSERT(res != am_undefined);
        ASSERT(res != THE_NON_VALUE);
        part_res[ix] = res;
    }

    /* Compute each remaining requested item once (duplicates reuse it). */
    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
        ix = item_ix[item_ix_ix];
        if (part_res[ix] == THE_NON_VALUE) {
            res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                                   flags, &reserve_size, reds);
            ASSERT(res != am_undefined);
            ASSERT(res != THE_NON_VALUE);
            part_res[ix] = res;
        }
    }

    /* Cons the per-item results together, last requested item first. */
    res = NIL;
    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
        ix = item_ix[item_ix_ix];
        ASSERT(part_res[ix] != THE_NON_VALUE);
        /*
         * If we should ignore the value of registered_name,
         * its value is nil. For more info, see comment in the
         * beginning of process_info_aux().
         */
        if (is_nil(part_res[ix])) {
            ASSERT(!(flags & ERTS_PI_FLAG_ALWAYS_WRAP));
            ASSERT(pi_ix2arg(ix) == am_registered_name);
        }
        else {
            Eterm *hp;
            ERTS_PI_UNRESERVE(reserve_size, 2);
            hp = erts_produce_heap(hfact, 2, reserve_size);
            res = CONS(hp, part_res[ix], res);
        }
    }

    return res;
}
  936. static void
  937. pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix);
  938. static BIF_RETTYPE
  939. process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
  940. {
  941. ErtsHeapFactory hfact;
  942. int def_arr[ERTS_PI_DEF_ARR_SZ];
  943. int *item_ix = &def_arr[0];
  944. Process *rp = NULL;
  945. erts_aint32_t state;
  946. BIF_RETTYPE ret;
  947. Uint reds = 0;
  948. ErtsProcLocks locks = 0;
  949. int flags;
  950. Uint reserve_size;
  951. int len;
  952. Eterm res;
  953. ERTS_CT_ASSERT(ERTS_PI_DEF_ARR_SZ > 0);
  954. if (c_p->common.id == pid) {
  955. int local_only = c_p->flags & F_LOCAL_SIGS_ONLY;
  956. int sres, sreds, reds_left;
  957. reds_left = ERTS_BIF_REDS_LEFT(c_p);
  958. sreds = reds_left;
  959. if (!local_only) {
  960. erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
  961. erts_proc_sig_fetch(c_p);
  962. erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
  963. }
  964. sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
  965. BUMP_REDS(c_p, (int) sreds);
  966. reds_left -= sreds;
  967. if (state & ERTS_PSFLG_EXITING) {
  968. c_p->flags &= ~F_LOCAL_SIGS_ONLY;
  969. goto exited;
  970. }
  971. if (!sres | (reds_left <= 0)) {
  972. /*
  973. * More signals to handle or out of reds; need
  974. * to yield and continue. Prevent fetching of
  975. * more signals by setting local-sigs-only flag.
  976. */
  977. c_p->flags |= F_LOCAL_SIGS_ONLY;
  978. goto yield;
  979. }
  980. c_p->flags &= ~F_LOCAL_SIGS_ONLY;
  981. }
  982. if (is_atom(opt)) {
  983. int ix = pi_arg2ix(opt);
  984. item_ix[0] = ix;
  985. len = 1;
  986. locks = pi_ix2locks(ix);
  987. reserve_size = 3 + pi_ix2rsz(ix);
  988. flags = ERTS_PI_FLAG_SINGELTON;
  989. flags |= pi_ix2flags(ix);
  990. if (ix < 0)
  991. goto badarg;
  992. }
  993. else {
  994. Eterm list = opt;
  995. Uint size = ERTS_PI_DEF_ARR_SZ;
  996. len = 0;
  997. reserve_size = 0;
  998. locks = 0;
  999. flags = 0;
  1000. while (is_list(list)) {
  1001. Eterm *consp = list_val(list);
  1002. Eterm arg = CAR(consp);
  1003. int ix = pi_arg2ix(arg);
  1004. if (ix < 0)
  1005. goto badarg;
  1006. if (len >= size)
  1007. pi_setup_grow(&item_ix, def_arr, &size, len);
  1008. item_ix[len++] = ix;
  1009. locks |= pi_ix2locks(ix);
  1010. flags |= pi_ix2flags(ix);
  1011. reserve_size += pi_ix2rsz(ix);
  1012. reserve_size += 3; /* 2-tuple */
  1013. reserve_size += 2; /* cons */
  1014. list = CDR(consp);
  1015. }
  1016. if (is_not_nil(list))
  1017. goto badarg;
  1018. }
  1019. if (is_not_internal_pid(pid)) {
  1020. if (is_external_pid(pid)
  1021. && external_pid_dist_entry(pid) == erts_this_dist_entry)
  1022. goto undefined;
  1023. goto badarg;
  1024. }
  1025. if (always_wrap)
  1026. flags |= ERTS_PI_FLAG_ALWAYS_WRAP;
  1027. if (c_p->common.id == pid) {
  1028. rp = c_p;
  1029. if (locks & ~ERTS_PROC_LOCK_MAIN)
  1030. erts_proc_lock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
  1031. locks |= ERTS_PROC_LOCK_MAIN;
  1032. }
  1033. else {
  1034. if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
  1035. goto send_signal;
  1036. state = ERTS_PSFLG_RUNNING; /* fail state... */
  1037. rp = erts_try_lock_sig_free_proc(pid, locks, &state);
  1038. if (!rp)
  1039. goto undefined;
  1040. if (rp == ERTS_PROC_LOCK_BUSY) {
  1041. rp = NULL;
  1042. goto send_signal;
  1043. }
  1044. if (state & ERTS_PSFLG_EXITING) {
  1045. if (locks)
  1046. erts_proc_unlock(rp, locks);
  1047. locks = 0;
  1048. /* wait for it to terminate properly... */
  1049. goto send_signal;
  1050. }
  1051. if (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN) {
  1052. ASSERT(locks & ERTS_PROC_LOCK_MAIN);
  1053. erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
  1054. erts_proc_sig_fetch(rp);
  1055. if (c_p->sig_qs.cont) {
  1056. erts_proc_unlock(rp, locks|ERTS_PROC_LOCK_MSGQ);
  1057. locks = 0;
  1058. goto send_signal;
  1059. }
  1060. erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
  1061. }
  1062. }
  1063. erts_factory_proc_init(&hfact, c_p);
  1064. res = erts_process_info(c_p, &hfact, rp, locks, item_ix, len,
  1065. flags, reserve_size, &reds);
  1066. erts_factory_close(&hfact);
  1067. if (reds > INT_MAX/2)
  1068. reds = INT_MAX/2;
  1069. BUMP_REDS(c_p, (int) reds);
  1070. state = erts_atomic32_read_acqb(&rp->state);
  1071. if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)) {
  1072. if (state & ERTS_PSFLG_FREE) {
  1073. ASSERT(!locks);
  1074. goto undefined;
  1075. }
  1076. if (locks)
  1077. erts_proc_unlock(rp, locks);
  1078. locks = 0;
  1079. /* wait for it to terminate properly... */
  1080. goto send_signal;
  1081. }
  1082. ERTS_BIF_PREP_RET(ret, res);
  1083. done:
  1084. if (c_p == rp)
  1085. locks &= ~ERTS_PROC_LOCK_MAIN;
  1086. if (locks && rp)
  1087. erts_proc_unlock(rp, locks);
  1088. if (item_ix != def_arr)
  1089. erts_free(ERTS_ALC_T_TMP, item_ix);
  1090. return ret;
  1091. badarg:
  1092. ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
  1093. goto done;
  1094. undefined:
  1095. ERTS_BIF_PREP_RET(ret, am_undefined);
  1096. goto done;
  1097. exited:
  1098. ERTS_BIF_PREP_EXITED(ret, c_p);
  1099. goto done;
  1100. yield:
  1101. if (pi2)
  1102. ERTS_BIF_PREP_YIELD2(ret, bif_export[BIF_process_info_2], c_p, pid, opt);
  1103. else
  1104. ERTS_BIF_PREP_YIELD1(ret, bif_export[BIF_process_info_1], c_p, pid);
  1105. goto done;
  1106. send_signal: {
  1107. Eterm ref = erts_make_ref(c_p);
  1108. int enqueued, need_msgq_len;
  1109. flags |= ERTS_PI_FLAG_REQUEST_FOR_OTHER;
  1110. need_msgq_len = (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
  1111. /*
  1112. * Set receive mark so we wont have to scan the whole
  1113. * message queue for the result. Note caller unconditionally
  1114. * has to enter a receive only matching messages containing
  1115. * 'ref', or restore save pointer.
  1116. */
  1117. ERTS_RECV_MARK_SAVE(c_p);
  1118. ERTS_RECV_MARK_SET(c_p);
  1119. enqueued = erts_proc_sig_send_process_info_request(c_p, pid, item_ix,
  1120. len, need_msgq_len,
  1121. flags, reserve_size,
  1122. ref);
  1123. if (!enqueued) {
  1124. /* Restore save pointer... */
  1125. JOIN_MESSAGE(c_p);
  1126. goto undefined;
  1127. }
  1128. ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, ref);
  1129. goto done;
  1130. }
  1131. }
  1132. static void
  1133. pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix)
  1134. {
  1135. *sz = (ix+1) + ERTS_PI_DEF_ARR_SZ;
  1136. if (*arr != def_arr)
  1137. *arr = erts_realloc(ERTS_ALC_T_TMP, *arr, (*sz)*sizeof(int));
  1138. else {
  1139. int *new_arr = erts_alloc(ERTS_ALC_T_TMP, (*sz)*sizeof(int));
  1140. sys_memcpy((void *) new_arr, (void *) def_arr,
  1141. sizeof(int)*ERTS_PI_DEF_ARR_SZ);
  1142. *arr = new_arr;
  1143. }
  1144. }
  1145. BIF_RETTYPE process_info_2(BIF_ALIST_2)
  1146. {
  1147. return process_info_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !is_atom(BIF_ARG_2), !0);
  1148. }
  1149. BIF_RETTYPE process_info_1(BIF_ALIST_1)
  1150. {
  1151. return process_info_bif(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, 0);
  1152. }
/*
 * Produce the value for one process_info item ('item_ix') about
 * process 'rp', building terms via the heap factory 'hfact' on behalf
 * of the requesting process 'c_p' (c_p == rp for self inspection).
 *
 * 'reserve_sizep' tracks heap still expected to be produced for the
 * remaining items (updated on return); 'reds' accumulates reduction
 * cost. Returns the {Key, Value} tuple (or bare value / NIL for the
 * 'registered_name' special case below), am_undefined when 'priority'
 * is requested for an exiting process, or THE_NON_VALUE for an
 * unknown item index.
 */
Eterm
process_info_aux(Process *c_p,
                 ErtsHeapFactory *hfact,
                 Process *rp,
                 ErtsProcLocks rp_locks,
                 int item_ix,
                 int flags,
                 Uint *reserve_sizep,
                 Uint *reds)
{
    Eterm *hp;
    Eterm res = NIL;
    Uint reserved;
    Uint reserve_size = *reserve_sizep;

#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Lock-checker sanity: most items require the main lock on 'rp';
     * status/priority/suspending tolerate fewer locks. */
    ErtsProcLocks locks = erts_proc_lc_my_proc_locks(rp);

    switch (item_ix) {
    case ERTS_PI_IX_STATUS:
    case ERTS_PI_IX_PRIORITY:
    case ERTS_PI_IX_SUSPENDING:
        ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCK_MAIN) == 0);
        break;
    default:
        ERTS_LC_ASSERT(locks == ERTS_PROC_LOCK_MAIN);
        break;
    }
#endif

    /* Release this item's share of the reservation before producing
     * heap for it. */
    reserved = pi_ix2rsz(item_ix);
    ERTS_PI_UNRESERVE(reserve_size, reserved);

    (*reds)++;

    ASSERT(rp);

    /*
     * Q: Why this ERTS_PI_FLAG_ALWAYS_WRAP flag?
     *
     * A: registered_name is strange. If process has no registered name,
     * process_info(Pid, registered_name) returns [], and
     * the result of process_info(Pid) has no {registered_name, Name}
     * tuple in the resulting list. This is inconsistent with all other
     * options, but we do not dare to change it.
     *
     * When process_info/2 is called with a list as second argument,
     * registered_name behaves as it should, i.e. a
     * {registered_name, []} will appear in the resulting list.
     *
     * If ERTS_PI_FLAG_ALWAYS_WRAP is set, process_info_aux() always
     * wrap the result in a key two tuple.
     */

    switch (item_ix) {

    case ERTS_PI_IX_REGISTERED_NAME:
        if (rp->common.u.alive.reg)
            res = rp->common.u.alive.reg->name;
        else {
            if (flags & ERTS_PI_FLAG_ALWAYS_WRAP)
                res = NIL;
            else
                /* Special case: bare NIL means "omit this item" */
                return NIL;
        }
        break;

    case ERTS_PI_IX_CURRENT_FUNCTION:
        res = current_function(c_p, hfact, rp, 0,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_LOCATION:
        res = current_function(c_p, hfact, rp, 1,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_STACKTRACE:
        res = current_stacktrace(hfact, rp, reserve_size);
        break;

    case ERTS_PI_IX_INITIAL_CALL:
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp,
                     rp->u.initial.module,
                     rp->u.initial.function,
                     make_small(rp->u.initial.arity));
        hp += 4;
        break;

    case ERTS_PI_IX_STATUS: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        res = erts_process_state2status(state);
        if (res == am_running && (state & ERTS_PSFLG_RUNNING_SYS)) {
            ASSERT(c_p == rp);
            ASSERT(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            if (!(state & (ERTS_PSFLG_SYS_TASKS
                           | ERTS_PSFLG_ACTIVE
                           | ERTS_PSFLG_SIG_Q
                           | ERTS_PSFLG_SIG_IN_Q))) {
                /*
                 * We are servicing a process-info request from
                 * another process. If that other process could
                 * have inspected our state itself, we would have
                 * been in the 'waiting' state.
                 */
                res = am_waiting;
            }
        }
        break;
    }

    case ERTS_PI_IX_MESSAGES: {
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        /* Sensitive processes never reveal their message queue */
        if (rp->sig_qs.len == 0 || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE))
            res = NIL;
        else {
            int info_on_self = !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            ErtsMessageInfo *mip;
            Sint i, len;
            Uint heap_need;

            mip = erts_alloc(ERTS_ALC_T_TMP,
                             rp->sig_qs.len*sizeof(ErtsMessageInfo));

            /*
             * Note that message queue may shrink when calling
             * erts_proc_sig_prep_msgq_for_inspection() since it removes
             * corrupt distribution messages.
             */
            heap_need = erts_proc_sig_prep_msgq_for_inspection(c_p, rp,
                                                               rp_locks,
                                                               info_on_self,
                                                               mip);
            len = rp->sig_qs.len;

            heap_need += len*2; /* Cons cells */

            reserve_size += heap_need;

            /* Build list of messages... */
            for (i = len - 1, res = NIL; i >= 0; i--) {
                Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
                Uint sz = mip[i].size;

                ERTS_PI_UNRESERVE(reserve_size, sz+2);
                hp = erts_produce_heap(hfact, sz+2, reserve_size);

                /* sz == 0 means the term is immediate; no copy needed */
                if (sz != 0)
                    msg = copy_struct(msg, sz, &hp, hfact->off_heap);

                res = CONS(hp, msg, res);
                hp += 2;
            }

            *reds += (Uint) len / 4;

            erts_free(ERTS_ALC_T_TMP, mip);
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_LEN: {
        Sint len = rp->sig_qs.len;
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        ASSERT(len >= 0);
        if (len <= MAX_SMALL)
            res = make_small(len);
        else {
            hp = erts_produce_heap(hfact, BIG_UINT_HEAP_SIZE, reserve_size);
            res = uint_to_big((Uint) len, hp);
        }
        break;
    }

    case ERTS_PI_IX_LINKS: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);

        erts_link_tree_foreach(ERTS_P_LINKS(rp), collect_one_link, (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            Eterm item_src = mic.mi[i].entity.term;
            Uint sz = NC_HEAP_SIZE(item_src) + 2;
            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);
            item = STORE_NC(&hp, hfact->off_heap, item_src);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORS: {
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            if (mic.mi[i].named) {
                /* Monitor by name.
                 * Build {process|port, {Name, Node}} and cons it.
                 */
                Eterm t1, t2;
                /* If pid is an atom, then it is a remote named monitor, which
                   has to be a process */
                Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid)
                       || is_atom(mic.mi[i].pid));

                ERTS_PI_UNRESERVE(reserve_size, 3+3+2);
                hp = erts_produce_heap(hfact, 3+3+2, reserve_size);

                t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
                hp += 3;
                t2 = TUPLE2(hp, m_type, t1);
                hp += 3;
                res = CONS(hp, t2, res);
            }
            else {
                /* Build {process|port|time_offset, Pid|clock_service} and cons it. */
                Eterm t;
                Eterm pid;
                Eterm m_type;
                Eterm pid_src = mic.mi[i].entity.term;
                Uint sz = is_atom(pid_src) ? 0 : NC_HEAP_SIZE(pid_src);
                sz += 3 + 2;

                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                pid = (is_atom(pid_src)
                       ? pid_src
                       : STORE_NC(&hp, hfact->off_heap, pid_src));

                switch (mic.mi[i].type) {
                case ERTS_MON_TYPE_PORT:
                    m_type = am_port;
                    break;
                case ERTS_MON_TYPE_TIME_OFFSET:
                    m_type = am_time_offset;
                    break;
                default:
                    m_type = am_process;
                    break;
                }

                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid));

                t = TUPLE2(hp, m_type, pid);
                hp += 3;
                res = CONS(hp, t, res);
            }
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORED_BY: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);
        /* Target monitors live both in the local-target list and in
         * the monitor tree; collect from both. */
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;

        res = NIL;
        for (i = 0; i < mic.mi_i; ++i) {
            Uint sz = 2;

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                sz += erts_resource_ref_size(mic.mi[i].entity.resource);
            else
                sz += NC_HEAP_SIZE(mic.mi[i].entity.term);

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                item = erts_bld_resource_ref(&hp,
                                             hfact->off_heap,
                                             mic.mi[i].entity.resource);
            else
                item = STORE_NC(&hp,
                                hfact->off_heap,
                                mic.mi[i].entity.term);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_SUSPENDING: {
        ErtsSuspendMonitorInfoCollection smic;
        int i;

        ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);

        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_suspend_monitor,
                                  (void *) &smic);

        reserve_size += smic.sz;

        res = NIL;
        for (i = 0; i < smic.smi_i; i++) {
            ErtsMonitorSuspend *msp;
            erts_aint_t mstate;
            Sint ci;
            Eterm ct, active, pending, item;
            Uint sz = 4 + 2;

            msp = smic.smi[i];
            mstate = erts_atomic_read_nob(&msp->state);

            /* Low bits of the monitor state hold the suspend count */
            ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
            if (!IS_SSMALL(ci))
                sz += BIG_UINT_HEAP_SIZE;

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (IS_SSMALL(ci))
                ct = make_small(ci);
            else {
                ct = small_to_big(ci, hp);
                hp += BIG_UINT_HEAP_SIZE;
            }

            /* Count is reported as active when the suspend has taken
             * effect, otherwise as pending */
            if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
                active = ct;
                pending = make_small(0);
            }
            else {
                active = make_small(0);
                pending = ct;
            }

            ASSERT(is_internal_pid(msp->md.origin.other.item));

            item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
            hp += 4;
            res = CONS(hp, item, res);
        }

        *reds += (Uint) smic.smi_i / 4;

        ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);

        break;
    }

    case ERTS_PI_IX_DICTIONARY:
        /* Sensitive processes never reveal their dictionary */
        if (!rp->dictionary || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE)) {
            res = NIL;
        } else {
            Uint num = rp->dictionary->numElements;
            res = erts_dictionary_copy(hfact, rp->dictionary, reserve_size);
            *reds += (Uint) num / 4;
        }

        break;

    case ERTS_PI_IX_TRAP_EXIT:
        res = (rp->flags & F_TRAP_EXIT) ? am_true : am_false;
        break;

    case ERTS_PI_IX_ERROR_HANDLER:
        res = erts_proc_get_error_handler(rp);
        break;

    case ERTS_PI_IX_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_FULLSWEEP_AFTER: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
        break;
    }

    case ERTS_PI_IX_MIN_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MIN_BIN_VHEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MAX_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                      MAX_HEAP_SIZE_FLAGS_GET(rp),
                                      NULL, &hsz);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                     MAX_HEAP_SIZE_FLAGS_GET(rp),
                                     &hp, NULL);
        break;
    }

    case ERTS_PI_IX_TOTAL_HEAP_SIZE: {
        Uint total_heap_size;
        Uint hsz = 0;

        total_heap_size = rp->heap_sz;
        if (rp->old_hend && rp->old_heap)
            total_heap_size += rp->old_hend - rp->old_heap;

        total_heap_size += rp->mbuf_sz;

        /* For on-heap message queues, data still attached to queued
         * messages will end up on the heap and is counted as well. */
        if (rp->flags & F_ON_HEAP_MSGQ) {
            ErtsMessage *mp;
            ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
            for (mp = rp->sig_qs.first; mp; mp = mp->next) {
                ASSERT(ERTS_SIG_IS_MSG(mp));
                if (mp->data.attached)
                    total_heap_size += erts_msg_attached_data_size(mp);
            }
            *reds += (Uint) rp->sig_qs.len / 4;
        }

        (void) erts_bld_uint(NULL, &hsz, total_heap_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, total_heap_size);
        break;
    }

    case ERTS_PI_IX_STACK_SIZE: {
        Uint stack_size = STACK_START(rp) - rp->stop;
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, stack_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, stack_size);
        break;
    }

    case ERTS_PI_IX_MEMORY: { /* Memory consumed in bytes */
        Uint hsz = 0;
        Uint size = erts_process_memory(rp, 0);
        (void) erts_bld_uint(NULL, &hsz, size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, size);

        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        *reds += (Uint) rp->sig_qs.len / 4;

        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION: {
        DECL_AM(minor_gcs);
        Eterm t;
        Uint map_sz = 0;

        erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);

        /* Heap need: five {Key, Value} 2-tuples (3 words each), five
         * cons cells (2 words each), plus the max_heap_size map. */
        hp = erts_produce_heap(hfact, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz, reserve_size);

        t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, NIL); hp += 2;
        t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), &hp, NULL);

        t = TUPLE2(hp, am_max_heap_size, t); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION_INFO: {
        Uint sz = 0, actual_sz = 0;

        erts_process_gc_info(rp, &sz, NULL, 0, 0);

        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);

        break;
    }

    case ERTS_PI_IX_GROUP_LEADER: {
        int sz = NC_HEAP_SIZE(rp->group_leader);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = STORE_NC(&hp, hfact->off_heap, rp->group_leader);
        break;
    }

    case ERTS_PI_IX_REDUCTIONS: {
        /* Include reductions consumed in the current (unfinished)
         * scheduling slot. */
        Uint reds = rp->reds + erts_current_reductions(c_p, rp);
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, reds);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, reds);
        break;
    }

    case ERTS_PI_IX_PRIORITY: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        if (ERTS_PSFLG_EXITING & state)
            return am_undefined;
        res = erts_get_process_priority(state);
        break;
    }

    case ERTS_PI_IX_TRACE:
        res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
        break;

    case ERTS_PI_IX_BINARY: {
        Uint sz = 0;
        (void) bld_bin_list(NULL, &sz, &MSO(rp));
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = bld_bin_list(&hp, NULL, &MSO(rp));
        break;
    }

    case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
        Uint sz = size_object(rp->seq_trace_token);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = copy_struct(rp->seq_trace_token, sz, &hp, hfact->off_heap);
        break;
    }

    case ERTS_PI_IX_CATCHLEVEL:
        res = make_small(catchlevel(rp));
        break;

    case ERTS_PI_IX_BACKTRACE: {
        /* The backtrace is returned as a binary containing the
         * textual stack dump. */
        erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
        erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
        res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
                                           dsbufp->str_len, reserve_size);
        erts_destroy_tmp_dsbuf(dsbufp);
        break;
    }

    case ERTS_PI_IX_LAST_CALLS: {
        struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
        if (!scb) {
            res = am_false;
        } else {
            /*
             * One cons cell and a 3-struct, and a 2-tuple.
             * Might be less than that, if there are sends, receives or timeouts,
             * so we must do a HRelease() to avoid creating holes.
             */
            Sint needed = scb->n*(2+4);
            Eterm term, list;
            int i, j;
            Export *exp;

            reserve_size += needed;

            list = NIL;
            for (i = 0; i < scb->n; i++) {
                Uint sz;
                j = scb->cur - i - 1;
                if (j < 0)
                    j += scb->len;

                sz = 2;
                exp = scb->ct[j];
                /* send/receive/timeout are reported as bare atoms; any
                 * other call is reported as an MFA 3-tuple. */
                if (exp != &exp_send && exp != &exp_receive && exp != &exp_timeout)
                    sz += 4;

                needed -= sz;
                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                if (exp == &exp_send)
                    term = am_send;
                else if (exp == &exp_receive)
                    term = am_receive;
                else if (exp == &exp_timeout)
                    term = am_timeout;
                else {
                    term = TUPLE3(hp,
                                  scb->ct[j]->info.mfa.module,
                                  scb->ct[j]->info.mfa.function,
                                  make_small(scb->ct[j]->info.mfa.arity));
                    hp += 4;
                }
                list = CONS(hp, term, list);
            }

            ASSERT(needed >= 0);
            if (needed > 0)
                reserve_size -= needed;

            res = list;
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_DATA:
        switch (rp->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {
        case F_OFF_HEAP_MSGQ:
            res = am_off_heap;
            break;
        case F_ON_HEAP_MSGQ:
            res = am_on_heap;
            break;
        default:
            res = am_error;
            ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
            break;
        }
        break;

    case ERTS_PI_IX_MAGIC_REF: {
        Uint sz = 0;
        (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
        hp = erts_produce_heap(hfact, sz, 0);
        res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));

        *reds += (Uint) 10;
        break;
    }

    default:
        return THE_NON_VALUE; /* will produce badarg */

    }

    /* Wrap the value in a {Key, Value} 2-tuple */
    ERTS_PI_UNRESERVE(reserve_size, 3);
    *reserve_sizep = reserve_size;
    hp = erts_produce_heap(hfact, 3, reserve_size);

    return TUPLE2(hp, pi_ix2arg(item_ix), res);
}
  1714. #undef MI_INC
/*
 * Build the value for the 'current_function' (full_info == 0) or
 * 'current_location' (full_info != 0) process_info item for 'rp'.
 * As a side effect, rp->current may be (re)populated from the
 * instruction pointer. Returns an MFA 3-tuple, a location item, or
 * am_undefined when no current function can be determined.
 */
static Eterm
current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
                 int full_info, Uint reserve_size, int flags)
{
    Eterm* hp;
    Eterm res;
    FunctionInfo fi;

    if (rp->current == NULL) {
        erts_lookup_function_info(&fi, rp->i, full_info);
        rp->current = fi.mfa;
    } else if (full_info) {
        erts_lookup_function_info(&fi, rp->i, full_info);
        if (fi.mfa == NULL) {
            /* Use the current function without location info */
            erts_set_current_function(&fi, rp->current);
        }
    }

    if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
        FunctionInfo fi2;

        /*
         * The current function is erlang:process_info/{1,2},
         * which is not the answer that the application want.
         * We will use the function pointed into by rp->cp
         * instead if it can be looked up.
         */
        erts_lookup_function_info(&fi2, rp->cp, full_info);
        if (fi2.mfa) {
            fi = fi2;
            rp->current = fi2.mfa;
        }
    }

    /*
     * Return the result.
     */
    if (rp->current == NULL) {
        res = am_undefined;
    } else if (full_info) {
        hp = erts_produce_heap(hfact, fi.needed, reserve_size);
        erts_build_mfa_item(&fi, hp, am_true, &res);
    } else {
        /* Plain {Module, Function, Arity} tuple without location */
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp, rp->current->module,
                     rp->current->function,
                     make_small(rp->current->arity));
    }
    return res;
}
/*
 * Build the 'current_stacktrace' process_info value for 'rp': a list
 * of MFA-with-location items, newest frame first, limited to
 * erts_backtrace_depth frames. Temporary buffers are allocated with
 * ERTS_ALC_T_TMP and freed before return.
 */
static Eterm
current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
                   Uint reserve_size)
{
    Uint sz;
    struct StackTrace* s;
    int depth;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Uint heap_size;
    int i;
    Eterm* hp;
    Eterm mfa;
    Eterm res = NIL;

    depth = erts_backtrace_depth;
    /* StackTrace has a trailing variable-length 'trace' array */
    sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
    s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
    s->depth = 0;

    /* Seed the trace with the current instruction pointer and the
     * continuation pointer before walking the stack proper. */
    if (depth > 0 && rp->i) {
        s->trace[s->depth++] = rp->i;
        depth--;
    }
    if (depth > 0 && rp->cp != 0) {
        /* -1 so the lookup lands inside the calling instruction */
        s->trace[s->depth++] = rp->cp - 1;
        depth--;
    }
    erts_save_stacktrace(rp, s, depth);

    depth = s->depth;
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
                                             depth*sizeof(FunctionInfo));
    heap_size = 3;
    for (i = 0; i < depth; i++) {
        erts_lookup_function_info(stkp, s->trace[i], 1);
        /* Frames that cannot be resolved to an MFA are skipped */
        if (stkp->mfa) {
            heap_size += stkp->needed + 2;
            stkp++;
        }
    }

    reserve_size += heap_size;

    /*
     * We intentionally produce heap in small chunks
     * (for more info see process_info_aux()).
     */
    while (stkp > stk) {
        stkp--;
        sz = stkp->needed + 2;
        ERTS_PI_UNRESERVE(reserve_size, sz);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
        res = CONS(hp, mfa, res);
    }

    erts_free(ERTS_ALC_T_TMP, stk);
    erts_free(ERTS_ALC_T_TMP, s);
    return res;
}
  1817. /*
  1818. * This function takes care of calls to erlang:system_info/1 when the argument
  1819. * is a tuple.
  1820. */
  1821. static BIF_RETTYPE
  1822. info_1_tuple(Process* BIF_P, /* Pointer to current process. */
  1823. Eterm* tp, /* Pointer to first element in tuple */
  1824. int arity) /* Arity of tuple (untagged). */
  1825. {
  1826. Eterm ret;
  1827. Eterm sel;
  1828. sel = *tp++;
  1829. if (sel == am_memory_internal) {
  1830. switch (arity) {
  1831. case 3:
  1832. if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 1))
  1833. return am_true;
  1834. default:
  1835. goto badarg;
  1836. }
  1837. }
  1838. else if (sel == am_allocator_sizes) {
  1839. switch (arity) {
  1840. case 2:
  1841. ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
  1842. return ret;
  1843. case 3:
  1844. if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 0))
  1845. return am_true;
  1846. default:
  1847. goto badarg;
  1848. }
  1849. }
  1850. else if (sel == am_wordsize && arity == 2) {
  1851. if (tp[0] == am_internal) {
  1852. return make_small(sizeof(Eterm));
  1853. }
  1854. if (tp[0] == am_external) {
  1855. return make_small(sizeof(UWord));
  1856. }
  1857. goto badarg;
  1858. } else if (sel == am_allocator) {
  1859. switch (arity) {
  1860. case 2:
  1861. ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
  1862. return ret;
  1863. case 3:
  1864. if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0, 0))
  1865. return am_true;
  1866. default:
  1867. goto badarg;
  1868. }
  1869. } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
  1870. return erts_get_cpu_topology_term(BIF_P, *tp);
  1871. } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
  1872. Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
  1873. if (res == THE_NON_VALUE)
  1874. goto badarg;
  1875. ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
  1876. return ret;
  1877. #if defined(PURIFY) || defined(VALGRIND)
  1878. } else if (ERTS_IS_ATOM_STR("error_checker", sel)
  1879. #if defined(PURIFY)
  1880. || sel == am_purify
  1881. #elif defined(VALGRIND)
  1882. || ERTS_IS_ATOM_STR("valgrind", sel)
  1883. #endif
  1884. ) {
  1885. if (*tp == am_memory) {
  1886. #if defined(PURIFY)
  1887. BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
  1888. #elif defined(VALGRIND)
  1889. # ifdef VALGRIND_DO_ADDED_LEAK_CHECK
  1890. VALGRIND_DO_ADDED_LEAK_CHECK;
  1891. # else
  1892. VALGRIND_DO_LEAK_CHECK;
  1893. # endif
  1894. BIF_RET(make_small(0));
  1895. #endif
  1896. } else if (*tp == am_fd) {
  1897. #if defined(PURIFY)
  1898. BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
  1899. #elif defined(VALGRIND)
  1900. /* Not present in valgrind... */
  1901. BIF_RET(make_small(0));
  1902. #endif
  1903. } else if (*tp == am_running) {
  1904. #if defined(PURIFY)
  1905. BIF_RET(purify_is_running() ? am_true : am_false);
  1906. #elif defined(VALGRIND)
  1907. BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
  1908. #endif
  1909. } else if (is_list(*tp)) {
  1910. #if defined(PURIFY)
  1911. # define ERTS_ERROR_CHECKER_PRINTF purify_printf
  1912. #elif defined(VALGRIND)
  1913. # define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
  1914. #endif
  1915. ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */
  1916. char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
  1917. ErlDrvSizeT r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
  1918. if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
  1919. erts_free(ERTS_ALC_T_TMP, (void *) buf);
  1920. if (erts_iolist_size(*tp, &buf_size)) {
  1921. goto badarg;
  1922. }
  1923. buf_size++;
  1924. buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
  1925. r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
  1926. ASSERT(r == buf_size - 1);
  1927. }
  1928. buf[buf_size - 1 - r] = '\0';
  1929. ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
  1930. erts_free(ERTS_ALC_T_TMP, (void *) buf);
  1931. BIF_RET(am_true);
  1932. #undef ERTS_ERROR_CHECKER_PRINTF
  1933. }
  1934. #endif
  1935. #ifdef QUANTIFY
  1936. } else if (sel == am_quantify) {
  1937. if (*tp == am_clear) {
  1938. quantify_clear_data();
  1939. BIF_RET(am_true);
  1940. } else if (*tp == am_start) {
  1941. quantify_start_recording_data();
  1942. BIF_RET(am_true);
  1943. } else if (*tp == am_stop) {
  1944. quantify_stop_recording_data();
  1945. BIF_RET(am_true);
  1946. } else if (*tp == am_running) {
  1947. BIF_RET(quantify_is_running() ? am_true : am_false);
  1948. }
  1949. #endif
  1950. #if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
  1951. } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
  1952. unsigned long long tmp;
  1953. int fd;
  1954. int rc;
  1955. if (arity != 2 || !is_small(*tp)) {
  1956. goto badarg;
  1957. }
  1958. tmp = signed_val(*tp);
  1959. if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
  1960. BIF_RET(am_false);
  1961. }
  1962. rc = ioctl(fd, PERFMON_SETPCR, &tmp);
  1963. close(fd);
  1964. if (rc < 0) {
  1965. BIF_RET(am_false);
  1966. }
  1967. BIF_RET(am_true);
  1968. #endif
  1969. }
  1970. badarg:
  1971. ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
  1972. return ret;
  1973. }
  1974. #define INFO_DSBUF_INC_SZ 256
  1975. static erts_dsprintf_buf_t *
  1976. grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
  1977. {
  1978. size_t size;
  1979. size_t free_size = dsbufp->size - dsbufp->str_len;
  1980. ASSERT(dsbufp);
  1981. if (need <= free_size)
  1982. return dsbufp;
  1983. size = need - free_size + INFO_DSBUF_INC_SZ;
  1984. size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
  1985. size += dsbufp->size;
  1986. ASSERT(dsbufp->str_len + need <= size);
  1987. dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
  1988. (void *) dsbufp->str,
  1989. size);
  1990. dsbufp->size = size;
  1991. return dsbufp;
  1992. }
  1993. static erts_dsprintf_buf_t *
  1994. erts_create_info_dsbuf(Uint size)
  1995. {
  1996. Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
  1997. erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
  1998. erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
  1999. sizeof(erts_dsprintf_buf_t));
  2000. sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
  2001. dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
  2002. dsbufp->str[0] = '\0';
  2003. dsbufp->size = init_size;
  2004. return dsbufp;
  2005. }
  2006. static void
  2007. erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
  2008. {
  2009. if (dsbufp->str)
  2010. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
  2011. erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
  2012. }
/*
 * Build the term returned by erlang:system_info(c_compiler_used):
 * a 2-tuple {CompilerAtom, Version}.
 *
 * Standard two-pass term builder: called first with szp != NULL to
 * accumulate the required heap size, then with hpp != NULL to build
 * the term on the allocated heap.
 *
 * For gcc the version is a plain integer when only __GNUC__ is defined,
 * or a 2/3-tuple of integers when __GNUC_MINOR__ (and
 * __GNUC_PATCHLEVEL__) are also defined. For Microsoft C the version
 * is _MSC_VER. Any other compiler yields {undefined, undefined}.
 */
static Eterm
c_compiler_used(Eterm **hpp, Uint *szp)
{
#if defined(__GNUC__)
#  if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#    define ERTS_GNUC_VSN_NUMS 3
#  elif defined(__GNUC_MINOR__)
#    define ERTS_GNUC_VSN_NUMS 2
#  else
#    define ERTS_GNUC_VSN_NUMS 1
#  endif
    /* NOTE: the preprocessor conditionals below select pieces of the
     * argument list of a single erts_bld_tuple() call; the bracketing
     * looks unbalanced in the raw text but expands to well-formed code
     * for every value of ERTS_GNUC_VSN_NUMS. */
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "gnuc"),
#if ERTS_GNUC_VSN_NUMS > 1
			  erts_bld_tuple(hpp,
					 szp,
					 ERTS_GNUC_VSN_NUMS,
#endif
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC__)
#ifdef __GNUC_MINOR__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_MINOR__)
#ifdef __GNUC_PATCHLEVEL__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_PATCHLEVEL__)
#endif
#endif
#if ERTS_GNUC_VSN_NUMS > 1
					 )
#endif
	);
#elif defined(_MSC_VER)
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "msc"),
			  erts_bld_uint(hpp, szp, (Uint) _MSC_VER));
#else
    /* Unknown compiler: report {undefined, undefined}. */
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  am_undefined,
			  am_undefined);
#endif
}
  2063. static int is_snif_term(Eterm module_atom) {
  2064. int i;
  2065. Atom *a = atom_tab(atom_val(module_atom));
  2066. char *aname = (char *) a->name;
  2067. /* if a->name has a '.' then the bif (snif) is bogus i.e a package */
  2068. for (i = 0; i < a->len; i++) {
  2069. if (aname[i] == '.')
  2070. return 0;
  2071. }
  2072. return 1;
  2073. }
  2074. static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
  2075. Eterm tup;
  2076. tup = erts_bld_tuple(hpp, szp, 3, bif_table[ix].module, bif_table[ix].name, make_small(bif_table[ix].arity));
  2077. res = erts_bld_cons( hpp, szp, tup, res);
  2078. return res;
  2079. }
  2080. static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
  2081. int i;
  2082. for (i = 0; i < BIF_SIZE; i++) {
  2083. if (is_snif_term(bif_table[i].module)) {
  2084. res = build_snif_term(hpp, szp, i, res);
  2085. }
  2086. }
  2087. return res;
  2088. }
  2089. BIF_RETTYPE system_info_1(BIF_ALIST_1)
  2090. {
  2091. Eterm res;
  2092. Eterm* hp;
  2093. Eterm val;
  2094. int i;
  2095. if (is_tuple(BIF_ARG_1)) {
  2096. Eterm* tp = tuple_val(BIF_ARG_1);
  2097. Uint arity = *tp++;
  2098. return info_1_tuple(BIF_P, tp, arityval(arity));
  2099. } else if (BIF_ARG_1 == am_scheduler_id) {
  2100. ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P);
  2101. BIF_RET(make_small(esdp->no));
  2102. } else if (BIF_ARG_1 == am_compat_rel) {
  2103. ASSERT(erts_compat_rel > 0);
  2104. BIF_RET(make_small(erts_compat_rel));
  2105. } else if (BIF_ARG_1 == am_multi_scheduling) {
  2106. {
  2107. int msb = erts_is_multi_scheduling_blocked();
  2108. BIF_RET(!msb
  2109. ? am_enabled
  2110. : (msb > 0
  2111. ? am_blocked
  2112. : am_blocked_normal));
  2113. }
  2114. } else if (BIF_ARG_1 == am_build_type) {
  2115. #if defined(DEBUG)
  2116. ERTS_DECL_AM(debug);
  2117. BIF_RET(AM_debug);
  2118. #elif defined(PURIFY)
  2119. ERTS_DECL_AM(purify);
  2120. BIF_RET(AM_purify);
  2121. #elif defined(QUANTIFY)
  2122. ERTS_DECL_AM(quantify);
  2123. BIF_RET(AM_quantify);
  2124. #elif defined(PURECOV)
  2125. ERTS_DECL_AM(purecov);
  2126. BIF_RET(AM_purecov);
  2127. #elif defined(ERTS_GCOV)
  2128. ERTS_DECL_AM(gcov);
  2129. BIF_RET(AM_gcov);
  2130. #elif defined(VALGRIND)
  2131. ERTS_DECL_AM(valgrind);
  2132. BIF_RET(AM_valgrind);
  2133. #elif defined(GPROF)
  2134. ERTS_DECL_AM(gprof);
  2135. BIF_RET(AM_gprof);
  2136. #elif defined(ERTS_ENABLE_LOCK_COUNT)
  2137. ERTS_DECL_AM(lcnt);
  2138. BIF_RET(AM_lcnt);
  2139. #elif defined(ERTS_FRMPTR)
  2140. ERTS_DECL_AM(frmptr);
  2141. BIF_RET(AM_frmptr);
  2142. #else
  2143. BIF_RET(am_opt);
  2144. #endif
  2145. BIF_RET(res);
  2146. } else if (BIF_ARG_1 == am_time_offset) {
  2147. switch (erts_time_offset_state()) {
  2148. case ERTS_TIME_OFFSET_PRELIMINARY: {
  2149. ERTS_DECL_AM(preliminary);
  2150. BIF_RET(AM_preliminary);
  2151. }
  2152. case ERTS_TIME_OFFSET_FINAL: {
  2153. ERTS_DECL_AM(final);
  2154. BIF_RET(AM_final);
  2155. }
  2156. case ERTS_TIME_OFFSET_VOLATILE: {
  2157. ERTS_DECL_AM(volatile);
  2158. BIF_RET(AM_volatile);
  2159. }
  2160. default:
  2161. ERTS_INTERNAL_ERROR("Invalid time offset state");
  2162. }
  2163. } else if (ERTS_IS_ATOM_STR("os_monotonic_time_source", BIF_ARG_1)) {
  2164. BIF_RET(erts_monotonic_time_source(BIF_P));
  2165. } else if (ERTS_IS_ATOM_STR("os_system_time_source", BIF_ARG_1)) {
  2166. BIF_RET(erts_system_time_source(BIF_P));
  2167. } else if (ERTS_IS_ATOM_STR("time_correction", BIF_ARG_1)) {
  2168. BIF_RET(erts_has_time_correction() ? am_true : am_false);
  2169. } else if (ERTS_IS_ATOM_STR("start_time", BIF_ARG_1)) {
  2170. BIF_RET(erts_get_monotonic_start_time(BIF_P));
  2171. } else if (ERTS_IS_ATOM_STR("end_time", BIF_ARG_1)) {
  2172. BIF_RET(erts_get_monotonic_end_time(BIF_P));
  2173. } else if (ERTS_IS_ATOM_STR("time_warp_mode", BIF_ARG_1)) {
  2174. switch (erts_time_warp_mode()) {
  2175. case ERTS_NO_TIME_WARP_MODE: {
  2176. ERTS_DECL_AM(no_time_warp);
  2177. BIF_RET(AM_no_time_warp);
  2178. }
  2179. case ERTS_SINGLE_TIME_WARP_MODE: {
  2180. ERTS_DECL_AM(single_time_warp);
  2181. BIF_RET(AM_single_time_warp);
  2182. }
  2183. case ERTS_MULTI_TIME_WARP_MODE: {
  2184. ERTS_DECL_AM(multi_time_warp);
  2185. BIF_RET(AM_multi_time_warp);
  2186. }
  2187. default:
  2188. ERTS_INTERNAL_ERROR("Invalid time warp mode");
  2189. }
  2190. } else if (BIF_ARG_1 == am_allocated_areas) {
  2191. res = erts_allocated_areas(NULL, NULL, BIF_P);
  2192. BIF_RET(res);
  2193. } else if (BIF_ARG_1 == am_hipe_architecture) {
  2194. #if defined(HIPE)
  2195. BIF_RET(hipe_arch_name);
  2196. #else
  2197. BIF_RET(am_undefined);
  2198. #endif
  2199. } else if (BIF_ARG_1 == am_trace_control_word) {
  2200. BIF_RET(db_get_trace_control_word(BIF_P));
  2201. } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
  2202. BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
  2203. } else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
  2204. BIF_RET((erts_ets_always_compress) ? am_true : am_false);
  2205. } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
  2206. Uint size = 0;
  2207. Uint *szp;
  2208. szp = &size;
  2209. build_snifs_term(NULL, szp, NIL);
  2210. hp = HAlloc(BIF_P, size);
  2211. res = build_snifs_term(&hp, NULL, NIL);
  2212. BIF_RET(res);
  2213. } else if (BIF_ARG_1 == am_sequential_tracer) {
  2214. ErtsTracer seq_tracer = erts_get_system_seq_tracer();
  2215. val = erts_tracer_to_term(BIF_P, seq_tracer);
  2216. hp = HAlloc(BIF_P, 3);
  2217. res = TUPLE2(hp, am_sequential_tracer, val);
  2218. BIF_RET(res);
  2219. } else if (BIF_ARG_1 == am_garbage_collection){
  2220. Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
  2221. Eterm tup;
  2222. hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);
  2223. tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
  2224. res = CONS(hp, tup, NIL); hp += 2;
  2225. tup = TUPLE2(hp, am_min_heap_size, make_small(H_MIN_SIZE)); hp += 3;
  2226. res = CONS(hp, tup, res); hp += 2;
  2227. tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
  2228. res = CONS(hp, tup, res); hp += 2;
  2229. tup = TUPLE2(hp, am_max_heap_size, make_small(H_MAX_SIZE)); hp += 3;
  2230. res = CONS(hp, tup, res); hp += 2;
  2231. BIF_RET(res);
  2232. } else if (BIF_ARG_1 == am_fullsweep_after){
  2233. Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
  2234. hp = HAlloc(BIF_P, 3);
  2235. res = TUPLE2(hp, am_fullsweep_after, make_small(val));
  2236. BIF_RET(res);
  2237. } else if (BIF_ARG_1 == am_min_heap_size) {
  2238. hp = HAlloc(BIF_P, 3);
  2239. res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
  2240. BIF_RET(res);
  2241. } else if (BIF_ARG_1 == am_max_heap_size) {
  2242. Uint sz = 0;
  2243. erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
  2244. hp = HAlloc(BIF_P, sz);
  2245. res = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
  2246. BIF_RET(res);
  2247. } else if (BIF_ARG_1 == am_min_bin_vheap_size) {
  2248. hp = HAlloc(BIF_P, 3);
  2249. res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
  2250. BIF_RET(res);
  2251. } else if (BIF_ARG_1 == am_process_count) {
  2252. BIF_RET(make_small(erts_ptab_count(&erts_proc)));
  2253. } else if (BIF_ARG_1 == am_process_limit) {
  2254. BIF_RET(make_small(erts_ptab_max(&erts_proc)));
  2255. } else if (BIF_ARG_1 == am_port_count) {
  2256. BIF_RET(make_small(erts_ptab_count(&erts_port)));
  2257. } else if (BIF_ARG_1 == am_port_limit) {
  2258. BIF_RET(make_small(erts_ptab_max(&erts_port)));
  2259. } else if (BIF_ARG_1 == am_info
  2260. || BIF_ARG_1 == am_procs
  2261. || BIF_ARG_1 == am_loaded
  2262. || BIF_ARG_1 == am_dist) {
  2263. erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
  2264. /* Need to be the only thread running... */
  2265. erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  2266. BIF_P->scheduler_data->current_process = NULL;
  2267. erts_thr_progress_block();
  2268. if (BIF_ARG_1 == am_info)
  2269. info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  2270. else if (BIF_ARG_1 == am_procs)
  2271. process_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  2272. else if (BIF_ARG_1 == am_loaded)
  2273. loaded(ERTS_PRINT_DSBUF, (void *) dsbufp);
  2274. else
  2275. distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
  2276. erts_thr_progress_unblock();
  2277. erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  2278. BIF_P->scheduler_data->current_process = BIF_P;
  2279. ASSERT(dsbufp && dsbufp->str);
  2280. res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
  2281. erts_destroy_info_dsbuf(dsbufp);
  2282. BIF_RET(res);
  2283. } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
  2284. DistEntry *dep;
  2285. i = 0;
  2286. erts_rwmtx_rlock(&erts_dist_table_rwmtx);
  2287. for (dep = erts_visible_dist_entries; dep; dep = dep->next)
  2288. ++i;
  2289. for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
  2290. ++i;
  2291. hp = HAlloc(BIF_P,i*(3+2));
  2292. res = NIL;
  2293. for (dep = erts_hidden_dist_entries; dep; dep = dep->next) {
  2294. Eterm tpl;
  2295. ASSERT(is_immed(dep->cid));
  2296. tpl = TUPLE2(hp, dep->sysname, dep->cid);
  2297. hp +=3;
  2298. res = CONS(hp, tpl, res);
  2299. hp += 2;
  2300. }
  2301. for (dep = erts_visible_dist_entries; dep; dep = dep->next) {
  2302. Eterm tpl;
  2303. ASSERT(is_immed(dep->cid));
  2304. tpl = TUPLE2(hp, dep->sysname, dep->cid);
  2305. hp +=3;
  2306. res = CONS(hp, tpl, res);
  2307. hp += 2;
  2308. }
  2309. erts_rwmtx_runlock(&erts_dist_table_rwmtx);
  2310. BIF_RET(res);
  2311. } else if (BIF_ARG_1 == am_system_version) {
  2312. erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
  2313. erts_print_system_version(ERTS_PRINT_DSBUF, (void *) dsbufp, BIF_P);
  2314. hp = HAlloc(BIF_P, dsbufp->str_len*2);
  2315. res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
  2316. erts_destroy_tmp_dsbuf(dsbufp);
  2317. BIF_RET(res);
  2318. } else if (BIF_ARG_1 == am_system_architecture) {
  2319. hp = HAlloc(BIF_P, 2*(sizeof(ERLANG_ARCHITECTURE)-1));
  2320. BIF_RET(buf_to_intlist(&hp,
  2321. ERLANG_ARCHITECTURE,
  2322. sizeof(ERLANG_ARCHITECTURE)-1,
  2323. NIL));
  2324. }
  2325. else if (BIF_ARG_1 == am_os_type) {
  2326. BIF_RET(os_type_tuple);
  2327. }
  2328. else if (BIF_ARG_1 == am_allocator) {
  2329. BIF_RET(erts_allocator_options((void *) BIF_P));
  2330. }
  2331. else if (BIF_ARG_1 == am_thread_pool_size) {
  2332. extern int erts_async_max_threads;
  2333. int n;
  2334. n = erts_async_max_threads;
  2335. BIF_RET(make_small(n));
  2336. }
  2337. else if (BIF_ARG_1 == am_alloc_util_allocators) {
  2338. BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
  2339. }
  2340. else if (BIF_ARG_1 == am_elib_malloc) {
  2341. /* To be removed in R15 */
  2342. BIF_RET(am_false);
  2343. }
  2344. else if (BIF_ARG_1 == am_os_version) {
  2345. BIF_RET(os_version_tuple);
  2346. }
  2347. else if (BIF_ARG_1 == am_version) {
  2348. int n = sys_strlen(ERLANG_VERSION);
  2349. hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
  2350. BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
  2351. }
  2352. else if (BIF_ARG_1 == am_machine) {
  2353. int n = sys_strlen(EMULATOR);
  2354. hp = HAlloc(BIF_P, n*2);
  2355. BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
  2356. }
  2357. else if (BIF_ARG_1 == am_garbage_collection) {
  2358. BIF_RET(am_generational);
  2359. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  2360. } else if (BIF_ARG_1 == am_instruction_counts) {
  2361. #ifdef DEBUG
  2362. Eterm *endp;
  2363. #endif
  2364. Eterm *hp, **hpp;
  2365. Uint hsz, *hszp;
  2366. int i;
  2367. hpp = NULL;
  2368. hsz = 0;
  2369. hszp = &hsz;
  2370. bld_instruction_counts:
  2371. res = NIL;
  2372. for (i = num_instructions-1; i >= 0; i--) {
  2373. res = erts_bld_cons(hpp, hszp,
  2374. erts_bld_tuple(hpp, hszp, 2,
  2375. erts_atom_put((byte *)opc[i].name,
  2376. sys_strlen(opc[i].name),
  2377. ERTS_ATOM_ENC_LATIN1,
  2378. 1),
  2379. erts_bld_uint(hpp, hszp,
  2380. erts_instr_count[i])),
  2381. res);
  2382. }
  2383. if (!hpp) {
  2384. hp = HAlloc(BIF_P, hsz);
  2385. hpp = &hp;
  2386. #ifdef DEBUG
  2387. endp = hp + hsz;
  2388. #endif
  2389. hszp = NULL;
  2390. goto bld_instruction_counts;
  2391. }
  2392. ASSERT(endp == hp);
  2393. BIF_RET(res);
  2394. #endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
  2395. } else if (BIF_ARG_1 == am_wordsize) {
  2396. return make_small(sizeof(Eterm));
  2397. } else if (BIF_ARG_1 == am_endian) {
  2398. #if defined(WORDS_BIGENDIAN)
  2399. return am_big;
  2400. #else
  2401. return am_little;
  2402. #endif
  2403. } else if (BIF_ARG_1 == am_heap_sizes) {
  2404. return erts_heap_sizes(BIF_P);
  2405. } else if (BIF_ARG_1 == am_heap_type) {
  2406. return am_private;
  2407. } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
  2408. res = erts_get_cpu_topology_term(BIF_P, am_used);
  2409. BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
  2410. } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
  2411. if (erts_update_cpu_info()) {
  2412. ERTS_DECL_AM(changed);
  2413. BIF_RET(AM_changed);
  2414. }
  2415. else {
  2416. ERTS_DECL_AM(unchanged);
  2417. BIF_RET(AM_unchanged);
  2418. }
  2419. #if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
  2420. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
  2421. register unsigned high asm("%l0");
  2422. register unsigned low asm("%l1");
  2423. hp = HAlloc(BIF_P, 5);
  2424. asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
  2425. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2426. : "=r" (high), "=r" (low));
  2427. res = TUPLE4(hp, make_small(high >> 16),
  2428. make_small(high & 0xFFFF),
  2429. make_small(low >> 16),
  2430. make_small(low & 0xFFFF));
  2431. BIF_RET(res);
  2432. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick2", BIF_ARG_1)) {
  2433. register unsigned high asm("%l0");
  2434. register unsigned low asm("%l1");
  2435. asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
  2436. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2437. : "=r" (high), "=r" (low));
  2438. hp = HAlloc(BIF_P, 5);
  2439. res = TUPLE4(hp, make_small(high >> 16),
  2440. make_small(high & 0xFFFF),
  2441. make_small(low >> 16),
  2442. make_small(low & 0xFFFF));
  2443. BIF_RET(res);
  2444. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic1", BIF_ARG_1)) {
  2445. register unsigned high asm("%l0");
  2446. register unsigned low asm("%l1");
  2447. hp = HAlloc(BIF_P, 5);
  2448. asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
  2449. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2450. : "=r" (high), "=r" (low));
  2451. res = TUPLE4(hp, make_small(high >> 16),
  2452. make_small(high & 0xFFFF),
  2453. make_small(low >> 16),
  2454. make_small(low & 0xFFFF));
  2455. BIF_RET(res);
  2456. } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic2", BIF_ARG_1)) {
  2457. register unsigned high asm("%l0");
  2458. register unsigned low asm("%l1");
  2459. asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
  2460. ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
  2461. : "=r" (high), "=r" (low));
  2462. hp = HAlloc(BIF_P, 5);
  2463. res = TUPLE4(hp, make_small(high >> 16),
  2464. make_small(high & 0xFFFF),
  2465. make_small(low >> 16),
  2466. make_small(low & 0xFFFF));
  2467. BIF_RET(res);
  2468. #endif
  2469. } else if (BIF_ARG_1 == am_threads) {
  2470. return am_true;
  2471. } else if (BIF_ARG_1 == am_creation) {
  2472. return make_small(erts_this_node->creation);
  2473. } else if (BIF_ARG_1 == am_break_ignored) {
  2474. extern int ignore_break;
  2475. if (ignore_break)
  2476. return am_true;
  2477. else
  2478. return am_false;
  2479. }
  2480. /* Arguments that are unusual follow ... */
  2481. else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
  2482. int no;
  2483. erts_get_logical_processors(&no, NULL, NULL);
  2484. if (no > 0)
  2485. BIF_RET(make_small((Uint) no));
  2486. else {
  2487. DECL_AM(unknown);
  2488. BIF_RET(AM_unknown);
  2489. }
  2490. }
  2491. else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
  2492. int no;
  2493. erts_get_logical_processors(NULL, &no, NULL);
  2494. if (no > 0)
  2495. BIF_RET(make_small((Uint) no));
  2496. else {
  2497. DECL_AM(unknown);
  2498. BIF_RET(AM_unknown);
  2499. }
  2500. }
  2501. else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
  2502. int no;
  2503. erts_get_logical_processors(NULL, NULL, &no);
  2504. if (no > 0)
  2505. BIF_RET(make_small((Uint) no));
  2506. else {
  2507. DECL_AM(unknown);
  2508. BIF_RET(AM_unknown);
  2509. }
  2510. } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
  2511. int n = sizeof(ERLANG_OTP_RELEASE)-1;
  2512. hp = HAlloc(BIF_P, 2*n);
  2513. BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_RELEASE, n, NIL));
  2514. } else if (ERTS_IS_ATOM_STR("driver_version", BIF_ARG_1)) {
  2515. char buf[42];
  2516. int n = erts_snprintf(buf, 42, "%d.%d",
  2517. ERL_DRV_EXTENDED_MAJOR_VERSION,
  2518. ERL_DRV_EXTENDED_MINOR_VERSION);
  2519. hp = HAlloc(BIF_P, 2*n);
  2520. BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
  2521. } else if (ERTS_IS_ATOM_STR("nif_version", BIF_ARG_1)) {
  2522. char buf[42];
  2523. int n = erts_snprintf(buf, 42, "%d.%d",
  2524. ERL_NIF_MAJOR_VERSION,
  2525. ERL_NIF_MINOR_VERSION);
  2526. hp = HAlloc(BIF_P, 2*n);
  2527. BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
  2528. } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
  2529. BIF_RET(am_true);
  2530. } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
  2531. BIF_RET(erts_bound_schedulers_term(BIF_P));
  2532. } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
  2533. BIF_RET(erts_get_schedulers_binds(BIF_P));
  2534. } else if (ERTS_IS_ATOM_STR("constant_pool_support", BIF_ARG_1)) {
  2535. BIF_RET(am_true);
  2536. } else if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)
  2537. || ERTS_IS_ATOM_STR("schedulers_total", BIF_ARG_1)) {
  2538. res = make_small(erts_no_schedulers);
  2539. BIF_RET(res);
  2540. } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
  2541. Eterm *hp;
  2542. Uint total, online, active;
  2543. erts_schedulers_state(&total, &online, &active,
  2544. NULL, NULL, NULL, NULL, NULL);
  2545. hp = HAlloc(BIF_P, 4);
  2546. res = TUPLE3(hp,
  2547. make_small(total),
  2548. make_small(online),
  2549. make_small(active));
  2550. BIF_RET(res);
  2551. } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
  2552. Eterm *hp;
  2553. Uint total, online, active;
  2554. erts_schedulers_state(&total, &online, &active,
  2555. NULL, NULL, NULL, NULL, NULL);
  2556. hp = HAlloc(BIF_P, 4);
  2557. res = TUPLE3(hp,
  2558. make_small(total),
  2559. make_small(online),
  2560. make_small(active));
  2561. BIF_RET(res);
  2562. } else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
  2563. Eterm *hp, tpl;
  2564. Uint sz, total, online, active,
  2565. dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
  2566. dirty_io_total, dirty_io_active;
  2567. erts_schedulers_state(&total, &online, &active,
  2568. &dirty_cpu_total, &dirty_cpu_online, &dirty_cpu_active,
  2569. &dirty_io_total, &dirty_io_active);
  2570. sz = 2+5;
  2571. if (dirty_cpu_total)
  2572. sz += 2+5;
  2573. if (dirty_io_total)
  2574. sz += 2+5;
  2575. hp = HAlloc(BIF_P, sz);
  2576. res = NIL;
  2577. if (dirty_io_total) {
  2578. tpl = TUPLE4(hp,
  2579. am_dirty_io,
  2580. make_small(dirty_io_total),
  2581. make_small(dirty_io_total),
  2582. make_small(dirty_io_active));
  2583. hp += 5;
  2584. res = CONS(hp, tpl, res);
  2585. hp += 2;
  2586. }
  2587. if (dirty_cpu_total) {
  2588. tpl = TUPLE4(hp,
  2589. am_dirty_cpu,
  2590. make_small(dirty_cpu_total),
  2591. make_small(dirty_cpu_online),
  2592. make_small(dirty_cpu_active));
  2593. hp += 5;
  2594. res = CONS(hp, tpl, res);
  2595. hp += 2;
  2596. }
  2597. tpl = TUPLE4(hp,
  2598. am_normal,
  2599. make_small(total),
  2600. make_small(online),
  2601. make_small(active));
  2602. hp += 5;
  2603. res = CONS(hp, tpl, res);
  2604. BIF_RET(res);
  2605. } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
  2606. Uint online;
  2607. erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
  2608. BIF_RET(make_small(online));
  2609. } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
  2610. Uint active;
  2611. erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
  2612. BIF_RET(make_small(active));
  2613. } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
  2614. Uint dirty_cpu;
  2615. erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
  2616. BIF_RET(make_small(dirty_cpu));
  2617. } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
  2618. Uint dirty_cpu_onln;
  2619. erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
  2620. BIF_RET(make_small(dirty_cpu_onln));
  2621. } else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
  2622. Uint dirty_io;
  2623. erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
  2624. BIF_RET(make_small(dirty_io));
  2625. } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
  2626. res = make_small(erts_no_run_queues);
  2627. BIF_RET(res);
  2628. } else if (ERTS_IS_ATOM_STR("port_parallelism", BIF_ARG_1)) {
  2629. res = erts_port_parallelism ? am_true : am_false;
  2630. BIF_RET(res);
  2631. } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
  2632. Eterm *hp = NULL;
  2633. Uint sz = 0;
  2634. (void) c_compiler_used(NULL, &sz);
  2635. if (sz)
  2636. hp = HAlloc(BIF_P, sz);
  2637. BIF_RET(c_compiler_used(&hp, NULL));
  2638. } else if (ERTS_IS_ATOM_STR("stop_memory_trace", BIF_ARG_1)) {
  2639. erts_mtrace_stop();
  2640. BIF_RET(am_true);
  2641. } else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
  2642. BIF_RET(make_small(CONTEXT_REDS));
  2643. } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
  2644. #if ERTS_ENABLE_KERNEL_POLL
  2645. BIF_RET(am_true);
  2646. #else
  2647. BIF_RET(am_false);
  2648. #endif
  2649. } else if (ERTS_IS_ATOM_STR("lock_checking", BIF_ARG_1)) {
  2650. #ifdef ERTS_ENABLE_LOCK_CHECK
  2651. BIF_RET(am_true);
  2652. #else
  2653. BIF_RET(am_false);
  2654. #endif
  2655. } else if (ERTS_IS_ATOM_STR("lock_counting", BIF_ARG_1)) {
  2656. #ifdef ERTS_ENABLE_LOCK_COUNT
  2657. BIF_RET(am_true);
  2658. #else
  2659. BIF_RET(am_false);
  2660. #endif
  2661. } else if (ERTS_IS_ATOM_STR("debug_compiled", BIF_ARG_1)) {
  2662. #ifdef DEBUG
  2663. BIF_RET(am_true);
  2664. #else
  2665. BIF_RET(am_false);
  2666. #endif
  2667. } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
  2668. BIF_RET(erts_check_io_info(BIF_P));
  2669. } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
  2670. if (erts_no_schedulers == 1)
  2671. BIF_RET(NIL);
  2672. else
  2673. BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
  2674. } else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
  2675. if (erts_no_schedulers == 1)
  2676. BIF_RET(NIL);
  2677. else
  2678. BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
  2679. } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
  2680. BIF_RET(ERTS_USE_MODIFIED_TIMING()
  2681. ? make_small(erts_modified_timing_level)
  2682. : am_undefined);
  2683. } else if (ERTS_IS_ATOM_STR("port_tasks", BIF_ARG_1)) {
  2684. BIF_RET(am_true);
  2685. } else if (ERTS_IS_ATOM_STR("io_thread", BIF_ARG_1)) {
  2686. BIF_RET(am_false);
  2687. } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
  2688. BIF_RET(erts_sched_stat_term(BIF_P, 0));
  2689. } else if (ERTS_IS_ATOM_STR("total_scheduling_statistics", BIF_ARG_1)) {
  2690. BIF_RET(erts_sched_stat_term(BIF_P, 1));
  2691. } else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
  2692. BIF_RET(erts_nif_taints(BIF_P));
  2693. } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
  2694. BIF_RET(erts_get_reader_groups_map(BIF_P));
  2695. } else if (ERTS_IS_ATOM_STR("decentralized_counter_groups_map", BIF_ARG_1)) {
  2696. BIF_RET(erts_get_decentralized_counter_groups_map(BIF_P));
  2697. } else if (ERTS_IS_ATOM_STR("dist_buf_busy_limit", BIF_ARG_1)) {
  2698. Uint hsz = 0;
  2699. (void) erts_bld_uint(NULL, &hsz, erts_dist_buf_busy_limit);
  2700. hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
  2701. res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
  2702. BIF_RET(res);
  2703. } else if (ERTS_IS_ATOM_STR("delayed_node_table_gc", BIF_ARG_1)) {
  2704. Uint hsz = 0;
  2705. Uint dntgc = erts_delayed_node_table_gc();
  2706. if (dntgc == ERTS_NODE_TAB_DELAY_GC_INFINITY)
  2707. BIF_RET(am_infinity);
  2708. (void) erts_bld_uint(NULL, &hsz, dntgc);
  2709. hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
  2710. res = erts_bld_uint(&hp, NULL, dntgc);
  2711. BIF_RET(res);
  2712. } else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) {
  2713. BIF_RET(erts_get_ethread_info(BIF_P));
  2714. }
  2715. else if (ERTS_IS_ATOM_STR("emu_args", BIF_ARG_1)) {
  2716. BIF_RET(erts_get_emu_args(BIF_P));
  2717. }
  2718. else if (ERTS_IS_ATOM_STR("beam_jump_table", BIF_ARG_1)) {
  2719. BIF_RET(erts_beam_jump_table() ? am_true : am_false);
  2720. }
  2721. else if (ERTS_IS_ATOM_STR("dynamic_trace", BIF_ARG_1)) {
  2722. #if defined(USE_DTRACE)
  2723. DECL_AM(dtrace);
  2724. BIF_RET(AM_dtrace);
  2725. #elif defined(USE_SYSTEMTAP)
  2726. DECL_AM(systemtap);
  2727. BIF_RET(AM_systemtap);
  2728. #elif defined(USE_LTTNG)
  2729. DECL_AM(lttng);
  2730. BIF_RET(AM_lttng);
  2731. #else
  2732. BIF_RET(am_none);
  2733. #endif
  2734. }
  2735. else if (ERTS_IS_ATOM_STR("dynamic_trace_probes", BIF_ARG_1)) {
  2736. #if defined(USE_VM_PROBES)
  2737. BIF_RET(am_true);
  2738. #else
  2739. BIF_RET(am_false);
  2740. #endif
  2741. }
  2742. else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
  2743. erts_thr_progress_dbg_print_state();
  2744. BIF_RET(am_true);
  2745. }
  2746. else if (BIF_ARG_1 == am_message_queue_data) {
  2747. switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
  2748. case SPO_OFF_HEAP_MSGQ:
  2749. BIF_RET(am_off_heap);
  2750. case SPO_ON_HEAP_MSGQ:
  2751. BIF_RET(am_on_heap);
  2752. default:
  2753. ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
  2754. BIF_RET(am_error);
  2755. }
  2756. }
  2757. else if (ERTS_IS_ATOM_STR("compile_info",BIF_ARG_1)) {
  2758. Uint sz;
  2759. Eterm res = NIL, tup, text;
  2760. Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */
  2761. 2*(sys_strlen(erts_build_flags_CONFIG_H) +
  2762. sys_strlen(erts_build_flags_CFLAGS) +
  2763. sys_strlen(erts_build_flags_LDFLAGS)));
  2764. sz = sys_strlen(erts_build_flags_CONFIG_H);
  2765. text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL);
  2766. tup = TUPLE2(hp, am_config_h, text); hp += 3;
  2767. res = CONS(hp, tup, res); hp += 2;
  2768. sz = sys_strlen(erts_build_flags_CFLAGS);
  2769. text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL);
  2770. tup = TUPLE2(hp, am_cflags, text); hp += 3;
  2771. res = CONS(hp, tup, res); hp += 2;
  2772. sz = sys_strlen(erts_build_flags_LDFLAGS);
  2773. text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL);
  2774. tup = TUPLE2(hp, am_ldflags, text); hp += 3;
  2775. res = CONS(hp, tup, res); hp += 2;
  2776. BIF_RET(res);
  2777. }
  2778. else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
  2779. BIF_RET(make_small(erts_db_get_max_tabs()));
  2780. }
  2781. else if (ERTS_IS_ATOM_STR("ets_count",BIF_ARG_1)) {
  2782. BIF_RET(make_small(erts_ets_table_count()));
  2783. }
  2784. else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
  2785. BIF_RET(make_small(erts_get_atom_limit()));
  2786. }
  2787. else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
  2788. BIF_RET(make_small(atom_table_size()));
  2789. }
  2790. else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
  2791. if (erts_has_time_correction()
  2792. && erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
  2793. BIF_RET(am_enabled);
  2794. }
  2795. BIF_RET(am_disabled);
  2796. }
  2797. else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
  2798. BIF_RET(am_true);
  2799. }
  2800. else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
  2801. #ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
  2802. #ifdef ARCH_64
  2803. DECL_AM(range);
  2804. BIF_RET(AM_range);
  2805. #else /* ARCH_32 */
  2806. DECL_AM(range_bitmask);
  2807. BIF_RET(AM_range_bitmask);
  2808. #endif /* ARCH_32 */
  2809. #else /* ! ERTS_HAVE_IS_IN_LITERAL_RANGE */
  2810. DECL_AM(tag);
  2811. BIF_RET(AM_tag);
  2812. #endif
  2813. } else if (ERTS_IS_ATOM_STR("system_logger", BIF_ARG_1)) {
  2814. BIF_RET(erts_get_system_logger());
  2815. }
  2816. BIF_ERROR(BIF_P, BADARG);
  2817. }
  2818. static int monitor_size(ErtsMonitor *mon, void *vsz, Sint reds)
  2819. {
  2820. *((Uint *) vsz) = erts_monitor_size(mon);
  2821. return 1;
  2822. }
  2823. static int link_size(ErtsMonitor *lnk, void *vsz, Sint reds)
  2824. {
  2825. *((Uint *) vsz) = erts_link_size(lnk);
  2826. return 1;
  2827. }
  2828. /**********************************************************************/
  2829. /* Return information on ports */
  2830. /* Info:
  2831. ** id Port index
  2832. ** connected (Pid)
  2833. ** links List of pids
  2834. ** name String
  2835. ** input Number of bytes input from port program
  2836. ** output Number of bytes output to the port program
  2837. ** os_pid The child's process ID
  2838. */
/*
 * Build one {Item, Value} element of the port_info/2 result for port
 * 'prt' (which must be locked by the caller; asserted below).
 *
 * Two-pass build protocol: the function is typically called twice.
 * First pass with szp != NULL (and hpp == NULL) accumulates the needed
 * heap size into *szp and returns am_true to signal "this item will
 * produce a value" (or am_false/am_undefined for unknown/absent items).
 * Second pass with hpp != NULL builds the actual term on *hpp and
 * advances the pointer. The 'done' label wraps the value into the
 * {Item, Value} 2-tuple common to all items.
 *
 * Returns THE_NON_VALUE for unrecognized items (heap-build pass).
 */
Eterm
erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
                   Eterm item)
{
    Eterm res = THE_NON_VALUE;

    ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));

    if (item == am_id) {
        /* Port table index of this port. Fits a small; no size needed
           beyond the wrapping tuple added at 'done'. */
        if (hpp)
            res = make_small(internal_port_index(prt->common.id));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_links) {
        /* List of linked pids, collected from the port's link tree. */
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);

        erts_link_tree_foreach(ERTS_P_LINKS(prt), collect_one_link, (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            /* Build the list back-to-front; each entity term may be
               non-immediate and is copied via STORE_NC. */
            for (i = 0; i < mic.mi_i; i++) {
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        }

        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitors) {
        /* Monitors originated by this port; each entry is rendered as
           {process, Pid} (ports can only origin-monitor processes here,
           see the assertions below). */
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            for (i = 0; i < mic.mi_i; i++) {
                Eterm t;

                ASSERT(mic.mi[i].type == ERTS_MON_TYPE_PORT);
                ASSERT(is_internal_pid(mic.mi[i].entity.term));
                t = TUPLE2(*hpp, am_process, mic.mi[i].entity.term);
                *hpp += 3;
                res = CONS(*hpp, t, res);
                *hpp += 2;
            }
        } // hpp
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitored_by) {
        /* Entities monitoring this port: both the local-target monitor
           list and the monitor tree are scanned. */
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            for (i = 0; i < mic.mi_i; ++i) {
                ASSERT(mic.mi[i].type != ERTS_MON_TYPE_RESOURCE);
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        } // hpp
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_name) {
        /* Port name as a char list; 2 heap words per character. */
        int count = sys_strlen(prt->name);

        if (hpp)
            res = buf_to_intlist(hpp, prt->name, count, NIL);

        if (szp) {
            *szp += 2*count;
            res = am_true;
            goto done;
        }
    }
    else if (item == am_connected) {
        if (hpp)
            res = ERTS_PORT_GET_CONNECTED(prt); /* internal pid */
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_input) {
        /* Bytes read from the port program. */
        res = erts_bld_uint(hpp, szp, prt->bytes_in);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_output) {
        /* Bytes written to the port program. */
        res = erts_bld_uint(hpp, szp, prt->bytes_out);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_os_pid) {
        /* OS pid of the spawned child, or 'undefined' when none
           (os_pid < 0 is the "no child" sentinel). */
        res = (prt->os_pid < 0
               ? am_undefined
               : erts_bld_uword(hpp, szp, (UWord) prt->os_pid));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_registered_name) {
        RegProc *reg = prt->common.u.alive.reg;
        if (reg) {
            res = reg->name;
            if (szp) {
                res = am_true;
                goto done;
            }
        }
        else {
            /* No registered name: bypass the {Item, Value} wrapping
               entirely (early return, not 'goto done'). */
            if (szp)
                return am_undefined;
            return NIL;
        }
    }
    else if (item == am_memory) {
        /* All memory consumed in bytes (the Port struct should not be
           included though).
         */
        Uint size = 0;

        /* Sum sizes of links and of both monitor collections. */
        erts_link_tree_foreach(ERTS_P_LINKS(prt),
                               link_size, (void *) &size);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  monitor_size, (void *) &size);
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  monitor_size, (void *) &size);

        size += erts_port_data_size(prt);

        if (prt->linebuf)
            size += sizeof(LineBuf) + prt->linebuf->ovsiz;

        /* ... */

        /* All memory allocated by the driver should be included, but it is
           hard to retrieve... */

        res = erts_bld_uint(hpp, szp, size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_queue_size) {
        /* Current driver output queue size in bytes. */
        Uint ioq_size = erts_port_ioq_size(prt);
        res = erts_bld_uint(hpp, szp, ioq_size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (ERTS_IS_ATOM_STR("locking", item)) {
        /* Debug item: per-port or per-driver locking scheme. */
        if (hpp) {
            if (erts_atomic32_read_nob(&prt->state)
                & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
                DECL_AM(port_level);
                ASSERT(prt->drv_ptr->flags
                       & ERL_DRV_FLAG_USE_PORT_LOCKING);
                res = AM_port_level;
            }
            else {
                DECL_AM(driver_level);
                ASSERT(!(prt->drv_ptr->flags
                         & ERL_DRV_FLAG_USE_PORT_LOCKING));
                res = AM_driver_level;
            }
        }
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_parallelism) {
        if (szp) {
            res = am_true;
            goto done;
        }
        res = ((ERTS_PTS_FLG_PARALLELISM &
                erts_atomic32_read_nob(&prt->sched.flags))
               ? am_true
               : am_false);
    }
    else {
        /* Unknown item: am_false on the sizing pass, non-value on the
           build pass, and no {Item, Value} wrapping. */
        if (szp)
            return am_false;
        return THE_NON_VALUE;
    }

done:
    /* Wrap the value as {Item, Value} (3 heap words). */
    if (szp)
        *szp += 3;
    if (hpp) {
        res = TUPLE2(*hpp, item, res);
        *hpp += 3;
    }

    return res;
}
/*
 * erlang:fun_info/2 implementation: return {What, Value} for a local
 * fun or an external (export) fun. Badargs on any other term or on an
 * unknown item.
 *
 * NOTE: in several cases the value is read from 'funp' AFTER the
 * HAlloc for the wrapping tuple; the ordering is significant since
 * HAlloc may garbage-collect and move heap data (the fun entry 'fe'
 * itself is off-heap and stable).
 */
BIF_RETTYPE
fun_info_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm fun = BIF_ARG_1;
    Eterm what = BIF_ARG_2;
    Eterm* hp;
    Eterm val;

    if (is_fun(fun)) {
        /* Local (anonymous) fun. */
        ErlFunThing* funp = (ErlFunThing *) fun_val(fun);

        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_local;
            break;
        case am_pid:
            /* Pid of the process that created the fun. */
            hp = HAlloc(p, 3);
            val = funp->creator;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = funp->fe->module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->index);
            break;
        case am_new_uniq:
            /* 16-byte MD5-style uniq value as a binary; allocate the
               binary before the tuple cells. */
            val = new_binary(p, funp->fe->uniq, 16);
            hp = HAlloc(p, 3);
            break;
        case am_index:
            /* Pre-R15 ("old") index/uniq values kept for b/w compat. */
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_index);
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_uniq);
            break;
        case am_env:
            {
                /* Free variables captured by the fun, returned as a
                   list built back-to-front. */
                Uint num_free = funp->num_free;
                int i;

                hp = HAlloc(p, 3 + 2*num_free);
                val = NIL;
                for (i = num_free-1; i >= 0; i--) {
                    val = CONS(hp, funp->env[i], val);
                    hp += 2;
                }
            }
            break;
        case am_refc:
            /* Reference count of the fun entry; may not fit a small,
               so build the integer before allocating the tuple. */
            val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p);
            hp = HAlloc(p, 3);
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(funp->arity);
            break;
        case am_name:
            hp = HAlloc(p, 3);
            /* Fun name; stored just before the fun's code address
               (presumably placed there by the loader -- see beam_load). */
            val = funp->fe->address[-2];
            break;
        default:
            goto error;
        }
    } else if (is_export(fun)) {
        /* External fun: fun M:F/A. Most local-fun items are
           'undefined' for exports. */
        Export* exp = (Export *) ((UWord) (export_val(fun))[1]);

        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_external;
            break;
        case am_pid:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_new_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_env:
            /* Export funs capture no environment. */
            hp = HAlloc(p, 3);
            val = NIL;
            break;
        case am_refc:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(exp->info.mfa.arity);
            break;
        case am_name:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.function;
            break;
        default:
            goto error;
        }
    } else {
    error:
        BIF_ERROR(p, BADARG);
    }

    return TUPLE2(hp, what, val);
}
  3184. BIF_RETTYPE
  3185. fun_info_mfa_1(BIF_ALIST_1)
  3186. {
  3187. Process* p = BIF_P;
  3188. Eterm fun = BIF_ARG_1;
  3189. Eterm* hp;
  3190. if (is_fun(fun)) {
  3191. ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
  3192. hp = HAlloc(p, 4);
  3193. BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
  3194. } else if (is_export(fun)) {
  3195. Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
  3196. hp = HAlloc(p, 4);
  3197. BIF_RET(TUPLE3(hp,exp->info.mfa.module,
  3198. exp->info.mfa.function,
  3199. make_small(exp->info.mfa.arity)));
  3200. }
  3201. BIF_ERROR(p, BADARG);
  3202. }
  3203. BIF_RETTYPE erts_internal_is_process_alive_2(BIF_ALIST_2)
  3204. {
  3205. if (!is_internal_pid(BIF_ARG_1) || !is_internal_ordinary_ref(BIF_ARG_2))
  3206. BIF_ERROR(BIF_P, BADARG);
  3207. erts_proc_sig_send_is_alive_request(BIF_P, BIF_ARG_1, BIF_ARG_2);
  3208. BIF_RET(am_ok);
  3209. }
/*
 * erlang:is_process_alive/1. Fast path: answer directly from the
 * process table and the target's state word. Slow path: trap into
 * Erlang code that performs a signal-queue round-trip, both when the
 * target is exiting and when it has pending signals (see comment in
 * the body). External pids from an old incarnation of this node are
 * reported dead; any other argument is a badarg.
 */
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
    if (is_internal_pid(BIF_ARG_1)) {
        erts_aint32_t state;
        Process *rp;

        /* A process is trivially alive from its own point of view. */
        if (BIF_ARG_1 == BIF_P->common.id)
            BIF_RET(am_true);

        rp = erts_proc_lookup_raw(BIF_ARG_1);
        if (!rp)
            BIF_RET(am_false);

        /* Acquire-read pairs with the state updates made by the
           target's scheduler. */
        state = erts_atomic32_read_acqb(&rp->state);
        if (state & (ERTS_PSFLG_EXITING
                     | ERTS_PSFLG_SIG_Q
                     | ERTS_PSFLG_SIG_IN_Q)) {
            /*
             * If in exiting state, trap out and send 'is alive'
             * request and wait for it to complete termination.
             *
             * If process has signals enqueued, we need to
             * send it an 'is alive' request via its signal
             * queue in order to ensure that signal order is
             * preserved (we may earlier have sent it an
             * exit signal that has not been processed yet).
             */
            BIF_TRAP1(is_process_alive_trap, BIF_P, BIF_ARG_1);
        }

        BIF_RET(am_true);
    }

    if (is_external_pid(BIF_ARG_1)) {
        if (external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
            BIF_RET(am_false); /* A pid from an old incarnation of this node */
    }

    BIF_ERROR(BIF_P, BADARG);
}
/*
 * RPC callback (run in the context of the target process, see
 * erts_proc_sig_send_rpc_request): dump the target's stack to stderr.
 *
 * 'redsp', when given, receives the number of reductions consumed (1).
 * Returns am_badarg if the target is already exiting, am_true on
 * success. All "minor" process locks are taken around the dump so the
 * stack is stable while it is printed (main lock is already held).
 */
static Eterm
process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
    if (redsp)
        *redsp = 1;

    if (ERTS_PROC_IS_EXITING(c_p))
        return am_badarg;

    erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
    erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
    erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);

    return am_true;
}
  3256. BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
  3257. {
  3258. Eterm res;
  3259. if (BIF_ARG_2 != am_backtrace)
  3260. BIF_RET(am_badarg);
  3261. if (BIF_P->common.id == BIF_ARG_1) {
  3262. res = process_display(BIF_P, NULL, NULL, NULL);
  3263. BIF_RET(res);
  3264. }
  3265. if (is_not_internal_pid(BIF_ARG_1))
  3266. BIF_RET(am_badarg);
  3267. res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
  3268. !0,
  3269. process_display,
  3270. NULL);
  3271. if (is_non_value(res))
  3272. BIF_RET(am_badarg);
  3273. BIF_RET(res);
  3274. }
/* This is a general call which returns some possibly useful information. */
/*
 * erlang:statistics/1. Some items are answered immediately from
 * counters; items that must gather data from all schedulers
 * (scheduler_wall_time, garbage_collection, microstate_accounting, io)
 * issue a request and trap into an Erlang helper that collects the
 * replies. List/tuple-valued items use the common two-pass build
 * pattern: first iteration sizes the result (hpp == NULL), second
 * builds it on a freshly allocated heap chunk.
 *
 * NOTE: several branches deliberately combine boolean comparisons with
 * bitwise '|' instead of '||'; the operands are 0/1 values, so the
 * result is the same but branch-free.
 */
BIF_RETTYPE statistics_1(BIF_ALIST_1)
{
    Eterm res;
    Eterm* hp;

    if (BIF_ARG_1 == am_scheduler_wall_time) {
        res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        /* Trap; the helper gathers per-scheduler replies keyed by 'res'. */
        BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
    } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
        /* Same as above but including dirty I/O schedulers (last arg). */
        res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
    } else if ((BIF_ARG_1 == am_total_active_tasks)
               | (BIF_ARG_1 == am_total_run_queue_lengths)
               | (BIF_ARG_1 == am_total_active_tasks_all)
               | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
        /* Single summed figure over all run queues; the two flag
           arguments select tasks-vs-lengths and whether dirty I/O
           queues are included. */
        Uint no = erts_run_queues_len(NULL, 0,
                                      ((BIF_ARG_1 == am_total_active_tasks)
                                       | (BIF_ARG_1 == am_total_active_tasks_all)),
                                      ((BIF_ARG_1 == am_total_active_tasks_all)
                                       | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
        if (IS_USMALL(0, no))
            res = make_small(no);
        else {
            Eterm *hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
            res = uint_to_big(no, hp);
        }
        BIF_RET(res);
    } else if ((BIF_ARG_1 == am_active_tasks)
               | (BIF_ARG_1 == am_run_queue_lengths)
               | (BIF_ARG_1 == am_active_tasks_all)
               | (BIF_ARG_1 == am_run_queue_lengths_all)) {
        /* Per-run-queue figures returned as a list. */
        Eterm res, *hp, **hpp;
        Uint sz, *szp;
        int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
                             | (BIF_ARG_1 == am_run_queue_lengths_all));
        int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
                     (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
        /* First half of qszs holds raw counts, second half holds the
           built Eterm for each count. */
        Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
        (void) erts_run_queues_len(qszs, 0,
                                   ((BIF_ARG_1 == am_active_tasks)
                                    | (BIF_ARG_1 == am_active_tasks_all)),
                                   incl_dirty_io);
        sz = 0;
        szp = &sz;
        hpp = NULL;
        /* Two passes: size (hpp == NULL), then build and return. */
        while (1) {
            int i;
            for (i = 0; i < no_qs; i++)
                qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
            res = erts_bld_list(hpp, szp, no_qs, &qszs[no_qs]);
            if (hpp) {
                erts_free(ERTS_ALC_T_TMP, qszs);
                BIF_RET(res);
            }
            hp = HAlloc(BIF_P, sz);
            szp = NULL;
            hpp = &hp;
        }
#ifdef ERTS_ENABLE_MSACC
    } else if (BIF_ARG_1 == am_microstate_accounting) {
        Eterm threads;
        res = erts_msacc_request(BIF_P, ERTS_MSACC_GATHER, &threads);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP2(gather_msacc_res_trap, BIF_P, res, threads);
#endif
    } else if (BIF_ARG_1 == am_context_switches) {
        /* {ContextSwitches, 0} -- second element kept for b/w compat. */
        Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, cs, SMALL_ZERO);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_garbage_collection) {
        res = erts_gc_info_request(BIF_P);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_gc_info_res_trap, BIF_P, res);
    } else if (BIF_ARG_1 == am_reductions) {
        /* {TotalReductions, ReductionsSinceLastCall}. */
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;

        erts_get_total_reductions(&reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_exact_reductions) {
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;

        erts_get_exact_total_reductions(BIF_P, &reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_runtime) {
        /* {TotalRuntime, TimeSinceLastCall}, both in milliseconds. */
        ErtsMonotonicTime u1, u2;
        Eterm b1, b2;
        Uint hsz;
        erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
        hsz = 3; /* 2-tuple */
        (void) erts_bld_monotonic_time(NULL, &hsz, u1);
        (void) erts_bld_monotonic_time(NULL, &hsz, u2);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_monotonic_time(&hp, NULL, u1);
        b2 = erts_bld_monotonic_time(&hp, NULL, u2);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_run_queue) {
        /* Total run-queue length (normal queues only) as a small. */
        res = erts_run_queues_len(NULL, 1, 0, 0);
        BIF_RET(make_small(res));
    } else if (BIF_ARG_1 == am_wall_clock) {
        ErtsMonotonicTime w1, w2;
        Eterm b1, b2;
        Uint hsz;
        erts_wall_clock_elapsed_both(&w1, &w2);
        hsz = 3; /* 2-tuple */
        (void) erts_bld_monotonic_time(NULL, &hsz, w1);
        (void) erts_bld_monotonic_time(NULL, &hsz, w2);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_monotonic_time(&hp, NULL, w1);
        b2 = erts_bld_monotonic_time(&hp, NULL, w2);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_io) {
        /* Gathered from all schedulers; trap to collect the replies. */
        Eterm ref = erts_request_io_bytes(BIF_P);
        BIF_TRAP2(gather_io_bytes_trap, BIF_P, ref, make_small(erts_no_schedulers));
    }
    else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
        /* Undocumented item: per-queue lengths as a tuple (includes
           all dirty queues). Same two-pass pattern as above. */
        Eterm res, *hp, **hpp;
        Uint sz, *szp;
        int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
        Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
        (void) erts_run_queues_len(qszs, 0, 0, 1);
        sz = 0;
        szp = &sz;
        hpp = NULL;
        while (1) {
            int i;
            for (i = 0; i < no_qs; i++)
                qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
            res = erts_bld_tuplev(hpp, szp, no_qs, &qszs[no_qs]);
            if (hpp) {
                erts_free(ERTS_ALC_T_TMP, qszs);
                BIF_RET(res);
            }
            hp = HAlloc(BIF_P, sz);
            szp = NULL;
            hpp = &hp;
        }
    }
    BIF_ERROR(BIF_P, BADARG);
}
/*
 * error_logger:warning_map/0: return the atom describing how warning
 * messages are mapped (set at emulator start-up via the +W flag).
 */
BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
{
    BIF_RET(erts_error_logger_warnings);
}
  3443. static erts_atomic_t available_internal_state;
/*
 * No-op destructor for magic binaries created by the debug BIFs below;
 * returning 1 (rather than 0) allows the binary to be deallocated.
 */
static int empty_magic_ref_destructor(Binary *bin)
{
    return 1;
}
  3448. BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
  3449. {
  3450. /*
  3451. * NOTE: Only supposed to be used for testing, and debugging.
  3452. */
  3453. if (!erts_atomic_read_nob(&available_internal_state)) {
  3454. BIF_ERROR(BIF_P, EXC_UNDEF);
  3455. }
  3456. if (is_atom(BIF_ARG_1)) {
  3457. if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
  3458. /* Used by (emulator) */
  3459. BIF_RET(make_small((Uint) ERTS_BIF_REDS_LEFT(BIF_P)));
  3460. }
  3461. else if (ERTS_IS_ATOM_STR("node_and_dist_references", BIF_ARG_1)) {
  3462. /* Used by node_container_SUITE (emulator) */
  3463. Eterm res = erts_get_node_and_dist_references(BIF_P);
  3464. BIF_RET(res);
  3465. }
  3466. else if (ERTS_IS_ATOM_STR("monitoring_nodes", BIF_ARG_1)) {
  3467. BIF_RET(erts_processes_monitoring_nodes(BIF_P));
  3468. }
  3469. else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
  3470. || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
  3471. /* Used by node_container_SUITE (emulator) */
  3472. Sint res;
  3473. if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
  3474. res = erts_ptab_test_next_id(&erts_proc, 0, 0);
  3475. else
  3476. res = erts_ptab_test_next_id(&erts_port, 0, 0);
  3477. if (res < 0)
  3478. BIF_RET(am_false);
  3479. BIF_RET(erts_make_integer(res, BIF_P));
  3480. }
  3481. else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
  3482. /* Used by ets_SUITE (stdlib) */
  3483. size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
  3484. Eterm* hp = HAlloc(BIF_P ,3);
  3485. BIF_RET(TUPLE2(hp, make_small((Uint) words),
  3486. erts_ets_hash_sizeof_ext_segtab()));
  3487. }
  3488. else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
  3489. /* Used by driver_SUITE (emulator) */
  3490. Uint sz, *szp;
  3491. Eterm res, *hp, **hpp;
  3492. int no_errors;
  3493. ErtsCheckIoDebugInfo ciodi = {0};
  3494. #ifdef HAVE_ERTS_CHECK_IO_DEBUG
  3495. erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
  3496. no_errors = erts_check_io_debug(&ciodi);
  3497. erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
  3498. #else
  3499. no_errors = 0;
  3500. #endif
  3501. sz = 0;
  3502. szp = &sz;
  3503. hpp = NULL;
  3504. while (1) {
  3505. res = erts_bld_tuple(hpp, szp, 4,
  3506. erts_bld_uint(hpp, szp,
  3507. (Uint) no_errors),
  3508. erts_bld_uint(hpp, szp,
  3509. (Uint) ciodi.no_used_fds),
  3510. erts_bld_uint(hpp, szp,
  3511. (Uint) ciodi.no_driver_select_structs),
  3512. erts_bld_uint(hpp, szp,
  3513. (Uint) ciodi.no_enif_select_structs));
  3514. if (hpp)
  3515. break;
  3516. hp = HAlloc(BIF_P, sz);
  3517. szp = NULL;
  3518. hpp = &hp;
  3519. }
  3520. BIF_RET(res);
  3521. }
  3522. else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
  3523. /* Used by process_SUITE (emulator) */
  3524. int i;
  3525. Eterm res = NIL;
  3526. Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
  3527. for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
  3528. res = CONS(hp, pi_args[i].name, res);
  3529. hp += 2;
  3530. }
  3531. BIF_RET(res);
  3532. }
  3533. else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
  3534. /* Used by process_SUITE (emulator) */
  3535. BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc));
  3536. }
  3537. else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
  3538. /* Used by process_SUITE (emulator) */
  3539. BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc));
  3540. }
  3541. else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
  3542. /* Used by distribution_SUITE (emulator) */
  3543. BIF_RET(make_small((Uint) erts_debug_max_atom_out_cache_index()));
  3544. }
  3545. else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
  3546. Uint n;
  3547. erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3548. n = erts_debug_nbalance();
  3549. erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3550. BIF_RET(erts_make_integer(n, BIF_P));
  3551. }
  3552. else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
  3553. BIF_RET(am_true);
  3554. }
  3555. else if (ERTS_IS_ATOM_STR("force_heap_frags", BIF_ARG_1)) {
  3556. #ifdef FORCE_HEAP_FRAGS
  3557. BIF_RET(am_true);
  3558. #else
  3559. BIF_RET(am_false);
  3560. #endif
  3561. }
  3562. else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
  3563. Eterm res;
  3564. erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3565. erts_thr_progress_block();
  3566. erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
  3567. res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
  3568. erts_thr_progress_unblock();
  3569. BIF_RET(res);
  3570. }
  3571. else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
  3572. BIF_RET(erts_mmap_debug_info(BIF_P));
  3573. }
  3574. else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
  3575. BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P));
  3576. }
  3577. else if (ERTS_IS_ATOM_STR("min_unique_monotonic_integer", BIF_ARG_1)) {
  3578. Sint64 value = erts_get_min_unique_monotonic_integer();
  3579. if (IS_SSMALL(value))
  3580. BIF_RET(make_small(value));
  3581. else {
  3582. Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
  3583. Eterm *hp = HAlloc(BIF_P, hsz);
  3584. BIF_RET(erts_sint64_to_big(value, &hp));
  3585. }
  3586. }
  3587. else if (ERTS_IS_ATOM_STR("min_unique_integer", BIF_ARG_1)) {
  3588. Sint64 value = erts_get_min_unique_integer();
  3589. if (IS_SSMALL(value))
  3590. BIF_RET(make_small(value));
  3591. else {
  3592. Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
  3593. Eterm *hp = HAlloc(BIF_P, hsz);
  3594. BIF_RET(erts_sint64_to_big(value, &hp));
  3595. }
  3596. }
  3597. else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
  3598. UWord size;
  3599. char c;
  3600. if (erts_is_above_stack_limit(&c))
  3601. size = erts_check_stack_recursion_downwards(&c);
  3602. else
  3603. size = erts_check_stack_recursion_upwards(&c);
  3604. if (IS_SSMALL(size))
  3605. BIF_RET(make_small(size));
  3606. else {
  3607. Uint hsz = BIG_UWORD_HEAP_SIZE(size);
  3608. Eterm *hp = HAlloc(BIF_P, hsz);
  3609. BIF_RET(uword_to_big(size, hp));
  3610. }
  3611. } else if (ERTS_IS_ATOM_STR("scheduler_dump", BIF_ARG_1)) {
  3612. #if defined(ERTS_HAVE_TRY_CATCH) && defined(ERTS_SYS_SUSPEND_SIGNAL)
  3613. BIF_RET(am_true);
  3614. #else
  3615. BIF_RET(am_false);
  3616. #endif
  3617. }
  3618. else if (ERTS_IS_ATOM_STR("lc_graph", BIF_ARG_1)) {
  3619. #ifdef ERTS_ENABLE_LOCK_CHECK
  3620. Eterm res = erts_lc_dump_graph();
  3621. BIF_RET(res);
  3622. #else
  3623. BIF_RET(am_notsup);
  3624. #endif
  3625. }
  3626. }
  3627. else if (is_tuple(BIF_ARG_1)) {
  3628. Eterm* tp = tuple_val(BIF_ARG_1);
  3629. switch (arityval(tp[0])) {
  3630. case 2: {
  3631. if (ERTS_IS_ATOM_STR("process_status", tp[1])) {
  3632. /* Used by timer process_SUITE, timer_bif_SUITE, and
  3633. node_container_SUITE (emulator) */
  3634. if (is_internal_pid(tp[2])) {
  3635. BIF_RET(erts_process_status(NULL, tp[2]));
  3636. }
  3637. }
  3638. else if (ERTS_IS_ATOM_STR("connection_id", tp[1])) {
  3639. DistEntry *dep;
  3640. Eterm *hp, res;
  3641. Uint con_id, hsz = 0;
  3642. if (!is_atom(tp[2]))
  3643. BIF_ERROR(BIF_P, BADARG);
  3644. dep = erts_sysname_to_connected_dist_entry(tp[2]);
  3645. if (!dep)
  3646. BIF_ERROR(BIF_P, BADARG);
  3647. erts_de_rlock(dep);
  3648. con_id = (Uint) dep->connection_id;
  3649. erts_de_runlock(dep);
  3650. (void) erts_bld_uint(NULL, &hsz, con_id);
  3651. hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
  3652. res = erts_bld_uint(&hp, NULL, con_id);
  3653. BIF_RET(res);
  3654. }
  3655. else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
  3656. /* Used by erl_link_SUITE (emulator) */
  3657. if(is_internal_pid(tp[2])) {
  3658. erts_aint32_t state;
  3659. Eterm res;
  3660. Process *p;
  3661. int sigs_done, local_only;
  3662. p = erts_pid2proc(BIF_P,
  3663. ERTS_PROC_LOCK_MAIN,
  3664. tp[2],
  3665. ERTS_PROC_LOCK_MAIN);
  3666. if (!p) {
  3667. ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
  3668. BIF_RET(am_undefined);
  3669. }
  3670. local_only = 0;
  3671. do {
  3672. int reds = CONTEXT_REDS;
  3673. sigs_done = erts_proc_sig_handle_incoming(p,
  3674. &state,
  3675. &reds,
  3676. CONTEXT_REDS,
  3677. local_only);
  3678. local_only = !0;
  3679. } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
  3680. if (!(state & ERTS_PSFLG_EXITING))
  3681. res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
  3682. else if (BIF_P == p)
  3683. ERTS_BIF_EXITED(BIF_P);
  3684. else
  3685. res = am_undefined;
  3686. if (BIF_P != p)
  3687. erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
  3688. BIF_RET(res);
  3689. }
  3690. else if(is_internal_port(tp[2])) {
  3691. Eterm res;
  3692. Port *p = erts_id2port_sflgs(tp[2],
  3693. BIF_P,
  3694. ERTS_PROC_LOCK_MAIN,
  3695. ERTS_PORT_SFLGS_INVALID_LOOKUP);
  3696. if(!p)
  3697. BIF_RET(am_undefined);
  3698. res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
  3699. erts_port_release(p);
  3700. BIF_RET(res);
  3701. }
  3702. else if(is_node_name_atom(tp[2])) {
  3703. DistEntry *dep = erts_find_dist_entry(tp[2]);
  3704. if(dep) {
  3705. Eterm res = NIL;
  3706. if (dep->mld) {
  3707. erts_mtx_lock(&dep->mld->mtx);
  3708. res = make_link_list(BIF_P, 0, dep->mld->links, NIL);
  3709. erts_mtx_unlock(&dep->mld->mtx);
  3710. }
  3711. BIF_RET(res);
  3712. } else {
  3713. BIF_RET(am_undefined);
  3714. }
  3715. }
  3716. }
  3717. else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
  3718. /* Used by erl_link_SUITE (emulator) */
  3719. if(is_internal_pid(tp[2])) {
  3720. erts_aint32_t state;
  3721. Process *p;
  3722. Eterm res;
  3723. int sigs_done, local_only;
  3724. p = erts_pid2proc(BIF_P,
  3725. ERTS_PROC_LOCK_MAIN,
  3726. tp[2],
  3727. ERTS_PROC_LOCK_MAIN);
  3728. if (!p) {
  3729. ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
  3730. BIF_RET(am_undefined);
  3731. }
  3732. local_only = 0;
  3733. do {
  3734. int reds = CONTEXT_REDS;
  3735. sigs_done = erts_proc_sig_handle_incoming(p,
  3736. &state,
  3737. &reds,
  3738. CONTEXT_REDS,
  3739. local_only);
  3740. local_only = !0;
  3741. } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
  3742. if (!(state & ERTS_PSFLG_EXITING)) {
  3743. res = make_monitor_list(BIF_P, 1, ERTS_P_MONITORS(p), NIL);
  3744. res = make_monitor_list(BIF_P, 0, ERTS_P_LT_MONITORS(p), res);
  3745. }
  3746. else {
  3747. if (BIF_P == p)
  3748. ERTS_BIF_EXITED(BIF_P);
  3749. else
  3750. res = am_undefined;
  3751. }
  3752. if (BIF_P != p)
  3753. erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
  3754. BIF_RET(res);
  3755. } else if(is_node_name_atom(tp[2])) {
  3756. DistEntry *dep = erts_find_dist_entry(tp[2]);
  3757. if(dep) {
  3758. Eterm ml = NIL;
  3759. if (dep->mld) {
  3760. erts_mtx_lock(&dep->mld->mtx);
  3761. ml = make_monitor_list(BIF_P, 1, dep->mld->orig_name_monitors, NIL);
  3762. ml = make_monitor_list(BIF_P, 0, dep->mld->monitors, ml);
  3763. erts_mtx_unlock(&dep->mld->mtx);
  3764. }
  3765. BIF_RET(ml);
  3766. } else {
  3767. BIF_RET(am_undefined);
  3768. }
  3769. }
  3770. }
  3771. else if (ERTS_IS_ATOM_STR("channel_number", tp[1])) {
  3772. Eterm res;
  3773. DistEntry *dep = erts_find_dist_entry(tp[2]);
  3774. if (!dep)
  3775. res = am_undefined;
  3776. else {
  3777. Uint cno = dist_entry_channel_no(dep);
  3778. res = make_small(cno);
  3779. }
  3780. BIF_RET(res);
  3781. }
  3782. else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
  3783. Eterm bin = tp[2];
  3784. if (is_binary(bin)) {
  3785. Eterm real_bin = bin;
  3786. Eterm res = am_true;
  3787. ErlSubBin* sb = (ErlSubBin *) binary_val(real_bin);
  3788. if (sb->thing_word == HEADER_SUB_BIN) {
  3789. real_bin = sb->orig;
  3790. }
  3791. if (*binary_val(real_bin) == HEADER_PROC_BIN) {
  3792. ProcBin* pb;
  3793. Binary* val;
  3794. Eterm SzTerm;
  3795. Uint hsz = 3 + 5;
  3796. Eterm* hp;
  3797. DECL_AM(refc_binary);
  3798. pb = (ProcBin *) binary_val(real_bin);
  3799. val = pb->val;
  3800. (void) erts_bld_uint(NULL, &hsz, pb->size);
  3801. (void) erts_bld_uint(NULL, &hsz, val->orig_size);
  3802. hp = HAlloc(BIF_P, hsz);
  3803. /* Info about the Binary* object */
  3804. SzTerm = erts_bld_uint(&hp, NULL, val->orig_size);
  3805. res = TUPLE2(hp, am_binary, SzTerm);
  3806. hp += 3;
  3807. /* Info about the ProcBin* object */
  3808. SzTerm = erts_bld_uint(&hp, NULL, pb->size);
  3809. res = TUPLE4(hp, AM_refc_binary, SzTerm,
  3810. res, make_small(pb->flags));
  3811. } else { /* heap binary */
  3812. DECL_AM(heap_binary);
  3813. res = AM_heap_binary;
  3814. }
  3815. BIF_RET(res);
  3816. }
  3817. }
  3818. else if (ERTS_IS_ATOM_STR("term_to_binary_tuple_fallbacks", tp[1])) {
  3819. Uint dflags = (TERM_TO_BINARY_DFLAGS
  3820. & ~DFLAG_EXPORT_PTR_TAG
  3821. & ~DFLAG_BIT_BINARIES);
  3822. Eterm res = erts_term_to_binary(BIF_P, tp[2], 0, dflags);
  3823. if (is_value(res))
  3824. BIF_RET(res);
  3825. BIF_ERROR(BIF_P, SYSTEM_LIMIT);
  3826. }
  3827. else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
  3828. Eterm res = am_undefined;
  3829. DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
  3830. if (dep) {
  3831. erts_de_rlock(dep);
  3832. if (is_internal_port(dep->cid) || is_internal_pid(dep->cid))
  3833. res = dep->cid;
  3834. erts_de_runlock(dep);
  3835. }
  3836. BIF_RET(res);
  3837. }
  3838. else if (ERTS_IS_ATOM_STR("atom_out_cache_index", tp[1])) {
  3839. /* Used by distribution_SUITE (emulator) */
  3840. if (is_atom(tp[2])) {
  3841. BIF_RET(make_small(
  3842. (Uint)
  3843. erts_debug_atom_to_out_cache_index(tp[2])));
  3844. }
  3845. }
  3846. else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
  3847. return erts_fake_scheduler_bindings(BIF_P, tp[2]);
  3848. }
  3849. else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
  3850. Sint groups;
  3851. if (is_not_small(tp[2]))
  3852. BIF_ERROR(BIF_P, BADARG);
  3853. groups = signed_val(tp[2]);
  3854. if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
  3855. BIF_ERROR(BIF_P, BADARG);
  3856. BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
  3857. }
  3858. else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
  3859. Uint hash = (Uint) make_internal_hash(tp[2], 0);
  3860. Uint hsz = 0;
  3861. Eterm* hp;
  3862. erts_bld_uint(NULL, &hsz, hash);
  3863. hp = HAlloc(BIF_P,hsz);
  3864. return erts_bld_uint(&hp, NULL, hash);
  3865. }
  3866. else if (ERTS_IS_ATOM_STR("atom", tp[1])) {
  3867. Uint ix;
  3868. if (!term_to_Uint(tp[2], &ix))
  3869. BIF_ERROR(BIF_P, BADARG);
  3870. while (ix >= atom_table_size()) {
  3871. char tmp[20];
  3872. erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
  3873. erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
  3874. }
  3875. return make_atom(ix);
  3876. }
  3877. else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
  3878. Binary *bin;
  3879. UWord bin_addr, refc;
  3880. Eterm bin_addr_term, refc_term, test_type;
  3881. Uint sz;
  3882. Eterm *hp;
  3883. if (!is_internal_magic_ref(tp[2])) {
  3884. if (is_internal_ordinary_ref(tp[2])) {
  3885. ErtsORefThing *rtp;
  3886. rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
  3887. if (erts_is_ref_numbers_magic(rtp->num))
  3888. BIF_RET(am_true);
  3889. }
  3890. BIF_RET(am_false);
  3891. }
  3892. bin = erts_magic_ref2bin(tp[2]);
  3893. refc = erts_refc_read(&bin->intern.refc, 1);
  3894. bin_addr = (UWord) bin;
  3895. sz = 4;
  3896. erts_bld_uword(NULL, &sz, bin_addr);
  3897. erts_bld_uword(NULL, &sz, refc);
  3898. hp = HAlloc(BIF_P, sz);
  3899. bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
  3900. refc_term = erts_bld_uword(&hp, NULL, refc);
  3901. test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
  3902. ? am_true : am_false);
  3903. BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
  3904. }
  3905. break;
  3906. }
  3907. case 3: {
  3908. if (ERTS_IS_ATOM_STR("check_time_config", tp[1])) {
  3909. int res, time_correction;
  3910. ErtsTimeWarpMode time_warp_mode;
  3911. if (tp[2] == am_true)
  3912. time_correction = !0;
  3913. else if (tp[2] == am_false)
  3914. time_correction = 0;
  3915. else
  3916. break;
  3917. if (ERTS_IS_ATOM_STR("no_time_warp", tp[3]))
  3918. time_warp_mode = ERTS_NO_TIME_WARP_MODE;
  3919. else if (ERTS_IS_ATOM_STR("single_time_warp", tp[3]))
  3920. time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
  3921. else if (ERTS_IS_ATOM_STR("multi_time_warp", tp[3]))
  3922. time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
  3923. else
  3924. break;
  3925. res = erts_check_time_adj_support(time_correction,
  3926. time_warp_mode);
  3927. BIF_RET(res ? am_true : am_false);
  3928. }
  3929. else if (ERTS_IS_ATOM_STR("make_unique_integer", tp[1])) {
  3930. Eterm res = erts_debug_make_unique_integer(BIF_P,
  3931. tp[2],
  3932. tp[3]);
  3933. if (is_non_value(res))
  3934. break;
  3935. BIF_RET(res);
  3936. }
  3937. break;
  3938. }
  3939. default:
  3940. break;
  3941. }
  3942. }
  3943. BIF_ERROR(BIF_P, BADARG);
  3944. }
  3945. BIF_RETTYPE erts_internal_is_system_process_1(BIF_ALIST_1)
  3946. {
  3947. if (is_internal_pid(BIF_ARG_1)) {
  3948. Process *rp = erts_proc_lookup(BIF_ARG_1);
  3949. if (rp && (rp->static_flags & ERTS_STC_FLG_SYSTEM_PROC))
  3950. BIF_RET(am_true);
  3951. BIF_RET(am_false);
  3952. }
  3953. if (is_external_pid(BIF_ARG_1)
  3954. && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
  3955. BIF_RET(am_false);
  3956. }
  3957. BIF_ERROR(BIF_P, BADARG);
  3958. }
  3959. BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1)
  3960. {
  3961. Eterm res;
  3962. if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)) {
  3963. res = erts_system_check_request(BIF_P);
  3964. if (is_non_value(res))
  3965. BIF_RET(am_undefined);
  3966. BIF_TRAP1(gather_system_check_res_trap, BIF_P, res);
  3967. }
  3968. BIF_ERROR(BIF_P, BADARG);
  3969. }
/* State toggled by the "hipe_test_reschedule_suspend" command in
 * erts_debug_set_internal_state_2() below. */
static erts_atomic_t hipe_test_reschedule_flag;

#if defined(VALGRIND) && defined(__GNUC__)
/* Force noinline for valgrind suppression */
static void broken_halt_test(Eterm bif_arg_2) __attribute__((noinline));
#endif

/* Deliberately crash-dump the emulator; never returns.
 * Invoked via erts_debug:set_internal_state(broken_halt, Arg). */
static void broken_halt_test(Eterm bif_arg_2)
{
    /* Ugly ugly code used by bif_SUITE:erlang_halt/1 */
#if defined(ERTS_HAVE_TRY_CATCH)
    /* Clearing the run queue pointer provokes a crash during the dump,
     * which is exactly what the test wants to exercise. */
    erts_get_scheduler_data()->run_queue = NULL;
#endif
    erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
/* erts_debug:set_internal_state/2
 *
 * Test/debug-only command dispatcher. Except for the gating command
 * "available_internal_state", every command requires that access has
 * previously been enabled via
 *   erts_debug:set_internal_state(available_internal_state, true),
 * otherwise the call fails with undef. Unknown commands fail with badarg.
 */
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     */
    if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
        && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
        /* Toggle the global gate; log a warning when enabling since this
         * exposes emulator internals. Returns the previous setting. */
        erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
        erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on);
        if (on) {
            erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
            erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
            if (erts_is_alive)
                erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
            erts_dsprintf(dsbufp,
                          "enabled access to the emulator internal state.\n");
            erts_dsprintf(dsbufp,
                          "NOTE: This is an erts internal test feature and "
                          "should *only* be used by OTP test-suites.\n");
            erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
        }
        BIF_RET(prev_on ? am_true : am_false);
    }

    /* All remaining commands are gated on the flag above. */
    if (!erts_atomic_read_nob(&available_internal_state)) {
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {
        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Set the caller's remaining reduction budget. Out-of-range
             * values are silently ignored (still returns true). */
            Sint reds;
            if (term_to_Sint(BIF_ARG_2, &reds) != 0) {
                if (0 <= reds && reds <= CONTEXT_REDS) {
                    /* With save_calls active, fcalls counts down from 0
                     * instead of from CONTEXT_REDS; compensate. */
                    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P))
                        BIF_P->fcalls = reds;
                    else
                        BIF_P->fcalls = reds - CONTEXT_REDS;
                    BIF_P->scheduler_data->virtual_reds = 0;
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("sleep", BIF_ARG_1)) {
            /* Sleep for Arg ms; "block" additionally blocks thread
             * progress (i.e. all managed threads) while sleeping. */
            int block = ERTS_IS_ATOM_STR("block", BIF_ARG_1);
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    if (block)
                        erts_thr_progress_block();
                    while (erts_milli_sleep((long) ms) != 0);
                    if (block)
                        erts_thr_progress_unblock();
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block_scheduler", BIF_ARG_1)) {
            /* Sleep on this scheduler only; other schedulers keep going. */
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    while (erts_milli_sleep((long) ms) != 0);
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            /* Force the next pid/port serial; returns the value actually
             * set, or false if the table rejected it. */
            Uint next;
            if (term_to_Uint(BIF_ARG_2, &next) != 0) {
                Sint res;
                if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                    res = erts_ptab_test_next_id(&erts_proc, 1, next);
                else
                    res = erts_ptab_test_next_id(&erts_port, 1, next);
                if (res < 0)
                    BIF_RET(am_false);
                BIF_RET(erts_make_integer(res, BIF_P));
            }
        }
        else if (ERTS_IS_ATOM_STR("force_gc", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */
            /* NOTE(review): forces GC on the *calling* process (BIF_P),
             * not on rp, and rp's main lock taken by erts_pid2proc is not
             * explicitly released when rp != BIF_P — confirm this is the
             * intended behavior. */
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
            if (!rp) {
                BIF_RET(am_false);
            }
            else {
                ERTS_FORCE_GC(BIF_P);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("gc_state", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            /* Enable/disable GC for the caller; returns previous state. */
            int res, enable;
            switch (BIF_ARG_2) {
            case am_true: enable = 1; break;
            case am_false: enable = 0; break;
            default: BIF_ERROR(BIF_P, BADARG); break;
            }
            res = (BIF_P->flags & F_DISABLE_GC) ? am_false : am_true;
            erts_set_gc_state(BIF_P, enable);
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            /* Arg: {BaseAtom, Count} -> list of atoms whose names hash
             * into the same ETS name-table bucket. */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);
                Uint cnt;
                if (arityval(tpl[0]) == 2 && is_atom(tpl[1]) &&
                    term_to_Uint(tpl[2], &cnt)) {
                    BIF_RET(erts_ets_colliding_names(BIF_P,tpl[1],cnt));
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("binary_loop_limit", BIF_ARG_1)) {
            /* Used by binary_module_SUITE (stdlib) */
            /* 'default' restores the limit; returns the previous limit. */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_binary_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_binary_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
            /* Used by re_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_re_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_re_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("unicode_loop_limit", BIF_ARG_1)) {
            /* Used by unicode_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_unicode_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_unicode_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
            /* Used by hipe test suites */
            /* First call (flag clear, arg /= false): set flag, suspend
             * and yield back into this BIF; on resume the flag is toggled
             * and NIL returned. */
            erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag);
            if (!flag && BIF_ARG_2 != am_false) {
                erts_atomic_set_nob(&hipe_test_reschedule_flag, 1);
                erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
                ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
                                BIF_P, BIF_ARG_1, BIF_ARG_2);
            }
            erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
            BIF_RET(NIL);
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
            /* Used by hipe test suites */
            /* Resume a process suspended by the command above. */
            Eterm res = am_false;
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_STATUS);
            if (rp) {
                erts_resume(rp, ERTS_PROC_LOCK_STATUS);
                res = am_true;
                erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("test_long_gc_sleep", BIF_ARG_1)) {
            /* Inject an artificial sleep into GC for testing. */
            if (term_to_Uint(BIF_ARG_2, &erts_test_long_gc_sleep) > 0)
                BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
            /* Abort the emulator with Arg in the message; never returns. */
            erts_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
            /* Kill the connection to the given (connected) node name. */
            DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
            if (!dep)
                BIF_RET(am_false);
            else {
                Uint32 con_id;
                erts_de_rlock(dep);
                con_id = dep->connection_id;
                erts_de_runlock(dep);
                erts_kill_dist_connection(dep, con_id);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
            /* Yield until the named class of deferred work has completed.
             * An unrecognized Arg leaves flag == 0 and falls through to
             * the badarg at the end of the function. */
            int flag = 0;
            if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
            else if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
            else if (ERTS_IS_ATOM_STR("aux_work", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK;
            if (flag && erts_debug_wait_completed(BIF_P, flag)) {
                ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
            /* Crash-dump the emulator (see broken_halt_test); no return. */
            erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            broken_halt_test(BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
            /* Restore state captured via get_internal_state/1. */
            int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2);
            BIF_RET(res ? am_true : am_false);
        }
        else if (ERTS_IS_ATOM_STR("node_tab_delayed_delete", BIF_ARG_1)) {
            /* node_container_SUITE */
            Sint64 msecs;
            if (term_to_Sint64(BIF_ARG_2, &msecs)) {
                /* Negative value restore original value... */
                erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_debug_test_node_tab_delayed_delete(msecs);
                erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("fill_heap", BIF_ARG_1)) {
            /* Consume (almost) all free heap by writing one dummy bignum
             * header covering the remaining words; Arg == true also
             * forces the next GC to be a fullsweep. */
            UWord left = HeapWordsLeft(BIF_P);
            if (left > 1) {
                Eterm* hp = HAlloc(BIF_P, left);
                *hp = make_pos_bignum_header(left - 1);
            }
            if (BIF_ARG_2 == am_true) {
                FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
            }
            BIF_RET(am_ok);
        }
        else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
            if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
                /* Create an empty magic binary and return
                 * {MagicRef, BinaryAddressAsInteger}. */
                Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
                UWord bin_addr = (UWord) bin;
                Eterm bin_addr_term, magic_ref, res;
                Eterm *hp;
                Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3; /* +3 for TUPLE2 */
                erts_bld_uword(NULL, &sz, bin_addr);
                hp = HAlloc(BIF_P, sz);
                bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
                magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
                res = TUPLE2(hp, magic_ref, bin_addr_term);
                BIF_RET(res);
            }
        }
        else if (ERTS_IS_ATOM_STR("binary", BIF_ARG_1)) {
            /* Allocate a zeroed off-heap (refc) binary of the given size;
             * false if the allocation fails (e.g. absurd size). */
            Sint64 size;
            if (term_to_Sint64(BIF_ARG_2, &size)) {
                Binary* refbin = erts_bin_drv_alloc_fnf(size);
                if (!refbin)
                    BIF_RET(am_false);
                sys_memset(refbin->orig_bytes, 0, size);
                BIF_RET(erts_build_proc_bin(&MSO(BIF_P),
                                            HAlloc(BIF_P, PROC_BIN_SIZE),
                                            refbin));
            }
        }
        else if (ERTS_IS_ATOM_STR("ets_force_trap", BIF_ARG_1)) {
#ifdef ETS_DBG_FORCE_TRAP
            erts_ets_dbg_force_trap = (BIF_ARG_2 == am_true) ? 1 : 0;
            BIF_RET(am_ok);
#else
            BIF_RET(am_notsup);
#endif
        }
        else if (ERTS_IS_ATOM_STR("ets_force_split", BIF_ARG_1)) {
            /* Arg: {Table, Boolean}; toggles forced splitting of
             * catree nodes for the given ETS table. */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);

                if (erts_ets_force_split(tpl[1], tpl[2] == am_true))
                    BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("ets_debug_random_split_join", BIF_ARG_1)) {
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);
                if (erts_ets_debug_random_split_join(tpl[1], tpl[2] == am_true))
                    BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("mbuf", BIF_ARG_1)) {
            /* Copy Arg into a fresh heap fragment attached to the caller,
             * to exercise heap-fragment handling. */
            Uint sz = size_object(BIF_ARG_2);
            ErlHeapFragment* frag = new_message_buffer(sz);
            Eterm *hp = frag->mem;
            Eterm copy = copy_struct(BIF_ARG_2, sz, &hp, &frag->off_heap);
            frag->next = BIF_P->mbuf;
            BIF_P->mbuf = frag;
            BIF_P->mbuf_sz += sz;
            BIF_RET(copy);
        }
    }

    BIF_ERROR(BIF_P, BADARG);
}
  4282. static BIF_RETTYPE
  4283. gather_histograms_helper(Process * c_p, Eterm arg_tuple,
  4284. int gather(Process *, int, int, int, UWord, Eterm))
  4285. {
  4286. SWord hist_start, hist_width, sched_id;
  4287. int msg_count, alloc_num;
  4288. Eterm *args;
  4289. /* This is an internal BIF, so the error checking is mostly left to erlang
  4290. * code. */
  4291. ASSERT(is_tuple_arity(arg_tuple, 5));
  4292. args = tuple_val(arg_tuple);
  4293. for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) {
  4294. if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) {
  4295. break;
  4296. }
  4297. }
  4298. if (alloc_num > ERTS_ALC_A_MAX) {
  4299. BIF_ERROR(c_p, BADARG);
  4300. }
  4301. sched_id = signed_val(args[2]);
  4302. hist_width = signed_val(args[3]);
  4303. hist_start = signed_val(args[4]);
  4304. if (sched_id < 0 || sched_id > erts_no_schedulers) {
  4305. BIF_ERROR(c_p, BADARG);
  4306. }
  4307. msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]);
  4308. BIF_RET(make_small(msg_count));
  4309. }
/* erts_internal:gather_alloc_histograms/1 — see gather_histograms_helper
 * for the expected argument tuple. */
BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1)
{
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_alloc_histograms);
}
/* erts_internal:gather_carrier_info/1 — see gather_histograms_helper
 * for the expected argument tuple. */
BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1)
{
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_carrier_info);
}
#ifdef ERTS_ENABLE_LOCK_COUNT

/* One retained lock-counting entry in a sample snapshot. */
typedef struct {
    /* info->location_count may increase between size calculation and term
     * building, so we cap it at the value sampled in
     * lcnt_build_sample_vector.
     *
     * Shrinking is safe though. */
    int max_location_count;
    erts_lcnt_lock_info_t *info;
} lcnt_sample_t;

/* Growable array of samples; entries hold references that must be
 * released via lcnt_destroy_sample_vector. */
typedef struct lcnt_sample_vector_ {
    lcnt_sample_t *elements;
    size_t size;
} lcnt_sample_vector_t;
  4333. static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
  4334. erts_lcnt_lock_info_t *iterator;
  4335. lcnt_sample_vector_t result;
  4336. size_t allocated_entries;
  4337. allocated_entries = 64;
  4338. result.size = 0;
  4339. result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
  4340. allocated_entries * sizeof(lcnt_sample_t));
  4341. iterator = NULL;
  4342. while(erts_lcnt_iterate_list(list, &iterator)) {
  4343. erts_lcnt_retain_lock_info(iterator);
  4344. result.elements[result.size].max_location_count = iterator->location_count;
  4345. result.elements[result.size].info = iterator;
  4346. result.size++;
  4347. if(result.size >= allocated_entries) {
  4348. allocated_entries *= 2;
  4349. result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
  4350. allocated_entries * sizeof(lcnt_sample_t));
  4351. }
  4352. }
  4353. return result;
  4354. }
  4355. static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
  4356. size_t i;
  4357. for(i = 0; i < vector->size; i++) {
  4358. erts_lcnt_release_lock_info(vector->elements[i].info);
  4359. }
  4360. erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
  4361. }
  4362. /* The size of an integer is not guaranteed to be constant since we're walking
  4363. * over live data, and may cross over into bignum territory between size calc
  4364. * and the actual build. This takes care of that through always assuming the
  4365. * worst, but needs to be fixed up with HRelease once the final term has been
  4366. * built. */
  4367. static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
  4368. Eterm res = THE_NON_VALUE;
  4369. if(szp) {
  4370. *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
  4371. }
  4372. if(hpp) {
  4373. if (IS_USMALL(0, ui)) {
  4374. res = make_small(ui);
  4375. } else {
  4376. res = erts_uint64_to_big(ui, hpp);
  4377. }
  4378. }
  4379. return res;
  4380. }
/* Prepend one per-location statistics tuple to `res`. Standard two-pass
 * builder: with szp set it only accumulates the required heap size; with
 * hpp set it emits the term. */
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
    unsigned int i;
    const char *file;
    Eterm af, uil;
    Eterm uit, uic;
    Eterm uits, uitns, uitn;
    Eterm tt, tstat, tloc, t;
    Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];

    /* term:
     * [{{file, line},
     *   {tries, colls, {seconds, nanoseconds, n_blocks}},
     *   { .. histogram .. }}] */

    file = stats->file ? stats->file : "undefined";

    af = erts_atom_put((byte *)file, sys_strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
    uil = erts_bld_uint( hpp, szp, stats->line);
    tloc = erts_bld_tuple(hpp, szp, 2, af, uil);

    /* Counters are read with relaxed atomics while other threads may be
     * updating them; bld_unstable_uint64 reserves worst-case heap. */
    uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
    uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));

    uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
    uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
    uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
    tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);

    tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);

    for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
        vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
    }
    thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);

    t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
    res = erts_bld_cons( hpp, szp, t, res);

    return res;
}
  4412. static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
  4413. Eterm id = info->id;
  4414. if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK) {
  4415. /* Use registered names as id's for process locks if available. Thread
  4416. * progress is delayed since we may be running on a dirty scheduler. */
  4417. ErtsThrPrgrDelayHandle delay_handle;
  4418. Process *process;
  4419. delay_handle = erts_thr_progress_unmanaged_delay();
  4420. process = erts_proc_lookup(info->id);
  4421. if (process && process->common.u.alive.reg) {
  4422. id = process->common.u.alive.reg->name;
  4423. }
  4424. erts_thr_progress_unmanaged_continue(delay_handle);
  4425. } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
  4426. if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
  4427. const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
  4428. id = erts_atom_put((byte*)name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
  4429. }
  4430. }
  4431. return id;
  4432. }
/* Prepend one {Name, Id, Type, Stats} tuple for a sampled lock to `res`.
 * Two-pass builder (szp pass for sizing, hpp pass for emitting). */
static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
    erts_lcnt_lock_info_t *info = sample->info;

    Eterm name, type, id, stats = NIL, t;
    const char *lock_desc;
    int i;

    /* term: [{name, id, type, stats()}] */

    ASSERT(info->name);

    lock_desc = erts_lock_flags_get_type_name(info->flags);

    type = erts_atom_put((byte*)lock_desc, sys_strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
    name = erts_atom_put((byte*)info->name, sys_strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);

    /* Only attempt to resolve ids when actually emitting the term. This ought
     * to be safe since all immediates are the same size. */
    if(hpp != NULL) {
        id = lcnt_pretty_print_lock_id(info);
    } else {
        id = NIL;
    }

    /* Cap at the location count sampled at snapshot time; it may have
     * grown since (see lcnt_sample_t). */
    for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
        stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
    }

    t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
    res = erts_bld_cons(hpp, szp, t, res);

    return res;
}
/* Build the full lock-count result term from the sampled vectors.
 * Two-pass builder (szp pass for sizing, hpp pass for emitting). */
static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
                                    lcnt_sample_vector_t *current_locks,
                                    lcnt_sample_vector_t *deleted_locks, Eterm res) {
    const char *str_duration = "duration";
    const char *str_locks = "locks";

    Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
    size_t i;

    /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */

    /* duration tuple */
    dts = bld_unstable_uint64(hpp, szp, duration->s);
    dtns = bld_unstable_uint64(hpp, szp, duration->ns);
    tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);

    adur = erts_atom_put((byte *)str_duration, sys_strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
    tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);

    /* lock tuple: current and deleted locks end up in one flat list */
    aloc = erts_atom_put((byte *)str_locks, sys_strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);

    for(i = 0; i < current_locks->size; i++) {
        lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
    }

    for(i = 0; i < deleted_locks->size; i++) {
        lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
    }

    tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);

    res = erts_bld_cons(hpp, szp, tloc, res);
    res = erts_bld_cons(hpp, szp, tdur, res);

    return res;
}
/* Mapping between lock-count category atoms (as strings) and their lock
 * flag bits; terminated by a NULL name. Used by the conversion helpers
 * below. */
static struct {
    const char *name;
    erts_lock_flags_t flag;
} lcnt_category_map[] = {
    {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
    {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
    {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
    {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
    {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
    {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
    {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
    {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
    {NULL, 0}
};
  4498. static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
  4499. int i = 0;
  4500. for(i = 0; lcnt_category_map[i].name != NULL; i++) {
  4501. if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
  4502. return lcnt_category_map[i].flag;
  4503. }
  4504. }
  4505. return 0;
  4506. }
  4507. static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
  4508. Eterm res;
  4509. int i;
  4510. res = NIL;
  4511. for(i = 0; lcnt_category_map[i].name != NULL; i++) {
  4512. if(mask & lcnt_category_map[i].flag) {
  4513. Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name,
  4514. sys_strlen(lcnt_category_map[i].name),
  4515. ERTS_ATOM_ENC_UTF8, 0);
  4516. res = erts_bld_cons(hpp, szp, category, res);
  4517. }
  4518. }
  4519. return res;
  4520. }
  4521. #endif
  4522. BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
  4523. {
  4524. #ifndef ERTS_ENABLE_LOCK_COUNT
  4525. BIF_RET(am_error);
  4526. #else
  4527. erts_lcnt_clear_counters();
  4528. BIF_RET(am_ok);
  4529. #endif
  4530. }
/* erts_debug:lcnt_collect/0 — snapshot current and deleted lock counters
 * and return them as a term; 'error' when lock counting is not compiled
 * in. */
BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
{
#ifndef ERTS_ENABLE_LOCK_COUNT
    BIF_RET(am_error);
#else
    lcnt_sample_vector_t current_locks, deleted_locks;
    erts_lcnt_data_t data;

    Eterm *term_heap_start, *term_heap_end;
    Uint term_heap_size = 0;
    Eterm result;

    data = erts_lcnt_get_data();

    current_locks = lcnt_build_sample_vector(data.current_locks);
    deleted_locks = lcnt_build_sample_vector(data.deleted_locks);

    /* Pass 1: compute an upper bound for the heap usage (integer sizes
     * are over-reserved, see bld_unstable_uint64). */
    lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
                           &current_locks, &deleted_locks, NIL);

    term_heap_start = HAlloc(BIF_P, term_heap_size);
    term_heap_end = term_heap_start;

    /* Pass 2: build the term, then give back the unused tail of the
     * over-sized allocation. */
    result = lcnt_build_result_term(&term_heap_end, NULL,
                                    &data.duration, &current_locks, &deleted_locks, NIL);

    HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);

    lcnt_destroy_sample_vector(&current_locks);
    lcnt_destroy_sample_vector(&deleted_locks);

    BIF_RET(result);
#endif
}
  4556. BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
  4557. {
  4558. #ifdef ERTS_ENABLE_LOCK_COUNT
  4559. if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
  4560. erts_lock_flags_t mask;
  4561. Eterm *term_heap_block;
  4562. Uint term_heap_size;
  4563. mask = erts_lcnt_get_category_mask();
  4564. term_heap_size = 0;
  4565. lcnt_build_category_list(NULL, &term_heap_size, mask);
  4566. term_heap_block = HAlloc(BIF_P, term_heap_size);
  4567. BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
  4568. } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
  4569. if(erts_lcnt_get_preserve_info()) {
  4570. BIF_RET(am_true);
  4571. }
  4572. BIF_RET(am_false);
  4573. }
  4574. #endif
  4575. BIF_ERROR(BIF_P, BADARG);
  4576. }
  4577. BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
  4578. {
  4579. #ifdef ERTS_ENABLE_LOCK_COUNT
  4580. if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
  4581. erts_lock_flags_t category_mask = 0;
  4582. Eterm categories = BIF_ARG_2;
  4583. if(!(is_list(categories) || is_nil(categories))) {
  4584. BIF_ERROR(BIF_P, BADARG);
  4585. }
  4586. while(is_list(categories)) {
  4587. Eterm *cell = list_val(categories);
  4588. erts_lock_flags_t category;
  4589. category = lcnt_atom_to_lock_category(CAR(cell));
  4590. if(!category) {
  4591. Eterm *hp = HAlloc(BIF_P, 4);
  4592. BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
  4593. }
  4594. category_mask |= category;
  4595. categories = CDR(cell);
  4596. }
  4597. erts_lcnt_set_category_mask(category_mask);
  4598. BIF_RET(am_ok);
  4599. } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
  4600. int enabled = (BIF_ARG_2 == am_true);
  4601. if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
  4602. erts_lcnt_set_preserve_info(enabled);
  4603. BIF_RET(am_ok);
  4604. }
  4605. }
  4606. #endif
  4607. BIF_ERROR(BIF_P, BADARG);
  4608. }
  4609. static void os_info_init(void)
  4610. {
  4611. Eterm type = erts_atom_put((byte *) os_type, sys_strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
  4612. Eterm flav;
  4613. int major, minor, build;
  4614. char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
  4615. Eterm* hp;
  4616. os_flavor(buf, 1024);
  4617. flav = erts_atom_put((byte *) buf, sys_strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
  4618. erts_free(ERTS_ALC_T_TMP, (void *) buf);
  4619. hp = erts_alloc(ERTS_ALC_T_LITERAL, (3+4)*sizeof(Eterm));
  4620. os_type_tuple = TUPLE2(hp, type, flav);
  4621. erts_set_literal_tag(&os_type_tuple, hp, 3);
  4622. hp += 3;
  4623. os_version(&major, &minor, &build);
  4624. os_version_tuple = TUPLE3(hp,
  4625. make_small(major),
  4626. make_small(minor),
  4627. make_small(build));
  4628. erts_set_literal_tag(&os_version_tuple, hp, 4);
  4629. }
/* One-time initialization of this module, called during emulator boot:
 * resets the debug/state flags and pre-resolves the Erlang-level export
 * entries ("traps") that the info-gathering BIFs dispatch to. */
void
erts_bif_info_init(void)
{
    /* Flags toggled at runtime via erts_debug BIFs. */
    erts_atomic_init_nob(&available_internal_state, 0);
    erts_atomic_init_nob(&hipe_test_reschedule_flag, 0);

    /* Export-table entries looked up once here so the BIFs can trap to
     * them without a lookup on every call. Each erts_export_put argument
     * triple is (module, function, arity). */
    alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
    alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
    gather_sched_wall_time_res_trap
	= erts_export_put(am_erts_internal, am_gather_sched_wall_time_result, 1);
    gather_gc_info_res_trap
	= erts_export_put(am_erlang, am_gather_gc_info_result, 1);
    gather_io_bytes_trap
	= erts_export_put(am_erts_internal, am_gather_io_bytes, 2);
    gather_msacc_res_trap
	= erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
    gather_system_check_res_trap
	= erts_export_put(am_erts_internal, am_gather_system_check_result, 1);

    is_process_alive_trap = erts_export_put(am_erts_internal, am_is_process_alive, 1);

    /* Sub-module init: process_info option tables and the cached
     * os_type/os_version literal tuples. */
    process_info_init();
    os_info_init();
}