PageRenderTime 55ms CodeModel.GetById 16ms RepoModel.GetById 0ms app.codeStats 0ms

/erts/emulator/beam/erl_node_tables.c

https://github.com/Bwooce/otp
C | 1694 lines | 1356 code | 231 blank | 107 comment | 248 complexity | 6b18dd1a55b30c75a6b849118d672238 MD5 | raw file
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-2-Clause
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 2001-2010. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. #ifdef HAVE_CONFIG_H
  20. # include "config.h"
  21. #endif
  22. #include "global.h"
  23. #include "erl_node_tables.h"
  24. #include "dist.h"
  25. #include "big.h"
  26. #include "error.h"
  27. Hash erts_dist_table;
  28. Hash erts_node_table;
  29. erts_smp_rwmtx_t erts_dist_table_rwmtx;
  30. erts_smp_rwmtx_t erts_node_table_rwmtx;
  31. DistEntry *erts_hidden_dist_entries;
  32. DistEntry *erts_visible_dist_entries;
  33. DistEntry *erts_not_connected_dist_entries;
  34. Sint erts_no_of_hidden_dist_entries;
  35. Sint erts_no_of_visible_dist_entries;
  36. Sint erts_no_of_not_connected_dist_entries;
  37. DistEntry *erts_this_dist_entry;
  38. ErlNode *erts_this_node;
  39. static Uint node_entries;
  40. static Uint dist_entries;
  41. static int references_atoms_need_init = 1;
  42. /* -- The distribution table ---------------------------------------------- */
  43. #ifdef DEBUG
  44. static int
  45. is_in_de_list(DistEntry *dep, DistEntry *dep_list)
  46. {
  47. DistEntry *tdep;
  48. for(tdep = dep_list; tdep; tdep = tdep->next)
  49. if(tdep == dep)
  50. return 1;
  51. return 0;
  52. }
  53. #endif
  54. static HashValue
  55. dist_table_hash(void *dep)
  56. {
  57. return atom_tab(atom_val(((DistEntry *) dep)->sysname))->slot.bucket.hvalue;
  58. }
  59. static int
  60. dist_table_cmp(void *dep1, void *dep2)
  61. {
  62. return (((DistEntry *) dep1)->sysname == ((DistEntry *) dep2)->sysname
  63. ? 0 : 1);
  64. }
  65. static void*
  66. dist_table_alloc(void *dep_tmpl)
  67. {
  68. Eterm chnl_nr;
  69. Eterm sysname;
  70. DistEntry *dep;
  71. erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
  72. rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
  73. if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
  74. return dep_tmpl;
  75. sysname = ((DistEntry *) dep_tmpl)->sysname;
  76. chnl_nr = make_small((Uint) atom_val(sysname));
  77. dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
  78. dist_entries++;
  79. dep->prev = NULL;
  80. erts_refc_init(&dep->refc, -1);
  81. erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
  82. dep->sysname = sysname;
  83. dep->cid = NIL;
  84. dep->connection_id = 0;
  85. dep->status = 0;
  86. dep->flags = 0;
  87. dep->version = 0;
  88. erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
  89. dep->node_links = NULL;
  90. dep->nlinks = NULL;
  91. dep->monitors = NULL;
  92. erts_smp_mtx_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
  93. dep->qflgs = 0;
  94. dep->qsize = 0;
  95. dep->out_queue.first = NULL;
  96. dep->out_queue.last = NULL;
  97. dep->suspended.first = NULL;
  98. dep->suspended.last = NULL;
  99. dep->finalized_out_queue.first = NULL;
  100. dep->finalized_out_queue.last = NULL;
  101. erts_smp_atomic_init(&dep->dist_cmd_scheduled, 0);
  102. erts_port_task_handle_init(&dep->dist_cmd);
  103. dep->send = NULL;
  104. dep->cache = NULL;
  105. /* Link in */
  106. /* All new dist entries are "not connected" */
  107. dep->next = erts_not_connected_dist_entries;
  108. if(erts_not_connected_dist_entries) {
  109. ASSERT(erts_not_connected_dist_entries->prev == NULL);
  110. erts_not_connected_dist_entries->prev = dep;
  111. }
  112. erts_not_connected_dist_entries = dep;
  113. erts_no_of_not_connected_dist_entries++;
  114. return (void *) dep;
  115. }
  116. static void
  117. dist_table_free(void *vdep)
  118. {
  119. DistEntry *dep = (DistEntry *) vdep;
  120. if(dep == erts_this_dist_entry)
  121. return;
  122. ASSERT(is_nil(dep->cid));
  123. ASSERT(dep->nlinks == NULL);
  124. ASSERT(dep->node_links == NULL);
  125. ASSERT(dep->monitors == NULL);
  126. /* Link out */
  127. /* All dist entries about to be removed are "not connected" */
  128. if(dep->prev) {
  129. ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
  130. dep->prev->next = dep->next;
  131. }
  132. else {
  133. ASSERT(erts_not_connected_dist_entries == dep);
  134. erts_not_connected_dist_entries = dep->next;
  135. }
  136. if(dep->next)
  137. dep->next->prev = dep->prev;
  138. ASSERT(erts_no_of_not_connected_dist_entries > 0);
  139. erts_no_of_not_connected_dist_entries--;
  140. ASSERT(!dep->cache);
  141. erts_smp_rwmtx_destroy(&dep->rwmtx);
  142. erts_smp_mtx_destroy(&dep->lnk_mtx);
  143. erts_smp_mtx_destroy(&dep->qlock);
  144. #ifdef DEBUG
  145. sys_memset(vdep, 0x77, sizeof(DistEntry));
  146. #endif
  147. erts_free(ERTS_ALC_T_DIST_ENTRY, (void *) dep);
  148. ASSERT(dist_entries > 1);
  149. dist_entries--;
  150. }
  151. void
  152. erts_dist_table_info(int to, void *to_arg)
  153. {
  154. int lock = !ERTS_IS_CRASH_DUMPING;
  155. if (lock)
  156. erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
  157. hash_info(to, to_arg, &erts_dist_table);
  158. if (lock)
  159. erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
  160. }
  161. DistEntry *
  162. erts_channel_no_to_dist_entry(Uint cno)
  163. {
  164. /*
  165. * For this node (and previous incarnations of this node),
  166. * ERST_INTERNAL_CHANNEL_NO (will always be 0 I guess) is used as
  167. * channel no. For other nodes, the atom index of the atom corresponding
  168. * to the node name is used as channel no.
  169. */
  170. if(cno == ERST_INTERNAL_CHANNEL_NO) {
  171. erts_refc_inc(&erts_this_dist_entry->refc, 2);
  172. return erts_this_dist_entry;
  173. }
  174. if((cno > MAX_ATOM_INDEX)
  175. || (cno >= atom_table_size())
  176. || (atom_tab(cno) == NULL))
  177. return NULL;
  178. /* cno is a valid atom index; find corresponding dist entry (if there
  179. is one) */
  180. return erts_find_dist_entry(make_atom(cno));
  181. }
  182. DistEntry *
  183. erts_sysname_to_connected_dist_entry(Eterm sysname)
  184. {
  185. DistEntry de;
  186. DistEntry *res_dep;
  187. de.sysname = sysname;
  188. if(erts_this_dist_entry->sysname == sysname) {
  189. erts_refc_inc(&erts_this_dist_entry->refc, 2);
  190. return erts_this_dist_entry;
  191. }
  192. erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
  193. res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
  194. if (res_dep) {
  195. erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1);
  196. if (refc < 2) /* Pending delete */
  197. erts_refc_inc(&res_dep->refc, 1);
  198. }
  199. erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
  200. if (res_dep) {
  201. int deref;
  202. erts_smp_rwmtx_rlock(&res_dep->rwmtx);
  203. deref = is_nil(res_dep->cid);
  204. erts_smp_rwmtx_runlock(&res_dep->rwmtx);
  205. if (deref) {
  206. erts_deref_dist_entry(res_dep);
  207. res_dep = NULL;
  208. }
  209. }
  210. return res_dep;
  211. }
  212. DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
  213. {
  214. DistEntry *res;
  215. DistEntry de;
  216. erts_aint_t refc;
  217. res = erts_find_dist_entry(sysname);
  218. if (res)
  219. return res;
  220. de.sysname = sysname;
  221. erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
  222. res = hash_put(&erts_dist_table, (void *) &de);
  223. refc = erts_refc_inctest(&res->refc, 0);
  224. if (refc < 2) /* New or pending delete */
  225. erts_refc_inc(&res->refc, 1);
  226. erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
  227. return res;
  228. }
  229. DistEntry *erts_find_dist_entry(Eterm sysname)
  230. {
  231. DistEntry *res;
  232. DistEntry de;
  233. de.sysname = sysname;
  234. erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
  235. res = hash_get(&erts_dist_table, (void *) &de);
  236. if (res) {
  237. erts_aint_t refc = erts_refc_inctest(&res->refc, 1);
  238. if (refc < 2) /* Pending delete */
  239. erts_refc_inc(&res->refc, 1);
  240. }
  241. erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
  242. return res;
  243. }
  244. void erts_delete_dist_entry(DistEntry *dep)
  245. {
  246. ASSERT(dep != erts_this_dist_entry);
  247. if(dep != erts_this_dist_entry) {
  248. erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
  249. /*
  250. * Another thread might have looked up this dist entry after
  251. * we decided to delete it (refc became zero). If so, the other
  252. * thread incremented refc twice. Once for the new reference
  253. * and once for this thread. Therefore, delete dist entry if
  254. * refc is 0 or -1 after a decrement.
  255. */
  256. if (erts_refc_dectest(&dep->refc, -1) <= 0)
  257. (void) hash_erase(&erts_dist_table, (void *) dep);
  258. erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
  259. }
  260. }
  261. Uint
  262. erts_dist_table_size(void)
  263. {
  264. Uint res;
  265. #ifdef DEBUG
  266. HashInfo hi;
  267. DistEntry *dep;
  268. int i;
  269. #endif
  270. int lock = !ERTS_IS_CRASH_DUMPING;
  271. if (lock)
  272. erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
  273. #ifdef DEBUG
  274. hash_get_info(&hi, &erts_dist_table);
  275. ASSERT(dist_entries == hi.objs);
  276. i = 0;
  277. for(dep = erts_visible_dist_entries; dep; dep = dep->next)
  278. i++;
  279. ASSERT(i == erts_no_of_visible_dist_entries);
  280. i = 0;
  281. for(dep = erts_hidden_dist_entries; dep; dep = dep->next)
  282. i++;
  283. ASSERT(i == erts_no_of_hidden_dist_entries);
  284. i = 0;
  285. for(dep = erts_not_connected_dist_entries; dep; dep = dep->next)
  286. i++;
  287. ASSERT(i == erts_no_of_not_connected_dist_entries);
  288. ASSERT(dist_entries == (erts_no_of_visible_dist_entries
  289. + erts_no_of_hidden_dist_entries
  290. + erts_no_of_not_connected_dist_entries
  291. + 1 /* erts_this_dist_entry */));
  292. #endif
  293. res = (hash_table_sz(&erts_dist_table)
  294. + dist_entries*sizeof(DistEntry)
  295. + erts_dist_cache_size());
  296. if (lock)
  297. erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
  298. return res;
  299. }
  300. void
  301. erts_set_dist_entry_not_connected(DistEntry *dep)
  302. {
  303. ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
  304. erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
  305. ASSERT(dep != erts_this_dist_entry);
  306. ASSERT(is_internal_port(dep->cid));
  307. if(dep->flags & DFLAG_PUBLISHED) {
  308. if(dep->prev) {
  309. ASSERT(is_in_de_list(dep, erts_visible_dist_entries));
  310. dep->prev->next = dep->next;
  311. }
  312. else {
  313. ASSERT(erts_visible_dist_entries == dep);
  314. erts_visible_dist_entries = dep->next;
  315. }
  316. ASSERT(erts_no_of_visible_dist_entries > 0);
  317. erts_no_of_visible_dist_entries--;
  318. }
  319. else {
  320. if(dep->prev) {
  321. ASSERT(is_in_de_list(dep, erts_hidden_dist_entries));
  322. dep->prev->next = dep->next;
  323. }
  324. else {
  325. ASSERT(erts_hidden_dist_entries == dep);
  326. erts_hidden_dist_entries = dep->next;
  327. }
  328. ASSERT(erts_no_of_hidden_dist_entries > 0);
  329. erts_no_of_hidden_dist_entries--;
  330. }
  331. if(dep->next)
  332. dep->next->prev = dep->prev;
  333. dep->status &= ~ERTS_DE_SFLG_CONNECTED;
  334. dep->flags = 0;
  335. dep->prev = NULL;
  336. dep->cid = NIL;
  337. dep->next = erts_not_connected_dist_entries;
  338. if(erts_not_connected_dist_entries) {
  339. ASSERT(erts_not_connected_dist_entries->prev == NULL);
  340. erts_not_connected_dist_entries->prev = dep;
  341. }
  342. erts_not_connected_dist_entries = dep;
  343. erts_no_of_not_connected_dist_entries++;
  344. erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
  345. }
  346. void
  347. erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
  348. {
  349. ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
  350. erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
  351. ASSERT(dep != erts_this_dist_entry);
  352. ASSERT(is_nil(dep->cid));
  353. ASSERT(is_internal_port(cid));
  354. if(dep->prev) {
  355. ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
  356. dep->prev->next = dep->next;
  357. }
  358. else {
  359. ASSERT(erts_not_connected_dist_entries == dep);
  360. erts_not_connected_dist_entries = dep->next;
  361. }
  362. if(dep->next)
  363. dep->next->prev = dep->prev;
  364. ASSERT(erts_no_of_not_connected_dist_entries > 0);
  365. erts_no_of_not_connected_dist_entries--;
  366. dep->status |= ERTS_DE_SFLG_CONNECTED;
  367. dep->flags = flags;
  368. dep->cid = cid;
  369. dep->connection_id++;
  370. dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK;
  371. dep->prev = NULL;
  372. if(flags & DFLAG_PUBLISHED) {
  373. dep->next = erts_visible_dist_entries;
  374. if(erts_visible_dist_entries) {
  375. ASSERT(erts_visible_dist_entries->prev == NULL);
  376. erts_visible_dist_entries->prev = dep;
  377. }
  378. erts_visible_dist_entries = dep;
  379. erts_no_of_visible_dist_entries++;
  380. }
  381. else {
  382. dep->next = erts_hidden_dist_entries;
  383. if(erts_hidden_dist_entries) {
  384. ASSERT(erts_hidden_dist_entries->prev == NULL);
  385. erts_hidden_dist_entries->prev = dep;
  386. }
  387. erts_hidden_dist_entries = dep;
  388. erts_no_of_hidden_dist_entries++;
  389. }
  390. erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
  391. }
  392. /* -- Node table --------------------------------------------------------- */
  393. /* Some large primes */
  394. #define PRIME0 ((HashValue) 268438039)
  395. #define PRIME1 ((HashValue) 268440479)
  396. #define PRIME2 ((HashValue) 268439161)
  397. #define PRIME3 ((HashValue) 268437017)
  398. static HashValue
  399. node_table_hash(void *venp)
  400. {
  401. Uint32 cre = ((ErlNode *) venp)->creation;
  402. HashValue h = atom_tab(atom_val(((ErlNode *) venp)->sysname))->slot.bucket.hvalue;
  403. h *= PRIME0;
  404. h += cre & 0xff;
  405. #if MAX_CREATION >= (1 << 8)
  406. h *= PRIME1;
  407. h += (cre >> 8) & 0xff;
  408. #endif
  409. #if MAX_CREATION >= (1 << 16)
  410. h *= PRIME2;
  411. h += (cre >> 16) & 0xff;
  412. #endif
  413. #if MAX_CREATION >= (1 << 24)
  414. h *= PRIME3;
  415. h += (cre >> 24) & 0xff;
  416. #endif
  417. #if 0
  418. /* XXX Problems in older versions of GCC */
  419. #if MAX_CREATION >= (1UL << 32)
  420. #error "MAX_CREATION larger than size of expected creation storage (Uint32)"
  421. #endif
  422. #endif
  423. return h;
  424. }
  425. static int
  426. node_table_cmp(void *venp1, void *venp2)
  427. {
  428. return ((((ErlNode *) venp1)->sysname == ((ErlNode *) venp2)->sysname
  429. && ((ErlNode *) venp1)->creation == ((ErlNode *) venp2)->creation)
  430. ? 0
  431. : 1);
  432. }
  433. static void*
  434. node_table_alloc(void *venp_tmpl)
  435. {
  436. ErlNode *enp;
  437. if(((ErlNode *) venp_tmpl) == erts_this_node)
  438. return venp_tmpl;
  439. enp = (ErlNode *) erts_alloc(ERTS_ALC_T_NODE_ENTRY, sizeof(ErlNode));
  440. node_entries++;
  441. erts_refc_init(&enp->refc, -1);
  442. enp->creation = ((ErlNode *) venp_tmpl)->creation;
  443. enp->sysname = ((ErlNode *) venp_tmpl)->sysname;
  444. enp->dist_entry = erts_find_or_insert_dist_entry(((ErlNode *) venp_tmpl)->sysname);
  445. return (void *) enp;
  446. }
  447. static void
  448. node_table_free(void *venp)
  449. {
  450. ErlNode *enp = (ErlNode *) venp;
  451. if(enp == erts_this_node)
  452. return;
  453. erts_deref_dist_entry(enp->dist_entry);
  454. #ifdef DEBUG
  455. sys_memset(venp, 0x55, sizeof(ErlNode));
  456. #endif
  457. erts_free(ERTS_ALC_T_NODE_ENTRY, venp);
  458. ASSERT(node_entries > 1);
  459. node_entries--;
  460. }
  461. Uint
  462. erts_node_table_size(void)
  463. {
  464. Uint res;
  465. #ifdef DEBUG
  466. HashInfo hi;
  467. #endif
  468. int lock = !ERTS_IS_CRASH_DUMPING;
  469. if (lock)
  470. erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
  471. #ifdef DEBUG
  472. hash_get_info(&hi, &erts_node_table);
  473. ASSERT(node_entries == hi.objs);
  474. #endif
  475. res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode);
  476. if (lock)
  477. erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
  478. return res;
  479. }
  480. void
  481. erts_node_table_info(int to, void *to_arg)
  482. {
  483. int lock = !ERTS_IS_CRASH_DUMPING;
  484. if (lock)
  485. erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
  486. hash_info(to, to_arg, &erts_node_table);
  487. if (lock)
  488. erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
  489. }
  490. ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
  491. {
  492. ErlNode *res;
  493. ErlNode ne;
  494. ne.sysname = sysname;
  495. ne.creation = creation;
  496. erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
  497. res = hash_get(&erts_node_table, (void *) &ne);
  498. if (res && res != erts_this_node) {
  499. erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
  500. if (refc < 2) /* New or pending delete */
  501. erts_refc_inc(&res->refc, 1);
  502. }
  503. erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
  504. if (res)
  505. return res;
  506. erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
  507. res = hash_put(&erts_node_table, (void *) &ne);
  508. ASSERT(res);
  509. if (res != erts_this_node) {
  510. erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
  511. if (refc < 2) /* New or pending delete */
  512. erts_refc_inc(&res->refc, 1);
  513. }
  514. erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
  515. return res;
  516. }
  517. void erts_delete_node(ErlNode *enp)
  518. {
  519. ASSERT(enp != erts_this_node);
  520. if(enp != erts_this_node) {
  521. erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
  522. /*
  523. * Another thread might have looked up this node after we
  524. * decided to delete it (refc became zero). If so, the other
  525. * thread incremented refc twice. Once for the new reference
  526. * and once for this thread. Therefore, delete node if refc
  527. * is 0 or -1 after a decrement.
  528. */
  529. if (erts_refc_dectest(&enp->refc, -1) <= 0)
  530. (void) hash_erase(&erts_node_table, (void *) enp);
  531. erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
  532. }
  533. }
  534. struct pn_data {
  535. int to;
  536. void *to_arg;
  537. Eterm sysname;
  538. int no_sysname;
  539. int no_total;
  540. };
  541. static void print_node(void *venp, void *vpndp)
  542. {
  543. struct pn_data *pndp = ((struct pn_data *) vpndp);
  544. ErlNode *enp = ((ErlNode *) venp);
  545. if(pndp->sysname == NIL
  546. || enp->sysname == pndp->sysname) {
  547. if (pndp->no_sysname == 0) {
  548. erts_print(pndp->to, pndp->to_arg, "Creation:");
  549. }
  550. if(pndp->sysname == NIL) {
  551. erts_print(pndp->to, pndp->to_arg, "Name: %T ", enp->sysname);
  552. }
  553. erts_print(pndp->to, pndp->to_arg, " %d", enp->creation);
  554. #ifdef DEBUG
  555. erts_print(pndp->to, pndp->to_arg, " (refc=%ld)",
  556. erts_refc_read(&enp->refc, 1));
  557. #endif
  558. pndp->no_sysname++;
  559. }
  560. pndp->no_total++;
  561. }
  562. void erts_print_node_info(int to,
  563. void *to_arg,
  564. Eterm sysname,
  565. int *no_sysname,
  566. int *no_total)
  567. {
  568. int lock = !ERTS_IS_CRASH_DUMPING;
  569. struct pn_data pnd;
  570. pnd.to = to;
  571. pnd.to_arg = to_arg;
  572. pnd.sysname = sysname;
  573. pnd.no_sysname = 0;
  574. pnd.no_total = 0;
  575. if (lock)
  576. erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
  577. hash_foreach(&erts_node_table, print_node, (void *) &pnd);
  578. if (pnd.no_sysname != 0) {
  579. erts_print(to, to_arg, "\n");
  580. }
  581. if (lock)
  582. erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
  583. if(no_sysname)
  584. *no_sysname = pnd.no_sysname;
  585. if(no_total)
  586. *no_total = pnd.no_total;
  587. }
  588. /* ----------------------------------------------------------------------- */
  589. void
  590. erts_set_this_node(Eterm sysname, Uint creation)
  591. {
  592. erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
  593. erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
  594. (void) hash_erase(&erts_dist_table, (void *) erts_this_dist_entry);
  595. erts_this_dist_entry->sysname = sysname;
  596. erts_this_dist_entry->creation = creation;
  597. (void) hash_put(&erts_dist_table, (void *) erts_this_dist_entry);
  598. (void) hash_erase(&erts_node_table, (void *) erts_this_node);
  599. erts_this_node->sysname = sysname;
  600. erts_this_node->creation = creation;
  601. (void) hash_put(&erts_node_table, (void *) erts_this_node);
  602. erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
  603. erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
  604. }
  605. void erts_init_node_tables(void)
  606. {
  607. erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
  608. HashFunctions f;
  609. rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
  610. rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
  611. f.hash = (H_FUN) dist_table_hash;
  612. f.cmp = (HCMP_FUN) dist_table_cmp;
  613. f.alloc = (HALLOC_FUN) dist_table_alloc;
  614. f.free = (HFREE_FUN) dist_table_free;
  615. erts_this_dist_entry = erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
  616. dist_entries = 1;
  617. hash_init(ERTS_ALC_T_DIST_TABLE, &erts_dist_table, "dist_table", 11, f);
  618. erts_hidden_dist_entries = NULL;
  619. erts_visible_dist_entries = NULL;
  620. erts_not_connected_dist_entries = NULL;
  621. erts_no_of_hidden_dist_entries = 0;
  622. erts_no_of_visible_dist_entries = 0;
  623. erts_no_of_not_connected_dist_entries = 0;
  624. erts_this_dist_entry->next = NULL;
  625. erts_this_dist_entry->prev = NULL;
  626. erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
  627. erts_smp_rwmtx_init_opt_x(&erts_this_dist_entry->rwmtx,
  628. &rwmtx_opt,
  629. "dist_entry",
  630. make_small(ERST_INTERNAL_CHANNEL_NO));
  631. erts_this_dist_entry->sysname = am_Noname;
  632. erts_this_dist_entry->cid = NIL;
  633. erts_this_dist_entry->connection_id = 0;
  634. erts_this_dist_entry->status = 0;
  635. erts_this_dist_entry->flags = 0;
  636. erts_this_dist_entry->version = 0;
  637. erts_smp_mtx_init_x(&erts_this_dist_entry->lnk_mtx,
  638. "dist_entry_links",
  639. make_small(ERST_INTERNAL_CHANNEL_NO));
  640. erts_this_dist_entry->node_links = NULL;
  641. erts_this_dist_entry->nlinks = NULL;
  642. erts_this_dist_entry->monitors = NULL;
  643. erts_smp_mtx_init_x(&erts_this_dist_entry->qlock,
  644. "dist_entry_out_queue",
  645. make_small(ERST_INTERNAL_CHANNEL_NO));
  646. erts_this_dist_entry->qflgs = 0;
  647. erts_this_dist_entry->qsize = 0;
  648. erts_this_dist_entry->out_queue.first = NULL;
  649. erts_this_dist_entry->out_queue.last = NULL;
  650. erts_this_dist_entry->suspended.first = NULL;
  651. erts_this_dist_entry->suspended.last = NULL;
  652. erts_this_dist_entry->finalized_out_queue.first = NULL;
  653. erts_this_dist_entry->finalized_out_queue.last = NULL;
  654. erts_smp_atomic_init(&erts_this_dist_entry->dist_cmd_scheduled, 0);
  655. erts_port_task_handle_init(&erts_this_dist_entry->dist_cmd);
  656. erts_this_dist_entry->send = NULL;
  657. erts_this_dist_entry->cache = NULL;
  658. (void) hash_put(&erts_dist_table, (void *) erts_this_dist_entry);
  659. f.hash = (H_FUN) node_table_hash;
  660. f.cmp = (HCMP_FUN) node_table_cmp;
  661. f.alloc = (HALLOC_FUN) node_table_alloc;
  662. f.free = (HFREE_FUN) node_table_free;
  663. hash_init(ERTS_ALC_T_NODE_TABLE, &erts_node_table, "node_table", 11, f);
  664. erts_this_node = erts_alloc(ERTS_ALC_T_NODE_ENTRY, sizeof(ErlNode));
  665. node_entries = 1;
  666. erts_refc_init(&erts_this_node->refc, 1); /* The system itself */
  667. erts_this_node->sysname = am_Noname;
  668. erts_this_node->creation = 0;
  669. erts_this_node->dist_entry = erts_this_dist_entry;
  670. (void) hash_put(&erts_node_table, (void *) erts_this_node);
  671. erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
  672. erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
  673. references_atoms_need_init = 1;
  674. }
  675. #ifdef ERTS_SMP
  676. #ifdef ERTS_ENABLE_LOCK_CHECK
  677. int erts_lc_is_de_rwlocked(DistEntry *dep)
  678. {
  679. return erts_smp_lc_rwmtx_is_rwlocked(&dep->rwmtx);
  680. }
  681. int erts_lc_is_de_rlocked(DistEntry *dep)
  682. {
  683. return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx);
  684. }
  685. #endif
  686. #endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * The following is only supposed to be used for testing and debugging.       *
 *                                                                            *
 * erts_get_node_and_dist_references() returns a table of all references to   *
 * all entries in the node and dist tables. The whole system will be searched *
 * at once. This will give a consistent view over the references, but can     *
 * damage the real-time properties of the system.                             *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
  695. #include "erl_db.h"
  696. #undef INIT_AM
  697. #define INIT_AM(S) AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
  698. static Eterm AM_heap;
  699. static Eterm AM_link;
  700. static Eterm AM_monitor;
  701. static Eterm AM_process;
  702. static Eterm AM_port;
  703. static Eterm AM_ets;
  704. static Eterm AM_binary;
  705. static Eterm AM_match_spec;
  706. static Eterm AM_control;
  707. static Eterm AM_dist;
  708. static Eterm AM_node;
  709. static Eterm AM_dist_references;
  710. static Eterm AM_node_references;
  711. static Eterm AM_system;
  712. static Eterm AM_timer;
  713. #ifdef HYBRID
  714. static Eterm AM_processes;
  715. #endif
  716. static void setup_reference_table(void);
  717. static Eterm reference_table_term(Uint **hpp, Uint *szp);
  718. static void delete_reference_table(void);
  719. #if BIG_UINT_HEAP_SIZE > 3 /* 2-tuple */
  720. #define ID_HEAP_SIZE BIG_UINT_HEAP_SIZE
  721. #else
  722. #define ID_HEAP_SIZE 3 /* 2-tuple */
  723. #endif
  724. typedef struct node_referrer_ {
  725. struct node_referrer_ *next;
  726. int heap_ref;
  727. int link_ref;
  728. int monitor_ref;
  729. int ets_ref;
  730. int bin_ref;
  731. int timer_ref;
  732. int system_ref;
  733. Eterm id;
  734. Uint id_heap[ID_HEAP_SIZE];
  735. } NodeReferrer;
  736. typedef struct {
  737. ErlNode *node;
  738. NodeReferrer *referrers;
  739. } ReferredNode;
  740. typedef struct dist_referrer_ {
  741. struct dist_referrer_ *next;
  742. int heap_ref;
  743. int node_ref;
  744. int ctrl_ref;
  745. Eterm id;
  746. Uint creation;
  747. } DistReferrer;
  748. typedef struct {
  749. DistEntry *dist;
  750. DistReferrer *referrers;
  751. } ReferredDist;
  752. typedef struct inserted_bin_ {
  753. struct inserted_bin_ *next;
  754. Binary *bin_val;
  755. } InsertedBin;
  756. static ReferredNode *referred_nodes;
  757. static int no_referred_nodes;
  758. static ReferredDist *referred_dists;
  759. static int no_referred_dists;
  760. static InsertedBin *inserted_bins;
  761. Eterm
  762. erts_get_node_and_dist_references(struct process *proc)
  763. {
  764. Uint *hp;
  765. Uint size;
  766. Eterm res;
  767. #ifdef DEBUG
  768. Uint *endp;
  769. #endif
  770. erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
  771. erts_smp_block_system(0);
  772. /* No need to lock any thing since we are alone... */
  773. if (references_atoms_need_init) {
  774. INIT_AM(heap);
  775. INIT_AM(link);
  776. INIT_AM(monitor);
  777. INIT_AM(process);
  778. INIT_AM(port);
  779. INIT_AM(ets);
  780. INIT_AM(binary);
  781. INIT_AM(match_spec);
  782. INIT_AM(control);
  783. INIT_AM(dist);
  784. INIT_AM(node);
  785. INIT_AM(dist_references);
  786. INIT_AM(node_references);
  787. INIT_AM(timer);
  788. INIT_AM(system);
  789. #ifdef HYBRID
  790. INIT_AM(processes);
  791. #endif
  792. references_atoms_need_init = 0;
  793. }
  794. setup_reference_table();
  795. /* Get term size */
  796. size = 0;
  797. (void) reference_table_term(NULL, &size);
  798. hp = HAlloc(proc, size);
  799. #ifdef DEBUG
  800. ASSERT(size > 0);
  801. endp = hp + size;
  802. #endif
  803. /* Write term */
  804. res = reference_table_term(&hp, NULL);
  805. ASSERT(endp == hp);
  806. delete_reference_table();
  807. erts_smp_release_system();
  808. erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
  809. return res;
  810. }
  811. #define HEAP_REF 1
  812. #define LINK_REF 2
  813. #define ETS_REF 3
  814. #define BIN_REF 4
  815. #define NODE_REF 5
  816. #define CTRL_REF 6
  817. #define MONITOR_REF 7
  818. #define TIMER_REF 8
  819. #define SYSTEM_REF 9
  820. #define INC_TAB_SZ 10
  821. static void
  822. insert_dist_referrer(ReferredDist *referred_dist,
  823. int type,
  824. Eterm id,
  825. Uint creation)
  826. {
  827. DistReferrer *drp;
  828. for(drp = referred_dist->referrers; drp; drp = drp->next)
  829. if(id == drp->id && (type == CTRL_REF
  830. || creation == drp->creation))
  831. break;
  832. if(!drp) {
  833. drp = (DistReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
  834. sizeof(DistReferrer));
  835. drp->next = referred_dist->referrers;
  836. referred_dist->referrers = drp;
  837. drp->id = id;
  838. drp->creation = creation;
  839. drp->heap_ref = 0;
  840. drp->node_ref = 0;
  841. drp->ctrl_ref = 0;
  842. }
  843. switch (type) {
  844. case NODE_REF: drp->node_ref++; break;
  845. case CTRL_REF: drp->ctrl_ref++; break;
  846. case HEAP_REF: drp->heap_ref++; break;
  847. default: ASSERT(0);
  848. }
  849. }
  850. static void
  851. insert_dist_entry(DistEntry *dist, int type, Eterm id, Uint creation)
  852. {
  853. ReferredDist *rdp = NULL;
  854. int i;
  855. for(i = 0; i < no_referred_dists; i++) {
  856. if(dist == referred_dists[i].dist) {
  857. rdp = &referred_dists[i];
  858. break;
  859. }
  860. }
  861. if(!rdp)
  862. erl_exit(1,
  863. "Reference to non-existing distribution table entry found!\n");
  864. insert_dist_referrer(rdp, type, id, creation);
  865. }
  866. static void
  867. insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
  868. {
  869. NodeReferrer *nrp;
  870. for(nrp = referred_node->referrers; nrp; nrp = nrp->next)
  871. if(EQ(id, nrp->id))
  872. break;
  873. if(!nrp) {
  874. nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
  875. sizeof(NodeReferrer));
  876. nrp->next = referred_node->referrers;
  877. referred_node->referrers = nrp;
  878. if(IS_CONST(id))
  879. nrp->id = id;
  880. else {
  881. Uint *hp = &nrp->id_heap[0];
  882. ASSERT(is_big(id) || is_tuple(id));
  883. nrp->id = copy_struct(id, size_object(id), &hp, NULL);
  884. }
  885. nrp->heap_ref = 0;
  886. nrp->link_ref = 0;
  887. nrp->monitor_ref = 0;
  888. nrp->ets_ref = 0;
  889. nrp->bin_ref = 0;
  890. nrp->timer_ref = 0;
  891. nrp->system_ref = 0;
  892. }
  893. switch (type) {
  894. case HEAP_REF: nrp->heap_ref++; break;
  895. case LINK_REF: nrp->link_ref++; break;
  896. case ETS_REF: nrp->ets_ref++; break;
  897. case BIN_REF: nrp->bin_ref++; break;
  898. case MONITOR_REF: nrp->monitor_ref++; break;
  899. case TIMER_REF: nrp->timer_ref++; break;
  900. case SYSTEM_REF: nrp->system_ref++; break;
  901. default: ASSERT(0);
  902. }
  903. }
  904. static void
  905. insert_node(ErlNode *node, int type, Eterm id)
  906. {
  907. int i;
  908. ReferredNode *rnp = NULL;
  909. for(i = 0; i < no_referred_nodes; i++) {
  910. if(node == referred_nodes[i].node) {
  911. rnp = &referred_nodes[i];
  912. break;
  913. }
  914. }
  915. if (!rnp)
  916. erl_exit(1, "Reference to non-existing node table entry found!\n");
  917. insert_node_referrer(rnp, type, id);
  918. }
  919. static void
  920. insert_erl_node(void *venp, void *unused)
  921. {
  922. ErlNode *enp = (ErlNode *) venp;
  923. insert_dist_entry(enp->dist_entry, NODE_REF, enp->sysname, enp->creation);
  924. }
/* Argument bundle threaded through generic off-heap iteration
 * callbacks into insert_offheap2(): the reference kind and the
 * referrer id to record. */
struct insert_offheap2_arg {
    int type;
    Eterm id;
};

static void insert_offheap(ErlOffHeap *, int, Eterm);
  930. static void
  931. insert_offheap2(ErlOffHeap *oh, void *arg)
  932. {
  933. struct insert_offheap2_arg *a = (struct insert_offheap2_arg *) arg;
  934. insert_offheap(oh, a->type, a->id);
  935. }
/*
 * Walk an off-heap list and record every node reference found in it.
 * External pids/ports/refs contribute a reference of kind 'type' made
 * by 'id'. Match-program binaries are special: each distinct such
 * binary is traversed once (duplicates tracked via the inserted_bins
 * list) and the off-heap terms inside the match program are recorded
 * as BIN_REF references identified by the binary's address.
 */
static void
insert_offheap(ErlOffHeap *oh, int type, Eterm id)
{
    union erl_off_heap_ptr u;
    struct insert_offheap2_arg a;
    a.type = BIN_REF; /* only used for the match-program traversal below */
    for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
	switch (thing_subtag(u.hdr->thing_word)) {
	case REFC_BINARY_SUBTAG:
	    if(IsMatchProgBinary(u.pb->val)) {
		InsertedBin *ib;
		int insert_bin = 1;
		/* Traverse each distinct match program only once. */
		for (ib = inserted_bins; ib; ib = ib->next)
		    if(ib->bin_val == u.pb->val) {
			insert_bin = 0;
			break;
		    }
		if (insert_bin) {
#if HALFWORD_HEAP
		    UWord val = (UWord) u.pb->val;
		    DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE*2); /* extra place allocated */
#else
		    DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE);
#endif
		    Uint *hp = &id_heap[0];
		    InsertedBin *nib;
		    /* Build a bignum of the binary's address to use as
		       the referrer id for terms inside the match prog. */
#if HALFWORD_HEAP
		    int actual_need = BIG_UWORD_HEAP_SIZE(val);
		    ASSERT(actual_need <= (BIG_UINT_HEAP_SIZE*2));
		    UseTmpHeapNoproc(actual_need);
		    a.id = erts_bld_uword(&hp, NULL, (UWord) val);
#else
		    UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
		    a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
#endif
		    erts_match_prog_foreach_offheap(u.pb->val,
						    insert_offheap2,
						    (void *) &a);
		    /* Remember this binary so it is not traversed again. */
		    nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
		    nib->bin_val = u.pb->val;
		    nib->next = inserted_bins;
		    inserted_bins = nib;
#if HALFWORD_HEAP
		    UnUseTmpHeapNoproc(actual_need);
#else
		    UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
#endif
		}
	    }
	    break;
	case FUN_SUBTAG:
	    break; /* No need to */
	default:
	    /* Everything else in an off-heap list is an external
	       pid/port/ref carrying an ErlNode pointer. */
	    ASSERT(is_external_header(u.hdr->thing_word));
	    insert_node(u.ext->node, type, id);
	    break;
	}
    }
}
  995. static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
  996. {
  997. Eterm *idp = p;
  998. if(is_external(monitor->pid))
  999. insert_node(external_thing_ptr(monitor->pid)->node, MONITOR_REF, *idp);
  1000. if(is_external(monitor->ref))
  1001. insert_node(external_thing_ptr(monitor->ref)->node, MONITOR_REF, *idp);
  1002. }
  1003. static void doit_insert_link(ErtsLink *lnk, void *p)
  1004. {
  1005. Eterm *idp = p;
  1006. if(is_external(lnk->pid))
  1007. insert_node(external_thing_ptr(lnk->pid)->node, LINK_REF,
  1008. *idp);
  1009. }
  1010. static void
  1011. insert_monitors(ErtsMonitor *monitors, Eterm id)
  1012. {
  1013. erts_doforall_monitors(monitors,&doit_insert_monitor,&id);
  1014. }
  1015. static void
  1016. insert_links(ErtsLink *lnk, Eterm id)
  1017. {
  1018. erts_doforall_links(lnk,&doit_insert_link,&id);
  1019. }
  1020. static void doit_insert_link2(ErtsLink *lnk, void *p)
  1021. {
  1022. Eterm *idp = p;
  1023. if(is_external(lnk->pid))
  1024. insert_node(external_thing_ptr(lnk->pid)->node, LINK_REF,
  1025. *idp);
  1026. insert_links(ERTS_LINK_ROOT(lnk), *idp);
  1027. }
  1028. static void
  1029. insert_links2(ErtsLink *lnk, Eterm id)
  1030. {
  1031. erts_doforall_links(lnk,&doit_insert_link2,&id);
  1032. }
  1033. static void
  1034. insert_ets_table(DbTable *tab, void *unused)
  1035. {
  1036. struct insert_offheap2_arg a;
  1037. a.type = ETS_REF;
  1038. a.id = tab->common.id;
  1039. erts_db_foreach_offheap(tab, insert_offheap2, (void *) &a);
  1040. }
  1041. static void
  1042. insert_bif_timer(Eterm receiver, Eterm msg, ErlHeapFragment *bp, void *arg)
  1043. {
  1044. if (bp) {
  1045. DeclareTmpHeapNoproc(heap,3);
  1046. UseTmpHeapNoproc(3);
  1047. insert_offheap(&bp->off_heap,
  1048. TIMER_REF,
  1049. (is_internal_pid(receiver)
  1050. ? receiver
  1051. : TUPLE2(&heap[0], AM_process, receiver)));
  1052. UnUseTmpHeapNoproc(3);
  1053. }
  1054. }
  1055. static void
  1056. init_referred_node(void *node, void *unused)
  1057. {
  1058. referred_nodes[no_referred_nodes].node = (ErlNode *) node;
  1059. referred_nodes[no_referred_nodes].referrers = NULL;
  1060. no_referred_nodes++;
  1061. }
  1062. static void
  1063. init_referred_dist(void *dist, void *unused)
  1064. {
  1065. referred_dists[no_referred_dists].dist = (DistEntry *) dist;
  1066. referred_dists[no_referred_dists].referrers = NULL;
  1067. no_referred_dists++;
  1068. }
  1069. #ifdef ERTS_SMP
  1070. static void
  1071. insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
  1072. {
  1073. insert_offheap(&bp->off_heap, HEAP_REF, to);
  1074. }
  1075. #endif
/*
 * Build the in-memory reference table: for every ErlNode and
 * DistEntry currently in the system, collect all referrers
 * (processes, ports, ets tables, binaries, bif timers, dist
 * structures and the system itself) together with per-kind reference
 * counts. The result is left in referred_nodes/referred_dists for
 * reference_table_term() to convert to an Erlang term, and must be
 * released afterwards with delete_reference_table().
 */
static void
setup_reference_table(void)
{
    ErlHeapFragment *hfp;
    DistEntry *dep;
    HashInfo hi;
    int i;
    DeclareTmpHeapNoproc(heap,3);
    inserted_bins = NULL;
    /* One ReferredNode slot per node table entry... */
    hash_get_info(&hi, &erts_node_table);
    referred_nodes = erts_alloc(ERTS_ALC_T_NC_TMP,
				hi.objs*sizeof(ReferredNode));
    no_referred_nodes = 0;
    hash_foreach(&erts_node_table, init_referred_node, NULL);
    ASSERT(no_referred_nodes == hi.objs);
    /* ...and one ReferredDist slot per dist table entry. */
    hash_get_info(&hi, &erts_dist_table);
    referred_dists = erts_alloc(ERTS_ALC_T_NC_TMP,
				hi.objs*sizeof(ReferredDist));
    no_referred_dists = 0;
    hash_foreach(&erts_dist_table, init_referred_dist, NULL);
    ASSERT(no_referred_dists == hi.objs);
    /* Go through the whole system, and build a table of all references
       to ErlNode and DistEntry structures */
    UseTmpHeapNoproc(3);
    insert_node(erts_this_node,
		SYSTEM_REF,
		TUPLE2(&heap[0], AM_system, am_undefined));
#ifdef HYBRID
    /* Insert Heap */
    insert_offheap(&erts_global_offheap,
		   HEAP_REF,
		   TUPLE2(&heap[0], AM_processes, am_undefined));
#endif
    UnUseTmpHeapNoproc(3);
    /* Insert all processes */
    for (i = 0; i < erts_max_processes; i++)
	if (process_tab[i]) {
	    ErlMessage *msg;
	    /* Insert Heap */
	    insert_offheap(&(process_tab[i]->off_heap),
			   HEAP_REF,
			   process_tab[i]->id);
	    /* Insert message buffers */
	    for(hfp = process_tab[i]->mbuf; hfp; hfp = hfp->next)
		insert_offheap(&(hfp->off_heap),
			       HEAP_REF,
			       process_tab[i]->id);
	    /* Insert heap fragments attached to queued messages; a
	       still-undecoded distributed message instead references
	       its originating dist entry. */
	    for (msg = process_tab[i]->msg.first; msg; msg = msg->next) {
		ErlHeapFragment *heap_frag = NULL;
		if (msg->data.attached) {
		    if (is_value(ERL_MESSAGE_TERM(msg)))
			heap_frag = msg->data.heap_frag;
		    else {
			if (msg->data.dist_ext->dep)
			    insert_dist_entry(msg->data.dist_ext->dep,
					      HEAP_REF, process_tab[i]->id, 0);
			if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
			    heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
		    }
		}
		if (heap_frag)
		    insert_offheap(&(heap_frag->off_heap),
				   HEAP_REF,
				   process_tab[i]->id);
	    }
#ifdef ERTS_SMP
	    /* Same treatment for the SMP in-queue. */
	    for (msg = process_tab[i]->msg_inq.first; msg; msg = msg->next) {
		ErlHeapFragment *heap_frag = NULL;
		if (msg->data.attached) {
		    if (is_value(ERL_MESSAGE_TERM(msg)))
			heap_frag = msg->data.heap_frag;
		    else {
			if (msg->data.dist_ext->dep)
			    insert_dist_entry(msg->data.dist_ext->dep,
					      HEAP_REF, process_tab[i]->id, 0);
			if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
			    heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
		    }
		}
		if (heap_frag)
		    insert_offheap(&(heap_frag->off_heap),
				   HEAP_REF,
				   process_tab[i]->id);
	    }
#endif
	    /* Insert links */
	    if(process_tab[i]->nlinks)
		insert_links(process_tab[i]->nlinks, process_tab[i]->id);
	    if(process_tab[i]->monitors)
		insert_monitors(process_tab[i]->monitors, process_tab[i]->id);
	    /* Insert controller */
	    {
		DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(process_tab[i]);
		if (dep)
		    insert_dist_entry(dep, CTRL_REF, process_tab[i]->id, 0);
	    }
	}
#ifdef ERTS_SMP
    erts_foreach_sys_msg_in_q(insert_sys_msg);
#endif
    /* Insert all ports */
    for (i = 0; i < erts_max_ports; i++) {
	if (erts_port[i].status & ERTS_PORT_SFLGS_DEAD)
	    continue;
	/* Insert links */
	if(erts_port[i].nlinks)
	    insert_links(erts_port[i].nlinks, erts_port[i].id);
	/* Insert port data */
	for(hfp = erts_port[i].bp; hfp; hfp = hfp->next)
	    insert_offheap(&(hfp->off_heap), HEAP_REF, erts_port[i].id);
	/* Insert controller */
	if (erts_port[i].dist_entry)
	    insert_dist_entry(erts_port[i].dist_entry,
			      CTRL_REF,
			      erts_port[i].id,
			      0);
    }
    { /* Add binaries stored elsewhere: the default trace match specs
	 are wrapped in fake ProcBins so insert_offheap() can walk
	 them like an ordinary off-heap list. */
	ErlOffHeap oh;
	ProcBin pb[2];
	int i = 0;
	Binary *default_match_spec;
	Binary *default_meta_match_spec;
	oh.first = NULL;
	/* Only the ProcBin members thing_word, val and next will be inspected
	   (by insert_offheap()) */
#undef  ADD_BINARY
#define ADD_BINARY(Bin)					     \
	if ((Bin)) {					     \
	    pb[i].thing_word = REFC_BINARY_SUBTAG;	     \
	    pb[i].val = (Bin);				     \
	    pb[i].next = oh.first;			     \
	    oh.first = (struct erl_off_heap_header*) &pb[i]; \
	    i++;					     \
	}
	erts_get_default_trace_pattern(NULL,
				       &default_match_spec,
				       &default_meta_match_spec,
				       NULL,
				       NULL);
	ADD_BINARY(default_match_spec);
	ADD_BINARY(default_meta_match_spec);
	insert_offheap(&oh, BIN_REF, AM_match_spec);
#undef ADD_BINARY
    }
    /* Insert all dist links */
    for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
	if(dep->nlinks)
	    insert_links2(dep->nlinks, dep->sysname);
	if(dep->node_links)
	    insert_links(dep->node_links, dep->sysname);
	if(dep->monitors)
	    insert_monitors(dep->monitors, dep->sysname);
    }
    for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
	if(dep->nlinks)
	    insert_links2(dep->nlinks, dep->sysname);
	if(dep->node_links)
	    insert_links(dep->node_links, dep->sysname);
	if(dep->monitors)
	    insert_monitors(dep->monitors, dep->sysname);
    }
    /* Not connected dist entries should not have any links,
       but inspect them anyway */
    for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
	if(dep->nlinks)
	    insert_links2(dep->nlinks, dep->sysname);
	if(dep->node_links)
	    insert_links(dep->node_links, dep->sysname);
	if(dep->monitors)
	    insert_monitors(dep->monitors, dep->sysname);
    }
    /* Insert all ets tables */
    erts_db_foreach_table(insert_ets_table, NULL);
    /* Insert all bif timers */
    erts_bif_timer_foreach(insert_bif_timer, NULL);
    /* Insert node table (references to dist) */
    hash_foreach(&erts_node_table, insert_erl_node, NULL);
}
  1256. /*
  1257. Returns an erlang term on this format:
  1258. {{node_references,
  1259. [{{Node, Creation}, Refc,
  1260. [{{ReferrerType, ID},
  1261. [{ReferenceType,References},
  1262. '...']},
  1263. '...']},
  1264. '...']},
  1265. {dist_references,
  1266. [{Node, Refc,
  1267. [{{ReferrerType, ID},
  1268. [{ReferenceType,References},
  1269. '...']},
  1270. '...']},
  1271. '...']}}
  1272. */
/*
 * Convert the collected referred_nodes/referred_dists data to the
 * Erlang term described in the comment above. Uses the standard
 * erts_bld_* two-pass convention: with hpp == NULL the size is
 * accumulated into *szp; with hpp set the term is actually built.
 * Both passes must therefore perform identical MK_* sequences.
 */
static Eterm
reference_table_term(Uint **hpp, Uint *szp)
{
#undef  MK_2TUP
#undef  MK_3TUP
#undef  MK_CONS
#undef  MK_UINT
#define MK_2TUP(E1, E2)		erts_bld_tuple(hpp, szp, 2, (E1), (E2))
#define MK_3TUP(E1, E2, E3)	erts_bld_tuple(hpp, szp, 3, (E1), (E2), (E3))
#define MK_CONS(CAR, CDR)	erts_bld_cons(hpp, szp, (CAR), (CDR))
#define MK_UINT(UI)		erts_bld_uint(hpp, szp, (UI))
    int i;
    Eterm tup;
    Eterm tup2;
    Eterm nl = NIL;
    Eterm dl = NIL;
    Eterm nrid;
    for(i = 0; i < no_referred_nodes; i++) {
	NodeReferrer *nrp;
	Eterm nril = NIL;
	for(nrp = referred_nodes[i].referrers; nrp; nrp = nrp->next) {
	    Eterm nrl = NIL;
	    /* NodeReferenceList = [{ReferenceType,References}] */
	    if(nrp->heap_ref) {
		tup = MK_2TUP(AM_heap, MK_UINT(nrp->heap_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->link_ref) {
		tup = MK_2TUP(AM_link, MK_UINT(nrp->link_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->monitor_ref) {
		tup = MK_2TUP(AM_monitor, MK_UINT(nrp->monitor_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->ets_ref) {
		tup = MK_2TUP(AM_ets, MK_UINT(nrp->ets_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->bin_ref) {
		tup = MK_2TUP(AM_binary, MK_UINT(nrp->bin_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->timer_ref) {
		tup = MK_2TUP(AM_timer, MK_UINT(nrp->timer_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->system_ref) {
		tup = MK_2TUP(AM_system, MK_UINT(nrp->system_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    /* Non-immediate referrer ids live in the referrer's private
	       heap area and must be copied to the result heap. */
	    nrid = nrp->id;
	    if (!IS_CONST(nrp->id)) {
		Uint nrid_sz = size_object(nrp->id);
		if (szp)
		    *szp += nrid_sz;
		if (hpp)
		    nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
	    }
	    /* Classify the referrer by its id; the ASSERTs document
	       which reference kinds each referrer class may hold. */
	    if (is_internal_pid(nrid) || nrid == am_error_logger) {
		ASSERT(!nrp->ets_ref && !nrp->bin_ref && !nrp->system_ref);
		tup = MK_2TUP(AM_process, nrid);
	    }
	    else if (is_tuple(nrid)) {
		Eterm *t;
		ASSERT(!nrp->ets_ref && !nrp->bin_ref);
		t = tuple_val(nrid);
		ASSERT(2 == arityval(t[0]));
		tup = MK_2TUP(t[1], t[2]);
	    }
	    else if(is_internal_port(nrid)) {
		ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref
		       && !nrp->timer_ref && !nrp->system_ref);
		tup = MK_2TUP(AM_port, nrid);
	    }
	    else if(nrp->ets_ref) {
		ASSERT(!nrp->heap_ref && !nrp->link_ref &&
		       !nrp->monitor_ref && !nrp->bin_ref
		       && !nrp->timer_ref && !nrp->system_ref);
		tup = MK_2TUP(AM_ets, nrid);
	    }
	    else if(nrp->bin_ref) {
		ASSERT(is_small(nrid) || is_big(nrid));
		ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->link_ref &&
		       !nrp->monitor_ref && !nrp->timer_ref
		       && !nrp->system_ref);
		tup = MK_2TUP(AM_match_spec, nrid);
	    }
	    else {
		ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref);
		ASSERT(is_atom(nrid));
		tup = MK_2TUP(AM_dist, nrid);
	    }
	    tup = MK_2TUP(tup, nrl);
	    /* NodeReferenceIdList = [{{ReferrerType, ID}, NodeReferenceList}] */
	    nril = MK_CONS(tup, nril);
	}
	/* NodeList = [{{Node, Creation}, Refc, NodeReferenceIdList}] */
	tup = MK_2TUP(referred_nodes[i].node->sysname,
		      MK_UINT(referred_nodes[i].node->creation));
	tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)), nril);
	nl = MK_CONS(tup, nl);
    }
    for(i = 0; i < no_referred_dists; i++) {
	DistReferrer *drp;
	Eterm dril = NIL;
	for(drp = referred_dists[i].referrers; drp; drp = drp->next) {
	    Eterm drl = NIL;
	    /* DistReferenceList = [{ReferenceType,References}] */
	    if(drp->node_ref) {
		tup = MK_2TUP(AM_node, MK_UINT(drp->node_ref));
		drl = MK_CONS(tup, drl);
	    }
	    if(drp->ctrl_ref) {
		tup = MK_2TUP(AM_control, MK_UINT(drp->ctrl_ref));
		drl = MK_CONS(tup, drl);
	    }
	    if(drp->heap_ref) {
		tup = MK_2TUP(AM_heap, MK_UINT(drp->heap_ref));
		drl = MK_CONS(tup, drl);
	    }
	    /* Classify the dist referrer: process, controlling port,
	       or node-table entry (atom id plus creation). */
	    if (is_internal_pid(drp->id)) {
		ASSERT(!drp->node_ref);
		tup = MK_2TUP(AM_process, drp->id);
	    }
	    else if(is_internal_port(drp->id)) {
		ASSERT(drp->ctrl_ref && !drp->node_ref);
		tup = MK_2TUP(AM_port, drp->id);
	    }
	    else {
		ASSERT(!drp->ctrl_ref && drp->node_ref);
		ASSERT(is_atom(drp->id));
		tup = MK_2TUP(drp->id, MK_UINT(drp->creation));
		tup = MK_2TUP(AM_node, tup);
	    }
	    tup = MK_2TUP(tup, drl);
	    /* DistReferenceIdList =
	       [{{ReferrerType, ID}, DistReferenceList}] */
	    dril = MK_CONS(tup, dril);
	}
	/* DistList = [{Dist, Refc, ReferenceIdList}] */
	tup = MK_3TUP(referred_dists[i].dist->sysname,
		      MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)),
		      dril);
	dl = MK_CONS(tup, dl);
    }
    /* {{node_references, NodeList}, {dist_references, DistList}} */
    tup = MK_2TUP(AM_node_references, nl);
    tup2 = MK_2TUP(AM_dist_references, dl);
    tup = MK_2TUP(tup, tup2);
    return tup;
#undef  MK_2TUP
#undef  MK_3TUP
#undef  MK_CONS
#undef  MK_UINT
}
  1429. static void
  1430. delete_reference_table(void)
  1431. {
  1432. Uint i;
  1433. for(i = 0; i < no_referred_nodes; i++) {
  1434. NodeReferrer *nrp;
  1435. NodeReferrer *tnrp;
  1436. nrp = referred_nodes[i].referrers;
  1437. while(nrp) {
  1438. tnrp = nrp;
  1439. nrp = nrp->next;
  1440. erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp);
  1441. }
  1442. }
  1443. if (referred_nodes)
  1444. erts_free(ERTS_ALC_T_NC_TMP, (void *) referred_nodes);
  1445. for(i = 0; i < no_referred_dists; i++) {
  1446. DistReferrer *drp;
  1447. DistReferrer *tdrp;
  1448. drp = referred_dists[i].referrers;
  1449. while(drp) {
  1450. tdrp = drp;
  1451. drp = drp->next;
  1452. erts_free(ERTS_ALC_T_NC_TMP, (void *) tdrp);
  1453. }
  1454. }
  1455. if (referred_dists)
  1456. erts_free(ERTS_ALC_T_NC_TMP, (void *) referred_dists);
  1457. while(inserted_bins) {
  1458. InsertedBin *ib = inserted_bins;
  1459. inserted_bins = inserted_bins->next;
  1460. erts_free(ERTS_ALC_T_NC_TMP, (void *)ib);
  1461. }
  1462. }