/erts/emulator/beam/erl_message.c

https://github.com/Bwooce/otp · C · 1006 lines · 818 code · 103 blank · 85 comment · 143 complexity · f283de4f8cf989f68405e51d4c2f8725 MD5 · raw file

  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1997-2010. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. /*
  20. * Message passing primitives.
  21. */
  22. #ifdef HAVE_CONFIG_H
  23. # include "config.h"
  24. #endif
  25. #include "sys.h"
  26. #include "erl_vm.h"
  27. #include "global.h"
  28. #include "erl_message.h"
  29. #include "erl_process.h"
  30. #include "erl_nmgc.h"
  31. #include "erl_binary.h"
  32. ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
  33. ErlMessage,
  34. ERL_MESSAGE_BUF_SZ,
  35. ERTS_ALC_T_MSG_REF)
  36. #if defined(DEBUG) && 0
  37. #define HARD_DEBUG
  38. #else
  39. #undef HARD_DEBUG
  40. #endif
  41. static ERTS_INLINE int in_heapfrag(const Eterm* ptr, const ErlHeapFragment *bp)
  42. {
  43. return ((unsigned)(ptr - bp->mem) < bp->used_size);
  44. }
  45. void
  46. init_message(void)
  47. {
  48. init_message_alloc();
  49. }
  50. void
  51. free_message(ErlMessage* mp)
  52. {
  53. message_free(mp);
  54. }
  55. /* Allocate message buffer (size in words) */
  56. ErlHeapFragment*
  57. new_message_buffer(Uint size)
  58. {
  59. ErlHeapFragment* bp;
  60. bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG,
  61. ERTS_HEAP_FRAG_SIZE(size));
  62. ERTS_INIT_HEAP_FRAG(bp, size);
  63. return bp;
  64. }
/*
 * Resize heap fragment 'bp' to hold exactly 'size' words, relocating the
 * 'brefs_size' term references in 'brefs' if the realloc moved the block.
 * Returns the (possibly moved) fragment; 'bp' must not be used afterwards.
 *
 * NOTE: only handles a single fragment (bp->next is ignored); callers
 * pass freshly decoded single-fragment buffers.
 */
ErlHeapFragment*
erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
                           Eterm *brefs, Uint brefs_size)
{
#ifdef DEBUG
    int i;
#endif
#ifdef HARD_DEBUG
    ErlHeapFragment *dbg_bp;
    Eterm *dbg_brefs;
    Uint dbg_size;
    Uint dbg_tot_size;
    Eterm *dbg_hp;
#endif
    ErlHeapFragment* nbp;
    /* ToDo: Make use of 'used_size' to avoid realloc
       when shrinking just a few words */
#ifdef DEBUG
    /* Sanity: every non-immediate ref must point into the surviving
     * prefix of the fragment before we shrink/grow it. */
    {
        Uint off_sz = size < bp->used_size ? size : bp->used_size;
        for (i = 0; i < brefs_size; i++) {
            Eterm *ptr;
            if (is_immed(brefs[i]))
                continue;
            ptr = ptr_val(brefs[i]);
            ASSERT(&bp->mem[0] <= ptr && ptr < &bp->mem[0] + off_sz);
        }
    }
#endif
    /* Already the requested size; nothing to do. */
    if (size == bp->used_size)
        return bp;
#ifdef HARD_DEBUG
    /* Keep deep copies of all referenced terms so we can verify after the
     * resize that relocation preserved them (eq() check at the end). */
    dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
    dbg_bp = new_message_buffer(bp->used_size);
    dbg_hp = dbg_bp->mem;
    dbg_tot_size = 0;
    for (i = 0; i < brefs_size; i++) {
        dbg_size = size_object(brefs[i]);
        dbg_tot_size += dbg_size;
        dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp,
                                   &dbg_bp->off_heap);
    }
    ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size));
#endif
    /* Old size is the ALLOCATED size, new size the requested one. */
    nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG,
                                               (void *) bp,
                                               ERTS_HEAP_FRAG_SIZE(bp->alloc_size),
                                               ERTS_HEAP_FRAG_SIZE(size));
    if (bp != nbp) {
        /* The block moved: patch all in-fragment pointers (off-heap chain,
         * heap words, and the caller's refs) by the move offset.  'sp'/'ep'
         * bound the old address range that needs patching. */
        Uint off_sz = size < nbp->used_size ? size : nbp->used_size;
        Eterm *sp = &bp->mem[0];
        Eterm *ep = sp + off_sz;
        Sint offs = &nbp->mem[0] - sp;
        erts_offset_off_heap(&nbp->off_heap, offs, sp, ep);
        erts_offset_heap(&nbp->mem[0], off_sz, offs, sp, ep);
        if (brefs && brefs_size)
            erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep);
#ifdef DEBUG
        for (i = 0; i < brefs_size; i++) {
            Eterm *ptr;
            if (is_immed(brefs[i]))
                continue;
            ptr = ptr_val(brefs[i]);
            ASSERT(&nbp->mem[0] <= ptr && ptr < &nbp->mem[0] + off_sz);
        }
#endif
    }
    nbp->alloc_size = size;
    nbp->used_size = size;
#ifdef HARD_DEBUG
    for (i = 0; i < brefs_size; i++)
        ASSERT(eq(dbg_brefs[i], brefs[i]));
    free_message_buffer(dbg_bp);
    erts_free(ERTS_ALC_T_UNDEF, dbg_brefs);
#endif
    return nbp;
}
  142. void
  143. erts_cleanup_offheap(ErlOffHeap *offheap)
  144. {
  145. union erl_off_heap_ptr u;
  146. for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
  147. switch (thing_subtag(u.hdr->thing_word)) {
  148. case REFC_BINARY_SUBTAG:
  149. if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
  150. erts_bin_free(u.pb->val);
  151. }
  152. break;
  153. case FUN_SUBTAG:
  154. if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
  155. erts_erase_fun_entry(u.fun->fe);
  156. }
  157. break;
  158. default:
  159. ASSERT(is_external_header(u.hdr->thing_word));
  160. erts_deref_node_entry(u.ext->node);
  161. break;
  162. }
  163. }
  164. }
  165. void
  166. free_message_buffer(ErlHeapFragment* bp)
  167. {
  168. ASSERT(bp != NULL);
  169. do {
  170. ErlHeapFragment* next_bp = bp->next;
  171. erts_cleanup_offheap(&bp->off_heap);
  172. ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
  173. ERTS_HEAP_FRAG_SIZE(bp->size));
  174. bp = next_bp;
  175. }while (bp != NULL);
  176. }
/*
 * Attach heap fragment 'bp' (may be NULL) to process 'proc': link it
 * into the process' mbuf list, account its size, force a GC so the
 * fragment is eventually collected, and splice the fragment's off-heap
 * chain onto the process' off-heap chain (fragment entries first).
 */
static ERTS_INLINE void
link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
{
    if (bp) {
        /* Link the message buffer */
        bp->next = MBUF(proc);
        MBUF(proc) = bp;
        MBUF_SIZE(proc) += bp->used_size;
        /* Fragments make heap walking costly; request a GC. */
        FLAGS(proc) |= F_FORCE_GC;

        /* Move any off_heap's into the process */
        if (bp->off_heap.first != NULL) {
            /* Find the tail of the fragment's off-heap list... */
            struct erl_off_heap_header** next_p = &bp->off_heap.first;
            while (*next_p != NULL) {
                next_p = &((*next_p)->next);
            }
            /* ...and append the process' existing list behind it, so the
             * fragment's entries become the new head of MSO(proc). */
            *next_p = MSO(proc).first;
            MSO(proc).first = bp->off_heap.first;
            bp->off_heap.first = NULL;
            OH_OVERHEAD(&(MSO(proc)), bp->off_heap.overhead);
        }
    }
}
/*
 * Decode the externally-formatted term 'dist_extp' onto a heap:
 * either the heap of process 'pp' (locks in *plcksp; an overflow
 * fragment may be returned in *bpp) or, when pp is NULL, a fresh
 * message buffer returned in *bpp.  A non-NIL *tokenp (seq-trace token,
 * stored in the dist-ext trailer fragment) is copied along and updated.
 *
 * Returns the decoded term, or THE_NON_VALUE on decode failure (in
 * which case any allocated buffer/heap space is released and *bpp is
 * NULL).  'dist_extp' is always freed before returning.
 */
Eterm
erts_msg_distext2heap(Process *pp,
                      ErtsProcLocks *plcksp,
                      ErlHeapFragment **bpp,
                      Eterm *tokenp,
                      ErtsDistExternal *dist_extp)
{
    Eterm msg;
    Uint tok_sz = 0;
    Eterm *hp = NULL;
    Eterm *hp_end = NULL;
    ErlOffHeap *ohp;
    Sint sz;

    *bpp = NULL;
    /* Upper bound on heap words needed for the decoded term. */
    sz = erts_decode_dist_ext_size(dist_extp, 0);
    if (sz < 0)
        goto decode_error;
    if (is_not_nil(*tokenp)) {
        /* Token lives in a trailer fragment; reserve room to copy it. */
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        tok_sz = heap_frag->used_size;
        sz += tok_sz;
    }
    if (pp)
        hp = erts_alloc_message_heap(sz, bpp, &ohp, pp, plcksp);
    else {
        *bpp = new_message_buffer(sz);
        hp = (*bpp)->mem;
        ohp = &(*bpp)->off_heap;
    }
    hp_end = hp + sz;
    msg = erts_decode_dist_ext(&hp, ohp, dist_extp);
    if (is_non_value(msg))
        goto decode_error;
    if (is_not_nil(*tokenp)) {
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        *tokenp = copy_struct(*tokenp, tok_sz, &hp, ohp);
        erts_cleanup_offheap(&heap_frag->off_heap);
    }
    erts_free_dist_ext_copy(dist_extp);
    if (hp_end != hp) {
        /* The size estimate was an over-approximation: give back the
         * unused tail (heap) or shrink the fragment (buffer), patching
         * msg/token references if the fragment moves. */
        if (!(*bpp)) {
            HRelease(pp, hp_end, hp);
        }
        else {
            Uint final_size = hp - &(*bpp)->mem[0];
            Eterm brefs[2] = {msg, *tokenp};
            ASSERT(sz - (hp_end - hp) == final_size);
            *bpp = erts_resize_message_buffer(*bpp, final_size, &brefs[0], 2);
            msg = brefs[0];
            *tokenp = brefs[1];
        }
    }
    return msg;

 decode_error:
    /* Failure path: drop token off-heap refs, free the external copy,
     * and release whatever heap space was claimed above. */
    if (is_not_nil(*tokenp)) {
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        erts_cleanup_offheap(&heap_frag->off_heap);
    }
    erts_free_dist_ext_copy(dist_extp);
    if (*bpp) {
        free_message_buffer(*bpp);
        *bpp = NULL;
    }
    else if (hp) {
        HRelease(pp, hp_end, hp);
    }
    return THE_NON_VALUE;
}
/*
 * Wake 'receiver' after a message has been enqueued.  Caller must hold
 * the status lock (asserted).  A suspended process is marked runnable
 * (resumed later); a waiting process is put on a run queue.  If the
 * process is currently garbage collecting (P_GARBING), the saved
 * gcstatus is inspected instead -- hence the gotos into the matching
 * cases below.
 */
static ERTS_INLINE void
notify_new_message(Process *receiver)
{
    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
                       & erts_proc_lc_my_proc_locks(receiver));

    ACTIVATE(receiver);

    switch (receiver->status) {
    case P_GARBING:
        /* GC in progress: act on the status the process will resume to. */
        switch (receiver->gcstatus) {
        case P_SUSPENDED:
            goto suspended;
        case P_WAITING:
            goto waiting;
        default:
            break;
        }
        break;
    case P_SUSPENDED:
    suspended:
        /* Can't run now; remember it should be runnable when resumed. */
        receiver->rstatus = P_RUNABLE;
        break;
    case P_WAITING:
    waiting:
        erts_add_to_runq(receiver);
        break;
    default:
        break;
    }
}
  296. void
  297. erts_queue_dist_message(Process *rcvr,
  298. ErtsProcLocks *rcvr_locks,
  299. ErtsDistExternal *dist_ext,
  300. Eterm token)
  301. {
  302. ErlMessage* mp;
  303. #ifdef ERTS_SMP
  304. ErtsProcLocks need_locks;
  305. #endif
  306. ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
  307. mp = message_alloc();
  308. #ifdef ERTS_SMP
  309. need_locks = ~(*rcvr_locks) & (ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
  310. if (need_locks) {
  311. *rcvr_locks |= need_locks;
  312. if (erts_smp_proc_trylock(rcvr, need_locks) == EBUSY) {
  313. if (need_locks == ERTS_PROC_LOCK_MSGQ) {
  314. erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
  315. need_locks = (ERTS_PROC_LOCK_MSGQ
  316. | ERTS_PROC_LOCK_STATUS);
  317. }
  318. erts_smp_proc_lock(rcvr, need_locks);
  319. }
  320. }
  321. if (rcvr->is_exiting || ERTS_PROC_PENDING_EXIT(rcvr)) {
  322. /* Drop message if receiver is exiting or has a pending exit ... */
  323. if (is_not_nil(token)) {
  324. ErlHeapFragment *heap_frag;
  325. heap_frag = erts_dist_ext_trailer(mp->data.dist_ext);
  326. erts_cleanup_offheap(&heap_frag->off_heap);
  327. }
  328. erts_free_dist_ext_copy(dist_ext);
  329. message_free(mp);
  330. }
  331. else
  332. #endif
  333. if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
  334. /* Ahh... need to decode it in order to trace it... */
  335. ErlHeapFragment *mbuf;
  336. Eterm msg;
  337. message_free(mp);
  338. msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
  339. if (is_value(msg))
  340. erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token);
  341. }
  342. else {
  343. /* Enqueue message on external format */
  344. ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
  345. ERL_MESSAGE_TOKEN(mp) = token;
  346. mp->next = NULL;
  347. mp->data.dist_ext = dist_ext;
  348. LINK_MESSAGE(rcvr, mp);
  349. notify_new_message(rcvr);
  350. }
  351. }
/* Add a message last in message queue */
/*
 * Enqueue 'message' (with optional heap fragment 'bp' holding its data
 * and seq-trace token) on 'receiver'.  Acquires any missing MSGQ/STATUS
 * locks, recording them in *receiver_locks for the caller to release.
 * If the receiver is exiting (SMP), message and fragment are dropped.
 */
void
erts_queue_message(Process* receiver,
                   ErtsProcLocks *receiver_locks,
                   ErlHeapFragment* bp,
                   Eterm message,
                   Eterm seq_trace_token)
{
    ErlMessage* mp;
#ifdef ERTS_SMP
    ErtsProcLocks need_locks;
#else
    /* Non-SMP: a fragment-less message implies no pending mbufs. */
    ASSERT(bp != NULL || receiver->mbuf == NULL);
#endif

    ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));

    mp = message_alloc();

#ifdef ERTS_SMP
    need_locks = ~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ
                                       | ERTS_PROC_LOCK_STATUS);
    if (need_locks) {
        *receiver_locks |= need_locks;
        if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
            /* Lock-order: release STATUS before blocking on MSGQ,
             * then take both together. */
            if (need_locks == ERTS_PROC_LOCK_MSGQ) {
                erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                need_locks = (ERTS_PROC_LOCK_MSGQ
                              | ERTS_PROC_LOCK_STATUS);
            }
            erts_smp_proc_lock(receiver, need_locks);
        }
    }

    if (receiver->is_exiting || ERTS_PROC_PENDING_EXIT(receiver)) {
        /* Drop message if receiver is exiting or has a pending
         * exit ...
         */
        if (bp)
            free_message_buffer(bp);
        message_free(mp);
        return;
    }
#endif

    ERL_MESSAGE_TERM(mp) = message;
    ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
    mp->next = NULL;
    mp->data.heap_frag = bp;

#ifdef ERTS_SMP
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, mp);
    }
    else {
        LINK_MESSAGE(receiver, mp);
    }
#else
    LINK_MESSAGE(receiver, mp);
#endif

    notify_new_message(receiver);

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
        trace_receive(receiver, message);
    }

#ifndef ERTS_SMP
    ERTS_HOLE_CHECK(receiver);
#endif
}
  423. void
  424. erts_link_mbuf_to_proc(struct process *proc, ErlHeapFragment *bp)
  425. {
  426. Eterm* htop = HEAP_TOP(proc);
  427. link_mbuf_to_proc(proc, bp);
  428. if (htop < HEAP_LIMIT(proc)) {
  429. *htop = make_pos_bignum_header(HEAP_LIMIT(proc)-htop-1);
  430. HEAP_TOP(proc) = HEAP_LIMIT(proc);
  431. }
  432. }
/*
 * Moves content of message buffer attached to a message into a heap.
 * The message buffer is deallocated.
 */
/*
 * *hpp points at free heap space (the caller has reserved enough words);
 * it is advanced past the copied data.  Off-heap entries are relinked
 * onto 'off_heap'.  msg's term/token fields are updated to point at the
 * new locations, and msg->data.heap_frag is cleared.
 */
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
    struct erl_off_heap_header* oh;
    Eterm term, token, *fhp, *hp;
    Sint offs;
    Uint sz;
    ErlHeapFragment *bp;

#ifdef HARD_DEBUG
    /* NOTE(review): this HARD_DEBUG section references off_heap->mso/
     * funs/externals, which does not match the unified 'first' off-heap
     * list used elsewhere in this file -- it appears stale and would
     * likely not compile if HARD_DEBUG were enabled; verify before use. */
    ProcBin *dbg_mso_start = off_heap->mso;
    ErlFunThing *dbg_fun_start = off_heap->funs;
    ExternalThing *dbg_external_start = off_heap->externals;
    Eterm dbg_term, dbg_token;
    ErlHeapFragment *dbg_bp;
    Uint *dbg_hp, *dbg_thp_start;
    Uint dbg_term_sz, dbg_token_sz;
#endif

    bp = msg->data.heap_frag;
    term = ERL_MESSAGE_TERM(msg);
    token = ERL_MESSAGE_TOKEN(msg);

    /* No fragment attached: term and token must both be immediates. */
    if (!bp) {
        ASSERT(is_immed(term) && is_immed(token));
        return;
    }

#ifdef HARD_DEBUG
    dbg_term_sz = size_object(term);
    dbg_token_sz = size_object(token);
    /*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
      Copied size may be smaller due to removed SubBins's or garbage.
      Copied size may be larger due to duplicated shared terms.
    */
    dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
    dbg_hp = dbg_bp->mem;
    dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_thp_start = *hpp;
#endif

    /* Multiple chained fragments: use the generic multi-fragment mover,
     * which also updates msg->m[0] (term) and msg->m[1] (token). */
    if (bp->next != NULL) {
        move_multi_frags(hpp, off_heap, bp, msg->m, 2);
        goto copy_done;
    }

    OH_OVERHEAD(off_heap, bp->off_heap.overhead);
    sz = bp->used_size;

    ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
    ASSERT(is_immed(token) || in_heapfrag(ptr_val(token),bp));

    fhp = bp->mem;
    hp = *hpp;
    /* All in-fragment pointers are rewritten by this fixed offset. */
    offs = hp - fhp;

    oh = NULL;
    /* Single linear pass over the fragment: copy each word, patching
     * list/boxed pointers; header words have their payload block-copied
     * (never contains pointers into the fragment). */
    while (sz--) {
        Uint cpy_sz;
        Eterm val = *fhp++;

        switch (primary_tag(val)) {
        case TAG_PRIMARY_IMMED1:
            *hp++ = val;
            break;
        case TAG_PRIMARY_LIST:
        case TAG_PRIMARY_BOXED:
            ASSERT(in_heapfrag(ptr_val(val), bp));
            *hp++ = offset_ptr(val, offs);
            break;
        case TAG_PRIMARY_HEADER:
            *hp++ = val;
            switch (val & _HEADER_SUBTAG_MASK) {
            case ARITYVAL_SUBTAG:
                /* Tuple header: elements follow as tagged words and are
                 * handled by the outer loop. */
                break;
            case REFC_BINARY_SUBTAG:
            case FUN_SUBTAG:
            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG:
                /* Off-heap thing: remember its new location so it can be
                 * linked into off_heap after its body is copied. */
                oh = (struct erl_off_heap_header*) (hp-1);
                cpy_sz = thing_arityval(val);
                goto cpy_words;
            default:
                cpy_sz = header_arity(val);

            cpy_words:
                /* Raw copy of the thing's payload, unrolled 8 words at
                 * a time with a jump table for the remainder
                 * (intentional switch fallthrough below). */
                ASSERT(sz >= cpy_sz);
                sz -= cpy_sz;
                while (cpy_sz >= 8) {
                    cpy_sz -= 8;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                }
                switch (cpy_sz) {
                case 7: *hp++ = *fhp++;
                case 6: *hp++ = *fhp++;
                case 5: *hp++ = *fhp++;
                case 4: *hp++ = *fhp++;
                case 3: *hp++ = *fhp++;
                case 2: *hp++ = *fhp++;
                case 1: *hp++ = *fhp++;
                default: break;
                }
                if (oh) {
                    /* Add to offheap list */
                    oh->next = off_heap->first;
                    off_heap->first = oh;
                    ASSERT(*hpp <= (Eterm*)oh);
                    ASSERT(hp > (Eterm*)oh);
                    oh = NULL;
                }
                break;
            }
            break;
        }
    }

    ASSERT(bp->used_size == hp - *hpp);
    *hpp = hp;

    /* Patch the message's token and term references to the new heap. */
    if (is_not_immed(token)) {
        ASSERT(in_heapfrag(ptr_val(token), bp));
        ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
#endif
    }

    if (is_not_immed(term)) {
        ASSERT(in_heapfrag(ptr_val(term),bp));
        ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
#endif
    }

 copy_done:

#ifdef HARD_DEBUG
    /* Verify the number of off-heap entries moved to 'off_heap' equals
     * the number that were in the fragment chain.  (See stale-code note
     * at the top of this function.) */
    {
        int i, j;
        ErlHeapFragment* frag;
        {
            ProcBin *mso = off_heap->mso;
            i = j = 0;
            while (mso != dbg_mso_start) {
                mso = mso->next;
                i++;
            }
            for (frag=bp; frag; frag=frag->next) {
                mso = frag->off_heap.mso;
                while (mso) {
                    mso = mso->next;
                    j++;
                }
            }
            ASSERT(i == j);
        }
        {
            ErlFunThing *fun = off_heap->funs;
            i = j = 0;
            while (fun != dbg_fun_start) {
                fun = fun->next;
                i++;
            }
            for (frag=bp; frag; frag=frag->next) {
                fun = frag->off_heap.funs;
                while (fun) {
                    fun = fun->next;
                    j++;
                }
            }
            ASSERT(i == j);
        }
        {
            ExternalThing *external = off_heap->externals;
            i = j = 0;
            while (external != dbg_external_start) {
                external = external->next;
                i++;
            }
            for (frag=bp; frag; frag=frag->next) {
                external = frag->off_heap.externals;
                while (external) {
                    external = external->next;
                    j++;
                }
            }
            ASSERT(i == j);
        }
    }
#endif

    /* Off-heap entries now belong to 'off_heap'; clear the fragment's
     * list head so free_message_buffer() won't release them. */
    bp->off_heap.first = NULL;
    free_message_buffer(bp);
    msg->data.heap_frag = NULL;

#ifdef HARD_DEBUG
    ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
    ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
    free_message_buffer(dbg_bp);
#endif
}
/*
 * Compute (and cache in dist_ext->heap_size) the number of heap words
 * needed to decode the still-external message 'msg', plus room for its
 * seq-trace token (msg->m[1]) if present.  On a bad external the data
 * is discarded and 0 is returned.
 */
Uint
erts_msg_attached_data_size_aux(ErlMessage *msg)
{
    Sint sz;
    /* Only called for undecoded messages with an unknown heap size. */
    ASSERT(is_non_value(ERL_MESSAGE_TERM(msg)));
    ASSERT(msg->data.dist_ext);
    ASSERT(msg->data.dist_ext->heap_size < 0);

    sz = erts_decode_dist_ext_size(msg->data.dist_ext, 0);
    if (sz < 0) {
        /* Bad external; remove it */
        if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
            ErlHeapFragment *heap_frag;
            heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
            erts_cleanup_offheap(&heap_frag->off_heap);
        }
        erts_free_dist_ext_copy(msg->data.dist_ext);
        msg->data.dist_ext = NULL;
        return 0;
    }

    /* Cache so later callers skip the size computation. */
    msg->data.dist_ext->heap_size = sz;
    if (is_not_nil(msg->m[1])) {
        /* m[1] is the token slot; its data sits in the trailer fragment. */
        ErlHeapFragment *heap_frag;
        heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
        sz += heap_frag->used_size;
    }
    return sz;
}
/*
 * Materialize a message's attached data onto a heap: an already-decoded
 * message has its heap fragment moved; an undecoded (dist-external)
 * message is decoded in place (token first, then the term), after which
 * the external copy is freed.  *hpp must have room for the size
 * previously computed by erts_msg_attached_data_size_aux().
 */
void
erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *msg)
{
    if (is_value(ERL_MESSAGE_TERM(msg)))
        erts_move_msg_mbuf_to_heap(hpp, ohp, msg);
    else if (msg->data.dist_ext) {
        /* Size must already have been computed and cached. */
        ASSERT(msg->data.dist_ext->heap_size >= 0);
        if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
            ErlHeapFragment *heap_frag;
            heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
            ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg),
                                                 heap_frag->used_size,
                                                 hpp,
                                                 ohp);
            erts_cleanup_offheap(&heap_frag->off_heap);
        }
        ERL_MESSAGE_TERM(msg) = erts_decode_dist_ext(hpp,
                                                     ohp,
                                                     msg->data.dist_ext);
        erts_free_dist_ext_copy(msg->data.dist_ext);
        msg->data.dist_ext = NULL;
    }
    /* else: bad external detected when calculating size */
}
  683. /*
  684. * Send a local message when sender & receiver processes are known.
  685. */
  686. void
  687. erts_send_message(Process* sender,
  688. Process* receiver,
  689. ErtsProcLocks *receiver_locks,
  690. Eterm message,
  691. unsigned flags)
  692. {
  693. Uint msize;
  694. ErlHeapFragment* bp = NULL;
  695. Eterm token = NIL;
  696. BM_STOP_TIMER(system);
  697. BM_MESSAGE(message,sender,receiver);
  698. BM_START_TIMER(send);
  699. if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
  700. Eterm* hp;
  701. BM_SWAP_TIMER(send,size);
  702. msize = size_object(message);
  703. BM_SWAP_TIMER(size,send);
  704. seq_trace_update_send(sender);
  705. seq_trace_output(SEQ_TRACE_TOKEN(sender), message, SEQ_TRACE_SEND,
  706. receiver->id, sender);
  707. bp = new_message_buffer(msize + 6 /* TUPLE5 */);
  708. hp = bp->mem;
  709. BM_SWAP_TIMER(send,copy);
  710. token = copy_struct(SEQ_TRACE_TOKEN(sender),
  711. 6 /* TUPLE5 */,
  712. &hp,
  713. &bp->off_heap);
  714. message = copy_struct(message, msize, &hp, &bp->off_heap);
  715. BM_MESSAGE_COPIED(msize);
  716. BM_SWAP_TIMER(copy,send);
  717. erts_queue_message(receiver,
  718. receiver_locks,
  719. bp,
  720. message,
  721. token);
  722. BM_SWAP_TIMER(send,system);
  723. #ifdef HYBRID
  724. } else {
  725. ErlMessage* mp = message_alloc();
  726. BM_SWAP_TIMER(send,copy);
  727. #ifdef INCREMENTAL
  728. /* TODO: During GC activate processes if the message relies in
  729. * the fromspace and the sender is active. During major
  730. * collections add the message to the gray stack if it relies
  731. * in the old generation and the sender is active and the
  732. * receiver is inactive.
  733. if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
  734. (ptr_val(message) >= inc_fromspc &&
  735. ptr_val(message) < inc_fromend) && INC_IS_ACTIVE(sender))
  736. INC_ACTIVATE(receiver);
  737. else if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
  738. (ptr_val(message) >= global_old_heap &&
  739. ptr_val(message) < global_old_hend) &&
  740. INC_IS_ACTIVE(sender) && !INC_IS_ACTIVE(receiver))
  741. Mark message in blackmap and add it to the gray stack
  742. */
  743. if (!IS_CONST(message))
  744. INC_ACTIVATE(receiver);
  745. #endif
  746. LAZY_COPY(sender,message);
  747. BM_SWAP_TIMER(copy,send);
  748. ERL_MESSAGE_TERM(mp) = message;
  749. ERL_MESSAGE_TOKEN(mp) = NIL;
  750. mp->next = NULL;
  751. LINK_MESSAGE(receiver, mp);
  752. ACTIVATE(receiver);
  753. if (receiver->status == P_WAITING) {
  754. erts_add_to_runq(receiver);
  755. } else if (receiver->status == P_SUSPENDED) {
  756. receiver->rstatus = P_RUNABLE;
  757. }
  758. if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
  759. trace_receive(receiver, message);
  760. }
  761. BM_SWAP_TIMER(send,system);
  762. return;
  763. #else
  764. } else if (sender == receiver) {
  765. /* Drop message if receiver has a pending exit ... */
  766. #ifdef ERTS_SMP
  767. ErtsProcLocks need_locks = (~(*receiver_locks)
  768. & (ERTS_PROC_LOCK_MSGQ
  769. | ERTS_PROC_LOCK_STATUS));
  770. if (need_locks) {
  771. *receiver_locks |= need_locks;
  772. if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
  773. if (need_locks == ERTS_PROC_LOCK_MSGQ) {
  774. erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
  775. need_locks = ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS;
  776. }
  777. erts_smp_proc_lock(receiver, need_locks);
  778. }
  779. }
  780. if (!ERTS_PROC_PENDING_EXIT(receiver))
  781. #endif
  782. {
  783. ErlMessage* mp = message_alloc();
  784. mp->data.attached = NULL;
  785. ERL_MESSAGE_TERM(mp) = message;
  786. ERL_MESSAGE_TOKEN(mp) = NIL;
  787. mp->next = NULL;
  788. /*
  789. * We move 'in queue' to 'private queue' and place
  790. * message at the end of 'private queue' in order
  791. * to ensure that the 'in queue' doesn't contain
  792. * references into the heap. By ensuring this,
  793. * we don't need to include the 'in queue' in
  794. * the root set when garbage collecting.
  795. */
  796. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
  797. LINK_MESSAGE_PRIVQ(receiver, mp);
  798. if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
  799. trace_receive(receiver, message);
  800. }
  801. }
  802. BM_SWAP_TIMER(send,system);
  803. return;
  804. } else {
  805. #ifdef ERTS_SMP
  806. ErlOffHeap *ohp;
  807. Eterm *hp;
  808. BM_SWAP_TIMER(send,size);
  809. msize = size_object(message);
  810. BM_SWAP_TIMER(size,send);
  811. hp = erts_alloc_message_heap(msize,&bp,&ohp,receiver,receiver_locks);
  812. BM_SWAP_TIMER(send,copy);
  813. message = copy_struct(message, msize, &hp, ohp);
  814. BM_MESSAGE_COPIED(msz);
  815. BM_SWAP_TIMER(copy,send);
  816. erts_queue_message(receiver, receiver_locks, bp, message, token);
  817. BM_SWAP_TIMER(send,system);
  818. #else
  819. ErlMessage* mp = message_alloc();
  820. Eterm *hp;
  821. BM_SWAP_TIMER(send,size);
  822. msize = size_object(message);
  823. BM_SWAP_TIMER(size,send);
  824. if (receiver->stop - receiver->htop <= msize) {
  825. BM_SWAP_TIMER(send,system);
  826. erts_garbage_collect(receiver, msize, receiver->arg_reg, receiver->arity);
  827. BM_SWAP_TIMER(system,send);
  828. }
  829. hp = receiver->htop;
  830. receiver->htop = hp + msize;
  831. BM_SWAP_TIMER(send,copy);
  832. message = copy_struct(message, msize, &hp, &receiver->off_heap);
  833. BM_MESSAGE_COPIED(msize);
  834. BM_SWAP_TIMER(copy,send);
  835. ERL_MESSAGE_TERM(mp) = message;
  836. ERL_MESSAGE_TOKEN(mp) = NIL;
  837. mp->next = NULL;
  838. mp->data.attached = NULL;
  839. LINK_MESSAGE(receiver, mp);
  840. if (receiver->status == P_WAITING) {
  841. erts_add_to_runq(receiver);
  842. } else if (receiver->status == P_SUSPENDED) {
  843. receiver->rstatus = P_RUNABLE;
  844. }
  845. if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
  846. trace_receive(receiver, message);
  847. }
  848. BM_SWAP_TIMER(send,system);
  849. #endif /* #ifndef ERTS_SMP */
  850. return;
  851. #endif /* HYBRID */
  852. }
  853. }
/*
 * This function delivers an EXIT message to a process
 * which is trapping EXITs.
 */
/*
 * Builds {'EXIT', From, Reason} on the receiver's heap (or a fragment)
 * and queues it.  When a seq-trace token is present, everything --
 * reason, from, tuple, and token -- is copied into one fragment so the
 * token can be attached to the queued message.
 */
void
erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
                          Eterm reason, Eterm token)
{
    Eterm mess;
    Eterm save;
    Eterm from_copy;
    Uint sz_reason;
    Uint sz_token;
    Uint sz_from;
    Eterm* hp;
    Eterm temptoken;
    ErlHeapFragment* bp = NULL;

    if (token != NIL) {
        ASSERT(is_tuple(token));
        sz_reason = size_object(reason);
        sz_token = size_object(token);
        sz_from = size_object(from);
        /* +4 words for the 3-tuple (header + 3 elements). */
        bp = new_message_buffer(sz_reason + sz_from + sz_token + 4);
        hp = bp->mem;
        mess = copy_struct(reason, sz_reason, &hp, &bp->off_heap);
        from_copy = copy_struct(from, sz_from, &hp, &bp->off_heap);
        save = TUPLE3(hp, am_EXIT, from_copy, mess);
        hp += 4;
        /* the trace token must in this case be updated by the caller */
        seq_trace_output(token, save, SEQ_TRACE_SEND, to->id, NULL);
        temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
        erts_queue_message(to, to_locksp, bp, save, temptoken);
    } else {
        ErlOffHeap *ohp;
        sz_reason = size_object(reason);
        /* Immediates (e.g. a local pid is not; an atom is) need no copy. */
        sz_from = IS_CONST(from) ? 0 : size_object(from);
        hp = erts_alloc_message_heap(sz_reason+sz_from+4,
                                     &bp,
                                     &ohp,
                                     to,
                                     to_locksp);
        mess = copy_struct(reason, sz_reason, &hp, ohp);
        from_copy = (IS_CONST(from)
                     ? from
                     : copy_struct(from, sz_from, &hp, ohp));
        save = TUPLE3(hp, am_EXIT, from_copy, mess);
        erts_queue_message(to, to_locksp, bp, save, NIL);
    }
}