
/mono/metadata/sgen-marksweep.c

https://bitbucket.org/danipen/mono
C | 2450 lines | 1831 code | 390 blank | 229 comment | 300 complexity | 8bfe33542d8aa1fc627cff0091da143f MD5
Possible License(s): Unlicense, Apache-2.0, LGPL-2.0, MPL-2.0-no-copyleft-exception, CC-BY-SA-3.0, GPL-2.0


/*
 * sgen-marksweep.c: The Mark & Sweep major collector.
 *
 * Author:
 * 	Mark Probst <mark.probst@gmail.com>
 *
 * Copyright 2009-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "config.h"

#ifdef HAVE_SGEN_GC

#include <math.h>
#include <errno.h>

#include "utils/mono-counters.h"
#include "utils/mono-semaphore.h"
#include "utils/mono-time.h"
#include "metadata/object-internals.h"
#include "metadata/profiler-private.h"

#include "metadata/sgen-gc.h"
#include "metadata/sgen-protocol.h"
#include "metadata/sgen-cardtable.h"
#include "metadata/sgen-memory-governor.h"
#include "metadata/gc-internal.h"

#if !defined(SGEN_PARALLEL_MARK) && !defined(FIXED_HEAP)
#define SGEN_HAVE_CONCURRENT_MARK
#endif

#define MS_BLOCK_SIZE	(16*1024)
#define MS_BLOCK_SIZE_SHIFT	14
#define MAJOR_SECTION_SIZE	MS_BLOCK_SIZE
#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
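/*
 * For illustration only: with the 16 KB block size above and SGen's usual
 * 512-byte cards (CARD_SIZE_IN_BYTES comes from sgen-cardtable.h; 512 is
 * assumed here, not guaranteed), CARDS_PER_BLOCK works out to
 * 16384 / 512 = 32 cards per block.
 */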
#ifdef FIXED_HEAP
#define MS_DEFAULT_HEAP_NUM_BLOCKS	(32 * 1024) /* 512 MB */
#endif

/*
 * Don't allocate single blocks, but allocate a contingent of this many
 * blocks in one swoop.
 */
#define MS_BLOCK_ALLOC_NUM	32

/*
 * Number of bytes before the first object in a block.  At the start
 * of a block is the MSBlockHeader, then optional padding, then come
 * the objects, so this must be >= sizeof (MSBlockHeader).
 */
#ifdef FIXED_HEAP
#define MS_BLOCK_SKIP	0
#else
#define MS_BLOCK_SKIP	16
#endif

#define MS_BLOCK_FREE	(MS_BLOCK_SIZE - MS_BLOCK_SKIP)

#define MS_NUM_MARK_WORDS	((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
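/*
 * A worked example, assuming 64-bit mark words and SGEN_ALLOC_ALIGN == 8
 * (its usual value; stated here as an assumption): a 16 KB block has
 * 16384 / 8 = 2048 mark-bit granules, which need 2048 / 64 = 32 mark words.
 */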
#if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
#error MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
#endif

typedef struct _MSBlockInfo MSBlockInfo;
struct _MSBlockInfo {
	int obj_size;
	int obj_size_index;
	int pin_queue_num_entries;
	unsigned int pinned : 1;
	unsigned int has_references : 1;
	unsigned int has_pinned : 1;	/* means cannot evacuate */
	unsigned int is_to_space : 1;
	unsigned int swept : 1;
#ifdef FIXED_HEAP
	unsigned int used : 1;
	unsigned int zeroed : 1;
#endif
	MSBlockInfo *next;
	char *block;
	void **free_list;
	MSBlockInfo *next_free;
	void **pin_queue_start;
#ifdef SGEN_HAVE_CONCURRENT_MARK
	guint8 *cardtable_mod_union;
#endif
	mword mark_words [MS_NUM_MARK_WORDS];
};

#ifdef FIXED_HEAP
static mword ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;

static char *ms_heap_start;
static char *ms_heap_end;

#define MS_PTR_IN_SMALL_MAJOR_HEAP(p)	((char*)(p) >= ms_heap_start && (char*)(p) < ms_heap_end)

/* array of all block infos in the system */
static MSBlockInfo *block_infos;
#endif

#define MS_BLOCK_OBJ(b,i)	((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size)	((b)->block + MS_BLOCK_SKIP + (obj_size) * (i))
#define MS_BLOCK_DATA_FOR_OBJ(o)	((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))

#ifdef FIXED_HEAP
#define MS_BLOCK_FOR_OBJ(o)	(&block_infos [(mword)((char*)(o) - ms_heap_start) >> MS_BLOCK_SIZE_SHIFT])
#else
typedef struct {
	MSBlockInfo *info;
} MSBlockHeader;

#define MS_BLOCK_FOR_OBJ(o)	(((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
#endif

#define MS_BLOCK_OBJ_INDEX(o,b)	(((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size)

#define MS_CALC_MARK_BIT(w,b,o)	do { \
		int i = ((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o))) >> SGEN_ALLOC_ALIGN_BITS; \
		if (sizeof (mword) == 4) { \
			(w) = i >> 5; \
			(b) = i & 31; \
		} else { \
			(w) = i >> 6; \
			(b) = i & 63; \
		} \
	} while (0)
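/*
 * For illustration (assuming SGEN_ALLOC_ALIGN_BITS == 3, i.e. 8-byte
 * alignment): an object at byte offset 0x130 inside its block is granule
 * i = 0x130 >> 3 = 38, so on a 64-bit host its mark bit lives in word
 * 38 >> 6 = 0, at bit 38 & 63 = 38.
 */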
#define MS_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] & (1L << (b)))
#define MS_SET_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] |= (1L << (b)))
#define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b)	do { \
		mword __old = (bl)->mark_words [(w)]; \
		mword __bitmask = 1L << (b); \
		if (__old & __bitmask) { \
			was_marked = TRUE; \
			break; \
		} \
		if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)], \
				(gpointer)(__old | __bitmask), \
				(gpointer)__old) == \
				(gpointer)__old) { \
			was_marked = FALSE; \
			break; \
		} \
	} while (1)
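/*
 * Note on the loop above: it retries until either the bit is observed to
 * be set already (was_marked = TRUE) or this thread's compare-and-swap
 * succeeds in setting it (was_marked = FALSE), so exactly one marker
 * "wins" each object under parallel mark.
 */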
#define MS_OBJ_ALLOCED(o,b)	(*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
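/*
 * The check above relies on an invariant of the block layout: a live
 * object's first word is its vtable pointer, which necessarily points
 * outside the block, while a free slot's first word is either NULL or a
 * free-list link to another slot inside the same block.
 */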
#define MS_BLOCK_OBJ_SIZE_FACTOR	(sqrt (2.0))

/*
 * This way we can lookup block object size indexes for sizes up to
 * 256 bytes with a single load.
 */
#define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES	32

static int *block_obj_sizes;
static int num_block_obj_sizes;
static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];

#define MS_BLOCK_FLAG_PINNED	1
#define MS_BLOCK_FLAG_REFS	2

#define MS_BLOCK_TYPE_MAX	4

#ifdef SGEN_PARALLEL_MARK
static LOCK_DECLARE (ms_block_list_mutex);
#define LOCK_MS_BLOCK_LIST mono_mutex_lock (&ms_block_list_mutex)
#define UNLOCK_MS_BLOCK_LIST mono_mutex_unlock (&ms_block_list_mutex)
#endif

static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666;
#ifdef SGEN_HAVE_CONCURRENT_MARK
static float concurrent_evacuation_threshold = 0.666;
static gboolean want_evacuation = FALSE;
#endif

static gboolean lazy_sweep = TRUE;
static gboolean have_swept;

#ifdef SGEN_HAVE_CONCURRENT_MARK
static gboolean concurrent_mark;
#endif

/* all allocated blocks in the system */
static MSBlockInfo *all_blocks;

#ifdef FIXED_HEAP
/* non-allocated block free-list */
static MSBlockInfo *empty_blocks = NULL;
#else
/* non-allocated block free-list */
static void *empty_blocks = NULL;
static int num_empty_blocks = 0;
#endif

#define FOREACH_BLOCK(bl)	for ((bl) = all_blocks; (bl); (bl) = (bl)->next) {
#define END_FOREACH_BLOCK	}

static int num_major_sections = 0;
/* one free block list for each block object size */
static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];

#ifdef SGEN_PARALLEL_MARK
#ifdef HAVE_KW_THREAD
static __thread MSBlockInfo ***workers_free_block_lists;
#else
static MonoNativeTlsKey workers_free_block_lists_key;
#endif
#endif

static long long stat_major_blocks_alloced = 0;
static long long stat_major_blocks_freed = 0;
static long long stat_major_blocks_lazy_swept = 0;
static long long stat_major_objects_evacuated = 0;

#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
static long long num_major_objects_marked = 0;
#define INC_NUM_MAJOR_OBJECTS_MARKED()	(++num_major_objects_marked)
#else
#define INC_NUM_MAJOR_OBJECTS_MARKED()
#endif

static void
sweep_block (MSBlockInfo *block, gboolean during_major_collection);

static int
ms_find_block_obj_size_index (int size)
{
	int i;
	SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %d is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
	for (i = 0; i < num_block_obj_sizes; ++i)
		if (block_obj_sizes [i] >= size)
			return i;
	g_error ("no object of size %d\n", size);
}
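/*
 * block_obj_sizes holds the slot sizes in ascending order (they are
 * generated that way by ms_calculate_block_obj_sizes further down), so
 * the linear scan above returns the smallest size class that can hold
 * `size`.  The common case avoids the scan entirely via the fast lookup
 * table; see MS_BLOCK_OBJ_SIZE_INDEX below.
 */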
#define FREE_BLOCKS_FROM(lists,p,r)	(lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
#define FREE_BLOCKS(p,r)	(FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
#ifdef SGEN_PARALLEL_MARK
#ifdef HAVE_KW_THREAD
#define FREE_BLOCKS_LOCAL(p,r)	(FREE_BLOCKS_FROM (workers_free_block_lists, (p), (r)))
#else
#define FREE_BLOCKS_LOCAL(p,r)	(FREE_BLOCKS_FROM (((MSBlockInfo***)(mono_native_tls_get_value (workers_free_block_lists_key))), (p), (r)))
#endif
#else
//#define FREE_BLOCKS_LOCAL(p,r)	(FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
#endif

#define MS_BLOCK_OBJ_SIZE_INDEX(s) \
	(((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ? \
	 fast_block_obj_size_indexes [((s)+7)>>3] : \
	 ms_find_block_obj_size_index ((s)))
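/*
 * Example, for illustration: ((s)+7)>>3 maps a size to its 8-byte bucket,
 * so a request of 40 bytes looks up bucket (40+7)>>3 = 5.  With 32 fast
 * buckets this covers sizes up to 8*31 = 248 bytes (roughly the "256
 * bytes" mentioned above); larger sizes fall back to the linear search.
 */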
#ifdef FIXED_HEAP
static void*
major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
{
	char *nursery_start;
	mword major_heap_size = ms_heap_num_blocks * MS_BLOCK_SIZE;
	mword alloc_size = nursery_size + major_heap_size;
	mword i;

	g_assert (ms_heap_num_blocks > 0);
	g_assert (nursery_size % MS_BLOCK_SIZE == 0);
	if (nursery_align)
		g_assert (nursery_align % MS_BLOCK_SIZE == 0);

	nursery_start = sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "heap");
	ms_heap_start = nursery_start + nursery_size;
	ms_heap_end = ms_heap_start + major_heap_size;

	block_infos = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO, TRUE);

	for (i = 0; i < ms_heap_num_blocks; ++i) {
		block_infos [i].block = ms_heap_start + i * MS_BLOCK_SIZE;
		if (i < ms_heap_num_blocks - 1)
			block_infos [i].next_free = &block_infos [i + 1];
		else
			block_infos [i].next_free = NULL;
		block_infos [i].zeroed = TRUE;
	}

	empty_blocks = &block_infos [0];

	return nursery_start;
}
#else
static void*
major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
{
	char *start;
	if (nursery_align)
		start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
	else
		start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
	return start;
}
#endif

static void
update_heap_boundaries_for_block (MSBlockInfo *block)
{
	sgen_update_heap_boundaries ((mword)block->block, (mword)block->block + MS_BLOCK_SIZE);
}

#ifdef FIXED_HEAP
static MSBlockInfo*
ms_get_empty_block (void)
{
	MSBlockInfo *block;

	g_assert (empty_blocks);

	do {
		block = empty_blocks;
	} while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block->next_free, block) != block);

	block->used = TRUE;

	if (!block->zeroed)
		memset (block->block, 0, MS_BLOCK_SIZE);

	return block;
}

static void
ms_free_block (MSBlockInfo *block)
{
	block->next_free = empty_blocks;
	empty_blocks = block;
	block->used = FALSE;
	block->zeroed = FALSE;
	sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
}
#else
static void*
ms_get_empty_block (void)
{
	char *p;
	int i;
	void *block, *empty, *next;

 retry:
	if (!empty_blocks) {
		p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "major heap section");

		for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
			block = p;
			/*
			 * We do the free list update one after the
			 * other so that other threads can use the new
			 * blocks as quickly as possible.
			 */
			do {
				empty = empty_blocks;
				*(void**)block = empty;
			} while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
			p += MS_BLOCK_SIZE;
		}

		SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);

		stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
	}

	do {
		empty = empty_blocks;
		if (!empty)
			goto retry;
		block = empty;
		next = *(void**)block;
	} while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);

	SGEN_ATOMIC_ADD (num_empty_blocks, -1);

	*(void**)block = NULL;

	g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));

	return block;
}

static void
ms_free_block (void *block)
{
	void *empty;

	sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
	memset (block, 0, MS_BLOCK_SIZE);

	do {
		empty = empty_blocks;
		*(void**)block = empty;
	} while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);

	SGEN_ATOMIC_ADD (num_empty_blocks, 1);
}
#endif
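/*
 * Note: empty_blocks is maintained as a LIFO free list.  In the
 * non-FIXED_HEAP variant each free block's first word links to the next
 * free block and the list head is swung with SGEN_CAS_PTR (a Treiber
 * stack); in the FIXED_HEAP variant the links go through next_free and
 * only the pop in ms_get_empty_block uses a compare-and-swap.
 */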
//#define MARKSWEEP_CONSISTENCY_CHECK

#ifdef MARKSWEEP_CONSISTENCY_CHECK
static void
check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
{
	MSBlockInfo *b;
	for (; block; block = block->next_free) {
		g_assert (block->obj_size == size);
		g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));

		/* blocks in the free lists must have at least
		   one free slot */
		if (block->swept)
			g_assert (block->free_list);

#ifdef FIXED_HEAP
		/* the block must not be in the empty_blocks list */
		for (b = empty_blocks; b; b = b->next_free)
			g_assert (b != block);
#endif
		/* the block must be in the all_blocks list */
		for (b = all_blocks; b; b = b->next) {
			if (b == block)
				break;
		}
		g_assert (b == block);
	}
}

static void
check_empty_blocks (void)
{
#ifndef FIXED_HEAP
	void *p;
	int i = 0;
	for (p = empty_blocks; p; p = *(void**)p)
		++i;
	g_assert (i == num_empty_blocks);
#endif
}

static void
consistency_check (void)
{
	MSBlockInfo *block;
	int i;

	/* check all blocks */
	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int num_free = 0;
		void **free;

#ifndef FIXED_HEAP
		/* check block header */
		g_assert (((MSBlockHeader*)block->block)->info == block);
#endif

		/* count number of free slots */
		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			if (!MS_OBJ_ALLOCED (obj, block))
				++num_free;
		}

		/* check free list */
		for (free = block->free_list; free; free = (void**)*free) {
			g_assert (MS_BLOCK_FOR_OBJ (free) == block);
			--num_free;
		}
		g_assert (num_free == 0);

		/* check all mark words are zero */
		if (block->swept) {
			for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
				g_assert (block->mark_words [i] == 0);
		}
	} END_FOREACH_BLOCK;

	/* check free blocks */
	for (i = 0; i < num_block_obj_sizes; ++i) {
		int j;
		for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
			check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
	}

	check_empty_blocks ();
}
#endif

static gboolean
ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
{
	int size = block_obj_sizes [size_index];
	int count = MS_BLOCK_FREE / size;
	MSBlockInfo *info;
#ifdef SGEN_PARALLEL_MARK
	MSBlockInfo *next;
#endif
#ifndef FIXED_HEAP
	MSBlockHeader *header;
#endif
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
	char *obj_start;
	int i;

	if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
		return FALSE;

#ifdef FIXED_HEAP
	info = ms_get_empty_block ();
#else
	info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
#endif

	SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);

	info->obj_size = size;
	info->obj_size_index = size_index;
	info->pinned = pinned;
	info->has_references = has_references;
	info->has_pinned = pinned;
	info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD); /*FIXME WHY??? */
	info->swept = 1;
#ifndef FIXED_HEAP
	info->block = ms_get_empty_block ();

	header = (MSBlockHeader*) info->block;
	header->info = info;
#endif
#ifdef SGEN_HAVE_CONCURRENT_MARK
	info->cardtable_mod_union = NULL;
#endif

	update_heap_boundaries_for_block (info);

	/* build free list */
	obj_start = info->block + MS_BLOCK_SKIP;
	info->free_list = (void**)obj_start;
	/* we're skipping the last one - it must be nulled */
	for (i = 0; i < count - 1; ++i) {
		char *next_obj_start = obj_start + size;
		*(void**)obj_start = next_obj_start;
		obj_start = next_obj_start;
	}
	/* the last one */
	*(void**)obj_start = NULL;

#ifdef SGEN_PARALLEL_MARK
	do {
		next = info->next_free = free_blocks [size_index];
	} while (SGEN_CAS_PTR ((void**)&free_blocks [size_index], info, next) != next);

	do {
		next = info->next = all_blocks;
	} while (SGEN_CAS_PTR ((void**)&all_blocks, info, next) != next);
#else
	info->next_free = free_blocks [size_index];
	free_blocks [size_index] = info;

	info->next = all_blocks;
	all_blocks = info;
#endif

	++num_major_sections;
	return TRUE;
}

static gboolean
obj_is_from_pinned_alloc (char *ptr)
{
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
			return block->pinned;
	} END_FOREACH_BLOCK;
	return FALSE;
}

static void*
unlink_slot_from_free_list_uncontested (MSBlockInfo **free_blocks, int size_index)
{
	MSBlockInfo *block;
	void *obj;

	block = free_blocks [size_index];
	SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);

	if (G_UNLIKELY (!block->swept)) {
		stat_major_blocks_lazy_swept ++;
		sweep_block (block, FALSE);
	}

	obj = block->free_list;
	SGEN_ASSERT (9, obj, "block %p in free list had no available object to alloc from", block);

	block->free_list = *(void**)obj;
	if (!block->free_list) {
		free_blocks [size_index] = block->next_free;
		block->next_free = NULL;
	}

	return obj;
}
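/*
 * Note the lazy-sweep hook above: with lazy_sweep enabled, a block left
 * unswept by the last major collection is swept on demand the first time
 * an allocation pops a slot from it, which spreads sweep work across
 * mutator allocations instead of doing it all at collection end.
 */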
#ifdef SGEN_PARALLEL_MARK
static gboolean
try_remove_block_from_free_list (MSBlockInfo *block, MSBlockInfo **free_blocks, int size_index)
{
	/*
	 * No more free slots in the block, so try to free the block.
	 * Don't try again if we don't succeed - another thread will
	 * already have done it.
	 */
	MSBlockInfo *next_block = block->next_free;
	if (SGEN_CAS_PTR ((void**)&free_blocks [size_index], next_block, block) == block) {
		/*
		void *old = SGEN_CAS_PTR ((void**)&block->next_free, NULL, next_block);
		g_assert (old == next_block);
		*/
		block->next_free = NULL;
		return TRUE;
	}
	return FALSE;
}

static void*
alloc_obj_par (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
{
	int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
	MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (pinned, has_references);
	MSBlockInfo *block;
	void *obj;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (concurrent_mark)
		g_assert_not_reached ();
#endif

	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);

	if (free_blocks_local [size_index]) {
	get_slot:
		obj = unlink_slot_from_free_list_uncontested (free_blocks_local, size_index);
	} else {
		MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);

	get_block:
		block = free_blocks [size_index];
		if (block) {
			if (!try_remove_block_from_free_list (block, free_blocks, size_index))
				goto get_block;

			g_assert (block->next_free == NULL);
			g_assert (block->free_list);
			block->next_free = free_blocks_local [size_index];
			free_blocks_local [size_index] = block;

			goto get_slot;
		} else {
			gboolean success;

			LOCK_MS_BLOCK_LIST;
			success = ms_alloc_block (size_index, pinned, has_references);
			UNLOCK_MS_BLOCK_LIST;

			if (G_UNLIKELY (!success))
				return NULL;

			goto get_block;
		}
	}

	*(MonoVTable**)obj = vtable;

	return obj;
}

static void*
major_par_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
{
	return alloc_obj_par (vtable, size, FALSE, has_references);
}
#endif

static void*
alloc_obj (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
{
	int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
	void *obj;

#ifdef SGEN_PARALLEL_MARK
	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
#endif

	if (!free_blocks [size_index]) {
		if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
			return NULL;
	}

	obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);

	*(MonoVTable**)obj = vtable;

	return obj;
}

static void*
major_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
{
	return alloc_obj (vtable, size, FALSE, has_references);
}

/*
 * We're not freeing the block if it's empty.  We leave that work for
 * the next major collection.
 *
 * This is just called from the domain clearing code, which runs in a
 * single thread and has the GC lock, so we don't need an extra lock.
 */
static void
free_object (char *obj, size_t size, gboolean pinned)
{
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
	int word, bit;

	if (!block->swept)
		sweep_block (block, FALSE);
	SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
	SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
	MS_CALC_MARK_BIT (word, bit, obj);
	SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);
	if (!block->free_list) {
		MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
		int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
		SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of objects but belongs to a free-list of blocks", block);
		block->next_free = free_blocks [size_index];
		free_blocks [size_index] = block;
	}
	memset (obj, 0, size);
	*(void**)obj = block->free_list;
	block->free_list = (void**)obj;
}
static void
major_free_non_pinned_object (char *obj, size_t size)
{
	free_object (obj, size, FALSE);
}

/* size is a multiple of SGEN_ALLOC_ALIGN */
static void*
major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
{
	void *res;

	res = alloc_obj (vtable, size, TRUE, has_references);
	/*
	 * If we failed to alloc memory, we'd better try releasing memory,
	 * as pinned alloc is requested by the runtime.
	 */
	if (!res) {
		sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
		res = alloc_obj (vtable, size, TRUE, has_references);
	}
	return res;
}

static void
free_pinned_object (char *obj, size_t size)
{
	free_object (obj, size, TRUE);
}

/*
 * size is already rounded up and we hold the GC lock.
 */
static void*
major_alloc_degraded (MonoVTable *vtable, size_t size)
{
	void *obj;
	int old_num_sections;

	old_num_sections = num_major_sections;

	obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
	if (G_LIKELY (obj)) {
		HEAVY_STAT (++stat_objects_alloced_degraded);
		HEAVY_STAT (stat_bytes_alloced_degraded += size);
		g_assert (num_major_sections >= old_num_sections);
		sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
	}
	return obj;
}

#define MAJOR_OBJ_IS_IN_TO_SPACE(obj)	FALSE

/*
 * obj is some object.  If it's not in the major heap (i.e. if it's in
 * the nursery or LOS), return FALSE.  Otherwise return whether it's
 * been marked or copied.
 */
static gboolean
major_is_object_live (char *obj)
{
	MSBlockInfo *block;
	int word, bit;
#ifndef FIXED_HEAP
	mword objsize;
#endif

	if (sgen_ptr_in_nursery (obj))
		return FALSE;

#ifdef FIXED_HEAP
	/* LOS */
	if (!MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
		return FALSE;
#else
	objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

	/* LOS */
	if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
		return FALSE;
#endif

	/* now we know it's in a major block */
	block = MS_BLOCK_FOR_OBJ (obj);
	SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
	MS_CALC_MARK_BIT (word, bit, obj);
	return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
}

static gboolean
major_ptr_is_in_non_pinned_space (char *ptr, char **start)
{
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE) {
			int count = MS_BLOCK_FREE / block->obj_size;
			int i;

			*start = NULL;
			for (i = 0; i <= count; ++i) {
				if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
					*start = MS_BLOCK_OBJ (block, i);
					break;
				}
			}
			return !block->pinned;
		}
	} END_FOREACH_BLOCK;
	return FALSE;
}

static void
major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
{
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int i;

		if (block->pinned && !pinned)
			continue;
		if (!block->pinned && !non_pinned)
			continue;
		if (lazy_sweep)
			sweep_block (block, FALSE);

		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			if (MS_OBJ_ALLOCED (obj, block))
				callback ((char*)obj, block->obj_size, data);
		}
	} END_FOREACH_BLOCK;
}

static gboolean
major_is_valid_object (char *object)
{
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		int idx;
		char *obj;

		if ((block->block > object) || ((block->block + MS_BLOCK_SIZE) <= object))
			continue;

		idx = MS_BLOCK_OBJ_INDEX (object, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);
		if (obj != object)
			return FALSE;
		return MS_OBJ_ALLOCED (obj, block);
	} END_FOREACH_BLOCK;

	return FALSE;
}

static gboolean
major_describe_pointer (char *ptr)
{
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		int idx;
		char *obj;
		gboolean live;
		MonoVTable *vtable;
		int w, b;
		gboolean marked;

		if ((block->block > ptr) || ((block->block + MS_BLOCK_SIZE) <= ptr))
			continue;

		SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
			block->block, block->obj_size, block->pinned, block->has_references);

		idx = MS_BLOCK_OBJ_INDEX (ptr, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);
		live = MS_OBJ_ALLOCED (obj, block);
		vtable = live ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
		MS_CALC_MARK_BIT (w, b, obj);
		marked = MS_MARK_BIT (block, w, b);

		if (obj == ptr) {
			SGEN_LOG (0, "\t(");
			if (live)
				SGEN_LOG (0, "object");
			else
				SGEN_LOG (0, "dead-object");
		} else {
			if (live)
				SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
			else
				SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);
		}

		SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);

		return TRUE;
	} END_FOREACH_BLOCK;

	return FALSE;
}

static void
major_check_scan_starts (void)
{
}

static void
major_dump_heap (FILE *heap_dump_file)
{
	MSBlockInfo *block;
	int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
	int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
	int i;

	for (i = 0; i < num_block_obj_sizes; ++i)
		slots_available [i] = slots_used [i] = 0;

	FOREACH_BLOCK (block) {
		int index = ms_find_block_obj_size_index (block->obj_size);
		int count = MS_BLOCK_FREE / block->obj_size;

		slots_available [index] += count;
		for (i = 0; i < count; ++i) {
			if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
				++slots_used [index];
		}
	} END_FOREACH_BLOCK;

	fprintf (heap_dump_file, "<occupancies>\n");
	for (i = 0; i < num_block_obj_sizes; ++i) {
		fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
				block_obj_sizes [i], slots_available [i], slots_used [i]);
	}
	fprintf (heap_dump_file, "</occupancies>\n");

	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int i;
		int start = -1;

		fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);

		for (i = 0; i <= count; ++i) {
			if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
				if (start < 0)
					start = i;
			} else {
				if (start >= 0) {
					sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);
					start = -1;
				}
			}
		}

		fprintf (heap_dump_file, "</section>\n");
	} END_FOREACH_BLOCK;
}

#define LOAD_VTABLE	SGEN_LOAD_VTABLE

#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do { \
		int __word, __bit; \
		MS_CALC_MARK_BIT (__word, __bit, (obj)); \
		if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
			MS_SET_MARK_BIT ((block), __word, __bit); \
			if ((block)->has_references) \
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED (); \
		} \
	} while (0)
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
		int __word, __bit; \
		MS_CALC_MARK_BIT (__word, __bit, (obj)); \
		SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
		if (!MS_MARK_BIT ((block), __word, __bit)) { \
			MS_SET_MARK_BIT ((block), __word, __bit); \
			if ((block)->has_references) \
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED (); \
		} \
	} while (0)
#define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
		int __word, __bit; \
		gboolean __was_marked; \
		SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
		MS_CALC_MARK_BIT (__word, __bit, (obj)); \
		MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
		if (!__was_marked) { \
			if ((block)->has_references) \
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED (); \
		} \
	} while (0)
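/*
 * The three marking macros above differ as follows: the CHECKED variant
 * also tolerates pointers to free slots (it tests MS_OBJ_ALLOCED instead
 * of asserting it), which is what the pin-queue path needs, since pinning
 * candidates are only potential object addresses; the PAR variant uses
 * the compare-and-swap bit setter so that concurrent markers don't
 * enqueue the same object twice.
 */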
static void
pin_major_object (char *obj, SgenGrayQueue *queue)
{
	MSBlockInfo *block;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (concurrent_mark)
		g_assert_not_reached ();
#endif

	block = MS_BLOCK_FOR_OBJ (obj);
	block->has_pinned = TRUE;
	MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
}

#include "sgen-major-copy-object.h"

#ifdef SGEN_PARALLEL_MARK
static void
major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
{
	mword objsize;
	MSBlockInfo *block;
	MonoVTable *vt;

	HEAVY_STAT (++stat_copy_object_called_major);

	SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);

	if (sgen_ptr_in_nursery (obj)) {
		int word, bit;
		gboolean has_references;
		void *destination;
		mword vtable_word = *(mword*)obj;
		vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

		if (vtable_word & SGEN_FORWARDED_BIT) {
			*ptr = (void*)vt;
			return;
		}

		if (vtable_word & SGEN_PINNED_BIT)
			return;

		/* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
		if (sgen_nursery_is_to_space (obj))
			return;

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
		has_references = SGEN_VTABLE_HAS_REFERENCES (vt);

		destination = sgen_minor_collector.par_alloc_for_promotion (vt, obj, objsize, has_references);
		if (G_UNLIKELY (!destination)) {
			if (!sgen_ptr_in_nursery (obj)) {
				int size_index;
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
			}

			sgen_parallel_pin_or_update (ptr, obj, vt, queue);
			sgen_set_pinned_from_failed_allocation (objsize);
			return;
		}

		if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
			gboolean was_marked;

			par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
			obj = destination;
			*ptr = obj;

			/*
			 * FIXME: If we make major_alloc_object() give
			 * us the block info, too, we won't have to
			 * re-fetch it here.
			 *
			 * FIXME (2): We should rework this to avoid all those nursery checks.
			 */
			/*
			 * For the split nursery allocator the object
			 * might still be in the nursery despite
			 * having been promoted, in which case we
			 * can't mark it.
			 */
			if (!sgen_ptr_in_nursery (obj)) {
				block = MS_BLOCK_FOR_OBJ (obj);
				MS_CALC_MARK_BIT (word, bit, obj);
				SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
				MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
			}
		} else {
			/*
			 * FIXME: We have allocated destination, but
			 * we cannot use it.  Give it back to the
			 * allocator.
			 */
			*(void**)destination = NULL;

			vtable_word = *(mword*)obj;
			g_assert (vtable_word & SGEN_FORWARDED_BIT);

			obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

			*ptr = obj;

			HEAVY_STAT (++stat_slots_allocated_in_vain);
		}
	} else {
#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		mword vtable_word = *(mword*)obj;
		vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

		/* see comment in the non-parallel version below */
		if (vtable_word & SGEN_FORWARDED_BIT) {
			*ptr = (void*)vt;
			return;
		}

		objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			int size_index;

			block = MS_BLOCK_FOR_OBJ (obj);
			size_index = block->obj_size_index;

			if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
				if (block->is_to_space)
					return;

#ifdef FIXED_HEAP
				{
					mword vtable_word = *(mword*)obj;
					vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

					if (vtable_word & SGEN_FORWARDED_BIT) {
						*ptr = (void*)vt;
						return;
					}
				}
#endif

				HEAVY_STAT (++stat_major_objects_evacuated);
				goto do_copy_object;
			}

			MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
		} else {
			LOSObject *bigobj = sgen_los_header_for_object (obj);
			mword size_word = bigobj->size;
#ifdef FIXED_HEAP
			mword vtable_word = *(mword*)obj;
			vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
#endif

			if (size_word & 1)
				return;
			binary_protocol_pin (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
			if (SGEN_CAS_PTR ((void*)&bigobj->size, (void*)(size_word | 1), (void*)size_word) == (void*)size_word) {
				if (SGEN_VTABLE_HAS_REFERENCES (vt))
					GRAY_OBJECT_ENQUEUE (queue, obj);
			} else {
				g_assert (sgen_los_object_is_pinned (obj));
			}
		}
	}
}
#else
#ifdef SGEN_HAVE_CONCURRENT_MARK
static void
major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
{
	g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));

	if (!sgen_ptr_in_nursery (obj)) {
#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		mword objsize;

		objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
			MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
		} else {
			if (sgen_los_object_is_pinned (obj))
				return;

#ifdef ENABLE_DTRACE
			if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
				MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
				MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
			}
#endif

			sgen_los_pin_object (obj);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (queue, obj);
			INC_NUM_MAJOR_OBJECTS_MARKED ();
		}
	}
}
#endif

static void
major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
{
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);

	if (sgen_ptr_in_nursery (obj)) {
		int word, bit;
		char *forwarded, *old_obj;

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;

		/* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
		if (sgen_nursery_is_to_space (obj))
			return;

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		old_obj = obj;
		obj = copy_object_no_checks (obj, queue);
		if (G_UNLIKELY (old_obj == obj)) {
			/*
			 * If we fail to evacuate an object we just stop doing it for
			 * a given block size, as all others will surely fail too.
			 */
			if (!sgen_ptr_in_nursery (obj)) {
				int size_index;
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
				MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
			return;
		}
		*ptr = obj;

		/*
		 * FIXME: See comment for copy_object_no_checks().  If
		 * we have that, we can let the allocation function
		 * give us the block info, too, and we won't have to
		 * re-fetch it.
		 *
		 * FIXME (2): We should rework this to avoid all those nursery checks.
		 */
		/*
		 * For the split nursery allocator the object might
		 * still be in the nursery despite having been
		 * promoted, in which case we can't mark it.
		 */
		if (!sgen_ptr_in_nursery (obj)) {
			block = MS_BLOCK_FOR_OBJ (obj);
			MS_CALC_MARK_BIT (word, bit, obj);
			SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
			MS_SET_MARK_BIT (block, word, bit);
			binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
		}
	} else {
		char *forwarded;
#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		mword objsize;

		/*
		 * If we don't have a fixed heap we cannot know
		 * whether an object is in the LOS or in the small
		 * object major heap without checking its size.  To do
		 * that, however, we need to know that we actually
		 * have a valid object, not a forwarding pointer, so
		 * we have to do this check first.
		 */
		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}

		objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			int size_index;
			gboolean evacuate;

			block = MS_BLOCK_FOR_OBJ (obj);
			size_index = block->obj_size_index;
			evacuate = evacuate_block_obj_sizes [size_index];

#ifdef FIXED_HEAP
			/*
			 * We could also check for !block->has_pinned
			 * here, but it would only make an uncommon case
			 * faster, namely objects that are in blocks
			 * whose slot sizes are evacuated but which have
			 * pinned objects.
			 */
			if (evacuate && (forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
				*ptr = forwarded;
				return;
			}
#endif

			if (evacuate && !block->has_pinned) {
				g_assert (!SGEN_OBJECT_IS_PINNED (obj));
				if (block->is_to_space)
					return;
				HEAVY_STAT (++stat_major_objects_evacuated);
				goto do_copy_object;
			} else {
				MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
		} else {
			if (sgen_los_object_is_pinned (obj))
				return;
			binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));

#ifdef ENABLE_DTRACE
			if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
				MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
				MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
			}
#endif

			sgen_los_pin_object (obj);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (queue, obj);
		}
	}
}
#endif

static void
major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
{
	major_copy_or_mark_object (ptr, *ptr, queue);
}

#ifdef SGEN_HAVE_CONCURRENT_MARK
static void
major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
{
	major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
}

static long long
major_get_and_reset_num_major_objects_marked (void)
{
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
	long long num = num_major_objects_marked;
	num_major_objects_marked = 0;
	return num;
#else
	return 0;
#endif
}
#endif

#include "sgen-major-scan-object.h"

#ifdef SGEN_HAVE_CONCURRENT_MARK
#define SCAN_FOR_CONCURRENT_MARK
#include "sgen-major-scan-object.h"
#undef SCAN_FOR_CONCURRENT_MARK
#endif

static void
mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
{
	int i;
	int last_index = -1;

	if (!block->pin_queue_num_entries)
		return;

	block->has_pinned = TRUE;

	for (i = 0; i < block->pin_queue_num_entries; ++i) {
		int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
		SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", block->pin_queue_start [i], index, MS_BLOCK_FREE / block->obj_size);
		if (index == last_index)
			continue;
		MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
		last_index = index;
	}
}

static inline void
sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
{
	int obj_index;

	for (obj_index = 0; obj_index < count; ++obj_index) {
		int word, bit;
		void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);

		MS_CALC_MARK_BIT (word, bit, obj);
		if (MS_MARK_BIT (block, word, bit)) {
			SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
		} else {
			/* an unmarked object */
			if (MS_OBJ_ALLOCED (obj, block)) {
				/*
				 * FIXME: Merge consecutive
				 * slots for lower reporting
				 * overhead.  Maybe memset
				 * will also benefit?
				 */
				binary_protocol_empty (obj, obj_size);
				MONO_GC_MAJOR_SWEPT ((mword)obj, obj_size);
				memset (obj, 0, obj_size);
			}
			*(void**)obj = block->free_list;
			block->free_list = obj;
		}
	}
}

/*
 * sweep_block:
 *
 *   Traverse BLOCK, freeing and zeroing unused objects.
 */
static void
sweep_block (MSBlockInfo *block, gboolean during_major_collection)
{
	int count;

	if (!during_major_collection)
		g_assert (!sgen_concurrent_collection_in_progress ());

	if (block->swept)
		return;

	count = MS_BLOCK_FREE / block->obj_size;

	block->free_list = NULL;

	/* Use inline instances specialized to constant sizes, this allows the compiler to replace the memset calls with inline code */
	// FIXME: Add more sizes
	switch (block->obj_size) {
	case 16:
		sweep_block_for_size (block, count, 16);
		break;
	default:
		sweep_block_for_size (block, count, block->obj_size);
		break;
	}

	/* reset mark bits */
	memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);

	/*
	 * FIXME: reverse free list so that it's in address
	 * order
	 */

	block->swept = 1;
}

static inline int
bitcount (mword d)
{
#if SIZEOF_VOID_P == 8
	/* http://www.jjj.de/bitwizardry/bitwizardrypage.html */
	d -= (d>>1) & 0x5555555555555555;
	d = ((d>>2) & 0x3333333333333333) + (d & 0x3333333333333333);
	d = ((d>>4) + d) & 0x0f0f0f0f0f0f0f0f;
	d *= 0x0101010101010101;
	return d >> 56;
#else
	/* http://aggregate.org/MAGIC/ */
	d -= ((d >> 1) & 0x55555555);
	d = (((d >> 2) & 0x33333333) + (d & 0x33333333));
	d = (((d >> 4) + d) & 0x0f0f0f0f);
	d += (d >> 8);
	d += (d >> 16);
	return (d & 0x0000003f);
#endif
}
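/*
 * Both branches above are the classic parallel ("SWAR") population
 * count: pairs of bits are summed, then nibbles, then bytes; on 64-bit
 * the final multiply by 0x0101010101010101 accumulates all byte sums
 * into the top byte.  E.g. bitcount (0xf0) == 4.  It is used below to
 * count live objects per mark word during sweep.
 */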
static void
ms_sweep (void)
{
	int i;
	MSBlockInfo **iter;

	/* statistics for evacuation */
	int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
	int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
	int *num_blocks = alloca (sizeof (int) * num_block_obj_sizes);

#ifdef SGEN_HAVE_CONCURRENT_MARK
	mword total_evacuate_heap = 0;
	mword total_evacuate_saved = 0;
#endif

	for (i = 0; i < num_block_obj_sizes; ++i)
		slots_available [i] = slots_used [i] = num_blocks [i] = 0;

	/* clear all the free lists */
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
		MSBlockInfo **free_blocks = free_block_lists [i];
		int j;
		for (j = 0; j < num_block_obj_sizes; ++j)
			free_blocks [j] = NULL;
	}

	/* traverse all blocks, free and zero unmarked objects */
	iter = &all_blocks;
	while (*iter) {
		MSBlockInfo *block = *iter;
		int count;
		gboolean have_live = FALSE;
		gboolean has_pinned;
		gboolean have_free = FALSE;
		int obj_size_index;
		int nused = 0;

		obj_size_index = block->obj_size_index;

		has_pinned = block->has_pinned;
		block->has_pinned = block->pinned;

		block->is_to_space = FALSE;
		block->swept = 0;

		count = MS_BLOCK_FREE / block->obj_size;

#ifdef SGEN_HAVE_CONCURRENT_MARK
		if (block->cardtable_mod_union) {
			sgen_free_internal_dynamic (block->cardtable_mod_union, CARDS_PER_BLOCK, INTERNAL_MEM_CARDTABLE_MOD_UNION);
			block->cardtable_mod_union = NULL;
		}
#endif

		/* Count marked objects in the block */
		for (i = 0; i < MS_NUM_MARK_WORDS; ++i) {
			nused += bitcount (block->mark_words [i]);
		}
		if (nused) {
			have_live = TRUE;
		}
		if (nused < count)
			have_free = TRUE;

		if (!lazy_sweep)
			sweep_block (block, TRUE);

		if (have_live) {
			if (!has_pinned) {
				++num_blocks [obj_size_index];
				slots_used [obj_size_index] += nused;
				slots_available [obj_size_index] += count;
			}

			iter = &block->next;

			/*
			 * If there are free slots in the block, add
			 * the block to the corresponding free list.
			 */
			if (have_free) {
				MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
				int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
				block->next_free = free_blocks [index];
				free_blocks [index] = block;
			}

			update_heap_boundaries_for_block (block);
		} else {
			/*
			 * Blocks without live objects are removed from the
			 * block list and freed.
			 */
			*iter = block->next;

#ifdef FIXED_HEAP
			ms_free_block (block);
#else
			ms_free_block (block->block);

			sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
#endif

			--num_major_sections;
		}
	}

	for (i = 0; i < num_block_obj_sizes; ++i) {
		float usage = (float)slots_used [i] / (float)slots_available [i];
		if (num_blocks [i] > 5 && usage < evacuation_threshold) {
			evacuate_block_obj_sizes [i] = TRUE;
			/*
			g_print ("slot size %d - %d of %d used\n",
					block_obj_sizes [i], slots_used [i], slots_available [i]);
			*/
		} else {
			evacuate_block_obj_sizes [i] = FALSE;
		}

#ifdef SGEN_HAVE_CONCURRENT_MARK
		{
			mword total_bytes = block_obj_sizes [i] * slots_available [i];
			total_evacuate_heap += total_bytes;
			if (evacuate_block_obj_sizes [i])
				total_evacuate_saved += total_bytes - block_obj_sizes [i] * slots_used [i];
		}
#endif
	}

#ifdef SGEN_HAVE_CONCURRENT_MARK
	want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
#endif

	have_swept = TRUE;
}
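/*
 * Worked example of the evacuation decision above: a size class with,
 * say, 8 blocks whose slots are only 50% occupied has usage 0.5 <
 * evacuation_threshold (0.666), so it is flagged for evacuation and its
 * surviving objects will be copied into fresh blocks during the next
 * major collection, defragmenting that size class.
 */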
static void
major_sweep (void)
{
	ms_sweep ();
}

static int count_pinned_ref;
static int count_pinned_nonref;
static int count_nonpinned_ref;
static int count_nonpinned_nonref;

static void
count_nonpinned_callback (char *obj, size_t size, void *data)
{
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_nonpinned_ref;
	else
		++count_nonpinned_nonref;
}

static void
count_pinned_callback (char *obj, size_t size, void *data)
{
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_pinned_ref;
	else
		++count_pinned_nonref;
}

static G_GNUC_UNUSED void
count_ref_nonref_objs (void)
{
	int total;

	count_pinned_ref = 0;
	count_pinned_nonref = 0;
	count_nonpinned_ref = 0;
	count_nonpinned_nonref = 0;

	major_iterate_objects (TRUE, FALSE, count_nonpinned_callback, NULL);
	major_iterate_objects (FALSE, TRUE, count_pinned_callback, NULL);

	total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;

	g_print ("ref: %d pinned %d non-pinned non-ref: %d pinned %d non-pinned -- %.1f\n",
			count_pinned_ref, count_nonpinned_ref,
			count_pinned_nonref, count_nonpinned_nonref,
			(count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
}

static int
ms_calculate_block_obj_sizes (double factor, int *arr)
{
	double target_size = sizeof (MonoObject);
	int num_sizes = 0;
	int last_size = 0;

	do {
		int target_count = ceil (MS_BLOCK_FREE / target_size);
		int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);

		if (size != last_size) {
			if (arr)
				arr [num_sizes] = size;
			++num_sizes;
			last_size = size;
		}

		target_size *= factor;
	} while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);

	return num_sizes;
}
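/*
 * The function above generates a roughly geometric series of slot sizes:
 * each candidate size is target_size rounded down so that a whole number
 * of aligned slots fits in a block, duplicates are collapsed, and
 * target_size grows by `factor` (MS_BLOCK_OBJ_SIZE_FACTOR, i.e. sqrt(2))
 * until it reaches SGEN_MAX_SMALL_OBJ_SIZE.  Called with arr == NULL it
 * only counts the sizes, so it can be called once to size the array and
 * a second time to fill it.
 */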
/* only valid during minor collections */
static int old_num_major_sections;

static void
major_start_nursery_collection (void)
{
#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif

	old_num_major_sections = num_major_sections;
}

static void
major_finish_nursery_collection (void)
{
#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif
	sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);
}

static void
major_start_major_collection (void)
{
	int i;

	/* clear the free lists */
	for (i = 0; i < num_block_obj_sizes; ++i) {
		if (!evacuate_block_obj_sizes [i])
			continue;

		free_block_lists [0][i] = NULL;
		free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
	}

	// Sweep all unswept blocks
	if (lazy_sweep) {
		MSBlockInfo **iter;

		MONO_GC_SWEEP_BEGIN (GENERATION_OLD, TRUE);

		iter = &all_blocks;
		while (*iter) {
			MSBlockInfo *block = *iter;

			sweep_block (block, TRUE);

			iter = &block->next;
		}

		MONO_GC_SWEEP_END (GENERATION_OLD, TRUE);
	}
}

static void
major_finish_major_collection (void)
{
}

static void
major_have_computer_minor_collection_allowance (void)
{
#ifndef FIXED_HEAP
	int section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;

	g_assert (have_swept);

	/*
	 * FIXME: We don't free blocks on 32 bit platforms because it
	 * can lead to address space fragmentation, since we're
	 * allocating blocks in larger contingents.
	 */
	if (sizeof (mword) < 8)
		return;

	while (num_empty_blocks > section_reserve) {
		void *next = *(void**)empty_blocks;
		sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
		empty_blocks = next;

		/*
		 * Need not be atomic because this is running
		 * single-threaded.
		 */
		--num_empty_blocks;

		++stat_major_blocks_freed;
	}
#endif
}

static void
major_find_pin_queue_start_ends (SgenGrayQueue *queue)
{
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		block->pin_queue_start = sgen_find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
				&block->pin_queue_num_entries);
	} END_FOREACH_BLOCK;
}

static void
major_pin_objects (SgenGrayQueue *queue)
{
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		mark_pinned_objects_in_block (block, queue);
	} END_FOREACH_BLOCK;
}

static void
major_init_to_space (void)
{
}

static void
major_report_pinned_memory_usage (void)
{

[Listing truncated: the full file is 2450 lines; the remainder is not shown here.]