
/deps/v8/src/spaces.cc

http://github.com/joyent/node
  1. // Copyright 2011 the V8 project authors. All rights reserved.
  2. // Redistribution and use in source and binary forms, with or without
  3. // modification, are permitted provided that the following conditions are
  4. // met:
  5. //
  6. // * Redistributions of source code must retain the above copyright
  7. // notice, this list of conditions and the following disclaimer.
  8. // * Redistributions in binary form must reproduce the above
  9. // copyright notice, this list of conditions and the following
  10. // disclaimer in the documentation and/or other materials provided
  11. // with the distribution.
  12. // * Neither the name of Google Inc. nor the names of its
  13. // contributors may be used to endorse or promote products derived
  14. // from this software without specific prior written permission.
  15. //
  16. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  17. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  18. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  19. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  20. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  21. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  22. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  26. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27. #include "v8.h"
  28. #include "macro-assembler.h"
  29. #include "mark-compact.h"
  30. #include "platform.h"
  31. namespace v8 {
  32. namespace internal {
  33. // ----------------------------------------------------------------------------
  34. // HeapObjectIterator
  35. HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  36. // You can't actually iterate over the anchor page. It is not a real page,
  37. // just an anchor for the doubly linked page list. Initialize as if we have
  38. // reached the end of the anchor page, then the first iteration will move on
  39. // to the first page.
  40. Initialize(space,
  41. NULL,
  42. NULL,
  43. kAllPagesInSpace,
  44. NULL);
  45. }
  46. HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
  47. HeapObjectCallback size_func) {
  48. // You can't actually iterate over the anchor page. It is not a real page,
  49. // just an anchor for the doubly linked page list. Initialize the current
  50. // address and end as NULL, then the first iteration will move on
  51. // to the first page.
  52. Initialize(space,
  53. NULL,
  54. NULL,
  55. kAllPagesInSpace,
  56. size_func);
  57. }
  58. HeapObjectIterator::HeapObjectIterator(Page* page,
  59. HeapObjectCallback size_func) {
  60. Space* owner = page->owner();
  61. ASSERT(owner == page->heap()->old_pointer_space() ||
  62. owner == page->heap()->old_data_space() ||
  63. owner == page->heap()->map_space() ||
  64. owner == page->heap()->cell_space() ||
  65. owner == page->heap()->property_cell_space() ||
  66. owner == page->heap()->code_space());
  67. Initialize(reinterpret_cast<PagedSpace*>(owner),
  68. page->area_start(),
  69. page->area_end(),
  70. kOnePageOnly,
  71. size_func);
  72. ASSERT(page->WasSweptPrecisely());
  73. }
  74. void HeapObjectIterator::Initialize(PagedSpace* space,
  75. Address cur, Address end,
  76. HeapObjectIterator::PageMode mode,
  77. HeapObjectCallback size_f) {
  78. // Check that we actually can iterate this space.
  79. ASSERT(!space->was_swept_conservatively());
  80. space_ = space;
  81. cur_addr_ = cur;
  82. cur_end_ = end;
  83. page_mode_ = mode;
  84. size_func_ = size_f;
  85. }
  86. // We have hit the end of the current page and should advance to the next
  87. // block of objects, which starts on the following page.
  88. bool HeapObjectIterator::AdvanceToNextPage() {
  89. ASSERT(cur_addr_ == cur_end_);
  90. if (page_mode_ == kOnePageOnly) return false;
  91. Page* cur_page;
  92. if (cur_addr_ == NULL) {
  93. cur_page = space_->anchor();
  94. } else {
  95. cur_page = Page::FromAddress(cur_addr_ - 1);
  96. ASSERT(cur_addr_ == cur_page->area_end());
  97. }
  98. cur_page = cur_page->next_page();
  99. if (cur_page == space_->anchor()) return false;
  100. cur_addr_ = cur_page->area_start();
  101. cur_end_ = cur_page->area_end();
  102. ASSERT(cur_page->WasSweptPrecisely());
  103. return true;
  104. }
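// Typical usage, for illustration (Next() is the inline iteration method
// declared in the header; it walks objects in [cur_addr_, cur_end_) and calls
// AdvanceToNextPage() once the current range is exhausted):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }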
  105. // -----------------------------------------------------------------------------
  106. // CodeRange
  107. CodeRange::CodeRange(Isolate* isolate)
  108. : isolate_(isolate),
  109. code_range_(NULL),
  110. free_list_(0),
  111. allocation_list_(0),
  112. current_allocation_block_index_(0) {
  113. }
  114. bool CodeRange::SetUp(const size_t requested) {
  115. ASSERT(code_range_ == NULL);
  116. code_range_ = new VirtualMemory(requested);
  117. CHECK(code_range_ != NULL);
  118. if (!code_range_->IsReserved()) {
  119. delete code_range_;
  120. code_range_ = NULL;
  121. return false;
  122. }
  123. // We are sure that we have mapped a block of requested addresses.
  124. ASSERT(code_range_->size() == requested);
  125. LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  126. Address base = reinterpret_cast<Address>(code_range_->address());
  127. Address aligned_base =
  128. RoundUp(reinterpret_cast<Address>(code_range_->address()),
  129. MemoryChunk::kAlignment);
  130. size_t size = code_range_->size() - (aligned_base - base);
  131. allocation_list_.Add(FreeBlock(aligned_base, size));
  132. current_allocation_block_index_ = 0;
  133. return true;
  134. }
  135. int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
  136. const FreeBlock* right) {
  137. // The entire point of CodeRange is that the difference between two
  138. // addresses in the range can be represented as a signed 32-bit int,
  139. // so the cast is semantically correct.
  140. return static_cast<int>(left->start - right->start);
  141. }
  142. void CodeRange::GetNextAllocationBlock(size_t requested) {
  143. for (current_allocation_block_index_++;
  144. current_allocation_block_index_ < allocation_list_.length();
  145. current_allocation_block_index_++) {
  146. if (requested <= allocation_list_[current_allocation_block_index_].size) {
  147. return; // Found a large enough allocation block.
  148. }
  149. }
  150. // Sort and merge the free blocks on the free list and the allocation list.
  151. free_list_.AddAll(allocation_list_);
  152. allocation_list_.Clear();
  153. free_list_.Sort(&CompareFreeBlockAddress);
  154. for (int i = 0; i < free_list_.length();) {
  155. FreeBlock merged = free_list_[i];
  156. i++;
  157. // Add adjacent free blocks to the current merged block.
  158. while (i < free_list_.length() &&
  159. free_list_[i].start == merged.start + merged.size) {
  160. merged.size += free_list_[i].size;
  161. i++;
  162. }
  163. if (merged.size > 0) {
  164. allocation_list_.Add(merged);
  165. }
  166. }
  167. free_list_.Clear();
  168. for (current_allocation_block_index_ = 0;
  169. current_allocation_block_index_ < allocation_list_.length();
  170. current_allocation_block_index_++) {
  171. if (requested <= allocation_list_[current_allocation_block_index_].size) {
  172. return; // Found a large enough allocation block.
  173. }
  174. }
  175. // Code range is full or too fragmented.
  176. V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
  177. }
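// Summary of the slow path above: when no remaining allocation block is large
// enough, the free list and the allocation list are combined, sorted by start
// address, and adjacent blocks are coalesced into larger ones before the
// search is retried; only if the retried search also fails is
// V8::FatalProcessOutOfMemory reported.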
  178. Address CodeRange::AllocateRawMemory(const size_t requested_size,
  179. const size_t commit_size,
  180. size_t* allocated) {
  181. ASSERT(commit_size <= requested_size);
  182. ASSERT(current_allocation_block_index_ < allocation_list_.length());
  183. if (requested_size > allocation_list_[current_allocation_block_index_].size) {
  184. // Find an allocation block large enough. This function call may
  185. // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
  186. GetNextAllocationBlock(requested_size);
  187. }
  188. // Commit the requested memory at the start of the current allocation block.
  189. size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  190. FreeBlock current = allocation_list_[current_allocation_block_index_];
  191. if (aligned_requested >= (current.size - Page::kPageSize)) {
  192. // Don't leave a small free block that is useless for a large object or chunk.
  193. *allocated = current.size;
  194. } else {
  195. *allocated = aligned_requested;
  196. }
  197. ASSERT(*allocated <= current.size);
  198. ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  199. if (!MemoryAllocator::CommitExecutableMemory(code_range_,
  200. current.start,
  201. commit_size,
  202. *allocated)) {
  203. *allocated = 0;
  204. return NULL;
  205. }
  206. allocation_list_[current_allocation_block_index_].start += *allocated;
  207. allocation_list_[current_allocation_block_index_].size -= *allocated;
  208. if (*allocated == current.size) {
  209. GetNextAllocationBlock(0); // This block is used up, get the next one.
  210. }
  211. return current.start;
  212. }
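// Note on the two size parameters above: requested_size (rounded up into
// *allocated) is carved out of the current allocation block, but only
// commit_size bytes are actually committed via CommitExecutableMemory().
// The remainder stays reserved and can be committed later with
// CommitRawMemory(), e.g. when MemoryChunk::CommitArea() grows a code page.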
  213. bool CodeRange::CommitRawMemory(Address start, size_t length) {
  214. return code_range_->Commit(start, length, true);
  215. }
  216. bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  217. return code_range_->Uncommit(start, length);
  218. }
  219. void CodeRange::FreeRawMemory(Address address, size_t length) {
  220. ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
  221. free_list_.Add(FreeBlock(address, length));
  222. code_range_->Uncommit(address, length);
  223. }
  224. void CodeRange::TearDown() {
  225. delete code_range_; // Frees all memory in the virtual memory range.
  226. code_range_ = NULL;
  227. free_list_.Free();
  228. allocation_list_.Free();
  229. }
  230. // -----------------------------------------------------------------------------
  231. // MemoryAllocator
  232. //
  233. MemoryAllocator::MemoryAllocator(Isolate* isolate)
  234. : isolate_(isolate),
  235. capacity_(0),
  236. capacity_executable_(0),
  237. size_(0),
  238. size_executable_(0) {
  239. }
  240. bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  241. capacity_ = RoundUp(capacity, Page::kPageSize);
  242. capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  243. ASSERT_GE(capacity_, capacity_executable_);
  244. size_ = 0;
  245. size_executable_ = 0;
  246. return true;
  247. }
  248. void MemoryAllocator::TearDown() {
  249. // Check that spaces were torn down before MemoryAllocator.
  250. ASSERT(size_ == 0);
  251. // TODO(gc) this will be true again when we fix FreeMemory.
  252. // ASSERT(size_executable_ == 0);
  253. capacity_ = 0;
  254. capacity_executable_ = 0;
  255. }
  256. void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
  257. Executability executable) {
  258. // TODO(gc) make code_range part of memory allocator?
  259. ASSERT(reservation->IsReserved());
  260. size_t size = reservation->size();
  261. ASSERT(size_ >= size);
  262. size_ -= size;
  263. isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  264. if (executable == EXECUTABLE) {
  265. ASSERT(size_executable_ >= size);
  266. size_executable_ -= size;
  267. }
  268. // Code which is part of the code-range does not have its own VirtualMemory.
  269. ASSERT(!isolate_->code_range()->contains(
  270. static_cast<Address>(reservation->address())));
  271. ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  272. reservation->Release();
  273. }
  274. void MemoryAllocator::FreeMemory(Address base,
  275. size_t size,
  276. Executability executable) {
  277. // TODO(gc) make code_range part of memory allocator?
  278. ASSERT(size_ >= size);
  279. size_ -= size;
  280. isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  281. if (executable == EXECUTABLE) {
  282. ASSERT(size_executable_ >= size);
  283. size_executable_ -= size;
  284. }
  285. if (isolate_->code_range()->contains(static_cast<Address>(base))) {
  286. ASSERT(executable == EXECUTABLE);
  287. isolate_->code_range()->FreeRawMemory(base, size);
  288. } else {
  289. ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  290. bool result = VirtualMemory::ReleaseRegion(base, size);
  291. USE(result);
  292. ASSERT(result);
  293. }
  294. }
  295. Address MemoryAllocator::ReserveAlignedMemory(size_t size,
  296. size_t alignment,
  297. VirtualMemory* controller) {
  298. VirtualMemory reservation(size, alignment);
  299. if (!reservation.IsReserved()) return NULL;
  300. size_ += reservation.size();
  301. Address base = RoundUp(static_cast<Address>(reservation.address()),
  302. alignment);
  303. controller->TakeControl(&reservation);
  304. return base;
  305. }
  306. Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
  307. size_t commit_size,
  308. size_t alignment,
  309. Executability executable,
  310. VirtualMemory* controller) {
  311. ASSERT(commit_size <= reserve_size);
  312. VirtualMemory reservation;
  313. Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  314. if (base == NULL) return NULL;
  315. if (executable == EXECUTABLE) {
  316. if (!CommitExecutableMemory(&reservation,
  317. base,
  318. commit_size,
  319. reserve_size)) {
  320. base = NULL;
  321. }
  322. } else {
  323. if (!reservation.Commit(base, commit_size, false)) {
  324. base = NULL;
  325. }
  326. }
  327. if (base == NULL) {
  328. // Failed to commit the body. Release the mapping and any partially
  329. // committed regions inside it.
  330. reservation.Release();
  331. return NULL;
  332. }
  333. controller->TakeControl(&reservation);
  334. return base;
  335. }
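// The reserve/commit split used above, as a rough sketch (same VirtualMemory
// API as elsewhere in this file):
//
//   VirtualMemory reservation;
//   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
//   if (base != NULL) reservation.Commit(base, commit_size, false);
//
// Address space for the whole chunk is reserved up front so the chunk layout
// stays stable, while physical pages are committed only for the part in use;
// MemoryChunk::CommitArea() below commits or uncommits the rest on demand.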
  336. void Page::InitializeAsAnchor(PagedSpace* owner) {
  337. set_owner(owner);
  338. set_prev_page(this);
  339. set_next_page(this);
  340. }
  341. NewSpacePage* NewSpacePage::Initialize(Heap* heap,
  342. Address start,
  343. SemiSpace* semi_space) {
  344. Address area_start = start + NewSpacePage::kObjectStartOffset;
  345. Address area_end = start + Page::kPageSize;
  346. MemoryChunk* chunk = MemoryChunk::Initialize(heap,
  347. start,
  348. Page::kPageSize,
  349. area_start,
  350. area_end,
  351. NOT_EXECUTABLE,
  352. semi_space);
  353. chunk->set_next_chunk(NULL);
  354. chunk->set_prev_chunk(NULL);
  355. chunk->initialize_scan_on_scavenge(true);
  356. bool in_to_space = (semi_space->id() != kFromSpace);
  357. chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
  358. : MemoryChunk::IN_FROM_SPACE);
  359. ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
  360. : MemoryChunk::IN_TO_SPACE));
  361. NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  362. heap->incremental_marking()->SetNewSpacePageFlags(page);
  363. return page;
  364. }
  365. void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  366. set_owner(semi_space);
  367. set_next_chunk(this);
  368. set_prev_chunk(this);
  369. // Clearing all flags marks this invalid page as not being in new-space.
  370. // All real new-space pages will be in new-space.
  371. SetFlags(0, ~0);
  372. }
  373. MemoryChunk* MemoryChunk::Initialize(Heap* heap,
  374. Address base,
  375. size_t size,
  376. Address area_start,
  377. Address area_end,
  378. Executability executable,
  379. Space* owner) {
  380. MemoryChunk* chunk = FromAddress(base);
  381. ASSERT(base == chunk->address());
  382. chunk->heap_ = heap;
  383. chunk->size_ = size;
  384. chunk->area_start_ = area_start;
  385. chunk->area_end_ = area_end;
  386. chunk->flags_ = 0;
  387. chunk->set_owner(owner);
  388. chunk->InitializeReservedMemory();
  389. chunk->slots_buffer_ = NULL;
  390. chunk->skip_list_ = NULL;
  391. chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  392. chunk->progress_bar_ = 0;
  393. chunk->high_water_mark_ = static_cast<int>(area_start - base);
  394. chunk->parallel_sweeping_ = 0;
  395. chunk->available_in_small_free_list_ = 0;
  396. chunk->available_in_medium_free_list_ = 0;
  397. chunk->available_in_large_free_list_ = 0;
  398. chunk->available_in_huge_free_list_ = 0;
  399. chunk->non_available_small_blocks_ = 0;
  400. chunk->ResetLiveBytes();
  401. Bitmap::Clear(chunk);
  402. chunk->initialize_scan_on_scavenge(false);
  403. chunk->SetFlag(WAS_SWEPT_PRECISELY);
  404. ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  405. ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
  406. if (executable == EXECUTABLE) {
  407. chunk->SetFlag(IS_EXECUTABLE);
  408. }
  409. if (owner == heap->old_data_space()) {
  410. chunk->SetFlag(CONTAINS_ONLY_DATA);
  411. }
  412. return chunk;
  413. }
  414. // Commit MemoryChunk area to the requested size.
  415. bool MemoryChunk::CommitArea(size_t requested) {
  416. size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
  417. MemoryAllocator::CodePageGuardSize() : 0;
  418. size_t header_size = area_start() - address() - guard_size;
  419. size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
  420. size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
  421. OS::CommitPageSize());
  422. if (commit_size > committed_size) {
  423. // Commit size should be less than or equal to the reserved size.
  424. ASSERT(commit_size <= size() - 2 * guard_size);
  425. // Append the committed area.
  426. Address start = address() + committed_size + guard_size;
  427. size_t length = commit_size - committed_size;
  428. if (reservation_.IsReserved()) {
  429. if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
  430. return false;
  431. }
  432. } else {
  433. CodeRange* code_range = heap_->isolate()->code_range();
  434. ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
  435. if (!code_range->CommitRawMemory(start, length)) return false;
  436. }
  437. if (Heap::ShouldZapGarbage()) {
  438. heap_->isolate()->memory_allocator()->ZapBlock(start, length);
  439. }
  440. } else if (commit_size < committed_size) {
  441. ASSERT(commit_size > 0);
  442. // Shrink the committed area.
  443. size_t length = committed_size - commit_size;
  444. Address start = address() + committed_size + guard_size - length;
  445. if (reservation_.IsReserved()) {
  446. if (!reservation_.Uncommit(start, length)) return false;
  447. } else {
  448. CodeRange* code_range = heap_->isolate()->code_range();
  449. ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
  450. if (!code_range->UncommitRawMemory(start, length)) return false;
  451. }
  452. }
  453. area_end_ = area_start_ + requested;
  454. return true;
  455. }
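// Worked example with hypothetical numbers (4 KB OS commit pages, a
// non-executable chunk, so guard_size == 0; header_size == 1 KB; current area
// size 6 KB): committed_size = RoundUp(1K + 6K, 4K) = 8K. A request for 10 KB
// of area gives commit_size = RoundUp(1K + 10K, 4K) = 12K, so the 4 KB
// starting at address() + 8K is committed and area_end_ moves to
// area_start_ + 10K.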
  456. void MemoryChunk::InsertAfter(MemoryChunk* other) {
  457. next_chunk_ = other->next_chunk_;
  458. prev_chunk_ = other;
  459. // This memory barrier is needed since concurrent sweeper threads may iterate
  460. // over the list of pages while a new page is inserted.
  461. // TODO(hpayer): find a cleaner way to guarantee that the page list can be
  462. // expanded concurrently
  463. MemoryBarrier();
  464. // The following two write operations can take effect in arbitrary order
  465. // since pages are always iterated by the sweeper threads in LIFO order, i.e.,
  466. // the inserted page becomes visible to the sweeper threads after
  467. // other->next_chunk_ = this;
  468. other->next_chunk_->prev_chunk_ = this;
  469. other->next_chunk_ = this;
  470. }
  471. void MemoryChunk::Unlink() {
  472. if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
  473. heap_->decrement_scan_on_scavenge_pages();
  474. ClearFlag(SCAN_ON_SCAVENGE);
  475. }
  476. next_chunk_->prev_chunk_ = prev_chunk_;
  477. prev_chunk_->next_chunk_ = next_chunk_;
  478. prev_chunk_ = NULL;
  479. next_chunk_ = NULL;
  480. }
  481. MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
  482. intptr_t commit_area_size,
  483. Executability executable,
  484. Space* owner) {
  485. ASSERT(commit_area_size <= reserve_area_size);
  486. size_t chunk_size;
  487. Heap* heap = isolate_->heap();
  488. Address base = NULL;
  489. VirtualMemory reservation;
  490. Address area_start = NULL;
  491. Address area_end = NULL;
  492. //
  493. // MemoryChunk layout:
  494. //
  495. // Executable
  496. // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  497. // | Header |
  498. // +----------------------------+<- base + CodePageGuardStartOffset
  499. // | Guard |
  500. // +----------------------------+<- area_start_
  501. // | Area |
  502. // +----------------------------+<- area_end_ (area_start + commit_area_size)
  503. // | Committed but not used |
  504. // +----------------------------+<- aligned at OS page boundary
  505. // | Reserved but not committed |
  506. // +----------------------------+<- aligned at OS page boundary
  507. // | Guard |
  508. // +----------------------------+<- base + chunk_size
  509. //
  510. // Non-executable
  511. // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  512. // | Header |
  513. // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  514. // | Area |
  515. // +----------------------------+<- area_end_ (area_start + commit_area_size)
  516. // | Committed but not used |
  517. // +----------------------------+<- aligned at OS page boundary
  518. // | Reserved but not committed |
  519. // +----------------------------+<- base + chunk_size
  520. //
  521. if (executable == EXECUTABLE) {
  522. chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
  523. OS::CommitPageSize()) + CodePageGuardSize();
  524. // Check executable memory limit.
  525. if (size_executable_ + chunk_size > capacity_executable_) {
  526. LOG(isolate_,
  527. StringEvent("MemoryAllocator::AllocateRawMemory",
  528. "V8 Executable Allocation capacity exceeded"));
  529. return NULL;
  530. }
  531. // Size of header (not executable) plus area (executable).
  532. size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
  533. OS::CommitPageSize());
  534. // Allocate executable memory either from code range or from the
  535. // OS.
  536. if (isolate_->code_range()->exists()) {
  537. base = isolate_->code_range()->AllocateRawMemory(chunk_size,
  538. commit_size,
  539. &chunk_size);
  540. ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
  541. MemoryChunk::kAlignment));
  542. if (base == NULL) return NULL;
  543. size_ += chunk_size;
  544. // Update executable memory size.
  545. size_executable_ += chunk_size;
  546. } else {
  547. base = AllocateAlignedMemory(chunk_size,
  548. commit_size,
  549. MemoryChunk::kAlignment,
  550. executable,
  551. &reservation);
  552. if (base == NULL) return NULL;
  553. // Update executable memory size.
  554. size_executable_ += reservation.size();
  555. }
  556. if (Heap::ShouldZapGarbage()) {
  557. ZapBlock(base, CodePageGuardStartOffset());
  558. ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
  559. }
  560. area_start = base + CodePageAreaStartOffset();
  561. area_end = area_start + commit_area_size;
  562. } else {
  563. chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
  564. OS::CommitPageSize());
  565. size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
  566. commit_area_size, OS::CommitPageSize());
  567. base = AllocateAlignedMemory(chunk_size,
  568. commit_size,
  569. MemoryChunk::kAlignment,
  570. executable,
  571. &reservation);
  572. if (base == NULL) return NULL;
  573. if (Heap::ShouldZapGarbage()) {
  574. ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
  575. }
  576. area_start = base + Page::kObjectStartOffset;
  577. area_end = area_start + commit_area_size;
  578. }
  579. // Use chunk_size for statistics and callbacks because we assume that they
  580. // treat reserved but not-yet committed memory regions of chunks as allocated.
  581. isolate_->counters()->memory_allocated()->
  582. Increment(static_cast<int>(chunk_size));
  583. LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  584. if (owner != NULL) {
  585. ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  586. PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  587. }
  588. MemoryChunk* result = MemoryChunk::Initialize(heap,
  589. base,
  590. chunk_size,
  591. area_start,
  592. area_end,
  593. executable,
  594. owner);
  595. result->set_reserved_memory(&reservation);
  596. return result;
  597. }
  598. void Page::ResetFreeListStatistics() {
  599. non_available_small_blocks_ = 0;
  600. available_in_small_free_list_ = 0;
  601. available_in_medium_free_list_ = 0;
  602. available_in_large_free_list_ = 0;
  603. available_in_huge_free_list_ = 0;
  604. }
  605. Page* MemoryAllocator::AllocatePage(intptr_t size,
  606. PagedSpace* owner,
  607. Executability executable) {
  608. MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  609. if (chunk == NULL) return NULL;
  610. return Page::Initialize(isolate_->heap(), chunk, executable, owner);
  611. }
  612. LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
  613. Space* owner,
  614. Executability executable) {
  615. MemoryChunk* chunk = AllocateChunk(object_size,
  616. object_size,
  617. executable,
  618. owner);
  619. if (chunk == NULL) return NULL;
  620. return LargePage::Initialize(isolate_->heap(), chunk);
  621. }
  622. void MemoryAllocator::Free(MemoryChunk* chunk) {
  623. LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  624. if (chunk->owner() != NULL) {
  625. ObjectSpace space =
  626. static_cast<ObjectSpace>(1 << chunk->owner()->identity());
  627. PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  628. }
  629. isolate_->heap()->RememberUnmappedPage(
  630. reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
  631. delete chunk->slots_buffer();
  632. delete chunk->skip_list();
  633. VirtualMemory* reservation = chunk->reserved_memory();
  634. if (reservation->IsReserved()) {
  635. FreeMemory(reservation, chunk->executable());
  636. } else {
  637. FreeMemory(chunk->address(),
  638. chunk->size(),
  639. chunk->executable());
  640. }
  641. }
  642. bool MemoryAllocator::CommitBlock(Address start,
  643. size_t size,
  644. Executability executable) {
  645. if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
  646. if (Heap::ShouldZapGarbage()) {
  647. ZapBlock(start, size);
  648. }
  649. isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  650. return true;
  651. }
  652. bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  653. if (!VirtualMemory::UncommitRegion(start, size)) return false;
  654. isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  655. return true;
  656. }
  657. void MemoryAllocator::ZapBlock(Address start, size_t size) {
  658. for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
  659. Memory::Address_at(start + s) = kZapValue;
  660. }
  661. }
  662. void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
  663. AllocationAction action,
  664. size_t size) {
  665. for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
  666. MemoryAllocationCallbackRegistration registration =
  667. memory_allocation_callbacks_[i];
  668. if ((registration.space & space) == space &&
  669. (registration.action & action) == action)
  670. registration.callback(space, action, static_cast<int>(size));
  671. }
  672. }
  673. bool MemoryAllocator::MemoryAllocationCallbackRegistered(
  674. MemoryAllocationCallback callback) {
  675. for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
  676. if (memory_allocation_callbacks_[i].callback == callback) return true;
  677. }
  678. return false;
  679. }
  680. void MemoryAllocator::AddMemoryAllocationCallback(
  681. MemoryAllocationCallback callback,
  682. ObjectSpace space,
  683. AllocationAction action) {
  684. ASSERT(callback != NULL);
  685. MemoryAllocationCallbackRegistration registration(callback, space, action);
  686. ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  687. return memory_allocation_callbacks_.Add(registration);
  688. }
  689. void MemoryAllocator::RemoveMemoryAllocationCallback(
  690. MemoryAllocationCallback callback) {
  691. ASSERT(callback != NULL);
  692. for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
  693. if (memory_allocation_callbacks_[i].callback == callback) {
  694. memory_allocation_callbacks_.Remove(i);
  695. return;
  696. }
  697. }
  698. UNREACHABLE();
  699. }
  700. #ifdef DEBUG
  701. void MemoryAllocator::ReportStatistics() {
  702. float pct = static_cast<float>(capacity_ - size_) / capacity_;
  703. PrintF(" capacity: %" V8_PTR_PREFIX "d"
  704. ", used: %" V8_PTR_PREFIX "d"
  705. ", available: %%%d\n\n",
  706. capacity_, size_, static_cast<int>(pct*100));
  707. }
  708. #endif
  709. int MemoryAllocator::CodePageGuardStartOffset() {
  710. // We are guarding code pages: the first OS page after the header
  711. // will be protected as non-writable.
  712. return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
  713. }
  714. int MemoryAllocator::CodePageGuardSize() {
  715. return static_cast<int>(OS::CommitPageSize());
  716. }
  717. int MemoryAllocator::CodePageAreaStartOffset() {
  718. // We are guarding code pages: the first OS page after the header
  719. // will be protected as non-writable.
  720. return CodePageGuardStartOffset() + CodePageGuardSize();
  721. }
  722. int MemoryAllocator::CodePageAreaEndOffset() {
  723. // We are guarding code pages: the last OS page will be protected as
  724. // non-writable.
  725. return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
  726. }
  727. bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
  728. Address start,
  729. size_t commit_size,
  730. size_t reserved_size) {
  731. // Commit page header (not executable).
  732. if (!vm->Commit(start,
  733. CodePageGuardStartOffset(),
  734. false)) {
  735. return false;
  736. }
  737. // Create guard page after the header.
  738. if (!vm->Guard(start + CodePageGuardStartOffset())) {
  739. return false;
  740. }
  741. // Commit page body (executable).
  742. if (!vm->Commit(start + CodePageAreaStartOffset(),
  743. commit_size - CodePageGuardStartOffset(),
  744. true)) {
  745. return false;
  746. }
  747. // Create guard page before the end.
  748. if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
  749. return false;
  750. }
  751. return true;
  752. }
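// The resulting protection layout for an executable chunk is:
//   [ header              : committed, not executable ]
//   [ guard page                                       ]
//   [ body                : committed, executable      ]
//   [ reserved, not yet committed                      ]
//   [ trailing guard page                              ]
// which matches the "Executable" diagram in AllocateChunk() above.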
  753. // -----------------------------------------------------------------------------
  754. // MemoryChunk implementation
  755. void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  756. MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  757. if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
  758. static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  759. }
  760. chunk->IncrementLiveBytes(by);
  761. }
  762. // -----------------------------------------------------------------------------
  763. // PagedSpace implementation
  764. PagedSpace::PagedSpace(Heap* heap,
  765. intptr_t max_capacity,
  766. AllocationSpace id,
  767. Executability executable)
  768. : Space(heap, id, executable),
  769. free_list_(this),
  770. was_swept_conservatively_(false),
  771. first_unswept_page_(Page::FromAddress(NULL)),
  772. unswept_free_bytes_(0) {
  773. if (id == CODE_SPACE) {
  774. area_size_ = heap->isolate()->memory_allocator()->
  775. CodePageAreaSize();
  776. } else {
  777. area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  778. }
  779. max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
  780. * AreaSize();
  781. accounting_stats_.Clear();
  782. allocation_info_.top = NULL;
  783. allocation_info_.limit = NULL;
  784. anchor_.InitializeAsAnchor(this);
  785. }
  786. bool PagedSpace::SetUp() {
  787. return true;
  788. }
  789. bool PagedSpace::HasBeenSetUp() {
  790. return true;
  791. }
  792. void PagedSpace::TearDown() {
  793. PageIterator iterator(this);
  794. while (iterator.has_next()) {
  795. heap()->isolate()->memory_allocator()->Free(iterator.next());
  796. }
  797. anchor_.set_next_page(&anchor_);
  798. anchor_.set_prev_page(&anchor_);
  799. accounting_stats_.Clear();
  800. }
  801. size_t PagedSpace::CommittedPhysicalMemory() {
  802. if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  803. MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  804. size_t size = 0;
  805. PageIterator it(this);
  806. while (it.has_next()) {
  807. size += it.next()->CommittedPhysicalMemory();
  808. }
  809. return size;
  810. }
  811. MaybeObject* PagedSpace::FindObject(Address addr) {
  812. // Note: this function can only be called on precisely swept spaces.
  813. ASSERT(!heap()->mark_compact_collector()->in_use());
  814. if (!Contains(addr)) return Failure::Exception();
  815. Page* p = Page::FromAddress(addr);
  816. HeapObjectIterator it(p, NULL);
  817. for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
  818. Address cur = obj->address();
  819. Address next = cur + obj->Size();
  820. if ((cur <= addr) && (addr < next)) return obj;
  821. }
  822. UNREACHABLE();
  823. return Failure::Exception();
  824. }
  825. bool PagedSpace::CanExpand() {
  826. ASSERT(max_capacity_ % AreaSize() == 0);
  827. if (Capacity() == max_capacity_) return false;
  828. ASSERT(Capacity() < max_capacity_);
  829. // Are we going to exceed capacity for this space?
  830. if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
  831. return true;
  832. }
  833. bool PagedSpace::Expand() {
  834. if (!CanExpand()) return false;
  835. intptr_t size = AreaSize();
  836. if (anchor_.next_page() == &anchor_) {
  837. size = SizeOfFirstPage();
  838. }
  839. Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
  840. size, this, executable());
  841. if (p == NULL) return false;
  842. ASSERT(Capacity() <= max_capacity_);
  843. p->InsertAfter(anchor_.prev_page());
  844. return true;
  845. }
  846. intptr_t PagedSpace::SizeOfFirstPage() {
  847. int size = 0;
  848. switch (identity()) {
  849. case OLD_POINTER_SPACE:
  850. size = 64 * kPointerSize * KB;
  851. break;
  852. case OLD_DATA_SPACE:
  853. size = 192 * KB;
  854. break;
  855. case MAP_SPACE:
  856. size = 16 * kPointerSize * KB;
  857. break;
  858. case CELL_SPACE:
  859. size = 16 * kPointerSize * KB;
  860. break;
  861. case PROPERTY_CELL_SPACE:
  862. size = 8 * kPointerSize * KB;
  863. break;
  864. case CODE_SPACE:
  865. if (heap()->isolate()->code_range()->exists()) {
  866. // When code range exists, code pages are allocated in a special way
  867. // (from the reserved code range). That part of the code is not yet
  868. // upgraded to handle small pages.
  869. size = AreaSize();
  870. } else {
  871. size = 384 * KB;
  872. }
  873. break;
  874. default:
  875. UNREACHABLE();
  876. }
  877. return Min(size, AreaSize());
  878. }
  879. int PagedSpace::CountTotalPages() {
  880. PageIterator it(this);
  881. int count = 0;
  882. while (it.has_next()) {
  883. it.next();
  884. count++;
  885. }
  886. return count;
  887. }
  888. void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
  889. sizes->huge_size_ = page->available_in_huge_free_list();
  890. sizes->small_size_ = page->available_in_small_free_list();
  891. sizes->medium_size_ = page->available_in_medium_free_list();
  892. sizes->large_size_ = page->available_in_large_free_list();
  893. }
  894. void PagedSpace::ResetFreeListStatistics() {
  895. PageIterator page_iterator(this);
  896. while (page_iterator.has_next()) {
  897. Page* page = page_iterator.next();
  898. page->ResetFreeListStatistics();
  899. }
  900. }
  901. void PagedSpace::ReleasePage(Page* page, bool unlink) {
  902. ASSERT(page->LiveBytes() == 0);
  903. ASSERT(AreaSize() == page->area_size());
  904. // Adjust list of unswept pages if the page is the head of the list.
  905. if (first_unswept_page_ == page) {
  906. first_unswept_page_ = page->next_page();
  907. if (first_unswept_page_ == anchor()) {
  908. first_unswept_page_ = Page::FromAddress(NULL);
  909. }
  910. }
  911. if (page->WasSwept()) {
  912. intptr_t size = free_list_.EvictFreeListItems(page);
  913. accounting_stats_.AllocateBytes(size);
  914. ASSERT_EQ(AreaSize(), static_cast<int>(size));
  915. } else {
  916. DecreaseUnsweptFreeBytes(page);
  917. }
  918. if (Page::FromAllocationTop(allocation_info_.top) == page) {
  919. allocation_info_.top = allocation_info_.limit = NULL;
  920. }
  921. if (unlink) {
  922. page->Unlink();
  923. }
  924. if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
  925. heap()->isolate()->memory_allocator()->Free(page);
  926. } else {
  927. heap()->QueueMemoryChunkForFree(page);
  928. }
  929. ASSERT(Capacity() > 0);
  930. accounting_stats_.ShrinkSpace(AreaSize());
  931. }
  932. #ifdef DEBUG
  933. void PagedSpace::Print() { }
  934. #endif
  935. #ifdef VERIFY_HEAP
  936. void PagedSpace::Verify(ObjectVisitor* visitor) {
  937. // We can only iterate over the pages if they were swept precisely.
  938. if (was_swept_conservatively_) return;
  939. bool allocation_pointer_found_in_space =
  940. (allocation_info_.top == allocation_info_.limit);
  941. PageIterator page_iterator(this);
  942. while (page_iterator.has_next()) {
  943. Page* page = page_iterator.next();
  944. CHECK(page->owner() == this);
  945. if (page == Page::FromAllocationTop(allocation_info_.top)) {
  946. allocation_pointer_found_in_space = true;
  947. }
  948. CHECK(page->WasSweptPrecisely());
  949. HeapObjectIterator it(page, NULL);
  950. Address end_of_previous_object = page->area_start();
  951. Address top = page->area_end();
  952. int black_size = 0;
  953. for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
  954. CHECK(end_of_previous_object <= object->address());
  955. // The first word should be a map, and we expect all map pointers to
  956. // be in map space.
  957. Map* map = object->map();
  958. CHECK(map->IsMap());
  959. CHECK(heap()->map_space()->Contains(map));
  960. // Perform space-specific object verification.
  961. VerifyObject(object);
  962. // The object itself should look OK.
  963. object->Verify();
  964. // All the interior pointers should be contained in the heap.
  965. int size = object->Size();
  966. object->IterateBody(map->instance_type(), size, visitor);
  967. if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
  968. black_size += size;
  969. }
  970. CHECK(object->address() + size <= top);
  971. end_of_previous_object = object->address() + size;
  972. }
  973. CHECK_LE(black_size, page->LiveBytes());
  974. }
  975. CHECK(allocation_pointer_found_in_space);
  976. }
  977. #endif // VERIFY_HEAP
  978. // -----------------------------------------------------------------------------
  979. // NewSpace implementation
  980. bool NewSpace::SetUp(int reserved_semispace_capacity,
  981. int maximum_semispace_capacity) {
  982. // Set up new space based on the preallocated memory block defined by
  983. // start and size. The provided space is divided into two semi-spaces.
  984. // To support fast containment testing in the new space, the size of
  985. // this chunk must be a power of two and it must be aligned to its size.
  986. int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
  987. size_t size = 2 * reserved_semispace_capacity;
  988. Address base =
  989. heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
  990. size, size, &reservation_);
  991. if (base == NULL) return false;
  992. chunk_base_ = base;
  993. chunk_size_ = static_cast<uintptr_t>(size);
  994. LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
  995. ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  996. ASSERT(IsPowerOf2(maximum_semispace_capacity));
  997. // Allocate and set up the histogram arrays if necessary.
  998. allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  999. promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  1000. #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
  1001. promoted_histogram_[name].set_name(#name);
  1002. INSTANCE_TYPE_LIST(SET_NAME)
  1003. #undef SET_NAME
  1004. ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  1005. ASSERT(static_cast<intptr_t>(chunk_size_) >=
  1006. 2 * heap()->ReservedSemiSpaceSize());
  1007. ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
  1008. to_space_.SetUp(chunk_base_,
  1009. initial_semispace_capacity,
  1010. maximum_semispace_capacity);
  1011. from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
  1012. initial_semispace_capacity,
  1013. maximum_semispace_capacity);
  1014. if (!to_space_.Commit()) {
  1015. return false;
  1016. }
  1017. ASSERT(!from_space_.is_committed()); // No need to use memory yet.
  1018. start_ = chunk_base_;
  1019. address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  1020. object_mask_ = address_mask_ | kHeapObjectTagMask;
  1021. object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
  1022. ResetAllocationInfo();
  1023. return true;
  1024. }
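// Because the 2 * reserved_semispace_capacity chunk is a power of two in size
// and aligned to its size, new-space containment reduces to a mask test,
// roughly (the actual predicate lives in the header):
//
//   (reinterpret_cast<uintptr_t>(addr) & address_mask_)
//       == reinterpret_cast<uintptr_t>(start_)
//
// which is what the address_mask_/object_mask_/object_expected_ fields set up
// above are for.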
  1025. void NewSpace::TearDown() {
  1026. if (allocated_histogram_) {
  1027. DeleteArray(allocated_histogram_);
  1028. allocated_histogram_ = NULL;
  1029. }
  1030. if (promoted_histogram_) {
  1031. DeleteArray(promoted_histogram_);
  1032. promoted_histogram_ = NULL;
  1033. }
  1034. start_ = NULL;
  1035. allocation_info_.top = NULL;
  1036. allocation_info_.limit = NULL;
  1037. to_space_.TearDown();
  1038. from_space_.TearDown();
  1039. LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
  1040. ASSERT(reservation_.IsReserved());
  1041. heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
  1042. NOT_EXECUTABLE);
  1043. chunk_base_ = NULL;
  1044. chunk_size_ = 0;
  1045. }
  1046. void NewSpace::Flip() {
  1047. SemiSpace::Swap(&from_space_, &to_space_);
  1048. }
  1049. void NewSpace::Grow() {
  1050. // Double the semispace size but only up to maximum capacity.
  1051. ASSERT(Capacity() < MaximumCapacity());
  1052. int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  1053. if (to_space_.GrowTo(new_capacity)) {
  1054. // Only grow from space if we managed to grow to-space.
  1055. if (!from_space_.GrowTo(new_capacity)) {
  1056. // If we managed to grow to-space but couldn't grow from-space,
  1057. // attempt to shrink to-space.
  1058. if (!to_space_.ShrinkTo(from_space_.Capacity())) {
  1059. // We are in an inconsistent state because we could not
  1060. // commit/uncommit memory from new space.
  1061. V8::FatalProcessOutOfMemory("Failed to grow new space.");
  1062. }
  1063. }
  1064. }
  1065. ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  1066. }
  1067. void NewSpace::Shrink() {
  1068. int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  1069. int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  1070. if (rounded_new_capacity < Capacity() &&
  1071. to_space_.ShrinkTo(rounded_new_capacity)) {
  1072. // Only shrink from-space if we managed to shrink to-space.
  1073. from_space_.Reset();
  1074. if (!from_space_.ShrinkTo(rounded_new_capacity)) {
  1075. // If we managed to shrink to-space but couldn't shrink from
  1076. // space, attempt to grow to-space again.
  1077. if (!to_space_.GrowTo(from_space_.Capacity())) {
  1078. // We are in an inconsistent state because we could not
  1079. // commit/uncommit memory from new space.
  1080. V8::FatalProcessOutOfMemory("Failed to shrink new space.");
  1081. }
  1082. }
  1083. }
  1084. allocation_info_.limit = to_space_.page_high();
  1085. ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  1086. }
  1087. void NewSpace::UpdateAllocationInfo() {
  1088. MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  1089. allocation_info_.top = to_space_.page_low();
  1090. allocation_info_.limit = to_space_.page_high();
  1091. // Lower the limit during incremental marking.
  1092. if (heap()->incremental_marking()->IsMarking() &&
  1093. inline_allocation_limit_step() != 0) {
  1094. Address new_limit =
  1095. allocation_info_.top + inline_allocation_limit_step();
  1096. allocation_info_.limit = Min(new_limit, allocation_info_.limit);
  1097. }
  1098. ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  1099. }
  1100. void NewSpace::ResetAllocationInfo() {
  1101. to_space_.Reset();
  1102. UpdateAllocationInfo();
  1103. pages_used_ = 0;
  1104. // Clear all mark-bits in the to-space.
  1105. NewSpacePageIterator it(&to_space_);
  1106. while (it.has_next()) {
  1107. Bitmap::Clear(it.next());
  1108. }
  1109. }
  1110. bool NewSpace::AddFreshPage() {
  1111. Address top = allocation_info_.top;
  1112. if (NewSpacePage::IsAtStart(top)) {
  1113. // The current page is already empty. Don't try to make another.
  1114. // We should only get here if someone asks to allocate more
  1115. // than what can be stored in a single page.
  1116. // TODO(gc): Change the limit on new-space allocation to prevent this
  1117. // from happening (all such allocations should go directly to LOSpace).
  1118. return false;
  1119. }
  1120. if (!to_space_.AdvancePage()) {
  1121. // Failed to get a new page in to-space.
  1122. return false;
  1123. }
  1124. // Clear remainder of current page.
  1125. Address limit = NewSpacePage::FromLimit(top)->area_end();
  1126. if (heap()->gc_state() == Heap::SCAVENGE) {
  1127. heap()->promotion_queue()->SetNewLimit(limit);
  1128. heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  1129. }
  1130. int remaining_in_page = static_cast<int>(limit - top);
  1131. heap()->CreateFillerObjectAt(top, remaining_in_page);
  1132. pages_used_++;
  1133. UpdateAllocationInfo();
  1134. return true;
  1135. }
  1136. MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  1137. Address old_top = allocation_info_.top;
  1138. Address new_top = old_top + size_in_bytes;
  1139. Address high = to_space_.page_high();
  1140. if (allocation_info_.limit < high) {
  1141. // Incremental marking has lowered the limit to get a
  1142. // chance to do a step.
  1143. allocation_info_.limit = Min(
  1144. allocation_info_.limit + inline_allocation_limit_step_,
  1145. high);
  1146. int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
  1147. heap()->incremental_marking()->Step(
  1148. bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
  1149. top_on_previous_step_ = new_top;
  1150. return AllocateRaw(size_in_bytes);
  1151. } else if (AddFreshPage()) {
  1152. // Switched to new page. Try allocating again.
  1153. int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
  1154. heap()->incremental_marking()->Step(
  1155. bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
  1156. top_on_previous_step_ = to_space_.page_low();
  1157. return AllocateRaw(size_in_bytes);
  1158. } else {
  1159. return Failure::RetryAfterGC();
  1160. }
  1161. }
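// Context for the slow path above: the inline AllocateRaw() fast path (defined
// inline elsewhere) simply bumps allocation_info_.top and falls back to
// SlowAllocateRaw() once top would pass allocation_info_.limit. Here the
// artificially lowered limit is either raised again after an
// incremental-marking step, a fresh page is taken, or Failure::RetryAfterGC()
// is returned so a scavenge can free space.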
  1162. #ifdef VERIFY_HEAP
  1163. // We do not use the SemiSpaceIterator because verification doesn't assume
  1164. // that it works (it depends on the invariants we are checking).
  1165. void NewSpace::Verify() {
  1166. // The allocation pointer should be in the space or at the very end.
  1167. ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  1168. // There should be objects packed in from the low address up to the
  1169. // allocation pointer.
  1170. Address current = to_space_.first_page()->area_start();
  1171. CHECK_EQ(current, to_space_.space_start());
  1172. while (current != top()) {
  1173. if (!NewSpacePage::IsAtEnd(current)) {
  1174. // The allocation pointer should not be in the middle of an object.
  1175. CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
  1176. current < top());
  1177. HeapObject* object = HeapObject::FromAddress(current);
  1178. // The first word should be a map, and we expect all map pointers to
  1179. // be in map space.
  1180. Map* map = object->map();
  1181. CHECK(map->IsMap());
  1182. CHECK(heap()->map_space()->Contains(map));
  1183. // The object should not be code or a map.
  1184. CHECK(!object->IsMap());
  1185. CHECK(!object->IsCode());
  1186. // The object itself should look OK.
  1187. object->Verify();
  1188. // All the interior pointers should be contained in the heap.
  1189. VerifyPointersVisitor visitor;
  1190. int size = object->Size();
  1191. object->IterateBody(map->instance_type(), size, &visitor);
  1192. current += size;
  1193. } else {
  1194. // At end of page, switch to next page.
  1195. NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
  1196. // Next page should be valid.
  1197. CHECK(!page->is_anchor());
  1198. current = page->area_start();
  1199. }
  1200. }
  1201. // Check semi-spaces.
  1202. CHECK_EQ(from_space_.id(), kFromSpace);
  1203. CHECK_EQ(to_space_.id(), kToSpace);
  1204. from_space_.Verify();
  1205. to_space_.Verify();
  1206. }
  1207. #endif
  1208. // -----------------------------------------------------------------------------
  1209. // SemiSpace implementation
  1210. void SemiSpace::SetUp(Address start,
  1211. int initial_capacity,
  1212. int maximum_capacity) {
  1213. // Creates a space in the young generation. The constructor does not
  1214. // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  1215. // memory of size 'capacity' when set up, and does not grow or shrink
  1216. // otherwise. In the mark-compact collector, the memory region of the from
  1217. // space is used as the marking stack. It requires contiguous memory
  1218. // addresses.
  1219. ASSERT(maximum_capacity >= Page::kPageSize);
  1220. initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  1221. capacity_ = initial_capacity;
  1222. maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  1223. committed_ = false;
  1224. start_ = start;
  1225. address_mask_ = ~(maximum_capacity - 1);
  1226. object_mask_ = address_mask_ | kHeapObjectTagMask;
  1227. object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  1228. age_mark_ = start_;
  1229. }
  1230. void SemiSpace::TearDown() {
  1231. start_ = NULL;
  1232. capacity_ = 0;
  1233. }
  1234. bool SemiSpace::Commit() {
  1235. ASSERT(!is_committed());
  1236. int pages = capacity_ / Page::kPageSize;
  1237. Address end = start_ + maximum_capacity_;
  1238. Address start = end - pages * Page::kPageSize;
  1239. if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
  1240. capacity_,
  1241. executable())) {
  1242. return false;
  1243. }
  1244. NewSpacePage* page = anchor();
  1245. for (int i = 1; i <= pages; i++) {
  1246. NewSpacePage* new_page =
  1247. NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
  1248. new_page->InsertAfter(page);
  1249. page = new_page;
  1250. }
  1251. committed_ = true;
  1252. Reset();
  1253. return true;
  1254. }
  1255. bool SemiSpace::Uncommit() {
  1256. ASSERT(is_committed());
  1257. Address start = start_ + maximum_capacity_ - capacity_;
  1258. if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
  1259. return false;
  1260. }
  1261. anchor()->set_next_page(anchor());
  1262. anchor()->set_prev_page(anchor());
  1263. committed_ = false;
  1264. return true;
  1265. }
  1266. size_t SemiSpace::CommittedPhysicalMemory() {
  1267. if (!is_committed()) return 0;
  1268. size_t size = 0;
  1269. NewSpacePageIterator it(this);
  1270. while (it.has_next()) {
  1271. size += it.next()->CommittedPhysicalMemory();
  1272. }
  1273. return size;
  1274. }
  1275. bool SemiSpace::GrowTo(int new_capacity) {
  1276. if (!is_committed()) {
  1277. if (!Commit()) return false;
  1278. }
  1279. ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  1280. ASSERT(new_capacity <= maximum_capacity_);
  1281. ASSERT(new_capacity > capacity_);
  1282. int pages_before = capacity_ / Page::kPageSize;
  1283. int pages_after = new_capacity / Page::kPageSize;
  1284. Address end = start_ + maximum_capacity_;
  1285. Address start = end - new_capacity;
  1286. size_t delta = new_capacity - capacity_;
  1287. ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  1288. if (!heap()->isolate()->memory_allocator()->CommitBlock(
  1289. start, delta, executable())) {
  1290. return false;
  1291. }
  1292. capacity_ = new_capacity;
  1293. NewSpacePage* last_page = anchor()->prev_page();
  1294. ASSERT(last_page != anchor());
  1295. for (int i = pages_before + 1; i <= pages_after; i++) {
  1296. Address page_address = end - i * Page::kPageSize;
  1297. NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
  1298. page_address,
  1299. this);
  1300. new_page->InsertAfter(last_page);
  1301. Bitmap::Clear(new_page);
  1302. // Duplicate the flags that were set on the old page.
  1303. new_page->SetFlags(last_page->GetFlags(),
  1304. NewSpacePage::kCopyOnFlipFlagsMask);
  1305. last_page = new_page;
  1306. }
  1307. return true;
  1308. }
  1309. bool SemiSpace::ShrinkTo(int new_capacity) {
  1310. ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  1311. ASSERT(new_capacity >= initial_capacity_);
  1312. ASSERT(new_capacity < capacity_);
  1313. if (is_committed()) {
  1314. // Semispaces grow backwards from the end of their allocated capacity,
  1315. // so we find the before and after start addresses relative to the
  1316. // end of the space.
  1317. Address space_end = start_ + maximum_capacity_;
  1318. Address old_start = space_end - capacity_;
  1319. size_t delta = capacity_ - new_capacity;
  1320. ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  1321. MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
  1322. if (!allocator->UncommitBlock(old_start, delta)) {
  1323. return false;
  1324. }
  1325. int pages_after = new_capacity / Page::kPageSize;
  1326. NewSpacePage* new_last_page =
  1327. NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
  1328. new_last_page->set_next_page(anchor());
  1329. anchor()->set_prev_page(new_last_page);
  1330. ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
  1331. }
  1332. capacity_ = new_capacity;
  1333. return true;
  1334. }
  1335. void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  1336. anchor_.set_owner(this);
  1337. // Fix up back-pointers to the anchor. The anchor's address changes
  1338. // when we swap.
  1339. anchor_.prev_page()->set_next_page(&anchor_);
  1340. anchor_.next_page()->set_prev_page(&anchor_);
  1341. bool becomes_to_space = (id_ == kFromSpace);
  1342. id_ = becomes_to_space ? kToSpace : kFromSpace;
  1343. NewSpacePage* page = anchor_.next_page();
  1344. while (page != &anchor_) {
  1345. page->set_owner(this);
  1346. page->SetFlags(flags, mask);
  1347. if (becomes_to_space) {
  1348. page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
  1349. page->SetFlag(MemoryChunk::IN_TO_SPACE);
  1350. page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  1351. page->ResetLiveBytes();
  1352. } else {
  1353. page->SetFlag(MemoryChunk::IN_FROM_SPACE);
  1354. page->ClearFlag(MemoryChunk::IN_TO_SPACE);
  1355. }
  1356. ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
  1357. ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
  1358. page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
  1359. page = page->next_page();
  1360. }
  1361. }
  1362. void SemiSpace::Reset() {
  1363. ASSERT(anchor_.next_page() != &anchor_);
  1364. current_page_ = anchor_.next_page();
  1365. }
  1366. void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  1367. // We won't be swapping semispaces without data in them.
  1368. ASSERT(from->anchor_.next_page() != &from->anchor_);
  1369. ASSERT(to->anchor_.next_page() != &to->anchor_);
  1370. // Swap bits.
  1371. SemiSpace tmp = *from;
  1372. *from = *to;
  1373. *to = tmp;
  1374. // Fix up back-pointers to the page list anchor now that its address
  1375. // has changed.
  1376. // Swap to/from-space bits on pages.
  1377. // Copy GC flags from old active space (from-space) to new (to-space).
  1378. intptr_t flags = from->current_page()->GetFlags();
  1379. to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
  1380. from->FlipPages(0, 0);
  1381. }
  1382. void SemiSpace::set_age_mark(Address mark) {
  1383. ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
  1384. age_mark_ = mark;
  1385. // Mark all pages up to the one containing mark.
  1386. NewSpacePageIterator it(space_start(), mark);
  1387. while (it.has_next()) {
  1388. it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  1389. }
  1390. }
  1391. #ifdef DEBUG
  1392. void SemiSpace::Print() { }
  1393. #endif
  1394. #ifdef VERIFY_HEAP
  1395. void SemiSpace::Verify() {
  1396. bool is_from_space = (id_ == kFromSpace);
  1397. NewSpacePage* page = anchor_.next_page();
  1398. CHECK(anchor_.semi_space() == this);
  1399. while (page != &anchor_) {
  1400. CHECK(page->semi_space() == this);
  1401. CHECK(page->InNewSpace());
  1402. CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
  1403. : MemoryChunk::IN_TO_SPACE));
  1404. CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
  1405. : MemoryChunk::IN_FROM_SPACE));
  1406. CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
  1407. if (!is_from_space) {
  1408. // The pointers-from-here-are-interesting flag isn't updated dynamically
  1409. // on from-space pages, so it might be out of sync with the marking state.
  1410. if (page->heap()->incremental_marking()->IsMarking()) {
  1411. CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
  1412. } else {
  1413. CHECK(!page->IsFlagSet(
  1414. MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
  1415. }
  1416. // TODO(gc): Check that the live_bytes_count_ field matches the
  1417. // black marking on the page (if we make it match in new-space).
  1418. }
  1419. CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
  1420. CHECK(page->prev_page()->next_page() == page);
  1421. page = page->next_page();
  1422. }
  1423. }
  1424. #endif
  1425. #ifdef DEBUG
  1426. void SemiSpace::AssertValidRange(Address start, Address end) {
  1427. // Addresses must belong to the same semi-space.
  1428. NewSpacePage* page = NewSpacePage::FromLimit(start);
  1429. NewSpacePage* end_page = NewSpacePage::FromLimit(end);
  1430. SemiSpace* space = page->semi_space();
  1431. CHECK_EQ(space, end_page->semi_space());
  1432. // Start address is before end address, either on same page,
  1433. // or end address is on a later page in the linked list of
  1434. // semi-space pages.
  1435. if (page == end_page) {
  1436. CHECK(start <= end);
  1437. } else {
  1438. while (page != end_page) {
  1439. page = page->next_page();
  1440. CHECK_NE(page, space->anchor());
  1441. }
  1442. }
  1443. }
  1444. #endif
  1445. // -----------------------------------------------------------------------------
  1446. // SemiSpaceIterator implementation.
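        // Iterates the objects currently allocated in new space (from bottom() to
        // top()), e.g. the pattern used by NewSpace::CollectStatistics below:
        //   SemiSpaceIterator it(new_space);
        //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { ... }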
  1447. SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  1448. Initialize(space->bottom(), space->top(), NULL);
  1449. }
  1450. SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
  1451. HeapObjectCallback size_func) {
  1452. Initialize(space->bottom(), space->top(), size_func);
  1453. }
  1454. SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  1455. Initialize(start, space->top(), NULL);
  1456. }
  1457. SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
  1458. Initialize(from, to, NULL);
  1459. }
  1460. void SemiSpaceIterator::Initialize(Address start,
  1461. Address end,
  1462. HeapObjectCallback size_func) {
  1463. SemiSpace::AssertValidRange(start, end);
  1464. current_ = start;
  1465. limit_ = end;
  1466. size_func_ = size_func;
  1467. }
  1468. #ifdef DEBUG
  1469. // heap_histograms is shared; always clear it before using it.
  1470. static void ClearHistograms() {
  1471. Isolate* isolate = Isolate::Current();
  1472. // We reset the name each time, though it hasn't changed.
  1473. #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  1474. INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
  1475. #undef DEF_TYPE_NAME
  1476. #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  1477. INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
  1478. #undef CLEAR_HISTOGRAM
  1479. isolate->js_spill_information()->Clear();
  1480. }
  1481. static void ClearCodeKindStatistics(int* code_kind_statistics) {
  1482. for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
  1483. code_kind_statistics[i] = 0;
  1484. }
  1485. }
  1486. static void ReportCodeKindStatistics(int* code_kind_statistics) {
  1487. PrintF("\n Code kind histograms: \n");
  1488. for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
  1489. if (code_kind_statistics[i] > 0) {
  1490. PrintF(" %-20s: %10d bytes\n",
  1491. Code::Kind2String(static_cast<Code::Kind>(i)),
  1492. code_kind_statistics[i]);
  1493. }
  1494. }
  1495. PrintF("\n");
  1496. }
  1497. static int CollectHistogramInfo(HeapObject* obj) {
  1498. Isolate* isolate = obj->GetIsolate();
  1499. InstanceType type = obj->map()->instance_type();
  1500. ASSERT(0 <= type && type <= LAST_TYPE);
  1501. ASSERT(isolate->heap_histograms()[type].name() != NULL);
  1502. isolate->heap_histograms()[type].increment_number(1);
  1503. isolate->heap_histograms()[type].increment_bytes(obj->Size());
  1504. if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
  1505. JSObject::cast(obj)->IncrementSpillStatistics(
  1506. isolate->js_spill_information());
  1507. }
  1508. return obj->Size();
  1509. }
  1510. static void ReportHistogram(bool print_spill) {
  1511. Isolate* isolate = Isolate::Current();
  1512. PrintF("\n Object Histogram:\n");
  1513. for (int i = 0; i <= LAST_TYPE; i++) {
  1514. if (isolate->heap_histograms()[i].number() > 0) {
  1515. PrintF(" %-34s%10d (%10d bytes)\n",
  1516. isolate->heap_histograms()[i].name(),
  1517. isolate->heap_histograms()[i].number(),
  1518. isolate->heap_histograms()[i].bytes());
  1519. }
  1520. }
  1521. PrintF("\n");
  1522. // Summarize string types.
  1523. int string_number = 0;
  1524. int string_bytes = 0;
  1525. #define INCREMENT(type, size, name, camel_name) \
  1526. string_number += isolate->heap_histograms()[type].number(); \
  1527. string_bytes += isolate->heap_histograms()[type].bytes();
  1528. STRING_TYPE_LIST(INCREMENT)
  1529. #undef INCREMENT
  1530. if (string_number > 0) {
  1531. PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
  1532. string_bytes);
  1533. }
  1534. if (FLAG_collect_heap_spill_statistics && print_spill) {
  1535. isolate->js_spill_information()->Print();
  1536. }
  1537. }
  1538. #endif // DEBUG
  1539. // Support for statistics gathering for --heap-stats and --log-gc.
  1540. void NewSpace::ClearHistograms() {
  1541. for (int i = 0; i <= LAST_TYPE; i++) {
  1542. allocated_histogram_[i].clear();
  1543. promoted_histogram_[i].clear();
  1544. }
  1545. }
  1546. // Because the copying collector does not touch garbage objects, we iterate
  1547. // the new space before a collection to get a histogram of allocated objects.
  1548. // This only happens when the --log-gc flag is set.
  1549. void NewSpace::CollectStatistics() {
  1550. ClearHistograms();
  1551. SemiSpaceIterator it(this);
  1552. for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
  1553. RecordAllocation(obj);
  1554. }
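        // Emit one HeapSample log section for the given histogram ("allocated" or
        // "promoted"), lumping all string types into a single STRING_TYPE entry.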
  1555. static void DoReportStatistics(Isolate* isolate,
  1556. HistogramInfo* info, const char* description) {
  1557. LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  1558. // Lump all the string types together.
  1559. int string_number = 0;
  1560. int string_bytes = 0;
  1561. #define INCREMENT(type, size, name, camel_name) \
  1562. string_number += info[type].number(); \
  1563. string_bytes += info[type].bytes();
  1564. STRING_TYPE_LIST(INCREMENT)
  1565. #undef INCREMENT
  1566. if (string_number > 0) {
  1567. LOG(isolate,
  1568. HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  1569. }
  1570. // Then do the other types.
  1571. for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
  1572. if (info[i].number() > 0) {
  1573. LOG(isolate,
  1574. HeapSampleItemEvent(info[i].name(), info[i].number(),
  1575. info[i].bytes()));
  1576. }
  1577. }
  1578. LOG(isolate, HeapSampleEndEvent("NewSpace", description));
  1579. }
  1580. void NewSpace::ReportStatistics() {
  1581. #ifdef DEBUG
  1582. if (FLAG_heap_stats) {
  1583. float pct = static_cast<float>(Available()) / Capacity();
  1584. PrintF(" capacity: %" V8_PTR_PREFIX "d"
  1585. ", available: %" V8_PTR_PREFIX "d, %%%d\n",
  1586. Capacity(), Available(), static_cast<int>(pct*100));
  1587. PrintF("\n Object Histogram:\n");
  1588. for (int i = 0; i <= LAST_TYPE; i++) {
  1589. if (allocated_histogram_[i].number() > 0) {
  1590. PrintF(" %-34s%10d (%10d bytes)\n",
  1591. allocated_histogram_[i].name(),
  1592. allocated_histogram_[i].number(),
  1593. allocated_histogram_[i].bytes());
  1594. }
  1595. }
  1596. PrintF("\n");
  1597. }
  1598. #endif // DEBUG
  1599. if (FLAG_log_gc) {
  1600. Isolate* isolate = ISOLATE;
  1601. DoReportStatistics(isolate, allocated_histogram_, "allocated");
  1602. DoReportStatistics(isolate, promoted_histogram_, "promoted");
  1603. }
  1604. }
  1605. void NewSpace::RecordAllocation(HeapObject* obj) {
  1606. InstanceType type = obj->map()->instance_type();
  1607. ASSERT(0 <= type && type <= LAST_TYPE);
  1608. allocated_histogram_[type].increment_number(1);
  1609. allocated_histogram_[type].increment_bytes(obj->Size());
  1610. }
  1611. void NewSpace::RecordPromotion(HeapObject* obj) {
  1612. InstanceType type = obj->map()->instance_type();
  1613. ASSERT(0 <= type && type <= LAST_TYPE);
  1614. promoted_histogram_[type].increment_number(1);
  1615. promoted_histogram_[type].increment_bytes(obj->Size());
  1616. }
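        // Physical memory actually committed for the new space: to-space plus,
        // if it is committed, from-space. Only differs from CommittedMemory() on
        // platforms with lazy commits.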
  1617. size_t NewSpace::CommittedPhysicalMemory() {
  1618. if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  1619. MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  1620. size_t size = to_space_.CommittedPhysicalMemory();
  1621. if (from_space_.is_committed()) {
  1622. size += from_space_.CommittedPhysicalMemory();
  1623. }
  1624. return size;
  1625. }
  1626. // -----------------------------------------------------------------------------
  1627. // Free lists for old object spaces implementation
  1628. void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  1629. ASSERT(size_in_bytes > 0);
  1630. ASSERT(IsAligned(size_in_bytes, kPointerSize));
  1631. // We write a map and possibly size information to the block. If the block
  1632. // is big enough to be a FreeSpace with at least one extra word (the next
  1633. // pointer), we set its map to be the free space map and its size to an
  1634. // appropriate array length for the desired size from HeapObject::Size().
  1635. // If the block is too small (e.g., one or two words) to hold both a size
  1636. // field and a next pointer, we give it a filler map that gives it the
  1637. // correct size.
  1638. if (size_in_bytes > FreeSpace::kHeaderSize) {
  1639. set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
  1640. // Can't use FreeSpace::cast because it fails during deserialization.
  1641. FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
  1642. this_as_free_space->set_size(size_in_bytes);
  1643. } else if (size_in_bytes == kPointerSize) {
  1644. set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
  1645. } else if (size_in_bytes == 2 * kPointerSize) {
  1646. set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
  1647. } else {
  1648. UNREACHABLE();
  1649. }
  1650. // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
  1651. // deserialization because the free space map is not done yet.
  1652. }
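        // The next pointer of a free-list node is stored at kNextOffset for real
        // FreeSpace nodes and immediately after the map word for the one- and
        // two-pointer filler nodes.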
  1653. FreeListNode* FreeListNode::next() {
  1654. ASSERT(IsFreeListNode(this));
  1655. if (map() == GetHeap()->raw_unchecked_free_space_map()) {
  1656. ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
  1657. return reinterpret_cast<FreeListNode*>(
  1658. Memory::Address_at(address() + kNextOffset));
  1659. } else {
  1660. return reinterpret_cast<FreeListNode*>(
  1661. Memory::Address_at(address() + kPointerSize));
  1662. }
  1663. }
  1664. FreeListNode** FreeListNode::next_address() {
  1665. ASSERT(IsFreeListNode(this));
  1666. if (map() == GetHeap()->raw_unchecked_free_space_map()) {
  1667. ASSERT(Size() >= kNextOffset + kPointerSize);
  1668. return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
  1669. } else {
  1670. return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
  1671. }
  1672. }
  1673. void FreeListNode::set_next(FreeListNode* next) {
  1674. ASSERT(IsFreeListNode(this));
  1675. // While we are booting the VM the free space map will actually be null. So
  1676. // we have to make sure that we don't try to use it for anything at that
  1677. // stage.
  1678. if (map() == GetHeap()->raw_unchecked_free_space_map()) {
  1679. ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
  1680. Memory::Address_at(address() + kNextOffset) =
  1681. reinterpret_cast<Address>(next);
  1682. } else {
  1683. Memory::Address_at(address() + kPointerSize) =
  1684. reinterpret_cast<Address>(next);
  1685. }
  1686. }
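        // Splice every node of the given category onto the front of this one and
        // return the number of bytes moved; both categories are locked while the
        // lists are being spliced.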
  1687. intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  1688. intptr_t free_bytes = 0;
  1689. if (category->top_ != NULL) {
  1690. ASSERT(category->end_ != NULL);
  1691. // This is safe (not going to deadlock) since Concatenate operations
  1692. // are never performed on the same free lists at the same time in
  1693. // reverse order.
  1694. ScopedLock lock_target(mutex_);
  1695. ScopedLock lock_source(category->mutex());
  1696. free_bytes = category->available();
  1697. if (end_ == NULL) {
  1698. end_ = category->end();
  1699. } else {
  1700. category->end()->set_next(top_);
  1701. }
  1702. top_ = category->top();
  1703. available_ += category->available();
  1704. category->Reset();
  1705. }
  1706. return free_bytes;
  1707. }
  1708. void FreeListCategory::Reset() {
  1709. top_ = NULL;
  1710. end_ = NULL;
  1711. available_ = 0;
  1712. }
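        // Unlink every node that lives on page p from this category and return
        // the total size of the removed nodes.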
  1713. intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
  1714. int sum = 0;
  1715. FreeListNode** n = &top_;
  1716. while (*n != NULL) {
  1717. if (Page::FromAddress((*n)->address()) == p) {
  1718. FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
  1719. sum += free_space->Size();
  1720. *n = (*n)->next();
  1721. } else {
  1722. n = (*n)->next_address();
  1723. }
  1724. }
  1725. if (top_ == NULL) {
  1726. end_ = NULL;
  1727. }
  1728. available_ -= sum;
  1729. return sum;
  1730. }
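        // Pop the first node that does not lie on an evacuation candidate page;
        // nodes on evacuation candidates are skipped and dropped from the list.
        // Returns NULL (and empties the list) if no usable node remains.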
  1731. FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
  1732. FreeListNode* node = top_;
  1733. if (node == NULL) return NULL;
  1734. while (node != NULL &&
  1735. Page::FromAddress(node->address())->IsEvacuationCandidate()) {
  1736. available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
  1737. node = node->next();
  1738. }
  1739. if (node != NULL) {
  1740. set_top(node->next());
  1741. *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
  1742. available_ -= *node_size;
  1743. } else {
  1744. set_top(NULL);
  1745. }
  1746. if (top() == NULL) {
  1747. set_end(NULL);
  1748. }
  1749. return node;
  1750. }
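        // Like PickNodeFromList(int*) above, but additionally requires the node
        // to hold at least size_in_bytes; a smaller node is put back on the list
        // and NULL is returned instead.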
  1751. FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
  1752. int *node_size) {
  1753. FreeListNode* node = PickNodeFromList(node_size);
  1754. if (node != NULL && *node_size < size_in_bytes) {
  1755. Free(node, *node_size);
  1756. *node_size = 0;
  1757. return NULL;
  1758. }
  1759. return node;
  1760. }
  1761. void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
  1762. node->set_next(top_);
  1763. top_ = node;
  1764. if (end_ == NULL) {
  1765. end_ = node;
  1766. }
  1767. available_ += size_in_bytes;
  1768. }
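        // After booting from a snapshot, nodes on the boot-time free list may
        // still have a NULL map word; patch them to point at the now-created free
        // space map (see PagedSpace::RepairFreeListsAfterBoot below).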
  1769. void FreeListCategory::RepairFreeList(Heap* heap) {
  1770. FreeListNode* n = top_;
  1771. while (n != NULL) {
  1772. Map** map_location = reinterpret_cast<Map**>(n->address());
  1773. if (*map_location == NULL) {
  1774. *map_location = heap->free_space_map();
  1775. } else {
  1776. ASSERT(*map_location == heap->free_space_map());
  1777. }
  1778. n = n->next();
  1779. }
  1780. }
  1781. FreeList::FreeList(PagedSpace* owner)
  1782. : owner_(owner), heap_(owner->heap()) {
  1783. Reset();
  1784. }
  1785. intptr_t FreeList::Concatenate(FreeList* free_list) {
  1786. intptr_t free_bytes = 0;
  1787. free_bytes += small_list_.Concatenate(free_list->small_list());
  1788. free_bytes += medium_list_.Concatenate(free_list->medium_list());
  1789. free_bytes += large_list_.Concatenate(free_list->large_list());
  1790. free_bytes += huge_list_.Concatenate(free_list->huge_list());
  1791. return free_bytes;
  1792. }
  1793. void FreeList::Reset() {
  1794. small_list_.Reset();
  1795. medium_list_.Reset();
  1796. large_list_.Reset();
  1797. huge_list_.Reset();
  1798. }
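        // Return a block of memory to the free list. Blocks smaller than
        // kSmallListMin are dropped on the floor and counted against the page as
        // unusable; anything larger is prepended to the small/medium/large/huge
        // list matching its size. Returns the number of bytes wasted this way
        // (0 if the block was added to a list).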
  1799. int FreeList::Free(Address start, int size_in_bytes) {
  1800. if (size_in_bytes == 0) return 0;
  1801. FreeListNode* node = FreeListNode::FromAddress(start);
  1802. node->set_size(heap_, size_in_bytes);
  1803. Page* page = Page::FromAddress(start);
  1804. // Early return to drop too-small blocks on the floor.
  1805. if (size_in_bytes < kSmallListMin) {
  1806. page->add_non_available_small_blocks(size_in_bytes);
  1807. return size_in_bytes;
  1808. }
  1809. // Insert other blocks at the head of a free list of the appropriate
  1810. // magnitude.
  1811. if (size_in_bytes <= kSmallListMax) {
  1812. small_list_.Free(node, size_in_bytes);
  1813. page->add_available_in_small_free_list(size_in_bytes);
  1814. } else if (size_in_bytes <= kMediumListMax) {
  1815. medium_list_.Free(node, size_in_bytes);
  1816. page->add_available_in_medium_free_list(size_in_bytes);
  1817. } else if (size_in_bytes <= kLargeListMax) {
  1818. large_list_.Free(node, size_in_bytes);
  1819. page->add_available_in_large_free_list(size_in_bytes);
  1820. } else {
  1821. huge_list_.Free(node, size_in_bytes);
  1822. page->add_available_in_huge_free_list(size_in_bytes);
  1823. }
  1824. ASSERT(IsVeryLong() || available() == SumFreeLists());
  1825. return 0;
  1826. }
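        // Find a free-list node of at least size_in_bytes. First try a cheap pop
        // from the size classes small enough that any of their nodes satisfies
        // the request, then search the huge list, and finally re-check the
        // request's own size class. Per-page free-list accounting is updated for
        // whichever node is taken.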
  1827. FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  1828. FreeListNode* node = NULL;
  1829. Page* page = NULL;
  1830. if (size_in_bytes <= kSmallAllocationMax) {
  1831. node = small_list_.PickNodeFromList(node_size);
  1832. if (node != NULL) {
  1833. ASSERT(size_in_bytes <= *node_size);
  1834. page = Page::FromAddress(node->address());
  1835. page->add_available_in_small_free_list(-(*node_size));
  1836. ASSERT(IsVeryLong() || available() == SumFreeLists());
  1837. return node;
  1838. }
  1839. }
  1840. if (size_in_bytes <= kMediumAllocationMax) {
  1841. node = medium_list_.PickNodeFromList(node_size);
  1842. if (node != NULL) {
  1843. ASSERT(size_in_bytes <= *node_size);
  1844. page = Page::FromAddress(node->address());
  1845. page->add_available_in_medium_free_list(-(*node_size));
  1846. ASSERT(IsVeryLong() || available() == SumFreeLists());
  1847. return node;
  1848. }
  1849. }
  1850. if (size_in_bytes <= kLargeAllocationMax) {
  1851. node = large_list_.PickNodeFromList(node_size);
  1852. if (node != NULL) {
  1853. ASSERT(size_in_bytes <= *node_size);
  1854. page = Page::FromAddress(node->address());
  1855. page->add_available_in_large_free_list(-(*node_size));
  1856. ASSERT(IsVeryLong() || available() == SumFreeLists());
  1857. return node;
  1858. }
  1859. }
  1860. int huge_list_available = huge_list_.available();
  1861. for (FreeListNode** cur = huge_list_.GetTopAddress();
  1862. *cur != NULL;
  1863. cur = (*cur)->next_address()) {
  1864. FreeListNode* cur_node = *cur;
  1865. while (cur_node != NULL &&
  1866. Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
  1867. int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
  1868. huge_list_available -= size;
  1869. page = Page::FromAddress(cur_node->address());
  1870. page->add_available_in_huge_free_list(-size);
  1871. cur_node = cur_node->next();
  1872. }
  1873. *cur = cur_node;
  1874. if (cur_node == NULL) {
  1875. huge_list_.set_end(NULL);
  1876. break;
  1877. }
  1878. ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
  1879. FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
  1880. int size = cur_as_free_space->Size();
  1881. if (size >= size_in_bytes) {
  1882. // Large enough node found. Unlink it from the list.
  1883. node = *cur;
  1884. *cur = node->next();
  1885. *node_size = size;
  1886. huge_list_available -= size;
  1887. page = Page::FromAddress(node->address());
  1888. page->add_available_in_huge_free_list(-size);
  1889. break;
  1890. }
  1891. }
  1892. if (huge_list_.top() == NULL) {
  1893. huge_list_.set_end(NULL);
  1894. }
  1895. huge_list_.set_available(huge_list_available);
  1896. if (node != NULL) {
  1897. ASSERT(IsVeryLong() || available() == SumFreeLists());
  1898. return node;
  1899. }
  1900. if (size_in_bytes <= kSmallListMax) {
  1901. node = small_list_.PickNodeFromList(size_in_bytes, node_size);
  1902. if (node != NULL) {
  1903. ASSERT(size_in_bytes <= *node_size);
  1904. page = Page::FromAddress(node->address());
  1905. page->add_available_in_small_free_list(-(*node_size));
  1906. }
  1907. } else if (size_in_bytes <= kMediumListMax) {
  1908. node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
  1909. if (node != NULL) {
  1910. ASSERT(size_in_bytes <= *node_size);
  1911. page = Page::FromAddress(node->address());
  1912. page->add_available_in_medium_free_list(-(*node_size));
  1913. }
  1914. } else if (size_in_bytes <= kLargeListMax) {
  1915. node = large_list_.PickNodeFromList(size_in_bytes, node_size);
  1916. if (node != NULL) {
  1917. ASSERT(size_in_bytes <= *node_size);
  1918. page = Page::FromAddress(node->address());
  1919. page->add_available_in_large_free_list(-(*node_size));
  1920. }
  1921. }
  1922. ASSERT(IsVeryLong() || available() == SumFreeLists());
  1923. return node;
  1924. }
  1925. // Allocation on the old space free list. If it succeeds then a new linear
  1926. // allocation space has been set up with the top and limit of the space. If
  1927. // the allocation fails then NULL is returned, and the caller can perform a GC
  1928. // or allocate a new page before retrying.
  1929. HeapObject* FreeList::Allocate(int size_in_bytes) {
  1930. ASSERT(0 < size_in_bytes);
  1931. ASSERT(size_in_bytes <= kMaxBlockSize);
  1932. ASSERT(IsAligned(size_in_bytes, kPointerSize));
  1933. // Don't free list allocate if there is linear space available.
  1934. ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
  1935. int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  1936. // Mark the old linear allocation area with a free space map so it can be
  1937. // skipped when scanning the heap. This also puts it back in the free list
  1938. // if it is big enough.
  1939. owner_->Free(owner_->top(), old_linear_size);
  1940. owner_->heap()->incremental_marking()->OldSpaceStep(
  1941. size_in_bytes - old_linear_size);
  1942. int new_node_size = 0;
  1943. FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  1944. if (new_node == NULL) {
  1945. owner_->SetTop(NULL, NULL);
  1946. return NULL;
  1947. }
  1948. int bytes_left = new_node_size - size_in_bytes;
  1949. ASSERT(bytes_left >= 0);
  1950. #ifdef DEBUG
  1951. for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
  1952. reinterpret_cast<Object**>(new_node->address())[i] =
  1953. Smi::FromInt(kCodeZapValue);
  1954. }
  1955. #endif
  1956. // The old-space-step might have finished sweeping and restarted marking.
  1957. // Verify that it did not turn the page of the new node into an evacuation
  1958. // candidate.
  1959. ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
  1960. const int kThreshold = IncrementalMarking::kAllocatedThreshold;
  1961. // Memory in the linear allocation area is counted as allocated. We may free
  1962. // a little of this again immediately - see below.
  1963. owner_->Allocate(new_node_size);
  1964. if (bytes_left > kThreshold &&
  1965. owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
  1966. FLAG_incremental_marking_steps) {
  1967. int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
  1968. // We don't want to give too large linear areas to the allocator while
  1969. // incremental marking is going on, because we won't check again whether
  1970. // we want to do another increment until the linear area is used up.
  1971. owner_->Free(new_node->address() + size_in_bytes + linear_size,
  1972. new_node_size - size_in_bytes - linear_size);
  1973. owner_->SetTop(new_node->address() + size_in_bytes,
  1974. new_node->address() + size_in_bytes + linear_size);
  1975. } else if (bytes_left > 0) {
  1976. // Normally we give the rest of the node to the allocator as its new
  1977. // linear allocation area.
  1978. owner_->SetTop(new_node->address() + size_in_bytes,
  1979. new_node->address() + new_node_size);
  1980. } else {
  1981. // TODO(gc): Try not freeing the linear allocation region when bytes_left
  1982. // is zero.
  1983. owner_->SetTop(NULL, NULL);
  1984. }
  1985. return new_node;
  1986. }
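        // Remove every free-list entry that lives on page p. The huge list is
        // always scanned; the other lists are only scanned if the huge entries do
        // not already cover the whole page area. Returns the bytes evicted.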
  1987. intptr_t FreeList::EvictFreeListItems(Page* p) {
  1988. intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
  1989. p->set_available_in_huge_free_list(0);
  1990. if (sum < p->area_size()) {
  1991. sum += small_list_.EvictFreeListItemsInList(p) +
  1992. medium_list_.EvictFreeListItemsInList(p) +
  1993. large_list_.EvictFreeListItemsInList(p);
  1994. p->set_available_in_small_free_list(0);
  1995. p->set_available_in_medium_free_list(0);
  1996. p->set_available_in_large_free_list(0);
  1997. }
  1998. return sum;
  1999. }
  2000. void FreeList::RepairLists(Heap* heap) {
  2001. small_list_.RepairFreeList(heap);
  2002. medium_list_.RepairFreeList(heap);
  2003. large_list_.RepairFreeList(heap);
  2004. huge_list_.RepairFreeList(heap);
  2005. }
  2006. #ifdef DEBUG
  2007. intptr_t FreeListCategory::SumFreeList() {
  2008. intptr_t sum = 0;
  2009. FreeListNode* cur = top_;
  2010. while (cur != NULL) {
  2011. ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
  2012. FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
  2013. sum += cur_as_free_space->Size();
  2014. cur = cur->next();
  2015. }
  2016. return sum;
  2017. }
  2018. static const int kVeryLongFreeList = 500;
  2019. int FreeListCategory::FreeListLength() {
  2020. int length = 0;
  2021. FreeListNode* cur = top_;
  2022. while (cur != NULL) {
  2023. length++;
  2024. cur = cur->next();
  2025. if (length == kVeryLongFreeList) return length;
  2026. }
  2027. return length;
  2028. }
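        // A free list counts as "very long" once any category reaches
        // kVeryLongFreeList entries; the ASSERTs in Free() and FindNodeFor() use
        // this to skip the O(n) available() == SumFreeLists() consistency check.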
  2029. bool FreeList::IsVeryLong() {
  2030. if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
  2031. if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
  2032. if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
  2033. if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
  2034. return false;
  2035. }
  2036. // This can take a very long time because it is linear in the number of entries
  2037. // on the free list, so it should not be called if FreeListLength returns
  2038. // kVeryLongFreeList.
  2039. intptr_t FreeList::SumFreeLists() {
  2040. intptr_t sum = small_list_.SumFreeList();
  2041. sum += medium_list_.SumFreeList();
  2042. sum += large_list_.SumFreeList();
  2043. sum += huge_list_.SumFreeList();
  2044. return sum;
  2045. }
  2046. #endif
  2047. // -----------------------------------------------------------------------------
  2048. // OldSpace implementation
  2049. bool NewSpace::ReserveSpace(int bytes) {
  2050. // We can't reliably unpack a partial snapshot that needs more space in new
  2051. // space than the minimum NewSpace size. The limit can be set lower than
  2052. // the end of new space either because there is more space on the next page
  2053. // or because we have lowered the limit in order to get periodic incremental
  2054. // marking. The most reliable way to ensure that there is linear space is
  2055. // to do the allocation, then rewind the limit.
  2056. ASSERT(bytes <= InitialCapacity());
  2057. MaybeObject* maybe = AllocateRaw(bytes);
  2058. Object* object = NULL;
  2059. if (!maybe->ToObject(&object)) return false;
  2060. HeapObject* allocation = HeapObject::cast(object);
  2061. Address top = allocation_info_.top;
  2062. if ((top - bytes) == allocation->address()) {
  2063. allocation_info_.top = allocation->address();
  2064. return true;
  2065. }
  2066. // There may be a borderline case here where the allocation succeeded, but
  2067. // the limit and top have moved on to a new page. In that case we try again.
  2068. return ReserveSpace(bytes);
  2069. }
  2070. void PagedSpace::PrepareForMarkCompact() {
  2071. // We don't have a linear allocation area while sweeping. It will be restored
  2072. // on the first allocation after the sweep.
  2073. // Mark the old linear allocation area with a free space map so it can be
  2074. // skipped when scanning the heap.
  2075. int old_linear_size = static_cast<int>(limit() - top());
  2076. Free(top(), old_linear_size);
  2077. SetTop(NULL, NULL);
  2078. // Stop lazy sweeping and clear marking bits for unswept pages.
  2079. if (first_unswept_page_ != NULL) {
  2080. Page* p = first_unswept_page_;
  2081. do {
  2082. // Do not use ShouldBeSweptLazily predicate here.
  2083. // New evacuation candidates were selected but they still have
  2084. // to be swept before collection starts.
  2085. if (!p->WasSwept()) {
  2086. Bitmap::Clear(p);
  2087. if (FLAG_gc_verbose) {
  2088. PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
  2089. reinterpret_cast<intptr_t>(p));
  2090. }
  2091. }
  2092. p = p->next_page();
  2093. } while (p != anchor());
  2094. }
  2095. first_unswept_page_ = Page::FromAddress(NULL);
  2096. unswept_free_bytes_ = 0;
  2097. // Clear the free list before a full GC---it will be rebuilt afterward.
  2098. free_list_.Reset();
  2099. }
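        // Make sure the next size_in_bytes bytes can be allocated linearly:
        // either the current linear area already has room, or a new area is
        // carved out of the free list (or a freshly allocated page) and installed
        // as the linear allocation area.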
  2100. bool PagedSpace::ReserveSpace(int size_in_bytes) {
  2101. ASSERT(size_in_bytes <= AreaSize());
  2102. ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
  2103. Address current_top = allocation_info_.top;
  2104. Address new_top = current_top + size_in_bytes;
  2105. if (new_top <= allocation_info_.limit) return true;
  2106. HeapObject* new_area = free_list_.Allocate(size_in_bytes);
  2107. if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
  2108. if (new_area == NULL) return false;
  2109. int old_linear_size = static_cast<int>(limit() - top());
  2110. // Mark the old linear allocation area with a free space map so it can be
  2111. // skipped when scanning the heap. This also puts it back in the free list
  2112. // if it is big enough.
  2113. Free(top(), old_linear_size);
  2114. SetTop(new_area->address(), new_area->address() + size_in_bytes);
  2115. return true;
  2116. }
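        // Object size is Size() minus the free bytes that lazy sweeping will
        // still recover on unswept pages and minus the unused tail of the current
        // linear allocation area.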
  2117. intptr_t PagedSpace::SizeOfObjects() {
  2118. ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
  2119. return Size() - unswept_free_bytes_ - (limit() - top());
  2120. }
  2121. // After we have booted, we have created a map which represents free space
  2122. // on the heap. If there was already a free list then the elements on it
  2123. // were created with the wrong FreeSpaceMap (normally NULL), so we need to
  2124. // fix them.
  2125. void PagedSpace::RepairFreeListsAfterBoot() {
  2126. free_list_.RepairLists(heap());
  2127. }
  2128. // You have to call this last, since the implementation from PagedSpace
  2129. // doesn't know that memory was 'promised' to large object space.
  2130. bool LargeObjectSpace::ReserveSpace(int bytes) {
  2131. return heap()->OldGenerationCapacityAvailable() >= bytes &&
  2132. (!heap()->incremental_marking()->IsStopped() ||
  2133. heap()->OldGenerationSpaceAvailable() >= bytes);
  2134. }
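        // Advance lazy sweeping: conservatively sweep unswept pages until at
        // least bytes_to_sweep bytes have been freed or every page has been
        // visited. Returns true once lazy sweeping is complete.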
  2135. bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
  2136. if (IsLazySweepingComplete()) return true;
  2137. intptr_t freed_bytes = 0;
  2138. Page* p = first_unswept_page_;
  2139. do {
  2140. Page* next_page = p->next_page();
  2141. if (ShouldBeSweptLazily(p)) {
  2142. if (FLAG_gc_verbose) {
  2143. PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
  2144. reinterpret_cast<intptr_t>(p));
  2145. }
  2146. DecreaseUnsweptFreeBytes(p);
  2147. freed_bytes +=
  2148. MarkCompactCollector::
  2149. SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
  2150. this, NULL, p);
  2151. }
  2152. p = next_page;
  2153. } while (p != anchor() && freed_bytes < bytes_to_sweep);
  2154. if (p == anchor()) {
  2155. first_unswept_page_ = Page::FromAddress(NULL);
  2156. } else {
  2157. first_unswept_page_ = p;
  2158. }
  2159. heap()->FreeQueuedChunks();
  2160. return IsLazySweepingComplete();
  2161. }
  2162. void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
  2163. if (allocation_info_.top >= allocation_info_.limit) return;
  2164. if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
  2165. // Create filler object to keep page iterable if it was iterable.
  2166. int remaining =
  2167. static_cast<int>(allocation_info_.limit - allocation_info_.top);
  2168. heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
  2169. allocation_info_.top = NULL;
  2170. allocation_info_.limit = NULL;
  2171. }
  2172. }
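        // Try to make enough sweeping progress for an allocation of
        // size_in_bytes, either by taking memory already swept by the concurrent
        // sweeper threads (waiting for them to finish if that is not enough) or,
        // without sweeper threads, by advancing the lazy sweeper on this thread.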
  2173. bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
  2174. MarkCompactCollector* collector = heap()->mark_compact_collector();
  2175. if (collector->AreSweeperThreadsActivated()) {
  2176. if (collector->IsConcurrentSweepingInProgress()) {
  2177. if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
  2178. if (!collector->sequential_sweeping()) {
  2179. collector->WaitUntilSweepingCompleted();
  2180. return true;
  2181. }
  2182. }
  2183. return false;
  2184. }
  2185. return true;
  2186. } else {
  2187. return AdvanceSweeper(size_in_bytes);
  2188. }
  2189. }
  2190. HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  2191. // Allocation in this space has failed.
  2192. // If there are unswept pages, advance the lazy sweeper a bounded number of
  2193. // times until we find a contiguous piece of memory of size_in_bytes.
  2194. const int kMaxSweepingTries = 5;
  2195. bool sweeping_complete = false;
  2196. for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
  2197. sweeping_complete = EnsureSweeperProgress(size_in_bytes);
  2198. // Retry the free list allocation.
  2199. HeapObject* object = free_list_.Allocate(size_in_bytes);
  2200. if (object != NULL) return object;
  2201. }
  2202. // Free list allocation failed and there is no next page. Fail if we have
  2203. // hit the old generation size limit that should cause a garbage
  2204. // collection.
  2205. if (!heap()->always_allocate() &&
  2206. heap()->OldGenerationAllocationLimitReached()) {
  2207. return NULL;
  2208. }
  2209. // Try to expand the space and allocate in the new next page.
  2210. if (Expand()) {
  2211. return free_list_.Allocate(size_in_bytes);
  2212. }
  2213. // Last ditch, sweep all the remaining pages to try to find space. This may
  2214. // cause a pause.
  2215. if (!IsLazySweepingComplete()) {
  2216. EnsureSweeperProgress(kMaxInt);
  2217. // Retry the free list allocation.
  2218. HeapObject* object = free_list_.Allocate(size_in_bytes);
  2219. if (object != NULL) return object;
  2220. }
  2221. // Finally, fail.
  2222. return NULL;
  2223. }
  2224. #ifdef DEBUG
  2225. void PagedSpace::ReportCodeStatistics() {
  2226. Isolate* isolate = Isolate::Current();
  2227. CommentStatistic* comments_statistics =
  2228. isolate->paged_space_comments_statistics();
  2229. ReportCodeKindStatistics(isolate->code_kind_statistics());
  2230. PrintF("Code comment statistics (\" [ comment-txt : size/ "
  2231. "count (average)\"):\n");
  2232. for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
  2233. const CommentStatistic& cs = comments_statistics[i];
  2234. if (cs.size > 0) {
  2235. PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
  2236. cs.size/cs.count);
  2237. }
  2238. }
  2239. PrintF("\n");
  2240. }
  2241. void PagedSpace::ResetCodeStatistics() {
  2242. Isolate* isolate = Isolate::Current();
  2243. CommentStatistic* comments_statistics =
  2244. isolate->paged_space_comments_statistics();
  2245. ClearCodeKindStatistics(isolate->code_kind_statistics());
  2246. for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
  2247. comments_statistics[i].Clear();
  2248. }
  2249. comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  2250. comments_statistics[CommentStatistic::kMaxComments].size = 0;
  2251. comments_statistics[CommentStatistic::kMaxComments].count = 0;
  2252. }
  2253. // Adds a comment to the 'comments_statistics' table. Performance is OK as
  2254. // long as 'kMaxComments' is small.
  2255. static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  2256. CommentStatistic* comments_statistics =
  2257. isolate->paged_space_comments_statistics();
  2258. // Do not count empty comments
  2259. if (delta <= 0) return;
  2260. CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  2261. // Search for a free or matching entry in 'comments_statistics': 'cs'
  2262. // points to result.
  2263. for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
  2264. if (comments_statistics[i].comment == NULL) {
  2265. cs = &comments_statistics[i];
  2266. cs->comment = comment;
  2267. break;
  2268. } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
  2269. cs = &comments_statistics[i];
  2270. break;
  2271. }
  2272. }
  2273. // Update entry for 'comment'
  2274. cs->size += delta;
  2275. cs->count += 1;
  2276. }
  2277. // Call for each nested comment start (start marked with '[ xxx', end marked
  2278. // with ']'). RelocIterator 'it' must point to a comment reloc info.
  2279. static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  2280. ASSERT(!it->done());
  2281. ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  2282. const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  2283. if (tmp[0] != '[') {
  2284. // Not a nested comment; skip
  2285. return;
  2286. }
  2287. // Search for end of nested comment or a new nested comment
  2288. const char* const comment_txt =
  2289. reinterpret_cast<const char*>(it->rinfo()->data());
  2290. const byte* prev_pc = it->rinfo()->pc();
  2291. int flat_delta = 0;
  2292. it->next();
  2293. while (true) {
  2294. // All nested comments must be terminated properly, and therefore exit
  2295. // from loop.
  2296. ASSERT(!it->done());
  2297. if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
  2298. const char* const txt =
  2299. reinterpret_cast<const char*>(it->rinfo()->data());
  2300. flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
  2301. if (txt[0] == ']') break; // End of nested comment
  2302. // A new comment
  2303. CollectCommentStatistics(isolate, it);
  2304. // Skip code that was covered with previous comment
  2305. prev_pc = it->rinfo()->pc();
  2306. }
  2307. it->next();
  2308. }
  2309. EnterComment(isolate, comment_txt, flat_delta);
  2310. }
  2311. // Collects code size statistics:
  2312. // - by code kind
  2313. // - by code comment
  2314. void PagedSpace::CollectCodeStatistics() {
  2315. Isolate* isolate = heap()->isolate();
  2316. HeapObjectIterator obj_it(this);
  2317. for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
  2318. if (obj->IsCode()) {
  2319. Code* code = Code::cast(obj);
  2320. isolate->code_kind_statistics()[code->kind()] += code->Size();
  2321. RelocIterator it(code);
  2322. int delta = 0;
  2323. const byte* prev_pc = code->instruction_start();
  2324. while (!it.done()) {
  2325. if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
  2326. delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
  2327. CollectCommentStatistics(isolate, &it);
  2328. prev_pc = it.rinfo()->pc();
  2329. }
  2330. it.next();
  2331. }
  2332. ASSERT(code->instruction_start() <= prev_pc &&
  2333. prev_pc <= code->instruction_end());
  2334. delta += static_cast<int>(code->instruction_end() - prev_pc);
  2335. EnterComment(isolate, "NoComment", delta);
  2336. }
  2337. }
  2338. }
  2339. void PagedSpace::ReportStatistics() {
  2340. int pct = static_cast<int>(Available() * 100 / Capacity());
  2341. PrintF(" capacity: %" V8_PTR_PREFIX "d"
  2342. ", waste: %" V8_PTR_PREFIX "d"
  2343. ", available: %" V8_PTR_PREFIX "d, %%%d\n",
  2344. Capacity(), Waste(), Available(), pct);
  2345. if (was_swept_conservatively_) return;
  2346. ClearHistograms();
  2347. HeapObjectIterator obj_it(this);
  2348. for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
  2349. CollectHistogramInfo(obj);
  2350. ReportHistogram(true);
  2351. }
  2352. #endif
  2353. // -----------------------------------------------------------------------------
  2354. // FixedSpace implementation
  2355. void FixedSpace::PrepareForMarkCompact() {
  2356. // Call prepare of the super class.
  2357. PagedSpace::PrepareForMarkCompact();
  2358. // During a non-compacting collection, everything below the linear
  2359. // allocation pointer except wasted top-of-page blocks is considered
  2360. // allocated and we will rediscover available bytes during the
  2361. // collection.
  2362. accounting_stats_.AllocateBytes(free_list_.available());
  2363. // Clear the free list before a full GC---it will be rebuilt afterward.
  2364. free_list_.Reset();
  2365. }
  2366. // -----------------------------------------------------------------------------
  2367. // MapSpace implementation
  2368. // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
  2369. // there is at least one non-inlined virtual function. I would prefer to hide
  2370. // the VerifyObject definition behind VERIFY_HEAP.
  2371. void MapSpace::VerifyObject(HeapObject* object) {
  2372. // The object should be a map or a free-list node.
  2373. CHECK(object->IsMap() || object->IsFreeSpace());
  2374. }
  2375. // -----------------------------------------------------------------------------
  2376. // CellSpace and PropertyCellSpace implementation
  2377. // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
  2378. // there is at least one non-inlined virtual function. I would prefer to hide
  2379. // the VerifyObject definition behind VERIFY_HEAP.
  2380. void CellSpace::VerifyObject(HeapObject* object) {
  2381. // The object should be a cell or a free-list node.
  2382. CHECK(object->IsCell() ||
  2383. object->map() == heap()->two_pointer_filler_map());
  2384. }
  2385. void PropertyCellSpace::VerifyObject(HeapObject* object) {
  2386. // The object should be a global object property cell or a free-list node.
  2387. CHECK(object->IsPropertyCell() ||
  2388. object->map() == heap()->two_pointer_filler_map());
  2389. }
  2390. // -----------------------------------------------------------------------------
  2391. // LargeObjectIterator
  2392. LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  2393. current_ = space->first_page_;
  2394. size_func_ = NULL;
  2395. }
  2396. LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
  2397. HeapObjectCallback size_func) {
  2398. current_ = space->first_page_;
  2399. size_func_ = size_func;
  2400. }
  2401. HeapObject* LargeObjectIterator::Next() {
  2402. if (current_ == NULL) return NULL;
  2403. HeapObject* object = current_->GetObject();
  2404. current_ = current_->next_page();
  2405. return object;
  2406. }
  2407. // -----------------------------------------------------------------------------
  2408. // LargeObjectSpace
  2409. static bool ComparePointers(void* key1, void* key2) {
  2410. return key1 == key2;
  2411. }
  2412. LargeObjectSpace::LargeObjectSpace(Heap* heap,
  2413. intptr_t max_capacity,
  2414. AllocationSpace id)
  2415. : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
  2416. max_capacity_(max_capacity),
  2417. first_page_(NULL),
  2418. size_(0),
  2419. page_count_(0),
  2420. objects_size_(0),
  2421. chunk_map_(ComparePointers, 1024) {}
  2422. bool LargeObjectSpace::SetUp() {
  2423. first_page_ = NULL;
  2424. size_ = 0;
  2425. page_count_ = 0;
  2426. objects_size_ = 0;
  2427. chunk_map_.Clear();
  2428. return true;
  2429. }
  2430. void LargeObjectSpace::TearDown() {
  2431. while (first_page_ != NULL) {
  2432. LargePage* page = first_page_;
  2433. first_page_ = first_page_->next_page();
  2434. LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
  2435. ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
  2436. heap()->isolate()->memory_allocator()->PerformAllocationCallback(
  2437. space, kAllocationActionFree, page->size());
  2438. heap()->isolate()->memory_allocator()->Free(page);
  2439. }
  2440. SetUp();
  2441. }
  2442. MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
  2443. Executability executable) {
  2444. // Check if we want to force a GC before growing the old space further.
  2445. // If so, fail the allocation.
  2446. if (!heap()->always_allocate() &&
  2447. heap()->OldGenerationAllocationLimitReached()) {
  2448. return Failure::RetryAfterGC(identity());
  2449. }
  2450. if (Size() + object_size > max_capacity_) {
  2451. return Failure::RetryAfterGC(identity());
  2452. }
  2453. LargePage* page = heap()->isolate()->memory_allocator()->
  2454. AllocateLargePage(object_size, this, executable);
  2455. if (page == NULL) return Failure::RetryAfterGC(identity());
  2456. ASSERT(page->area_size() >= object_size);
  2457. size_ += static_cast<int>(page->size());
  2458. objects_size_ += object_size;
  2459. page_count_++;
  2460. page->set_next_page(first_page_);
  2461. first_page_ = page;
  2462. // Register all MemoryChunk::kAlignment-aligned chunks covered by
  2463. // this large page in the chunk map.
  2464. uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  2465. uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  2466. for (uintptr_t key = base; key <= limit; key++) {
  2467. HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
  2468. static_cast<uint32_t>(key),
  2469. true);
  2470. ASSERT(entry != NULL);
  2471. entry->value = page;
  2472. }
  2473. HeapObject* object = page->GetObject();
  2474. if (Heap::ShouldZapGarbage()) {
  2475. // Make the object consistent so the heap can be verified in OldSpaceStep.
  2476. // We only need to do this in debug builds or if verify_heap is on.
  2477. reinterpret_cast<Object**>(object->address())[0] =
  2478. heap()->fixed_array_map();
  2479. reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  2480. }
  2481. heap()->incremental_marking()->OldSpaceStep(object_size);
  2482. return object;
  2483. }
  2484. size_t LargeObjectSpace::CommittedPhysicalMemory() {
  2485. if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  2486. size_t size = 0;
  2487. LargePage* current = first_page_;
  2488. while (current != NULL) {
  2489. size += current->CommittedPhysicalMemory();
  2490. current = current->next_page();
  2491. }
  2492. return size;
  2493. }
  2494. // GC support
  2495. MaybeObject* LargeObjectSpace::FindObject(Address a) {
  2496. LargePage* page = FindPage(a);
  2497. if (page != NULL) {
  2498. return page->GetObject();
  2499. }
  2500. return Failure::Exception();
  2501. }
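        // Look up the large page covering address a via the chunk map, which has
        // an entry for every MemoryChunk::kAlignment-aligned slice of each page.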
  2502. LargePage* LargeObjectSpace::FindPage(Address a) {
  2503. uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  2504. HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
  2505. static_cast<uint32_t>(key),
  2506. false);
  2507. if (e != NULL) {
  2508. ASSERT(e->value != NULL);
  2509. LargePage* page = reinterpret_cast<LargePage*>(e->value);
  2510. ASSERT(page->is_valid());
  2511. if (page->Contains(a)) {
  2512. return page;
  2513. }
  2514. }
  2515. return NULL;
  2516. }
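        // Sweep the large object space: marked objects survive and merely have
        // their mark and live-byte bookkeeping reset, while unmarked objects are
        // freed by unlinking their page, removing its chunk-map entries, and
        // releasing (or queueing) the memory.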
  2517. void LargeObjectSpace::FreeUnmarkedObjects() {
  2518. LargePage* previous = NULL;
  2519. LargePage* current = first_page_;
  2520. while (current != NULL) {
  2521. HeapObject* object = current->GetObject();
  2522. // Can this large page contain pointers to non-trivial objects? No other
  2523. // pointer object is this big.
  2524. bool is_pointer_object = object->IsFixedArray();
  2525. MarkBit mark_bit = Marking::MarkBitFrom(object);
  2526. if (mark_bit.Get()) {
  2527. mark_bit.Clear();
  2528. Page::FromAddress(object->address())->ResetProgressBar();
  2529. Page::FromAddress(object->address())->ResetLiveBytes();
  2530. previous = current;
  2531. current = current->next_page();
  2532. } else {
  2533. LargePage* page = current;
  2534. // Cut the chunk out from the chunk list.
  2535. current = current->next_page();
  2536. if (previous == NULL) {
  2537. first_page_ = current;
  2538. } else {
  2539. previous->set_next_page(current);
  2540. }
  2541. // Free the chunk.
  2542. heap()->mark_compact_collector()->ReportDeleteIfNeeded(
  2543. object, heap()->isolate());
  2544. size_ -= static_cast<int>(page->size());
  2545. objects_size_ -= object->Size();
  2546. page_count_--;
  2547. // Remove entries belonging to this page.
  2548. // Use variable alignment to help pass length check (<= 80 characters)
  2549. // of single line in tools/presubmit.py.
  2550. const intptr_t alignment = MemoryChunk::kAlignment;
  2551. uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
  2552. uintptr_t limit = base + (page->size()-1)/alignment;
  2553. for (uintptr_t key = base; key <= limit; key++) {
  2554. chunk_map_.Remove(reinterpret_cast<void*>(key),
  2555. static_cast<uint32_t>(key));
  2556. }
  2557. if (is_pointer_object) {
  2558. heap()->QueueMemoryChunkForFree(page);
  2559. } else {
  2560. heap()->isolate()->memory_allocator()->Free(page);
  2561. }
  2562. }
  2563. }
  2564. heap()->FreeQueuedChunks();
  2565. }
  2566. bool LargeObjectSpace::Contains(HeapObject* object) {
  2567. Address address = object->address();
  2568. MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  2569. bool owned = (chunk->owner() == this);
  2570. SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
  2571. return owned;
  2572. }
  2573. #ifdef VERIFY_HEAP
  2574. // We do not assume that the large object iterator works, because it depends
  2575. // on the invariants we are checking during verification.
  2576. void LargeObjectSpace::Verify() {
  2577. for (LargePage* chunk = first_page_;
  2578. chunk != NULL;
  2579. chunk = chunk->next_page()) {
  2580. // Each chunk contains an object that starts at the large object page's
  2581. // object area start.
  2582. HeapObject* object = chunk->GetObject();
  2583. Page* page = Page::FromAddress(object->address());
  2584. CHECK(object->address() == page->area_start());
  2585. // The first word should be a map, and we expect all map pointers to be
  2586. // in map space.
  2587. Map* map = object->map();
  2588. CHECK(map->IsMap());
  2589. CHECK(heap()->map_space()->Contains(map));
  2590. // We have only code, sequential strings, external strings
  2591. // (sequential strings that have been morphed into external
  2592. // strings), fixed arrays, and byte arrays in large object space.
  2593. CHECK(object->IsCode() || object->IsSeqString() ||
  2594. object->IsExternalString() || object->IsFixedArray() ||
  2595. object->IsFixedDoubleArray() || object->IsByteArray());
  2596. // The object itself should look OK.
  2597. object->Verify();
  2598. // Byte arrays and strings don't have interior pointers.
  2599. if (object->IsCode()) {
  2600. VerifyPointersVisitor code_visitor;
  2601. object->IterateBody(map->instance_type(),
  2602. object->Size(),
  2603. &code_visitor);
  2604. } else if (object->IsFixedArray()) {
  2605. FixedArray* array = FixedArray::cast(object);
  2606. for (int j = 0; j < array->length(); j++) {
  2607. Object* element = array->get(j);
  2608. if (element->IsHeapObject()) {
  2609. HeapObject* element_object = HeapObject::cast(element);
  2610. CHECK(heap()->Contains(element_object));
  2611. CHECK(element_object->map()->IsMap());
  2612. }
  2613. }
  2614. }
  2615. }
  2616. }
  2617. #endif
  2618. #ifdef DEBUG
  2619. void LargeObjectSpace::Print() {
  2620. LargeObjectIterator it(this);
  2621. for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
  2622. obj->Print();
  2623. }
  2624. }
  2625. void LargeObjectSpace::ReportStatistics() {
  2626. PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
  2627. int num_objects = 0;
  2628. ClearHistograms();
  2629. LargeObjectIterator it(this);
  2630. for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
  2631. num_objects++;
  2632. CollectHistogramInfo(obj);
  2633. }
  2634. PrintF(" number of objects %d, "
  2635. "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  2636. if (num_objects > 0) ReportHistogram(false);
  2637. }
  2638. void LargeObjectSpace::CollectCodeStatistics() {
  2639. Isolate* isolate = heap()->isolate();
  2640. LargeObjectIterator obj_it(this);
  2641. for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
  2642. if (obj->IsCode()) {
  2643. Code* code = Code::cast(obj);
  2644. isolate->code_kind_statistics()[code->kind()] += code->Size();
  2645. }
  2646. }
  2647. }
  2648. void Page::Print() {
  2649. // Make a best-effort attempt to print the objects in the page.
  2650. PrintF("Page@%p in %s\n",
  2651. this->address(),
  2652. AllocationSpaceName(this->owner()->identity()));
  2653. printf(" --------------------------------------\n");
  2654. HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  2655. unsigned mark_size = 0;
  2656. for (HeapObject* object = objects.Next();
  2657. object != NULL;
  2658. object = objects.Next()) {
  2659. bool is_marked = Marking::MarkBitFrom(object).Get();
  2660. PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
  2661. if (is_marked) {
  2662. mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
  2663. }
  2664. object->ShortPrint();
  2665. PrintF("\n");
  2666. }
  2667. printf(" --------------------------------------\n");
  2668. printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
  2669. }
  2670. #endif // DEBUG
  2671. } } // namespace v8::internal