/deps/v8/src/spaces.cc
- // Copyright 2011 the V8 project authors. All rights reserved.
- // Redistribution and use in source and binary forms, with or without
- // modification, are permitted provided that the following conditions are
- // met:
- //
- // * Redistributions of source code must retain the above copyright
- // notice, this list of conditions and the following disclaimer.
- // * Redistributions in binary form must reproduce the above
- // copyright notice, this list of conditions and the following
- // disclaimer in the documentation and/or other materials provided
- // with the distribution.
- // * Neither the name of Google Inc. nor the names of its
- // contributors may be used to endorse or promote products derived
- // from this software without specific prior written permission.
- //
- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- #include "v8.h"
- #include "macro-assembler.h"
- #include "mark-compact.h"
- #include "platform.h"
- namespace v8 {
- namespace internal {
- // ----------------------------------------------------------------------------
- // HeapObjectIterator
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize as if we have
- // reached the end of the anchor page, then the first iteration will move on
- // to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- NULL);
- }
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
- HeapObjectCallback size_func) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize the current
- // address and end as NULL, then the first iteration will move on
- // to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- size_func);
- }
- HeapObjectIterator::HeapObjectIterator(Page* page,
- HeapObjectCallback size_func) {
- Space* owner = page->owner();
- ASSERT(owner == page->heap()->old_pointer_space() ||
- owner == page->heap()->old_data_space() ||
- owner == page->heap()->map_space() ||
- owner == page->heap()->cell_space() ||
- owner == page->heap()->property_cell_space() ||
- owner == page->heap()->code_space());
- Initialize(reinterpret_cast<PagedSpace*>(owner),
- page->area_start(),
- page->area_end(),
- kOnePageOnly,
- size_func);
- ASSERT(page->WasSweptPrecisely());
- }
- void HeapObjectIterator::Initialize(PagedSpace* space,
- Address cur, Address end,
- HeapObjectIterator::PageMode mode,
- HeapObjectCallback size_f) {
- // Check that we actually can iterate this space.
- ASSERT(!space->was_swept_conservatively());
- space_ = space;
- cur_addr_ = cur;
- cur_end_ = end;
- page_mode_ = mode;
- size_func_ = size_f;
- }
 - // We have hit the end of the current page's object area and should
 - // advance to the next page's block of objects.
- bool HeapObjectIterator::AdvanceToNextPage() {
- ASSERT(cur_addr_ == cur_end_);
- if (page_mode_ == kOnePageOnly) return false;
- Page* cur_page;
- if (cur_addr_ == NULL) {
- cur_page = space_->anchor();
- } else {
- cur_page = Page::FromAddress(cur_addr_ - 1);
- ASSERT(cur_addr_ == cur_page->area_end());
- }
- cur_page = cur_page->next_page();
- if (cur_page == space_->anchor()) return false;
- cur_addr_ = cur_page->area_start();
- cur_end_ = cur_page->area_end();
- ASSERT(cur_page->WasSweptPrecisely());
- return true;
- }
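 - // Usage sketch (illustrative, not part of the original file): walking every
 - // live object in a precisely swept paged space. Next() (defined elsewhere)
 - // is assumed to fall back to AdvanceToNextPage() at each page end and to
 - // return NULL once the last page is exhausted, as at the call site further
 - // down in this file:
 - //
 - //   HeapObjectIterator it(heap->old_pointer_space());
 - //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
 - //     int size = obj->Size();  // Each object is visited exactly once.
 - //   }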
- // -----------------------------------------------------------------------------
- // CodeRange
- CodeRange::CodeRange(Isolate* isolate)
- : isolate_(isolate),
- code_range_(NULL),
- free_list_(0),
- allocation_list_(0),
- current_allocation_block_index_(0) {
- }
- bool CodeRange::SetUp(const size_t requested) {
- ASSERT(code_range_ == NULL);
- code_range_ = new VirtualMemory(requested);
- CHECK(code_range_ != NULL);
- if (!code_range_->IsReserved()) {
- delete code_range_;
- code_range_ = NULL;
- return false;
- }
 - // We are sure that we have mapped a block of the requested size.
- ASSERT(code_range_->size() == requested);
- LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
- Address base = reinterpret_cast<Address>(code_range_->address());
- Address aligned_base =
- RoundUp(reinterpret_cast<Address>(code_range_->address()),
- MemoryChunk::kAlignment);
- size_t size = code_range_->size() - (aligned_base - base);
- allocation_list_.Add(FreeBlock(aligned_base, size));
- current_allocation_block_index_ = 0;
- return true;
- }
- int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right) {
- // The entire point of CodeRange is that the difference between two
- // addresses in the range can be represented as a signed 32-bit int,
- // so the cast is semantically correct.
- return static_cast<int>(left->start - right->start);
- }
- void CodeRange::GetNextAllocationBlock(size_t requested) {
- for (current_allocation_block_index_++;
- current_allocation_block_index_ < allocation_list_.length();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
- }
- }
- // Sort and merge the free blocks on the free list and the allocation list.
- free_list_.AddAll(allocation_list_);
- allocation_list_.Clear();
- free_list_.Sort(&CompareFreeBlockAddress);
- for (int i = 0; i < free_list_.length();) {
- FreeBlock merged = free_list_[i];
- i++;
- // Add adjacent free blocks to the current merged block.
- while (i < free_list_.length() &&
- free_list_[i].start == merged.start + merged.size) {
- merged.size += free_list_[i].size;
- i++;
- }
- if (merged.size > 0) {
- allocation_list_.Add(merged);
- }
- }
- free_list_.Clear();
- for (current_allocation_block_index_ = 0;
- current_allocation_block_index_ < allocation_list_.length();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
- }
- }
- // Code range is full or too fragmented.
- V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
- }
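 - // Worked example (illustrative): suppose the merged free list holds blocks
 - // at 0x1000 (+0x1000), 0x3000 (+0x1000) and 0x2000 (+0x1000). Sorting by
 - // start address gives 0x1000, 0x2000, 0x3000; since each block begins where
 - // the previous one ends, they coalesce into a single allocation block at
 - // 0x1000 of size 0x3000, which can then satisfy a larger request.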
- Address CodeRange::AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
- size_t* allocated) {
- ASSERT(commit_size <= requested_size);
- ASSERT(current_allocation_block_index_ < allocation_list_.length());
- if (requested_size > allocation_list_[current_allocation_block_index_].size) {
 - // Find an allocation block large enough. This call may trigger
 - // V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested_size);
- }
- // Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
- FreeBlock current = allocation_list_[current_allocation_block_index_];
- if (aligned_requested >= (current.size - Page::kPageSize)) {
 - // Don't leave behind a small free block; it would be useless for a
 - // large object or chunk.
- *allocated = current.size;
- } else {
- *allocated = aligned_requested;
- }
- ASSERT(*allocated <= current.size);
- ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitExecutableMemory(code_range_,
- current.start,
- commit_size,
- *allocated)) {
- *allocated = 0;
- return NULL;
- }
- allocation_list_[current_allocation_block_index_].start += *allocated;
- allocation_list_[current_allocation_block_index_].size -= *allocated;
- if (*allocated == current.size) {
- GetNextAllocationBlock(0); // This block is used up, get the next one.
- }
- return current.start;
- }
- bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return code_range_->Commit(start, length, true);
- }
- bool CodeRange::UncommitRawMemory(Address start, size_t length) {
- return code_range_->Uncommit(start, length);
- }
- void CodeRange::FreeRawMemory(Address address, size_t length) {
- ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
- free_list_.Add(FreeBlock(address, length));
- code_range_->Uncommit(address, length);
- }
- void CodeRange::TearDown() {
- delete code_range_; // Frees all memory in the virtual memory range.
- code_range_ = NULL;
- free_list_.Free();
- allocation_list_.Free();
- }
- // -----------------------------------------------------------------------------
- // MemoryAllocator
- //
- MemoryAllocator::MemoryAllocator(Isolate* isolate)
- : isolate_(isolate),
- capacity_(0),
- capacity_executable_(0),
- size_(0),
- size_executable_(0) {
- }
- bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
- capacity_ = RoundUp(capacity, Page::kPageSize);
- capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
- ASSERT_GE(capacity_, capacity_executable_);
- size_ = 0;
- size_executable_ = 0;
- return true;
- }
- void MemoryAllocator::TearDown() {
- // Check that spaces were torn down before MemoryAllocator.
- ASSERT(size_ == 0);
- // TODO(gc) this will be true again when we fix FreeMemory.
- // ASSERT(size_executable_ == 0);
- capacity_ = 0;
- capacity_executable_ = 0;
- }
- void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- ASSERT(reservation->IsReserved());
- size_t size = reservation->size();
- ASSERT(size_ >= size);
- size_ -= size;
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- if (executable == EXECUTABLE) {
- ASSERT(size_executable_ >= size);
- size_executable_ -= size;
- }
- // Code which is part of the code-range does not have its own VirtualMemory.
- ASSERT(!isolate_->code_range()->contains(
- static_cast<Address>(reservation->address())));
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
- reservation->Release();
- }
- void MemoryAllocator::FreeMemory(Address base,
- size_t size,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- ASSERT(size_ >= size);
- size_ -= size;
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- if (executable == EXECUTABLE) {
- ASSERT(size_executable_ >= size);
- size_executable_ -= size;
- }
- if (isolate_->code_range()->contains(static_cast<Address>(base))) {
- ASSERT(executable == EXECUTABLE);
- isolate_->code_range()->FreeRawMemory(base, size);
- } else {
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
- bool result = VirtualMemory::ReleaseRegion(base, size);
- USE(result);
- ASSERT(result);
- }
- }
- Address MemoryAllocator::ReserveAlignedMemory(size_t size,
- size_t alignment,
- VirtualMemory* controller) {
- VirtualMemory reservation(size, alignment);
- if (!reservation.IsReserved()) return NULL;
- size_ += reservation.size();
- Address base = RoundUp(static_cast<Address>(reservation.address()),
- alignment);
- controller->TakeControl(&reservation);
- return base;
- }
- Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
- size_t commit_size,
- size_t alignment,
- Executability executable,
- VirtualMemory* controller) {
- ASSERT(commit_size <= reserve_size);
- VirtualMemory reservation;
- Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
- if (base == NULL) return NULL;
- if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation,
- base,
- commit_size,
- reserve_size)) {
- base = NULL;
- }
- } else {
- if (!reservation.Commit(base, commit_size, false)) {
- base = NULL;
- }
- }
- if (base == NULL) {
- // Failed to commit the body. Release the mapping and any partially
 - // committed regions inside it.
- reservation.Release();
- return NULL;
- }
- controller->TakeControl(&reservation);
- return base;
- }
- void Page::InitializeAsAnchor(PagedSpace* owner) {
- set_owner(owner);
- set_prev_page(this);
- set_next_page(this);
- }
- NewSpacePage* NewSpacePage::Initialize(Heap* heap,
- Address start,
- SemiSpace* semi_space) {
- Address area_start = start + NewSpacePage::kObjectStartOffset;
- Address area_end = start + Page::kPageSize;
- MemoryChunk* chunk = MemoryChunk::Initialize(heap,
- start,
- Page::kPageSize,
- area_start,
- area_end,
- NOT_EXECUTABLE,
- semi_space);
- chunk->set_next_chunk(NULL);
- chunk->set_prev_chunk(NULL);
- chunk->initialize_scan_on_scavenge(true);
- bool in_to_space = (semi_space->id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE);
- ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
- heap->incremental_marking()->SetNewSpacePageFlags(page);
- return page;
- }
- void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
- set_owner(semi_space);
- set_next_chunk(this);
- set_prev_chunk(this);
 - // Clearing all flags marks this anchor page as not being in new-space;
 - // all real new-space pages carry the new-space flags.
- SetFlags(0, ~0);
- }
- MemoryChunk* MemoryChunk::Initialize(Heap* heap,
- Address base,
- size_t size,
- Address area_start,
- Address area_end,
- Executability executable,
- Space* owner) {
- MemoryChunk* chunk = FromAddress(base);
- ASSERT(base == chunk->address());
- chunk->heap_ = heap;
- chunk->size_ = size;
- chunk->area_start_ = area_start;
- chunk->area_end_ = area_end;
- chunk->flags_ = 0;
- chunk->set_owner(owner);
- chunk->InitializeReservedMemory();
- chunk->slots_buffer_ = NULL;
- chunk->skip_list_ = NULL;
- chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
- chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->parallel_sweeping_ = 0;
- chunk->available_in_small_free_list_ = 0;
- chunk->available_in_medium_free_list_ = 0;
- chunk->available_in_large_free_list_ = 0;
- chunk->available_in_huge_free_list_ = 0;
- chunk->non_available_small_blocks_ = 0;
- chunk->ResetLiveBytes();
- Bitmap::Clear(chunk);
- chunk->initialize_scan_on_scavenge(false);
- chunk->SetFlag(WAS_SWEPT_PRECISELY);
- ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
- ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
- if (executable == EXECUTABLE) {
- chunk->SetFlag(IS_EXECUTABLE);
- }
- if (owner == heap->old_data_space()) {
- chunk->SetFlag(CONTAINS_ONLY_DATA);
- }
- return chunk;
- }
- // Commit MemoryChunk area to the requested size.
- bool MemoryChunk::CommitArea(size_t requested) {
- size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
- MemoryAllocator::CodePageGuardSize() : 0;
- size_t header_size = area_start() - address() - guard_size;
- size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
- size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
- OS::CommitPageSize());
- if (commit_size > committed_size) {
 - // The commit size must be less than or equal to the reserved size.
- ASSERT(commit_size <= size() - 2 * guard_size);
- // Append the committed area.
- Address start = address() + committed_size + guard_size;
- size_t length = commit_size - committed_size;
- if (reservation_.IsReserved()) {
- if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
- return false;
- }
- } else {
- CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->CommitRawMemory(start, length)) return false;
- }
- if (Heap::ShouldZapGarbage()) {
- heap_->isolate()->memory_allocator()->ZapBlock(start, length);
- }
- } else if (commit_size < committed_size) {
- ASSERT(commit_size > 0);
- // Shrink the committed area.
- size_t length = committed_size - commit_size;
- Address start = address() + committed_size + guard_size - length;
- if (reservation_.IsReserved()) {
- if (!reservation_.Uncommit(start, length)) return false;
- } else {
- CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->UncommitRawMemory(start, length)) return false;
- }
- }
- area_end_ = area_start_ + requested;
- return true;
- }
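 - // Numeric sketch (illustrative, assuming a non-executable chunk, a 4 KB
 - // OS::CommitPageSize() and header_size == 0x200): with a committed payload
 - // of 0x1000 bytes, committed_size = RoundUp(0x1200, 0x1000) = 0x2000. A
 - // call CommitArea(0x2800) computes commit_size = RoundUp(0x2A00, 0x1000) =
 - // 0x3000, commits the 0x1000-byte gap starting at address() + 0x2000, and
 - // finally sets area_end_ = area_start_ + 0x2800.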
- void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
- // This memory barrier is needed since concurrent sweeper threads may iterate
- // over the list of pages while a new page is inserted.
- // TODO(hpayer): find a cleaner way to guarantee that the page list can be
 - // expanded concurrently.
- MemoryBarrier();
- // The following two write operations can take effect in arbitrary order
 - // since pages are always iterated by the sweeper threads in LIFO order,
 - // i.e., the inserted page becomes visible to the sweeper threads after
- // other->next_chunk_ = this;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
- }
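 - // Pointer sketch (illustrative): inserting chunk C via C->InsertAfter(A)
 - // into the cyclic list A <-> B first sets C->next = B and C->prev = A,
 - // then, after the barrier, B->prev = C and A->next = C (in either order).
 - // A sweeper thread starting from A therefore sees either the old list or a
 - // fully linked C, never a half-initialized one.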
- void MemoryChunk::Unlink() {
- if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
- heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
- }
- MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
- Executability executable,
- Space* owner) {
- ASSERT(commit_area_size <= reserve_area_size);
- size_t chunk_size;
- Heap* heap = isolate_->heap();
- Address base = NULL;
- VirtualMemory reservation;
- Address area_start = NULL;
- Address area_end = NULL;
- //
- // MemoryChunk layout:
- //
- // Executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- base + CodePageGuardStartOffset
- // | Guard |
- // +----------------------------+<- area_start_
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- aligned at OS page boundary
- // | Guard |
- // +----------------------------+<- base + chunk_size
- //
- // Non-executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- area_start_ (base + kObjectStartOffset)
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- base + chunk_size
- //
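 - // Size sketch (illustrative, assuming OS::CommitPageSize() == 4 KB and
 - // CodePageAreaStartOffset() == 8 KB): a 40 KB executable reservation gives
 - // chunk_size = RoundUp(8 KB + 40 KB, 4 KB) + 4 KB guard = 52 KB, i.e.
 - // header plus leading guard plus area, rounded to an OS page, plus one
 - // trailing guard page.
 - //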
- if (executable == EXECUTABLE) {
- chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- OS::CommitPageSize()) + CodePageGuardSize();
- // Check executable memory limit.
- if (size_executable_ + chunk_size > capacity_executable_) {
- LOG(isolate_,
- StringEvent("MemoryAllocator::AllocateRawMemory",
- "V8 Executable Allocation capacity exceeded"));
- return NULL;
- }
- // Size of header (not executable) plus area (executable).
- size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
- OS::CommitPageSize());
- // Allocate executable memory either from code range or from the
- // OS.
- if (isolate_->code_range()->exists()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size,
- commit_size,
- &chunk_size);
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
- MemoryChunk::kAlignment));
- if (base == NULL) return NULL;
- size_ += chunk_size;
- // Update executable memory size.
- size_executable_ += chunk_size;
- } else {
- base = AllocateAlignedMemory(chunk_size,
- commit_size,
- MemoryChunk::kAlignment,
- executable,
- &reservation);
- if (base == NULL) return NULL;
- // Update executable memory size.
- size_executable_ += reservation.size();
- }
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
- }
- area_start = base + CodePageAreaStartOffset();
- area_end = area_start + commit_area_size;
- } else {
- chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- OS::CommitPageSize());
- size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
- commit_area_size, OS::CommitPageSize());
- base = AllocateAlignedMemory(chunk_size,
- commit_size,
- MemoryChunk::kAlignment,
- executable,
- &reservation);
- if (base == NULL) return NULL;
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
- }
- area_start = base + Page::kObjectStartOffset;
- area_end = area_start + commit_area_size;
- }
- // Use chunk_size for statistics and callbacks because we assume that they
- // treat reserved but not-yet committed memory regions of chunks as allocated.
- isolate_->counters()->memory_allocated()->
- Increment(static_cast<int>(chunk_size));
- LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
- if (owner != NULL) {
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
- }
- MemoryChunk* result = MemoryChunk::Initialize(heap,
- base,
- chunk_size,
- area_start,
- area_end,
- executable,
- owner);
- result->set_reserved_memory(&reservation);
- return result;
- }
- void Page::ResetFreeListStatistics() {
- non_available_small_blocks_ = 0;
- available_in_small_free_list_ = 0;
- available_in_medium_free_list_ = 0;
- available_in_large_free_list_ = 0;
- available_in_huge_free_list_ = 0;
- }
- Page* MemoryAllocator::AllocatePage(intptr_t size,
- PagedSpace* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
- if (chunk == NULL) return NULL;
- return Page::Initialize(isolate_->heap(), chunk, executable, owner);
- }
- LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
- Space* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(object_size,
- object_size,
- executable,
- owner);
- if (chunk == NULL) return NULL;
- return LargePage::Initialize(isolate_->heap(), chunk);
- }
- void MemoryAllocator::Free(MemoryChunk* chunk) {
- LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- if (chunk->owner() != NULL) {
- ObjectSpace space =
- static_cast<ObjectSpace>(1 << chunk->owner()->identity());
- PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
- }
- isolate_->heap()->RememberUnmappedPage(
- reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
- delete chunk->slots_buffer();
- delete chunk->skip_list();
- VirtualMemory* reservation = chunk->reserved_memory();
- if (reservation->IsReserved()) {
- FreeMemory(reservation, chunk->executable());
- } else {
- FreeMemory(chunk->address(),
- chunk->size(),
- chunk->executable());
- }
- }
- bool MemoryAllocator::CommitBlock(Address start,
- size_t size,
- Executability executable) {
- if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size);
- }
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
- return true;
- }
- bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!VirtualMemory::UncommitRegion(start, size)) return false;
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- return true;
- }
- void MemoryAllocator::ZapBlock(Address start, size_t size) {
- for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = kZapValue;
- }
- }
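 - // Effect sketch (illustrative): after ZapBlock(start, size) every
 - // pointer-aligned word in [start, start + size) reads as kZapValue, so a
 - // stale reference into freed or uninitialized memory shows up in crash
 - // dumps as a recognizable bit pattern rather than as a plausible pointer.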
- void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- MemoryAllocationCallbackRegistration registration =
- memory_allocation_callbacks_[i];
- if ((registration.space & space) == space &&
- (registration.action & action) == action)
- registration.callback(space, action, static_cast<int>(size));
- }
- }
- bool MemoryAllocator::MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) return true;
- }
- return false;
- }
- void MemoryAllocator::AddMemoryAllocationCallback(
- MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- ASSERT(callback != NULL);
- MemoryAllocationCallbackRegistration registration(callback, space, action);
- ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
- return memory_allocation_callbacks_.Add(registration);
- }
- void MemoryAllocator::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) {
- memory_allocation_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
- }
- #ifdef DEBUG
- void MemoryAllocator::ReportStatistics() {
- float pct = static_cast<float>(capacity_ - size_) / capacity_;
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", used: %" V8_PTR_PREFIX "d"
- ", available: %%%d\n\n",
- capacity_, size_, static_cast<int>(pct*100));
- }
- #endif
- int MemoryAllocator::CodePageGuardStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
- }
- int MemoryAllocator::CodePageGuardSize() {
- return static_cast<int>(OS::CommitPageSize());
- }
- int MemoryAllocator::CodePageAreaStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return CodePageGuardStartOffset() + CodePageGuardSize();
- }
- int MemoryAllocator::CodePageAreaEndOffset() {
- // We are guarding code pages: the last OS page will be protected as
- // non-writable.
- return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
- }
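 - // Layout sketch (illustrative, assuming OS::CommitPageSize() == 4 KB and
 - // 0 < Page::kObjectStartOffset <= 4 KB): CodePageGuardStartOffset() == 4 KB
 - // and CodePageGuardSize() == 4 KB, so the executable area runs from offset
 - // 8 KB (CodePageAreaStartOffset()) to Page::kPageSize - 4 KB
 - // (CodePageAreaEndOffset()), with a non-writable guard page on each side of
 - // the code area.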
- bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size) {
- // Commit page header (not executable).
- if (!vm->Commit(start,
- CodePageGuardStartOffset(),
- false)) {
- return false;
- }
- // Create guard page after the header.
- if (!vm->Guard(start + CodePageGuardStartOffset())) {
- return false;
- }
- // Commit page body (executable).
- if (!vm->Commit(start + CodePageAreaStartOffset(),
- commit_size - CodePageGuardStartOffset(),
- true)) {
- return false;
- }
- // Create guard page before the end.
- if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
- return false;
- }
- return true;
- }
- // -----------------------------------------------------------------------------
- // MemoryChunk implementation
- void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
- if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
- static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
- }
- chunk->IncrementLiveBytes(by);
- }
- // -----------------------------------------------------------------------------
- // PagedSpace implementation
- PagedSpace::PagedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- Executability executable)
- : Space(heap, id, executable),
- free_list_(this),
- was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
- unswept_free_bytes_(0) {
- if (id == CODE_SPACE) {
- area_size_ = heap->isolate()->memory_allocator()->
- CodePageAreaSize();
- } else {
- area_size_ = Page::kPageSize - Page::kObjectStartOffset;
- }
- max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
- * AreaSize();
- accounting_stats_.Clear();
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
- anchor_.InitializeAsAnchor(this);
- }
- bool PagedSpace::SetUp() {
- return true;
- }
- bool PagedSpace::HasBeenSetUp() {
- return true;
- }
- void PagedSpace::TearDown() {
- PageIterator iterator(this);
- while (iterator.has_next()) {
- heap()->isolate()->memory_allocator()->Free(iterator.next());
- }
- anchor_.set_next_page(&anchor_);
- anchor_.set_prev_page(&anchor_);
- accounting_stats_.Clear();
- }
- size_t PagedSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- size_t size = 0;
- PageIterator it(this);
- while (it.has_next()) {
- size += it.next()->CommittedPhysicalMemory();
- }
- return size;
- }
- MaybeObject* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called on precisely swept spaces.
- ASSERT(!heap()->mark_compact_collector()->in_use());
- if (!Contains(addr)) return Failure::Exception();
- Page* p = Page::FromAddress(addr);
- HeapObjectIterator it(p, NULL);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Address cur = obj->address();
- Address next = cur + obj->Size();
- if ((cur <= addr) && (addr < next)) return obj;
- }
- UNREACHABLE();
- return Failure::Exception();
- }
- bool PagedSpace::CanExpand() {
- ASSERT(max_capacity_ % AreaSize() == 0);
- if (Capacity() == max_capacity_) return false;
- ASSERT(Capacity() < max_capacity_);
- // Are we going to exceed capacity for this space?
- if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
- return true;
- }
- bool PagedSpace::Expand() {
- if (!CanExpand()) return false;
- intptr_t size = AreaSize();
- if (anchor_.next_page() == &anchor_) {
- size = SizeOfFirstPage();
- }
- Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
- size, this, executable());
- if (p == NULL) return false;
- ASSERT(Capacity() <= max_capacity_);
- p->InsertAfter(anchor_.prev_page());
- return true;
- }
- intptr_t PagedSpace::SizeOfFirstPage() {
- int size = 0;
- switch (identity()) {
- case OLD_POINTER_SPACE:
- size = 64 * kPointerSize * KB;
- break;
- case OLD_DATA_SPACE:
- size = 192 * KB;
- break;
- case MAP_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case CELL_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case PROPERTY_CELL_SPACE:
- size = 8 * kPointerSize * KB;
- break;
- case CODE_SPACE:
- if (heap()->isolate()->code_range()->exists()) {
- // When code range exists, code pages are allocated in a special way
- // (from the reserved code range). That part of the code is not yet
- // upgraded to handle small pages.
- size = AreaSize();
- } else {
- size = 384 * KB;
- }
- break;
- default:
- UNREACHABLE();
- }
- return Min(size, AreaSize());
- }
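 - // Numeric sketch (illustrative): on a 64-bit build (kPointerSize == 8) the
 - // first old-pointer-space page is 64 * 8 KB = 512 KB, the first map-space
 - // and cell-space pages are 16 * 8 KB = 128 KB each, and the first
 - // property-cell-space page is 8 * 8 KB = 64 KB, all clamped to AreaSize()
 - // by the Min() above.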
- int PagedSpace::CountTotalPages() {
- PageIterator it(this);
- int count = 0;
- while (it.has_next()) {
- it.next();
- count++;
- }
- return count;
- }
- void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
- sizes->huge_size_ = page->available_in_huge_free_list();
- sizes->small_size_ = page->available_in_small_free_list();
- sizes->medium_size_ = page->available_in_medium_free_list();
- sizes->large_size_ = page->available_in_large_free_list();
- }
- void PagedSpace::ResetFreeListStatistics() {
- PageIterator page_iterator(this);
- while (page_iterator.has_next()) {
- Page* page = page_iterator.next();
- page->ResetFreeListStatistics();
- }
- }
- void PagedSpace::ReleasePage(Page* page, bool unlink) {
- ASSERT(page->LiveBytes() == 0);
- ASSERT(AreaSize() == page->area_size());
- // Adjust list of unswept pages if the page is the head of the list.
- if (first_unswept_page_ == page) {
- first_unswept_page_ = page->next_page();
- if (first_unswept_page_ == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- }
- }
- if (page->WasSwept()) {
- intptr_t size = free_list_.EvictFreeListItems(page);
- accounting_stats_.AllocateBytes(size);
- ASSERT_EQ(AreaSize(), static_cast<int>(size));
- } else {
- DecreaseUnsweptFreeBytes(page);
- }
- if (Page::FromAllocationTop(allocation_info_.top) == page) {
- allocation_info_.top = allocation_info_.limit = NULL;
- }
- if (unlink) {
- page->Unlink();
- }
- if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
- heap()->isolate()->memory_allocator()->Free(page);
- } else {
- heap()->QueueMemoryChunkForFree(page);
- }
- ASSERT(Capacity() > 0);
- accounting_stats_.ShrinkSpace(AreaSize());
- }
- #ifdef DEBUG
- void PagedSpace::Print() { }
- #endif
- #ifdef VERIFY_HEAP
- void PagedSpace::Verify(ObjectVisitor* visitor) {
- // We can only iterate over the pages if they were swept precisely.
- if (was_swept_conservatively_) return;
- bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
- PageIterator page_iterator(this);
- while (page_iterator.has_next()) {
- Page* page = page_iterator.next();
- CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
- allocation_pointer_found_in_space = true;
- }
- CHECK(page->WasSweptPrecisely());
- HeapObjectIterator it(page, NULL);
- Address end_of_previous_object = page->area_start();
- Address top = page->area_end();
- int black_size = 0;
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- CHECK(end_of_previous_object <= object->address());
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- CHECK(map->IsMap());
- CHECK(heap()->map_space()->Contains(map));
- // Perform space-specific object verification.
- VerifyObject(object);
- // The object itself should look OK.
- object->Verify();
- // All the interior pointers should be contained in the heap.
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, visitor);
- if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
- black_size += size;
- }
- CHECK(object->address() + size <= top);
- end_of_previous_object = object->address() + size;
- }
- CHECK_LE(black_size, page->LiveBytes());
- }
- CHECK(allocation_pointer_found_in_space);
- }
- #endif // VERIFY_HEAP
- // -----------------------------------------------------------------------------
- // NewSpace implementation
- bool NewSpace::SetUp(int reserved_semispace_capacity,
- int maximum_semispace_capacity) {
 - // Set up the new space from a single reserved memory chunk, divided into
 - // two semi-spaces. To support fast containment testing in the new space,
 - // the size of this chunk must be a power of two and it must be aligned
 - // to its size.
- int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
- size_t size = 2 * reserved_semispace_capacity;
- Address base =
- heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
- size, size, &reservation_);
- if (base == NULL) return false;
- chunk_base_ = base;
- chunk_size_ = static_cast<uintptr_t>(size);
- LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
- ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
- ASSERT(IsPowerOf2(maximum_semispace_capacity));
- // Allocate and set up the histogram arrays if necessary.
- allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
- promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
- #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
- promoted_histogram_[name].set_name(#name);
- INSTANCE_TYPE_LIST(SET_NAME)
- #undef SET_NAME
- ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
- ASSERT(static_cast<intptr_t>(chunk_size_) >=
- 2 * heap()->ReservedSemiSpaceSize());
- ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
- to_space_.SetUp(chunk_base_,
- initial_semispace_capacity,
- maximum_semispace_capacity);
- from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
- initial_semispace_capacity,
- maximum_semispace_capacity);
- if (!to_space_.Commit()) {
- return false;
- }
- ASSERT(!from_space_.is_committed()); // No need to use memory yet.
- start_ = chunk_base_;
- address_mask_ = ~(2 * reserved_semispace_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
- ResetAllocationInfo();
- return true;
- }
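 - // Mask sketch (illustrative): with reserved_semispace_capacity == 8 MB the
 - // reservation is 16 MB and aligned to 16 MB, so address_mask_ == ~(16 MB - 1)
 - // and membership can be tested with a single compare:
 - //
 - //   bool contains = (reinterpret_cast<uintptr_t>(addr) & address_mask_) ==
 - //                   reinterpret_cast<uintptr_t>(start_);
 - //
 - // object_mask_ and object_expected_ extend the same trick to also check a
 - // tagged pointer's heap-object tag bit in the same comparison.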
- void NewSpace::TearDown() {
- if (allocated_histogram_) {
- DeleteArray(allocated_histogram_);
- allocated_histogram_ = NULL;
- }
- if (promoted_histogram_) {
- DeleteArray(promoted_histogram_);
- promoted_histogram_ = NULL;
- }
- start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
- to_space_.TearDown();
- from_space_.TearDown();
- LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
- ASSERT(reservation_.IsReserved());
- heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
- NOT_EXECUTABLE);
- chunk_base_ = NULL;
- chunk_size_ = 0;
- }
- void NewSpace::Flip() {
- SemiSpace::Swap(&from_space_, &to_space_);
- }
- void NewSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
- ASSERT(Capacity() < MaximumCapacity());
- int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
- if (to_space_.GrowTo(new_capacity)) {
- // Only grow from space if we managed to grow to-space.
- if (!from_space_.GrowTo(new_capacity)) {
- // If we managed to grow to-space but couldn't grow from-space,
- // attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.Capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- V8::FatalProcessOutOfMemory("Failed to grow new space.");
- }
- }
- }
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- }
- void NewSpace::Shrink() {
- int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
- int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
- if (rounded_new_capacity < Capacity() &&
- to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from-space if we managed to shrink to-space.
- from_space_.Reset();
- if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to-space but couldn't shrink from
- // space, attempt to grow to-space again.
- if (!to_space_.GrowTo(from_space_.Capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- V8::FatalProcessOutOfMemory("Failed to shrink new space.");
- }
- }
- }
- allocation_info_.limit = to_space_.page_high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- }
- void NewSpace::UpdateAllocationInfo() {
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
- // Lower limit during incremental marking.
- if (heap()->incremental_marking()->IsMarking() &&
- inline_allocation_limit_step() != 0) {
- Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
- }
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- }
- void NewSpace::ResetAllocationInfo() {
- to_space_.Reset();
- UpdateAllocationInfo();
- pages_used_ = 0;
- // Clear all mark-bits in the to-space.
- NewSpacePageIterator it(&to_space_);
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
- }
- bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
- if (NewSpacePage::IsAtStart(top)) {
- // The current page is already empty. Don't try to make another.
- // We should only get here if someone asks to allocate more
- // than what can be stored in a single page.
- // TODO(gc): Change the limit on new-space allocation to prevent this
- // from happening (all such allocations should go directly to LOSpace).
- return false;
- }
- if (!to_space_.AdvancePage()) {
- // Failed to get a new page in to-space.
- return false;
- }
- // Clear remainder of current page.
- Address limit = NewSpacePage::FromLimit(top)->area_end();
- if (heap()->gc_state() == Heap::SCAVENGE) {
- heap()->promotion_queue()->SetNewLimit(limit);
- heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
- }
- int remaining_in_page = static_cast<int>(limit - top);
- heap()->CreateFillerObjectAt(top, remaining_in_page);
- pages_used_++;
- UpdateAllocationInfo();
- return true;
- }
- MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
- Address new_top = old_top + size_in_bytes;
- Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
- high);
- int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(
- bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
- top_on_previous_step_ = new_top;
- return AllocateRaw(size_in_bytes);
- } else if (AddFreshPage()) {
- // Switched to new page. Try allocating again.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(
- bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
- top_on_previous_step_ = to_space_.page_low();
- return AllocateRaw(size_in_bytes);
- } else {
- return Failure::RetryAfterGC();
- }
- }
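 - // Control-flow sketch (illustrative): the inline fast path bumps
 - // allocation_info_.top; when top would cross limit it calls
 - // SlowAllocateRaw(), which either (a) raises an artificially lowered limit
 - // after paying an incremental-marking Step(), or (b) advances to a fresh
 - // page via AddFreshPage(), and then retries AllocateRaw(). Only when both
 - // options are exhausted does the caller see Failure::RetryAfterGC().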
- #ifdef VERIFY_HEAP
- // We do not use the SemiSpaceIterator because verification doesn't assume
- // that it works (it depends on the invariants we are checking).
- void NewSpace::Verify() {
- // The allocation pointer should be in the space or at the very end.
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.first_page()->area_start();
- CHECK_EQ(current, to_space_.space_start());
- while (current != top()) {
- if (!NewSpacePage::IsAtEnd(current)) {
- // The allocation pointer should not be in the middle of an object.
- CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
- current < top());
- HeapObject* object = HeapObject::FromAddress(current);
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- CHECK(map->IsMap());
- CHECK(heap()->map_space()->Contains(map));
- // The object should not be code or a map.
- CHECK(!object->IsMap());
- CHECK(!object->IsCode());
- // The object itself should look OK.
- object->Verify();
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor;
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, &visitor);
- current += size;
- } else {
- // At end of page, switch to next page.
- NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
- // Next page should be valid.
- CHECK(!page->is_anchor());
- current = page->area_start();
- }
- }
- // Check semi-spaces.
- CHECK_EQ(from_space_.id(), kFromSpace);
- CHECK_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
- }
- #endif
- // -----------------------------------------------------------------------------
- // SemiSpace implementation
- void SemiSpace::SetUp(Address start,
- int initial_capacity,
- int maximum_capacity) {
 - // Creates a space in the young generation. This set-up does not allocate
 - // memory from the OS. A SemiSpace is given a contiguous chunk of
- // memory of size 'capacity' when set up, and does not grow or shrink
- // otherwise. In the mark-compact collector, the memory region of the from
- // space is used as the marking stack. It requires contiguous memory
- // addresses.
- ASSERT(maximum_capacity >= Page::kPageSize);
- initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
- capacity_ = initial_capacity;
- maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- committed_ = false;
- start_ = start;
- address_mask_ = ~(maximum_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
- age_mark_ = start_;
- }
- void SemiSpace::TearDown() {
- start_ = NULL;
- capacity_ = 0;
- }
- bool SemiSpace::Commit() {
- ASSERT(!is_committed());
- int pages = capacity_ / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - pages * Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
- capacity_,
- executable())) {
- return …
(File truncated; the remainder of spaces.cc is not shown.)