Revert of "[heap] Switch to 500k pages" (patchset #1 id:1 of https://codereview.chromium.org/2278653003/ )

Reason for revert:
Tanks pretty much all metrics across the board. Probably the LO space limit is too low, but it needs investigation.

Original issue's description:
> [heap] Switch to 500k pages
>
> Decrease regular heap object size to 400k. In a follow up, we can now get rid of
> the new space border page while keeping the 1M minimum new space size.
>
> This reverts commit 1617043.
>
> BUG=chromium:636331
>
> Committed: https://crrev.com/2101e691caeef656eb91f1c98620b3955d337c83
> Cr-Commit-Position: refs/heads/master@{#38916}

[email protected],[email protected]
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=chromium:636331
NOPRESUBMIT=true

Review-Url: https://codereview.chromium.org/2289493002
Cr-Commit-Position: refs/heads/master@{#38960}
mlippautz authored and Commit bot committed Aug 28, 2016
1 parent 5127dc0 commit 933195a
Showing 14 changed files with 131 additions and 296 deletions.
2 changes: 1 addition & 1 deletion src/base/build_config.h

@@ -200,7 +200,7 @@
 // Bump up for Power Linux due to larger (64K) page size.
 const int kPageSizeBits = 22;
 #else
-const int kPageSizeBits = 19;
+const int kPageSizeBits = 20;
 #endif
 
 #endif  // V8_BASE_BUILD_CONFIG_H_
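
For context, V8 derives its page size from this constant as 1 << kPageSizeBits (Page::kPageSize in spaces.h), so the reverted CL's value of 19 meant 512 KB ("500k") pages, while 20 restores 1 MB pages. A minimal standalone sketch of that relationship (not V8 source; the derivation mirrors Page::kPageSize):

#include <cstddef>
#include <cstdio>

// Standalone sketch: page size as a function of kPageSizeBits.
// 19 was the "500k pages" experiment; 20 is the value restored here.
constexpr size_t PageSizeFromBits(int bits) { return size_t{1} << bits; }

int main() {
  std::printf("kPageSizeBits = 19 -> %zu KB pages\n", PageSizeFromBits(19) / 1024);  // 512
  std::printf("kPageSizeBits = 20 -> %zu KB pages\n", PageSizeFromBits(20) / 1024);  // 1024
  return 0;
}
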
13 changes: 5 additions & 8 deletions src/heap/heap.cc

@@ -78,7 +78,7 @@ Heap::Heap()
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
-      initial_semispace_size_(MB),
+      initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
@@ -5435,19 +5435,16 @@ void Heap::PrintAlloctionsHash() {
 
 
 void Heap::NotifyDeserializationComplete() {
-  DCHECK_EQ(0, gc_count());
+  deserialization_complete_ = true;
+#ifdef DEBUG
+  // All pages right after bootstrapping must be marked as never-evacuate.
   PagedSpaces spaces(this);
   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
-    if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
-#ifdef DEBUG
-    // All pages right after bootstrapping must be marked as never-evacuate.
     for (Page* p : *s) {
       CHECK(p->NeverEvacuate());
     }
-#endif  // DEBUG
   }
-
-  deserialization_complete_ = true;
+#endif  // DEBUG
 }
 
 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
8 changes: 4 additions & 4 deletions src/heap/mark-compact.cc

@@ -600,18 +600,18 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
   // For memory reducing and optimize for memory mode we directly define both
   // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
   const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
 
   // For regular mode (which is latency critical) we define less aggressive
   // defaults to start and switch to a trace-based (using compaction speed)
   // approach as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * MB;
+  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
   // Time to take for a single area (=payload of page). Used as soon as there
   // exist enough compaction speed samples.
-  const float kTargetMsPerArea = 0.5;
+  const int kTargetMsPerArea = 1;
 
   if (heap()->ShouldReduceMemory()) {
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
@@ -3221,7 +3221,7 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
-  const double kTargetCompactionTimeInMs = .5;
+  const double kTargetCompactionTimeInMs = 1;
   const int kNumSweepingTasks = 3;
 
   double compaction_speed =
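
The restored constants re-couple the evacuation byte limits to the page size: with 1 MB pages, 12 * Page::kPageSize equals the 12 * MB the reverted CL had hard-coded, but under that CL's 512 KB pages the page-coupled expression would have halved the budgets, which is why the CL switched to MB-based constants. A small sketch of the equivalence, assuming the restored kPageSizeBits = 20:

#include <cstddef>
#include <cstdio>

// Sketch: with 1 MB pages the page-coupled evacuation limits equal the
// MB-based values the reverted CL had hard-coded.
constexpr size_t MB = size_t{1} << 20;
constexpr size_t kPageSize = size_t{1} << 20;  // assumed: restored kPageSizeBits = 20

constexpr size_t kMaxEvacuatedBytesForReduceMemory = 12 * kPageSize;
constexpr size_t kMaxEvacuatedBytes = 4 * kPageSize;

static_assert(kMaxEvacuatedBytesForReduceMemory == 12 * MB, "identical with 1 MB pages");
static_assert(kMaxEvacuatedBytes == 4 * MB, "identical with 1 MB pages");

int main() {
  std::printf("reduce-memory cap: %zu MB, regular cap: %zu MB\n",
              kMaxEvacuatedBytesForReduceMemory / MB, kMaxEvacuatedBytes / MB);
  return 0;
}
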
81 changes: 10 additions & 71 deletions src/heap/spaces.cc

@@ -617,21 +617,6 @@ void MemoryChunk::Unlink() {
   set_next_chunk(NULL);
 }
 
-void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
-  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
-  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
-  Address free_start = chunk->area_end_ - bytes_to_shrink;
-  // Don't adjust the size of the page. The area is just uncomitted but not
-  // released.
-  chunk->area_end_ -= bytes_to_shrink;
-  UncommitBlock(free_start, bytes_to_shrink);
-  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
-    if (chunk->reservation_.IsReserved())
-      chunk->reservation_.Guard(chunk->area_end_);
-    else
-      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
-  }
-}
 
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                             intptr_t commit_area_size,
@@ -779,47 +764,6 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
 
-size_t Page::ShrinkToHighWaterMark() {
-  // Shrink pages to high water mark. The water mark points either to a filler
-  // or the area_end.
-  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
-  if (filler->address() == area_end()) return 0;
-  CHECK(filler->IsFiller());
-  if (!filler->IsFreeSpace()) return 0;
-
-#ifdef DEBUG
-  // Check the the filler is indeed the last filler on the page.
-  HeapObjectIterator it(this);
-  HeapObject* filler2 = nullptr;
-  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
-    filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
-  }
-  if (filler2 == nullptr || filler2->address() == area_end()) return 0;
-  DCHECK(filler2->IsFiller());
-  DCHECK_EQ(filler->address(), filler2->address());
-#endif  // DEBUG
-
-  size_t unused = RoundDown(
-      static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
-      base::OS::CommitPageSize());
-  if (unused > 0) {
-    if (FLAG_trace_gc_verbose) {
-      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
-                   reinterpret_cast<void*>(this),
-                   reinterpret_cast<void*>(area_end()),
-                   reinterpret_cast<void*>(area_end() - unused));
-    }
-    heap()->CreateFillerObjectAt(
-        filler->address(),
-        static_cast<int>(area_end() - filler->address() - unused),
-        ClearRecordedSlots::kNo);
-    heap()->memory_allocator()->ShrinkChunk(this, unused);
-    CHECK(filler->IsFiller());
-    CHECK_EQ(filler->address() + filler->Size(), area_end());
-  }
-  return unused;
-}
-
 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
                                         Address start_free) {
   // We do not allow partial shrink for code.
@@ -1291,25 +1235,17 @@ Object* PagedSpace::FindObject(Address addr) {
   return Smi::FromInt(0);
 }
 
-void PagedSpace::ShrinkImmortalImmovablePages() {
-  DCHECK(!heap()->deserialization_complete());
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  EmptyAllocationInfo();
-  ResetFreeList();
-
-  for (Page* page : *this) {
-    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
-    size_t unused = page->ShrinkToHighWaterMark();
-    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
-    AccountUncommitted(unused);
+bool PagedSpace::Expand() {
+  int size = AreaSize();
+  if (snapshotable() && !HasPages()) {
+    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
   }
-}
 
-bool PagedSpace::Expand() {
-  const int size = AreaSize();
   if (!heap()->CanExpandOldGeneration(size)) return false;
 
   Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
   if (p == nullptr) return false;
+
   AccountCommitted(static_cast<intptr_t>(p->size()));
+
   // Pages created during bootstrapping may contain immortal immovable objects.
@@ -1400,6 +1336,7 @@ void PagedSpace::IncreaseCapacity(size_t bytes) {
 
 void PagedSpace::ReleasePage(Page* page) {
   DCHECK_EQ(page->LiveBytes(), 0);
+  DCHECK_EQ(AreaSize(), page->area_size());
   DCHECK_EQ(page->owner(), this);
 
   free_list_.EvictFreeListItems(page);
@@ -1418,8 +1355,10 @@ void PagedSpace::ReleasePage(Page* page) {
   }
 
   AccountUncommitted(static_cast<intptr_t>(page->size()));
-  accounting_stats_.ShrinkSpace(page->area_size());
   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+
+  DCHECK(Capacity() > 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
 }
 
 #ifdef DEBUG
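
The removed Page::ShrinkToHighWaterMark uncommitted only whole OS commit pages: the slack between the last object (the trailing filler) and area_end is rounded down to the commit page size, and FreeSpace::kSize bytes are kept so the filler remains a valid free-space object. A sketch of that rounding with assumed sizes (the helper and the constants below are illustrative, not V8's):

#include <cstddef>
#include <cstdio>

// Sketch of the (now-removed) shrink computation: round the unused tail of a
// page down to whole OS commit pages. Sizes are assumptions for illustration.
constexpr size_t RoundDown(size_t value, size_t granularity) {
  return value - value % granularity;
}

int main() {
  const size_t kCommitPageSize = 4096;  // assumed OS commit page size
  const size_t kFreeSpaceSize = 24;     // assumed FreeSpace::kSize
  const size_t slack = 300000;          // bytes between filler and area_end
  const size_t unused = RoundDown(slack - kFreeSpaceSize, kCommitPageSize);
  std::printf("uncommit %zu of %zu slack bytes\n", unused, slack);  // 299008
  return 0;
}
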
15 changes: 2 additions & 13 deletions src/heap/spaces.h

@@ -235,10 +235,7 @@ class MemoryChunk {
     IN_TO_SPACE,  // All pages in new space has one of these two set.
     NEW_SPACE_BELOW_AGE_MARK,
     EVACUATION_CANDIDATE,
-
-    // |NEVER_EVACUATE|: A page tagged with this flag will never be selected
-    // for evacuation. Typically used for immortal immovable pages.
-    NEVER_EVACUATE,
+    NEVER_EVACUATE,  // May contain immortal immutables.
 
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
@@ -725,7 +722,7 @@ class Page : public MemoryChunk {
   // account.
   // TODO(hpayer): This limit should be way smaller but we currently have
   // short living objects >256K.
-  static const int kMaxRegularHeapObjectSize = 400 * KB;
+  static const int kMaxRegularHeapObjectSize = 600 * KB;
 
   static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
 
@@ -826,8 +823,6 @@ class Page : public MemoryChunk {
     available_in_free_list_.Increment(available);
   }
 
-  size_t ShrinkToHighWaterMark();
-
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
@@ -1320,8 +1315,6 @@ class MemoryAllocator {
                              intptr_t commit_area_size,
                              Executability executable, Space* space);
 
-  void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
-
   Address ReserveAlignedMemory(size_t requested, size_t alignment,
                                base::VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
@@ -2203,10 +2196,6 @@ class PagedSpace : public Space {
   iterator begin() { return iterator(anchor_.next_page()); }
   iterator end() { return iterator(&anchor_); }
 
-  // Shrink immortal immovable pages of the space to be exactly the size needed
-  // using the high water mark.
-  void ShrinkImmortalImmovablePages();
-
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
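
kMaxRegularHeapObjectSize is the cutoff between regular pages and large-object (LO) space: anything bigger is allocated as a large object. Restoring 600 KB (against 1 MB pages) sends fewer objects to LO space than the reverted CL's 400 KB limit, which matches the revert reason above ("Probably LO space limit too low"). A hypothetical sketch of the cutoff (the helper name is invented; the real check is spread across V8's allocation paths):

#include <cstddef>
#include <cstdio>

// Hypothetical sketch: objects above kMaxRegularHeapObjectSize bypass normal
// pages and go to large-object (LO) space.
constexpr size_t KB = 1024;
constexpr size_t kMaxRegularHeapObjectSize = 600 * KB;  // value restored here

bool AllocatesInLargeObjectSpace(size_t size_in_bytes) {
  return size_in_bytes > kMaxRegularHeapObjectSize;
}

int main() {
  std::printf("500 KB -> LO space? %s\n", AllocatesInLargeObjectSpace(500 * KB) ? "yes" : "no");
  std::printf("700 KB -> LO space? %s\n", AllocatesInLargeObjectSpace(700 * KB) ? "yes" : "no");
  return 0;
}
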
22 changes: 9 additions & 13 deletions src/isolate.cc

@@ -2409,19 +2409,13 @@ bool Isolate::Init(Deserializer* des) {
   runtime_profiler_ = new RuntimeProfiler(this);
 
   // If we are deserializing, read the state into the now-empty heap.
-  {
-    AlwaysAllocateScope always_allocate(this);
-
-    if (!create_heap_objects) {
-      des->Deserialize(this);
-    }
-    load_stub_cache_->Initialize();
-    store_stub_cache_->Initialize();
-    if (FLAG_ignition || serializer_enabled()) {
-      interpreter_->Initialize();
-    }
-
-    heap_.NotifyDeserializationComplete();
+  if (!create_heap_objects) {
+    des->Deserialize(this);
+  }
+  load_stub_cache_->Initialize();
+  store_stub_cache_->Initialize();
+  if (FLAG_ignition || serializer_enabled()) {
+    interpreter_->Initialize();
   }
 
   // Finish initialization of ThreadLocal after deserialization is done.
@@ -2452,6 +2446,8 @@ bool Isolate::Init(Deserializer* des) {
 
   time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
 
+  heap_.NotifyDeserializationComplete();
+
   if (!create_heap_objects) {
     // Now that the heap is consistent, it's OK to generate the code for the
     // deopt entry table that might have been referred to by optimized code in
5 changes: 2 additions & 3 deletions src/objects.h

@@ -4800,7 +4800,6 @@ class FreeSpace: public HeapObject {
   // Size is smi tagged when it is stored.
   static const int kSizeOffset = HeapObject::kHeaderSize;
   static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
-  static const int kSize = kNextOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
@@ -10562,12 +10561,12 @@ class JSArray: public JSObject {
   static const int kLengthOffset = JSObject::kHeaderSize;
   static const int kSize = kLengthOffset + kPointerSize;
 
-  // 400 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
+  // 600 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
   // we do not want to include in objects.h
   // Note that Page::kMaxRegularHeapObjectSize has to be in sync with
   // kInitialMaxFastElementArray which is checked in a DCHECK in heap.cc.
   static const int kInitialMaxFastElementArray =
-      (400 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
+      (600 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
       kPointerSize;
 
  private:
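
The kInitialMaxFastElementArray bound backs out how many elements still fit a regular-page allocation: the 600 KB object budget minus the FixedArray header, the JSArray itself, and a potential AllocationMemento, divided by the pointer size. A worked sketch with assumed 64-bit layout constants (the header sizes below are illustrative stand-ins, not copied from objects.h):

#include <cstdio>

// Sketch of the bound above, with assumed 64-bit sizes.
constexpr int KB = 1024;
constexpr int kPointerSize = 8;
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;   // map + length
constexpr int kJSArraySize = 4 * kPointerSize;            // JSObject header + length
constexpr int kAllocationMementoSize = 2 * kPointerSize;  // map + allocation site

constexpr int kInitialMaxFastElementArray =
    (600 * KB - kFixedArrayHeaderSize - kJSArraySize - kAllocationMementoSize) /
    kPointerSize;

int main() {
  // (614400 - 16 - 32 - 16) / 8 = 76792 elements under these assumptions.
  std::printf("%d\n", kInitialMaxFastElementArray);
  return 0;
}
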