From 2878fc015fec00bd60970aab1c6ec056cfb6aa39 Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Sun, 6 Sep 2020 09:09:14 +0800 Subject: [PATCH 01/13] 8252103: Parallel heap inspection for ParallelScavengeHeap - Parallel heap iteration support for PSS - JBS: https://bugs.openjdk.java.net/browse/JDK-8252103 --- .../gc/parallel/parallelScavengeHeap.cpp | 73 +++++++++++++++++++ .../gc/parallel/parallelScavengeHeap.hpp | 30 ++++++++ src/hotspot/share/gc/parallel/psOldGen.cpp | 46 +++++++++++- src/hotspot/share/gc/parallel/psOldGen.hpp | 7 ++ 4 files changed, 155 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index ec2cce62fbe0d..1d4cf08550167 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -539,6 +539,79 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { old_gen()->object_iterate(cl); } +void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, + uint worker_id, + HeapBlockClaimer* claimer) { + uint block_index; + // Iterate until all blocks are claimed + while (claimer->claim_and_get_block(&block_index)) { + if (block_index == HeapBlockClaimer::eden_index) { + young_gen()->eden_space()->object_iterate(cl); + } else if (block_index == HeapBlockClaimer::survivor_index) { + young_gen()->from_space()->object_iterate(cl); + young_gen()->to_space()->object_iterate(cl); + } else { + uint index = block_index - HeapBlockClaimer::num_inseparable_spaces; + old_gen()->block_iterate(cl, index); + } + } +} + +HeapBlockClaimer::HeapBlockClaimer(uint n_workers) : + _n_workers(n_workers), _n_blocks(0), _claims(NULL) { + assert(n_workers > 0, "Need at least one worker."); + size_t old_gen_used = ParallelScavengeHeap::heap()->old_gen()->used_in_bytes(); + size_t block_size = ParallelScavengeHeap::heap()->old_gen()->iterate_block_size(); + uint n_blocks_in_old = old_gen_used / 
block_size + 1; + _n_blocks = n_blocks_in_old + num_inseparable_spaces; + _unclaimed_index = 0; + uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_blocks, mtGC); + memset(new_claims, Unclaimed, sizeof(*_claims) * _n_blocks); + _claims = new_claims; +} + +HeapBlockClaimer::~HeapBlockClaimer() { + FREE_C_HEAP_ARRAY(uint, _claims); +} + +bool HeapBlockClaimer::claim_and_get_block(uint* block_index) { + assert(block_index != NULL, "Invalid index pointer"); + uint next_index = Atomic::load(&_unclaimed_index); + while (true) { + if (next_index >= _n_blocks) { + return false; + } + uint old_val = Atomic::cmpxchg(&_claims[next_index], Unclaimed, Claimed); + if (old_val == Unclaimed) { + *block_index = next_index; + Atomic::inc(&_unclaimed_index); + return true; + } + next_index = Atomic::load(&_unclaimed_index); + } +} + +class PSScavengeParallelObjectIterator : public ParallelObjectIterator { +private: + uint _thread_num; + ParallelScavengeHeap* _heap; + HeapBlockClaimer _claimer; + +public: + PSScavengeParallelObjectIterator(uint thread_num) : + _thread_num(thread_num), + _heap(ParallelScavengeHeap::heap()), + _claimer(thread_num == 0 ? 
ParallelScavengeHeap::heap()->workers().active_workers() : thread_num) {} + + virtual void object_iterate(ObjectClosure* cl, uint worker_id) { + _heap->object_iterate_parallel(cl, worker_id, &_claimer); + } +}; + +ParallelObjectIterator* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) { + return new PSScavengeParallelObjectIterator(thread_num); +} + HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { if (young_gen()->is_in_reserved(addr)) { assert(young_gen()->is_in(addr), diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp index 64da505ec23aa..e99d30f1469d6 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -45,6 +45,7 @@ class AdjoiningGenerations; class GCHeapSummary; +class HeapBlockClaimer; class MemoryManager; class MemoryPool; class PSAdaptiveSizePolicy; @@ -53,6 +54,7 @@ class PSHeapSummary; class ParallelScavengeHeap : public CollectedHeap { friend class VMStructs; + friend class HeapBlockClaimer; private: static PSYoungGen* _young_gen; static PSOldGen* _old_gen; @@ -209,6 +211,8 @@ class ParallelScavengeHeap : public CollectedHeap { size_t unsafe_max_tlab_alloc(Thread* thr) const; void object_iterate(ObjectClosure* cl); + void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapBlockClaimer* claimer); + virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num); HeapWord* block_start(const void* addr) const; bool block_is_obj(const HeapWord* addr) const; @@ -288,4 +292,30 @@ class AdaptiveSizePolicyOutput : AllStatic { } }; +// The HeapBlockClaimer is used during parallel iteration over heap, +// allowing workers to claim heap blocks, gaining exclusive rights to these blocks. +// The eden, survivor spaces are treated as single blocks as it is hard to divide +// these spaces. +// The old spaces are divided into serveral fixed-size blocks. 
+class HeapBlockClaimer : public StackObj { + uint _n_workers; + uint _n_blocks; + uint _unclaimed_index; + volatile uint* _claims; + + static const uint Unclaimed = 0; + static const uint Claimed = 1; + + public: + HeapBlockClaimer(uint n_workers); + ~HeapBlockClaimer(); + + // Claim the block and get the block index. + bool claim_and_get_block(uint* block_index); + + static const uint eden_index = 0; + static const uint survivor_index = 1; + static const uint num_inseparable_spaces = 2; +}; + #endif // SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index fe4480294ca60..83630f25154d6 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -41,7 +41,8 @@ PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size, size_t max_size, const char* perf_data_name, int level): _min_gen_size(min_size), - _max_gen_size(max_size) + _max_gen_size(max_size), + _iterate_block_size(1024 * 1024) // 1M (HeapWord) { initialize(rs, initial_size, GenAlignment, perf_data_name, level); } @@ -171,6 +172,49 @@ HeapWord* PSOldGen::allocate(size_t word_size) { return res; } +/* + * Divide space into blocks, processes block begins at + * bottom + block_index * _iterate_block_size. + * NOTE: + * - The initial block start address may not be a valid + * object address, _start_array is used to correct it. + * + * - The end address is not necessary to be object address. + * + * - If there is an object that crosses blocks, it is + * processed by the worker that owns the block within + * which the object starts. 
+ * + */ +void PSOldGen::block_iterate(ObjectClosure* cl, uint block_index) { + MutableSpace *space = object_space(); + HeapWord* bottom = space->bottom(); + HeapWord* top = space->top(); + HeapWord* begin = bottom + block_index * _iterate_block_size; + + assert((_iterate_block_size % (ObjectStartArray::block_size)) == 0, + "BLOCK SIZE not a multiple of start_array block"); + + // iterate objects in block. + HeapWord* end = MIN2(top, begin + _iterate_block_size); + // There can be no object between begin and end. + if (start_array()->object_starts_in_range(begin, end)) { + // There are objects in the range. Find the object of begin address. + // Note that object_start() can return the last object in previous block, + // and the object is processed by other worker. Here only focus objects that + // fall into the current block. + HeapWord* start = start_array()->object_start(begin); + if (start < begin) { + start += oop(start)->size(); + } + assert(begin <= start && start < end, + "object %p must in the range of [%p, %p)\n", start, begin, end); + for (HeapWord* p = start; p < end; p += oop(p)->size()) { + cl->do_object(oop(p)); + } + } +} + HeapWord* PSOldGen::expand_and_allocate(size_t word_size) { expand(word_size*HeapWordSize); if (GCExpandToAllocateDelayMillis > 0) { diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index f7e43af064f1c..ffb3d24d0227d 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -52,6 +52,9 @@ class PSOldGen : public CHeapObj { const size_t _min_gen_size; const size_t _max_gen_size; + // Block size for parallel iteration + const size_t _iterate_block_size; + #ifdef ASSERT void assert_block_in_covered_region(MemRegion new_memregion) { // Explictly capture current covered_region in a local @@ -123,6 +126,7 @@ class PSOldGen : public CHeapObj { MemRegion reserved() const { return _reserved; } size_t max_gen_size() const { return 
_max_gen_size; } size_t min_gen_size() const { return _min_gen_size; } + size_t iterate_block_size() const { return _iterate_block_size; } bool is_in(const void* p) const { return _virtual_space->contains((void *)p); @@ -162,6 +166,9 @@ class PSOldGen : public CHeapObj { // Iteration. void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); } void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); } + // Iterate block with given block_index + void block_iterate(ObjectClosure* cl, uint block_index); + // Debugging - do not use for time critical operations void print() const; From d1ca71d8aa86bdb24ba1f970fbbef9108e0b28a7 Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Mon, 26 Oct 2020 15:18:56 +0800 Subject: [PATCH 02/13] Refine HeapBlockClaimer implementation --- .../gc/parallel/parallelScavengeHeap.cpp | 50 +++++-------------- .../gc/parallel/parallelScavengeHeap.hpp | 27 +++------- src/hotspot/share/gc/parallel/psOldGen.cpp | 22 ++++---- src/hotspot/share/gc/parallel/psOldGen.hpp | 6 ++- 4 files changed, 35 insertions(+), 70 deletions(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 1d4cf08550167..297b155d4ae22 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -540,55 +540,29 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { } void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, - uint worker_id, HeapBlockClaimer* claimer) { - uint block_index; + int block_index; // Iterate until all blocks are claimed while (claimer->claim_and_get_block(&block_index)) { - if (block_index == HeapBlockClaimer::eden_index) { + if (block_index == HeapBlockClaimer::EdenIndex) { young_gen()->eden_space()->object_iterate(cl); - } else if (block_index == HeapBlockClaimer::survivor_index) { + } else if (block_index == HeapBlockClaimer::SurvivorIndex) { 
young_gen()->from_space()->object_iterate(cl); young_gen()->to_space()->object_iterate(cl); } else { - uint index = block_index - HeapBlockClaimer::num_inseparable_spaces; - old_gen()->block_iterate(cl, index); + old_gen()->block_iterate(cl, block_index); } } } -HeapBlockClaimer::HeapBlockClaimer(uint n_workers) : - _n_workers(n_workers), _n_blocks(0), _claims(NULL) { - assert(n_workers > 0, "Need at least one worker."); - size_t old_gen_used = ParallelScavengeHeap::heap()->old_gen()->used_in_bytes(); - size_t block_size = ParallelScavengeHeap::heap()->old_gen()->iterate_block_size(); - uint n_blocks_in_old = old_gen_used / block_size + 1; - _n_blocks = n_blocks_in_old + num_inseparable_spaces; - _unclaimed_index = 0; - uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_blocks, mtGC); - memset(new_claims, Unclaimed, sizeof(*_claims) * _n_blocks); - _claims = new_claims; -} - -HeapBlockClaimer::~HeapBlockClaimer() { - FREE_C_HEAP_ARRAY(uint, _claims); -} - -bool HeapBlockClaimer::claim_and_get_block(uint* block_index) { +bool HeapBlockClaimer::claim_and_get_block(int* block_index) { assert(block_index != NULL, "Invalid index pointer"); - uint next_index = Atomic::load(&_unclaimed_index); - while (true) { - if (next_index >= _n_blocks) { - return false; - } - uint old_val = Atomic::cmpxchg(&_claims[next_index], Unclaimed, Claimed); - if (old_val == Unclaimed) { - *block_index = next_index; - Atomic::inc(&_unclaimed_index); - return true; - } - next_index = Atomic::load(&_unclaimed_index); + *block_index = Atomic::fetch_and_add(&_claimed_index, 1); + int itrable_blocks = ParallelScavengeHeap::heap()->old_gen()->iterable_blocks(); + if (*block_index >= itrable_blocks) { + return false; } + return true; } class PSScavengeParallelObjectIterator : public ParallelObjectIterator { @@ -601,10 +575,10 @@ class PSScavengeParallelObjectIterator : public ParallelObjectIterator { PSScavengeParallelObjectIterator(uint thread_num) : _thread_num(thread_num), 
_heap(ParallelScavengeHeap::heap()), - _claimer(thread_num == 0 ? ParallelScavengeHeap::heap()->workers().active_workers() : thread_num) {} + _claimer() {} virtual void object_iterate(ObjectClosure* cl, uint worker_id) { - _heap->object_iterate_parallel(cl, worker_id, &_claimer); + _heap->object_iterate_parallel(cl, &_claimer); } }; diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp index e99d30f1469d6..1a7aed1667c3f 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -211,7 +211,7 @@ class ParallelScavengeHeap : public CollectedHeap { size_t unsafe_max_tlab_alloc(Thread* thr) const; void object_iterate(ObjectClosure* cl); - void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapBlockClaimer* claimer); + void object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer); virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num); HeapWord* block_start(const void* addr) const; @@ -294,28 +294,17 @@ class AdaptiveSizePolicyOutput : AllStatic { // The HeapBlockClaimer is used during parallel iteration over heap, // allowing workers to claim heap blocks, gaining exclusive rights to these blocks. -// The eden, survivor spaces are treated as single blocks as it is hard to divide +// The eden and survivor spaces are treated as single blocks as it is hard to divide // these spaces. // The old spaces are divided into serveral fixed-size blocks. class HeapBlockClaimer : public StackObj { - uint _n_workers; - uint _n_blocks; - uint _unclaimed_index; - volatile uint* _claims; - - static const uint Unclaimed = 0; - static const uint Claimed = 1; - - public: - HeapBlockClaimer(uint n_workers); - ~HeapBlockClaimer(); - + int _claimed_index; + public: + HeapBlockClaimer() : _claimed_index(EdenIndex) { } // Claim the block and get the block index. 
- bool claim_and_get_block(uint* block_index); - - static const uint eden_index = 0; - static const uint survivor_index = 1; - static const uint num_inseparable_spaces = 2; + bool claim_and_get_block(int* block_index); + static const int EdenIndex = -2; + static const int SurvivorIndex = -1; }; #endif // SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index 83630f25154d6..4d63fd378feee 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -41,8 +41,7 @@ PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size, size_t max_size, const char* perf_data_name, int level): _min_gen_size(min_size), - _max_gen_size(max_size), - _iterate_block_size(1024 * 1024) // 1M (HeapWord) + _max_gen_size(max_size) { initialize(rs, initial_size, GenAlignment, perf_data_name, level); } @@ -174,7 +173,7 @@ HeapWord* PSOldGen::allocate(size_t word_size) { /* * Divide space into blocks, processes block begins at - * bottom + block_index * _iterate_block_size. + * bottom + block_index * (_iterate_block_size / HeapWordSize). * NOTE: * - The initial block start address may not be a valid * object address, _start_array is used to correct it. @@ -190,19 +189,20 @@ void PSOldGen::block_iterate(ObjectClosure* cl, uint block_index) { MutableSpace *space = object_space(); HeapWord* bottom = space->bottom(); HeapWord* top = space->top(); - HeapWord* begin = bottom + block_index * _iterate_block_size; + size_t block_word_size = _iterate_block_size / HeapWordSize; + HeapWord* begin = bottom + block_index * block_word_size; - assert((_iterate_block_size % (ObjectStartArray::block_size)) == 0, + assert((block_word_size % (ObjectStartArray::block_size)) == 0, "BLOCK SIZE not a multiple of start_array block"); // iterate objects in block. 
- HeapWord* end = MIN2(top, begin + _iterate_block_size); - // There can be no object between begin and end. + HeapWord* end = MIN2(top, begin + block_word_size); + // Only iterate if there are objects between begin and end. if (start_array()->object_starts_in_range(begin, end)) { - // There are objects in the range. Find the object of begin address. - // Note that object_start() can return the last object in previous block, - // and the object is processed by other worker. Here only focus objects that - // fall into the current block. + // Process objects in the range, start from finding object at the begining + // address. Note that object_start() can return the last object in previous + // block, and that object is processed by other worker scanning that block. + // So here only focus on objects that fall into the current block. HeapWord* start = start_array()->object_start(begin); if (start < begin) { start += oop(start)->size(); diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index ffb3d24d0227d..f9c89f61a7f06 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -53,7 +53,7 @@ class PSOldGen : public CHeapObj { const size_t _max_gen_size; // Block size for parallel iteration - const size_t _iterate_block_size; + static const size_t _iterate_block_size = 1024 * 1024; #ifdef ASSERT void assert_block_in_covered_region(MemRegion new_memregion) { @@ -126,7 +126,6 @@ class PSOldGen : public CHeapObj { MemRegion reserved() const { return _reserved; } size_t max_gen_size() const { return _max_gen_size; } size_t min_gen_size() const { return _min_gen_size; } - size_t iterate_block_size() const { return _iterate_block_size; } bool is_in(const void* p) const { return _virtual_space->contains((void *)p); @@ -166,6 +165,9 @@ class PSOldGen : public CHeapObj { // Iteration. 
void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); } void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); } + uint iterable_blocks() { + return (object_space()->used_in_bytes() + _iterate_block_size -1) / _iterate_block_size; + } // Iterate block with given block_index void block_iterate(ObjectClosure* cl, uint block_index); From 2d432f9571ea436d6d57c6153a8f495b9a42ee27 Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Wed, 28 Oct 2020 19:25:11 +0800 Subject: [PATCH 03/13] fix constant coding style and do code refine --- src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp | 6 ++---- src/hotspot/share/gc/parallel/psOldGen.cpp | 4 ++-- src/hotspot/share/gc/parallel/psOldGen.hpp | 4 ++-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 297b155d4ae22..8882e94754d9a 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -567,13 +567,11 @@ bool HeapBlockClaimer::claim_and_get_block(int* block_index) { class PSScavengeParallelObjectIterator : public ParallelObjectIterator { private: - uint _thread_num; ParallelScavengeHeap* _heap; HeapBlockClaimer _claimer; public: - PSScavengeParallelObjectIterator(uint thread_num) : - _thread_num(thread_num), + PSScavengeParallelObjectIterator() : _heap(ParallelScavengeHeap::heap()), _claimer() {} @@ -583,7 +581,7 @@ class PSScavengeParallelObjectIterator : public ParallelObjectIterator { }; ParallelObjectIterator* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) { - return new PSScavengeParallelObjectIterator(thread_num); + return new PSScavengeParallelObjectIterator(); } HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index 
4d63fd378feee..904b5c4f1ecb9 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -173,7 +173,7 @@ HeapWord* PSOldGen::allocate(size_t word_size) { /* * Divide space into blocks, processes block begins at - * bottom + block_index * (_iterate_block_size / HeapWordSize). + * bottom + block_index * (IterateBlockSize / HeapWordSize). * NOTE: * - The initial block start address may not be a valid * object address, _start_array is used to correct it. @@ -189,7 +189,7 @@ void PSOldGen::block_iterate(ObjectClosure* cl, uint block_index) { MutableSpace *space = object_space(); HeapWord* bottom = space->bottom(); HeapWord* top = space->top(); - size_t block_word_size = _iterate_block_size / HeapWordSize; + size_t block_word_size = IterateBlockSize / HeapWordSize; HeapWord* begin = bottom + block_index * block_word_size; assert((block_word_size % (ObjectStartArray::block_size)) == 0, diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index f9c89f61a7f06..7b6592069ef24 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -53,7 +53,7 @@ class PSOldGen : public CHeapObj { const size_t _max_gen_size; // Block size for parallel iteration - static const size_t _iterate_block_size = 1024 * 1024; + static const size_t IterateBlockSize = 1024 * 1024; #ifdef ASSERT void assert_block_in_covered_region(MemRegion new_memregion) { @@ -166,7 +166,7 @@ class PSOldGen : public CHeapObj { void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); } void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); } uint iterable_blocks() { - return (object_space()->used_in_bytes() + _iterate_block_size -1) / _iterate_block_size; + return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize; } // Iterate block with given block_index void block_iterate(ObjectClosure* cl, uint block_index); 
From b17c8224200b7000387027f550bdd776ce8d1707 Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Wed, 28 Oct 2020 23:05:46 +0800 Subject: [PATCH 04/13] cast iterable_blocks to int in claim_and_get_block() --- src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 8882e94754d9a..fe461d1c2a247 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -558,7 +558,7 @@ void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, bool HeapBlockClaimer::claim_and_get_block(int* block_index) { assert(block_index != NULL, "Invalid index pointer"); *block_index = Atomic::fetch_and_add(&_claimed_index, 1); - int itrable_blocks = ParallelScavengeHeap::heap()->old_gen()->iterable_blocks(); + int itrable_blocks = (int)ParallelScavengeHeap::heap()->old_gen()->iterable_blocks(); if (*block_index >= itrable_blocks) { return false; } From 9cb4b0294ee9b5739796fa118c347c16211d271a Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Thu, 29 Oct 2020 19:03:48 +0800 Subject: [PATCH 05/13] update the return type of iterable_blocks to size_t --- src/hotspot/share/gc/parallel/psOldGen.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index 7b6592069ef24..c63954f2ef232 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -165,7 +165,7 @@ class PSOldGen : public CHeapObj { // Iteration. 
void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); } void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); } - uint iterable_blocks() { + size_t iterable_blocks() { return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize; } // Iterate block with given block_index From cb75521f40b9eb9c6888edeb4d3c299aa1724335 Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Mon, 2 Nov 2020 23:50:36 +0800 Subject: [PATCH 06/13] Code refine and use ssize_t for HeapBlockClaimer index --- .../gc/parallel/parallelScavengeHeap.cpp | 40 +++++++++++++------ .../gc/parallel/parallelScavengeHeap.hpp | 16 -------- src/hotspot/share/gc/parallel/psOldGen.cpp | 7 ++-- src/hotspot/share/gc/parallel/psOldGen.hpp | 3 +- 4 files changed, 33 insertions(+), 33 deletions(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 1177a31bec80d..6e3b0b8e96a6d 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -539,9 +539,35 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { old_gen()->object_iterate(cl); } +// The HeapBlockClaimer is used during parallel iteration over the heap, +// allowing workers to claim heap blocks, gaining exclusive rights to these blocks. +// The eden and survivor spaces are treated as single blocks as it is hard to divide +// these spaces. +// The old spaces are divided into serveral fixed-size blocks. +class HeapBlockClaimer : public StackObj { + // Index of iterable block, negative values for indexes of young generation spaces, + // zero and positive values for indexes of blocks in old generation space. + ssize_t _claimed_index; + public: + static const ssize_t EdenIndex = -2; + static const ssize_t SurvivorIndex = -1; + + HeapBlockClaimer() : _claimed_index(EdenIndex) { } + // Claim the block and get the block index. 
+ bool claim_and_get_block(ssize_t* block_index) { + assert(block_index != NULL, "Invalid index pointer"); + *block_index = Atomic::fetch_and_add(&_claimed_index, 1); + ssize_t iterable_blocks = (ssize_t)ParallelScavengeHeap::heap()->old_gen()->iterable_blocks(); + if (*block_index >= iterable_blocks) { + return false; + } + return true; + } +}; + void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer) { - int block_index; + ssize_t block_index; // Iterate until all blocks are claimed while (claimer->claim_and_get_block(&block_index)) { if (block_index == HeapBlockClaimer::EdenIndex) { @@ -550,21 +576,11 @@ void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, young_gen()->from_space()->object_iterate(cl); young_gen()->to_space()->object_iterate(cl); } else { - old_gen()->block_iterate(cl, block_index); + old_gen()->block_iterate(cl, (size_t)block_index); } } } -bool HeapBlockClaimer::claim_and_get_block(int* block_index) { - assert(block_index != NULL, "Invalid index pointer"); - *block_index = Atomic::fetch_and_add(&_claimed_index, 1); - int itrable_blocks = (int)ParallelScavengeHeap::heap()->old_gen()->iterable_blocks(); - if (*block_index >= itrable_blocks) { - return false; - } - return true; -} - class PSScavengeParallelObjectIterator : public ParallelObjectIterator { private: ParallelScavengeHeap* _heap; diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp index 63d84ce536055..77f9ce1dc43c3 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -54,7 +54,6 @@ class PSHeapSummary; class ParallelScavengeHeap : public CollectedHeap { friend class VMStructs; - friend class HeapBlockClaimer; private: static PSYoungGen* _young_gen; static PSOldGen* _old_gen; @@ -290,19 +289,4 @@ class AdaptiveSizePolicyOutput : AllStatic { } }; -// The HeapBlockClaimer is used 
during parallel iteration over heap, -// allowing workers to claim heap blocks, gaining exclusive rights to these blocks. -// The eden and survivor spaces are treated as single blocks as it is hard to divide -// these spaces. -// The old spaces are divided into serveral fixed-size blocks. -class HeapBlockClaimer : public StackObj { - int _claimed_index; - public: - HeapBlockClaimer() : _claimed_index(EdenIndex) { } - // Claim the block and get the block index. - bool claim_and_get_block(int* block_index); - static const int EdenIndex = -2; - static const int SurvivorIndex = -1; -}; - #endif // SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index fc13741af8595..adf578e443db0 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -187,7 +187,7 @@ HeapWord* PSOldGen::allocate(size_t word_size) { * which the object starts. * */ -void PSOldGen::block_iterate(ObjectClosure* cl, uint block_index) { +void PSOldGen::block_iterate(ObjectClosure* cl, size_t block_index) { MutableSpace *space = object_space(); HeapWord* bottom = space->bottom(); HeapWord* top = space->top(); @@ -209,8 +209,9 @@ void PSOldGen::block_iterate(ObjectClosure* cl, uint block_index) { if (start < begin) { start += oop(start)->size(); } - assert(begin <= start && start < end, - "object %p must in the range of [%p, %p)\n", start, begin, end); + assert(begin <= start, + "object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT "\n", + start, begin); for (HeapWord* p = start; p < end; p += oop(p)->size()) { cl->do_object(oop(p)); } diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index c63954f2ef232..703ed9f7e5763 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -169,8 +169,7 @@ class PSOldGen : public CHeapObj { return 
(object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize; } // Iterate block with given block_index - void block_iterate(ObjectClosure* cl, uint block_index); - + void block_iterate(ObjectClosure* cl, size_t block_index); // Debugging - do not use for time critical operations void print() const; From 046638c69680f30bd0d0a3ccd71144f5ddd35f86 Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Tue, 3 Nov 2020 00:11:43 +0800 Subject: [PATCH 07/13] fix issue of slowdebug build fail --- src/hotspot/share/gc/parallel/psOldGen.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index adf578e443db0..940fb4e3d7eb6 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -211,7 +211,7 @@ void PSOldGen::block_iterate(ObjectClosure* cl, size_t block_index) { } assert(begin <= start, "object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT "\n", - start, begin); + p2i(start), p2i(begin)); for (HeapWord* p = start; p < end; p += oop(p)->size()) { cl->do_object(oop(p)); } From dacae05609cd47794adc38e93d20e7c5831ed43b Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Tue, 3 Nov 2020 17:23:46 +0800 Subject: [PATCH 08/13] remove unnecessary newline symbol in assertion --- src/hotspot/share/gc/parallel/psOldGen.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index 940fb4e3d7eb6..d429347f2043e 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -210,7 +210,7 @@ void PSOldGen::block_iterate(ObjectClosure* cl, size_t block_index) { start += oop(start)->size(); } assert(begin <= start, - "object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT "\n", + "object address " PTR_FORMAT " must be larger or 
equal to block address at " PTR_FORMAT, p2i(start), p2i(begin)); for (HeapWord* p = start; p < end; p += oop(p)->size()) { cl->do_object(oop(p)); From fe89b6ba68b34ad2fe53739e96f672e1d04185d9 Mon Sep 17 00:00:00 2001 From: Thomas Schatzl Date: Tue, 3 Nov 2020 12:45:47 +0100 Subject: [PATCH 09/13] - comments touch-up - use unsigned data type for claim index, removing a few casts - renamings - some minor code simplifications --- .../gc/parallel/parallelScavengeHeap.cpp | 34 +++++----- src/hotspot/share/gc/parallel/psOldGen.cpp | 67 ++++++++----------- src/hotspot/share/gc/parallel/psOldGen.hpp | 13 ++-- 3 files changed, 52 insertions(+), 62 deletions(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 6e3b0b8e96a6d..1125ca76183e5 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -540,34 +540,34 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { } // The HeapBlockClaimer is used during parallel iteration over the heap, -// allowing workers to claim heap blocks, gaining exclusive rights to these blocks. +// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these. // The eden and survivor spaces are treated as single blocks as it is hard to divide // these spaces. -// The old spaces are divided into serveral fixed-size blocks. +// The old space is divided into fixed-size blocks. class HeapBlockClaimer : public StackObj { - // Index of iterable block, negative values for indexes of young generation spaces, - // zero and positive values for indexes of blocks in old generation space. 
- ssize_t _claimed_index; - public: - static const ssize_t EdenIndex = -2; - static const ssize_t SurvivorIndex = -1; + size_t _claimed_index; + +public: + static const size_t EdenIndex = 0; + static const size_t SurvivorIndex = 1; + static const size_t NumNonOldGenClaims = 2; HeapBlockClaimer() : _claimed_index(EdenIndex) { } // Claim the block and get the block index. - bool claim_and_get_block(ssize_t* block_index) { + bool claim_and_get_block(size_t* block_index) { assert(block_index != NULL, "Invalid index pointer"); - *block_index = Atomic::fetch_and_add(&_claimed_index, 1); - ssize_t iterable_blocks = (ssize_t)ParallelScavengeHeap::heap()->old_gen()->iterable_blocks(); - if (*block_index >= iterable_blocks) { - return false; - } - return true; + *block_index = Atomic::fetch_and_add(&_claimed_index, (size_t)1); + + PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen(); + size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims; + + return (*block_index < num_claims); } }; void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer) { - ssize_t block_index; + size_t block_index; // Iterate until all blocks are claimed while (claimer->claim_and_get_block(&block_index)) { if (block_index == HeapBlockClaimer::EdenIndex) { @@ -576,7 +576,7 @@ void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, young_gen()->from_space()->object_iterate(cl); young_gen()->to_space()->object_iterate(cl); } else { - old_gen()->block_iterate(cl, (size_t)block_index); + old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims); } } } diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index d429347f2043e..8506079d5f012 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -173,48 +173,35 @@ HeapWord* PSOldGen::allocate(size_t word_size) { return res; } -/* - * Divide space into 
blocks, processes block begins at - * bottom + block_index * (IterateBlockSize / HeapWordSize). - * NOTE: - * - The initial block start address may not be a valid - * object address, _start_array is used to correct it. - * - * - The end address is not necessary to be object address. - * - * - If there is an object that crosses blocks, it is - * processed by the worker that owns the block within - * which the object starts. - * - */ -void PSOldGen::block_iterate(ObjectClosure* cl, size_t block_index) { - MutableSpace *space = object_space(); - HeapWord* bottom = space->bottom(); - HeapWord* top = space->top(); - size_t block_word_size = IterateBlockSize / HeapWordSize; - HeapWord* begin = bottom + block_index * block_word_size; +size_t PSOldGen::num_iterable_blocks() const { + return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize; +} +void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) { + size_t block_word_size = IterateBlockSize / HeapWordSize; assert((block_word_size % (ObjectStartArray::block_size)) == 0, - "BLOCK SIZE not a multiple of start_array block"); - - // iterate objects in block. - HeapWord* end = MIN2(top, begin + block_word_size); - // Only iterate if there are objects between begin and end. - if (start_array()->object_starts_in_range(begin, end)) { - // Process objects in the range, start from finding object at the begining - // address. Note that object_start() can return the last object in previous - // block, and that object is processed by other worker scanning that block. - // So here only focus on objects that fall into the current block. 
- HeapWord* start = start_array()->object_start(begin); - if (start < begin) { - start += oop(start)->size(); - } - assert(begin <= start, - "object address " PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT, - p2i(start), p2i(begin)); - for (HeapWord* p = start; p < end; p += oop(p)->size()) { - cl->do_object(oop(p)); - } + "Block size not a multiple of start_array block"); + + MutableSpace *space = object_space(); + + HeapWord* begin = space->bottom() + block_index * block_word_size; + HeapWord* end = MIN2(space->top(), begin + block_word_size); + + if (!start_array()->object_starts_in_range(begin, end)) { + return; + } + + // Get object starting at or reaching into this block. + HeapWord* start = start_array()->object_start(begin); + if (start < begin) { + start += oop(start)->size(); + } + assert(start >= begin, + "Object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT, + p2i(start), p2i(begin)); + // Iterate all objects until the end. + for (HeapWord* p = start; p < end; p += oop(p)->size()) { + cl->do_object(oop(p)); } } diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index 703ed9f7e5763..d882bab114b6b 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -165,11 +165,14 @@ class PSOldGen : public CHeapObj { // Iteration. void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); } void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); } - size_t iterable_blocks() { - return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize; - } - // Iterate block with given block_index - void block_iterate(ObjectClosure* cl, size_t block_index); + + // Number of blocks to be iterated over in the used part of old gen. + size_t num_iterable_blocks() const; + // Iterate the objects starting in block block_index within [bottom, top) of the + // old gen. 
The object just reaching into this block is not iterated over. + // A block is an evenly sized non-overlapping part of the old gen of + // IterateBlockSize bytes. + void object_iterate_block(ObjectClosure* cl, size_t block_index); // Debugging - do not use for time critical operations void print() const; From 2668fe69507836b3d30bc56dec5de0da20b4076f Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Wed, 4 Nov 2020 14:04:33 +0800 Subject: [PATCH 10/13] revise claim_and_get_block() to avoid using pointer arguments --- .../share/gc/parallel/parallelScavengeHeap.cpp | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 1125ca76183e5..80523ef280e94 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -550,26 +550,28 @@ class HeapBlockClaimer : public StackObj { public: static const size_t EdenIndex = 0; static const size_t SurvivorIndex = 1; + // Use max_size_t as the invalid claim index + static const size_t InvalidIndex = (size_t)-1; static const size_t NumNonOldGenClaims = 2; HeapBlockClaimer() : _claimed_index(EdenIndex) { } // Claim the block and get the block index. - bool claim_and_get_block(size_t* block_index) { - assert(block_index != NULL, "Invalid index pointer"); - *block_index = Atomic::fetch_and_add(&_claimed_index, (size_t)1); + size_t claim_and_get_block() { + size_t block_index; + block_index = Atomic::fetch_and_add(&_claimed_index, (size_t)1); PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen(); size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims; - return (*block_index < num_claims); + return block_index < num_claims ? 
block_index : InvalidIndex; } }; void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer) { - size_t block_index; + size_t block_index = claimer->claim_and_get_block(); // Iterate until all blocks are claimed - while (claimer->claim_and_get_block(&block_index)) { + while (block_index != HeapBlockClaimer::InvalidIndex) { if (block_index == HeapBlockClaimer::EdenIndex) { young_gen()->eden_space()->object_iterate(cl); } else if (block_index == HeapBlockClaimer::SurvivorIndex) { @@ -578,6 +580,7 @@ void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, } else { old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims); } + block_index = claimer->claim_and_get_block(); } } From ee9208618b68cf3f0e77c74ef5fa10a6b3e6b72b Mon Sep 17 00:00:00 2001 From: Lin Zang Date: Wed, 4 Nov 2020 18:47:15 +0800 Subject: [PATCH 11/13] improve readability and use SIZE_MAX for InvalidIndex --- .../gc/parallel/parallelScavengeHeap.cpp | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 80523ef280e94..dfecb469dc1d1 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -548,10 +548,9 @@ class HeapBlockClaimer : public StackObj { size_t _claimed_index; public: + static const size_t InvalidIndex = (size_t)SIZE_MAX; static const size_t EdenIndex = 0; static const size_t SurvivorIndex = 1; - // Use max_size_t as the invalid claim index - static const size_t InvalidIndex = (size_t)-1; static const size_t NumNonOldGenClaims = 2; HeapBlockClaimer() : _claimed_index(EdenIndex) { } @@ -571,15 +570,17 @@ void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer) { size_t block_index = claimer->claim_and_get_block(); // Iterate until all blocks are claimed 
+ if (block_index == HeapBlockClaimer::EdenIndex) { + young_gen()->eden_space()->object_iterate(cl); + block_index = claimer->claim_and_get_block(); + } + if (block_index == HeapBlockClaimer::SurvivorIndex) { + young_gen()->from_space()->object_iterate(cl); + young_gen()->to_space()->object_iterate(cl); + block_index = claimer->claim_and_get_block(); + } while (block_index != HeapBlockClaimer::InvalidIndex) { - if (block_index == HeapBlockClaimer::EdenIndex) { - young_gen()->eden_space()->object_iterate(cl); - } else if (block_index == HeapBlockClaimer::SurvivorIndex) { - young_gen()->from_space()->object_iterate(cl); - young_gen()->to_space()->object_iterate(cl); - } else { - old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims); - } + old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims); block_index = claimer->claim_and_get_block(); } } From 6b4600a0c503518335837191efc9ecf9e3729526 Mon Sep 17 00:00:00 2001 From: Lin Zang <56812395+linzang@users.noreply.github.com> Date: Wed, 4 Nov 2020 19:18:57 +0800 Subject: [PATCH 12/13] use 1u instead of conversion to size_t for block_index Co-authored-by: Stefan Johansson <54407259+kstefanj@users.noreply.github.com> --- src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index dfecb469dc1d1..816348de5c450 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -557,7 +557,7 @@ class HeapBlockClaimer : public StackObj { // Claim the block and get the block index. 
size_t claim_and_get_block() {
     size_t block_index;
-    block_index = Atomic::fetch_and_add(&_claimed_index, (size_t)1);
+    block_index = Atomic::fetch_and_add(&_claimed_index, 1u);
 
     PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
     size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;
 
From 4cb086654243d2d114419e004d3e66193ee8631c Mon Sep 17 00:00:00 2001
From: Lin Zang
Date: Wed, 4 Nov 2020 19:54:05 +0800
Subject: [PATCH 13/13] remove unnecessary size_t conversion

---
 src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 816348de5c450..4f3f41e5ccf58 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -548,7 +548,7 @@ class HeapBlockClaimer : public StackObj {
   size_t _claimed_index;
 
 public:
-  static const size_t InvalidIndex = (size_t)SIZE_MAX;
+  static const size_t InvalidIndex = SIZE_MAX;
   static const size_t EdenIndex = 0;
   static const size_t SurvivorIndex = 1;
   static const size_t NumNonOldGenClaims = 2;