annotate src/hotspot/share/gc/g1/g1Allocator.cpp @ 53912:bb051ca06e9e

8159440: Move marking of promoted objects during initial mark into the concurrent phase
Reviewed-by: sjohanss, kbarrett
author tschatzl
date Thu, 06 Dec 2018 13:55:22 +0100
parents 1906adbef2dc
children aa87f38fcba2
rev   line source
sjohanss@26837 1 /*
kbarrett@49798 2 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
sjohanss@26837 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
sjohanss@26837 4 *
sjohanss@26837 5 * This code is free software; you can redistribute it and/or modify it
sjohanss@26837 6 * under the terms of the GNU General Public License version 2 only, as
sjohanss@26837 7 * published by the Free Software Foundation.
sjohanss@26837 8 *
sjohanss@26837 9 * This code is distributed in the hope that it will be useful, but WITHOUT
sjohanss@26837 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
sjohanss@26837 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
sjohanss@26837 12 * version 2 for more details (a copy is included in the LICENSE file that
sjohanss@26837 13 * accompanied this code).
sjohanss@26837 14 *
sjohanss@26837 15 * You should have received a copy of the GNU General Public License version
sjohanss@26837 16 * 2 along with this work; if not, write to the Free Software Foundation,
sjohanss@26837 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
sjohanss@26837 18 *
sjohanss@26837 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
sjohanss@26837 20 * or visit www.oracle.com if you need additional information or have any
sjohanss@26837 21 * questions.
sjohanss@26837 22 *
sjohanss@26837 23 */
sjohanss@26837 24
sjohanss@26837 25 #include "precompiled.hpp"
tschatzl@32185 26 #include "gc/g1/g1Allocator.inline.hpp"
tschatzl@32389 27 #include "gc/g1/g1AllocRegion.inline.hpp"
coleenp@34230 28 #include "gc/g1/g1EvacStats.inline.hpp"
pliden@30764 29 #include "gc/g1/g1CollectedHeap.inline.hpp"
tschatzl@50102 30 #include "gc/g1/g1Policy.hpp"
pliden@30764 31 #include "gc/g1/heapRegion.inline.hpp"
pliden@30764 32 #include "gc/g1/heapRegionSet.inline.hpp"
tschatzl@50102 33 #include "gc/g1/heapRegionType.hpp"
stefank@46625 34 #include "utilities/align.hpp"
sjohanss@26837 35
sjohanss@50208 36 G1Allocator::G1Allocator(G1CollectedHeap* heap) :
sjohanss@50208 37 _g1h(heap),
sjohanss@34146 38 _survivor_is_full(false),
sjohanss@34146 39 _old_is_full(false),
tschatzl@52033 40 _mutator_alloc_region(),
tschatzl@32379 41 _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
tschatzl@52033 42 _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)),
tschatzl@52033 43 _retained_old_gc_alloc_region(NULL) {
tschatzl@32379 44 }
tschatzl@32379 45
sjohanss@50208 46 void G1Allocator::init_mutator_alloc_region() {
sjohanss@26837 47 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
sjohanss@26837 48 _mutator_alloc_region.init();
sjohanss@26837 49 }
sjohanss@26837 50
sjohanss@50208 51 void G1Allocator::release_mutator_alloc_region() {
sjohanss@26837 52 _mutator_alloc_region.release();
sjohanss@26837 53 assert(_mutator_alloc_region.get() == NULL, "post-condition");
sjohanss@26837 54 }
sjohanss@26837 55
sjohanss@50208 56 bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
sjohanss@50208 57 return _retained_old_gc_alloc_region == hr;
sjohanss@50208 58 }
sjohanss@50208 59
sjohanss@26837 60 void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
sjohanss@26837 61 OldGCAllocRegion* old,
sjohanss@26837 62 HeapRegion** retained_old) {
sjohanss@26837 63 HeapRegion* retained_region = *retained_old;
sjohanss@26837 64 *retained_old = NULL;
jiangli@31346 65 assert(retained_region == NULL || !retained_region->is_archive(),
david@33105 66 "Archive region should not be alloc region (index %u)", retained_region->hrm_index());
sjohanss@26837 67
sjohanss@26837 68 // We will discard the current GC alloc region if:
sjohanss@26837 69 // a) it's in the collection set (it can happen!),
sjohanss@26837 70 // b) it's already full (no point in using it),
sjohanss@26837 71 // c) it's empty (this means that it was emptied during
sjohanss@26837 72 // a cleanup and it should be on the free list now), or
sjohanss@26837 73 // d) it's humongous (this means that it was emptied
sjohanss@26837 74 // during a cleanup and was added to the free list, but
sjohanss@26837 75 // has been subsequently used to allocate a humongous
sjohanss@26837 76 // object that may be less than the region size).
sjohanss@26837 77 if (retained_region != NULL &&
sjohanss@26837 78 !retained_region->in_collection_set() &&
sjohanss@26837 79 !(retained_region->top() == retained_region->end()) &&
sjohanss@26837 80 !retained_region->is_empty() &&
tonyp@26846 81 !retained_region->is_humongous()) {
sjohanss@26837 82 // The retained region was added to the old region set when it was
sjohanss@26837 83 // retired. We have to remove it now, since regions we are allocating
sjohanss@26837 84 // into must not be in the region sets. We'll re-add it later, when
sjohanss@26837 85 // it's retired again.
tschatzl@32193 86 _g1h->old_set_remove(retained_region);
sjohanss@26837 87 old->set(retained_region);
tschatzl@32193 88 _g1h->hr_printer()->reuse(retained_region);
sjohanss@26837 89 evacuation_info.set_alloc_regions_used_before(retained_region->used());
sjohanss@26837 90 }
sjohanss@26837 91 }
sjohanss@26837 92
sjohanss@50208 93 void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
kbarrett@49798 94 assert_at_safepoint_on_vm_thread();
sjohanss@26837 95
sjohanss@34146 96 _survivor_is_full = false;
sjohanss@34146 97 _old_is_full = false;
tschatzl@32377 98
sjohanss@26837 99 _survivor_gc_alloc_region.init();
sjohanss@26837 100 _old_gc_alloc_region.init();
sjohanss@26837 101 reuse_retained_old_region(evacuation_info,
sjohanss@26837 102 &_old_gc_alloc_region,
sjohanss@26837 103 &_retained_old_gc_alloc_region);
sjohanss@26837 104 }
sjohanss@26837 105
sjohanss@50208 106 void G1Allocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
sjohanss@49788 107 evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
sjohanss@49788 108 old_gc_alloc_region()->count());
sjohanss@49788 109 survivor_gc_alloc_region()->release();
sjohanss@26837 110 // If we have an old GC alloc region to release, we'll save it in
sjohanss@26837 111 // _retained_old_gc_alloc_region. If we don't,
sjohanss@26837 112 // _retained_old_gc_alloc_region will become NULL. This is what we
sjohanss@26837 113 // want either way, so there is no reason to check explicitly for either
sjohanss@26837 114 // condition.
sjohanss@49788 115 _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
sjohanss@26837 116 }
sjohanss@26837 117
sjohanss@50208 118 void G1Allocator::abandon_gc_alloc_regions() {
sjohanss@49788 119 assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
sjohanss@49788 120 assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
sjohanss@26837 121 _retained_old_gc_alloc_region = NULL;
sjohanss@26837 122 }
sjohanss@26837 123
sjohanss@50208 124 bool G1Allocator::survivor_is_full() const {
sjohanss@34146 125 return _survivor_is_full;
sjohanss@34146 126 }
sjohanss@34146 127
sjohanss@50208 128 bool G1Allocator::old_is_full() const {
sjohanss@34146 129 return _old_is_full;
sjohanss@34146 130 }
sjohanss@34146 131
sjohanss@50208 132 void G1Allocator::set_survivor_full() {
sjohanss@34146 133 _survivor_is_full = true;
sjohanss@34146 134 }
sjohanss@34146 135
sjohanss@50208 136 void G1Allocator::set_old_full() {
sjohanss@34146 137 _old_is_full = true;
sjohanss@34146 138 }
sjohanss@34146 139
sjohanss@49788 140 size_t G1Allocator::unsafe_max_tlab_alloc() {
tschatzl@32185 141 // Return the remaining space in the current alloc region, but not less than
tschatzl@32185 142 // the min TLAB size.
tschatzl@32185 143
tschatzl@32185 144 // Also, this value can be at most the humongous object threshold,
tschatzl@32185 145 // since we can't allow TLABs to grow big enough to accommodate
tschatzl@32185 146 // humongous objects.
tschatzl@32185 147
sjohanss@49788 148 HeapRegion* hr = mutator_alloc_region()->get();
tschatzl@32185 149 size_t max_tlab = _g1h->max_tlab_size() * wordSize;
tschatzl@32185 150 if (hr == NULL) {
tschatzl@32185 151 return max_tlab;
tschatzl@32185 152 } else {
tschatzl@32185 153 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
tschatzl@32185 154 }
tschatzl@32185 155 }
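// [Editorial addition: illustrative sketch, not part of the annotated revision.]
// The return value above clamps the free space of the current mutator region to at
// least MinTLABSize and at most the largest non-humongous TLAB size. A minimal
// stand-alone model of that clamping (all values assumed, in bytes):

#include <algorithm>
#include <cstddef>

inline std::size_t clamped_tlab_limit(std::size_t region_free,
                                      std::size_t min_tlab,
                                      std::size_t max_tlab) {
  // Mirrors MIN2(MAX2(hr->free(), MinTLABSize), max_tlab) in the code above.
  return std::min(std::max(region_free, min_tlab), max_tlab);
}

// Example: 2 KB left in the region with an 8 KB MinTLABSize and a 2 MB cap yields
// 8 KB; 6 MB of free space with the same bounds yields the 2 MB cap.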
tschatzl@32185 156
sjohanss@50208 157 size_t G1Allocator::used_in_alloc_regions() {
sjohanss@50208 158 assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
sjohanss@50470 159 return mutator_alloc_region()->used_in_alloc_regions();
sjohanss@50208 160 }
sjohanss@50208 161
sjohanss@50208 162
tschatzl@32185 163 HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
sjohanss@49788 164 size_t word_size) {
tschatzl@32389 165 size_t temp = 0;
sjohanss@49788 166 HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
tschatzl@32389 167 assert(result == NULL || temp == word_size,
david@33105 168 "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
david@33105 169 word_size, temp, p2i(result));
tschatzl@32389 170 return result;
tschatzl@32389 171 }
tschatzl@32389 172
tschatzl@32389 173 HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
tschatzl@32389 174 size_t min_word_size,
tschatzl@32389 175 size_t desired_word_size,
sjohanss@49788 176 size_t* actual_word_size) {
tschatzl@32185 177 switch (dest.value()) {
tschatzl@32185 178 case InCSetState::Young:
sjohanss@49788 179 return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
tschatzl@32185 180 case InCSetState::Old:
sjohanss@49788 181 return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
tschatzl@32185 182 default:
tschatzl@32185 183 ShouldNotReachHere();
tschatzl@32185 184 return NULL; // Keep some compilers happy
tschatzl@32185 185 }
tschatzl@32185 186 }
tschatzl@32185 187
tschatzl@32389 188 HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
tschatzl@32389 189 size_t desired_word_size,
sjohanss@49788 190 size_t* actual_word_size) {
tschatzl@32389 191 assert(!_g1h->is_humongous(desired_word_size),
tschatzl@32185 192 "we should not be seeing humongous-size allocations in this path");
tschatzl@32185 193
sjohanss@49788 194 HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
sjohanss@49788 195 desired_word_size,
sjohanss@49788 196 actual_word_size);
sjohanss@49788 197 if (result == NULL && !survivor_is_full()) {
tschatzl@32185 198 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
sjohanss@49788 199 result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
sjohanss@49788 200 desired_word_size,
sjohanss@49788 201 actual_word_size);
tschatzl@32377 202 if (result == NULL) {
sjohanss@49788 203 set_survivor_full();
tschatzl@32377 204 }
tschatzl@32185 205 }
tschatzl@32185 206 if (result != NULL) {
tschatzl@32389 207 _g1h->dirty_young_block(result, *actual_word_size);
tschatzl@32185 208 }
tschatzl@32185 209 return result;
tschatzl@32185 210 }
tschatzl@32185 211
tschatzl@32389 212 HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
tschatzl@32389 213 size_t desired_word_size,
sjohanss@49788 214 size_t* actual_word_size) {
tschatzl@32389 215 assert(!_g1h->is_humongous(desired_word_size),
tschatzl@32185 216 "we should not be seeing humongous-size allocations in this path");
tschatzl@32185 217
sjohanss@49788 218 HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
sjohanss@49788 219 desired_word_size,
sjohanss@49788 220 actual_word_size);
sjohanss@49788 221 if (result == NULL && !old_is_full()) {
tschatzl@32185 222 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
sjohanss@49788 223 result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
sjohanss@49788 224 desired_word_size,
sjohanss@49788 225 actual_word_size);
tschatzl@32377 226 if (result == NULL) {
sjohanss@49788 227 set_old_full();
tschatzl@32377 228 }
tschatzl@32185 229 }
tschatzl@32185 230 return result;
tschatzl@32185 231 }
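// [Editorial addition: illustrative sketch, not part of the annotated revision.]
// survivor_attempt_allocation() and old_attempt_allocation() above share one
// pattern: an unlocked fast-path attempt, a retry under FreeList_lock that may
// install a fresh region, and a "full" flag that suppresses further locked retries
// once a retry has failed. A simplified stand-alone model of that control flow
// (names hypothetical; the real fast path uses a CAS on the region's top pointer
// rather than plain stores):

#include <cstddef>
#include <mutex>

struct ModelAllocRegion {
  char*      _cur   = nullptr;  // next free byte in the current region
  char*      _limit = nullptr;  // end of the current region
  std::mutex _refill_lock;      // stands in for FreeList_lock

  // Fast path: bump-allocate if the request still fits.
  char* attempt(std::size_t bytes) {
    if (_cur != nullptr && bytes <= static_cast<std::size_t>(_limit - _cur)) {
      char* result = _cur;
      _cur += bytes;
      return result;
    }
    return nullptr;
  }

  // Slow path: in the real code this retires the region and takes a new one from
  // the free list under the lock; here it simply retries while locked.
  char* attempt_locked(std::size_t bytes) {
    std::lock_guard<std::mutex> guard(_refill_lock);
    return attempt(bytes);
  }
};

// Caller-side shape, mirroring the two functions above:
inline char* model_allocate(ModelAllocRegion& region, bool& is_full, std::size_t bytes) {
  char* result = region.attempt(bytes);
  if (result == nullptr && !is_full) {
    result = region.attempt_locked(bytes);
    if (result == nullptr) {
      is_full = true;  // no further locked retries during this evacuation
    }
  }
  return result;
}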
tschatzl@32185 232
sjohanss@50208 233 uint G1PLABAllocator::calc_survivor_alignment_bytes() {
sjohanss@50208 234 assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
sjohanss@50208 235 if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
sjohanss@50208 236 // No need to align objects in the survivors differently; return 0,
sjohanss@50208 237 // which means "survivor alignment is not used".
sjohanss@50208 238 return 0;
sjohanss@50208 239 } else {
sjohanss@50208 240 assert(SurvivorAlignmentInBytes > 0, "sanity");
sjohanss@50208 241 return SurvivorAlignmentInBytes;
sjohanss@50208 242 }
sjohanss@50208 243 }
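// [Editorial note, not part of the annotated revision.]
// Example of the rule above with assumed flag values: SurvivorAlignmentInBytes = 64
// and ObjectAlignmentInBytes = 8 returns 64, so survivor PLAB allocations are padded
// up to 64-byte boundaries; if both flags are 8, the method returns 0 and the extra
// alignment path is skipped entirely.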
sjohanss@50208 244
tschatzl@32185 245 G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
tschatzl@32185 246 _g1h(G1CollectedHeap::heap()),
tschatzl@32185 247 _allocator(allocator),
sjohanss@50208 248 _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
sjohanss@50208 249 _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)),
tschatzl@32185 250 _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
sjohanss@50208 251 for (uint state = 0; state < InCSetState::Num; state++) {
sjohanss@50208 252 _direct_allocated[state] = 0;
sjohanss@50208 253 _alloc_buffers[state] = NULL;
tschatzl@32379 254 }
sjohanss@50208 255 _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
sjohanss@50208 256 _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
tschatzl@32185 257 }
tschatzl@32185 258
tschatzl@32383 259 bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
tschatzl@32383 260 return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
tschatzl@32383 261 }
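// [Editorial note, not part of the annotated revision.]
// Worked example of the check above, with the assumed default
// ParallelGCBufferWastePct = 10 and a desired PLAB size of 4096 words:
//   allocation_word_sz * 100 < 4096 * 10
// holds for requests of up to 409 words, so only such small requests may retire the
// current buffer and take a fresh PLAB; a 500-word request instead falls through to
// a direct allocation in allocate_direct_or_new_plab() below.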
tschatzl@32383 262
tschatzl@32185 263 HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
tschatzl@32185 264 size_t word_sz,
tschatzl@32377 265 bool* plab_refill_failed) {
tschatzl@50301 266 size_t plab_word_size = _g1h->desired_plab_sz(dest);
tschatzl@32383 267 size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);
tschatzl@32383 268
tschatzl@32383 269 // Only get a new PLAB if the allocation fits and it would not waste more than
tschatzl@32383 270 // ParallelGCBufferWastePct in the existing buffer.
tschatzl@32383 271 if ((required_in_plab <= plab_word_size) &&
tschatzl@32383 272 may_throw_away_buffer(required_in_plab, plab_word_size)) {
tschatzl@32383 273
sjohanss@49788 274 PLAB* alloc_buf = alloc_buffer(dest);
tschatzl@29327 275 alloc_buf->retire();
sjohanss@26837 276
tschatzl@32389 277 size_t actual_plab_size = 0;
tschatzl@32389 278 HeapWord* buf = _allocator->par_allocate_during_gc(dest,
tschatzl@32389 279 required_in_plab,
tschatzl@32389 280 plab_word_size,
sjohanss@49788 281 &actual_plab_size);
tschatzl@32389 282
tschatzl@32389 283 assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
david@33105 284 "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
david@33105 285 required_in_plab, plab_word_size, actual_plab_size, p2i(buf));
tschatzl@32389 286
tschatzl@32377 287 if (buf != NULL) {
tschatzl@32389 288 alloc_buf->set_buf(buf, actual_plab_size);
tschatzl@32377 289
tschatzl@32377 290 HeapWord* const obj = alloc_buf->allocate(word_sz);
david@33105 291 assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
david@33105 292 SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
david@33105 293 word_sz, required_in_plab, plab_word_size);
tschatzl@32377 294 return obj;
sjohanss@26837 295 }
sjohanss@26837 296 // Otherwise the PLAB refill failed.
tschatzl@32377 297 *plab_refill_failed = true;
sjohanss@26837 298 }
tschatzl@32379 299 // Try direct allocation.
sjohanss@49788 300 HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
tschatzl@32379 301 if (result != NULL) {
tschatzl@32379 302 _direct_allocated[dest.value()] += word_sz;
tschatzl@32379 303 }
tschatzl@32379 304 return result;
sjohanss@26837 305 }
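// [Editorial note, not part of the annotated revision.]
// Summary of the decision flow implemented above:
//   1. If the request fits into a fresh PLAB and retiring the current one cannot
//      waste more than ParallelGCBufferWastePct of a full buffer, retire it and
//      request a new buffer of between required_in_plab and plab_word_size words
//      from the G1Allocator, then allocate the object inside the new PLAB.
//   2. If that PLAB refill fails, *plab_refill_failed is set to true.
//   3. Otherwise (oversized request, too much would be wasted, or refill failed),
//      fall back to a direct allocation in the destination space, accounted in
//      _direct_allocated[].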
sjohanss@26837 306
sjohanss@49788 307 void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz) {
sjohanss@49788 308 alloc_buffer(dest)->undo_allocation(obj, word_sz);
tschatzl@32185 309 }
tschatzl@32185 310
sjohanss@50208 311 void G1PLABAllocator::flush_and_retire_stats() {
tschatzl@28213 312 for (uint state = 0; state < InCSetState::Num; state++) {
tschatzl@49394 313 PLAB* const buf = _alloc_buffers[state];
tschatzl@28213 314 if (buf != NULL) {
tschatzl@32379 315 G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
tschatzl@32379 316 buf->flush_and_retire_stats(stats);
tschatzl@32379 317 stats->add_direct_allocated(_direct_allocated[state]);
tschatzl@32379 318 _direct_allocated[state] = 0;
tschatzl@28213 319 }
sjohanss@26837 320 }
sjohanss@26837 321 }
tschatzl@30564 322
sjohanss@50208 323 void G1PLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
tschatzl@30564 324 wasted = 0;
tschatzl@30564 325 undo_wasted = 0;
tschatzl@30564 326 for (uint state = 0; state < InCSetState::Num; state++) {
tschatzl@49394 327 PLAB * const buf = _alloc_buffers[state];
tschatzl@30564 328 if (buf != NULL) {
tschatzl@30564 329 wasted += buf->waste();
tschatzl@30564 330 undo_wasted += buf->undo_waste();
tschatzl@30564 331 }
tschatzl@30564 332 }
tschatzl@30564 333 }
jiangli@31346 334
sjohanss@46285 335 bool G1ArchiveAllocator::_archive_check_enabled = false;
jiangli@46810 336 G1ArchiveRegionMap G1ArchiveAllocator::_closed_archive_region_map;
jiangli@46810 337 G1ArchiveRegionMap G1ArchiveAllocator::_open_archive_region_map;
sjohanss@46285 338
jiangli@46810 339 G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
jiangli@31346 340 // Create the archive allocator, and also enable archive object checking
jiangli@31346 341 // in mark-sweep, since we will be creating archive regions.
jiangli@46810 342 G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
sjohanss@46285 343 enable_archive_object_check();
jiangli@31346 344 return result;
jiangli@31346 345 }
jiangli@31346 346
jiangli@31346 347 bool G1ArchiveAllocator::alloc_new_region() {
jiangli@31346 348 // Allocate the highest free region in the reserved heap,
jiangli@31346 349 // and add it to our list of allocated regions. It is marked
jiangli@31346 350 // archive and added to the archive set.
jiangli@31346 351 HeapRegion* hr = _g1h->alloc_highest_free_region();
jiangli@31346 352 if (hr == NULL) {
jiangli@31346 353 return false;
jiangli@31346 354 }
david@33105 355 assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
jiangli@46810 356 if (_open) {
jiangli@46810 357 hr->set_open_archive();
jiangli@46810 358 } else {
jiangli@46810 359 hr->set_closed_archive();
jiangli@46810 360 }
tschatzl@50102 361 _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
tschatzl@52220 362 _g1h->archive_set_add(hr);
david@35079 363 _g1h->hr_printer()->alloc(hr);
jiangli@31346 364 _allocated_regions.append(hr);
jiangli@31346 365 _allocation_region = hr;
jiangli@31346 366
jiangli@31346 367 // Set up _bottom and _max to begin allocating in the lowest
jiangli@31346 368 // min_region_size'd chunk of the allocated G1 region.
jiangli@31346 369 _bottom = hr->bottom();
jiangli@31346 370 _max = _bottom + HeapRegion::min_region_size_in_words();
jiangli@31346 371
jiangli@31346 372 // Tell mark-sweep that objects in this region are not to be marked.
jiangli@46810 373 set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);
jiangli@31346 374
jiangli@31346 375 // Since we've modified the archive set, call update_sizes.
jiangli@31346 376 _g1h->g1mm()->update_sizes();
jiangli@31346 377 return true;
jiangli@31346 378 }
jiangli@31346 379
jiangli@31346 380 HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
jiangli@31346 381 assert(word_size != 0, "size must not be zero");
jiangli@31346 382 if (_allocation_region == NULL) {
jiangli@31346 383 if (!alloc_new_region()) {
jiangli@31346 384 return NULL;
jiangli@31346 385 }
jiangli@31346 386 }
jiangli@31346 387 HeapWord* old_top = _allocation_region->top();
jiangli@31346 388 assert(_bottom >= _allocation_region->bottom(),
david@33105 389 "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
david@33105 390 p2i(_bottom), p2i(_allocation_region->bottom()));
jiangli@31346 391 assert(_max <= _allocation_region->end(),
david@33105 392 "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
david@33105 393 p2i(_max), p2i(_allocation_region->end()));
jiangli@31346 394 assert(_bottom <= old_top && old_top <= _max,
david@33105 395 "inconsistent allocation state: expected "
david@33105 396 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
david@33105 397 p2i(_bottom), p2i(old_top), p2i(_max));
jiangli@31346 398
jiangli@31346 399 // Allocate the next word_size words in the current allocation chunk.
jiangli@31346 400 // If allocation would cross the _max boundary, insert a filler and begin
jiangli@31346 401 // at the base of the next min_region_size'd chunk. Also advance to the next
jiangli@31346 402 // chunk if we don't yet cross the boundary, but the remainder would be too
jiangli@31346 403 // small to fill.
jiangli@31346 404 HeapWord* new_top = old_top + word_size;
jiangli@31346 405 size_t remainder = pointer_delta(_max, new_top);
jiangli@31346 406 if ((new_top > _max) ||
jiangli@31346 407 ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
jiangli@31346 408 if (old_top != _max) {
jiangli@31346 409 size_t fill_size = pointer_delta(_max, old_top);
jiangli@31346 410 CollectedHeap::fill_with_object(old_top, fill_size);
jiangli@31346 411 _summary_bytes_used += fill_size * HeapWordSize;
jiangli@31346 412 }
jiangli@31346 413 _allocation_region->set_top(_max);
jiangli@31346 414 old_top = _bottom = _max;
jiangli@31346 415
jiangli@31346 416 // Check if we've just used up the last min_region_size'd chunk
jiangli@31346 417 // in the current region, and if so, allocate a new one.
jiangli@31346 418 if (_bottom != _allocation_region->end()) {
jiangli@31346 419 _max = _bottom + HeapRegion::min_region_size_in_words();
jiangli@31346 420 } else {
jiangli@31346 421 if (!alloc_new_region()) {
jiangli@31346 422 return NULL;
jiangli@31346 423 }
jiangli@31346 424 old_top = _allocation_region->bottom();
jiangli@31346 425 }
jiangli@31346 426 }
jiangli@31346 427 _allocation_region->set_top(old_top + word_size);
jiangli@31346 428 _summary_bytes_used += word_size * HeapWordSize;
jiangli@31346 429
jiangli@31346 430 return old_top;
jiangli@31346 431 }
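// [Editorial addition: illustrative sketch, not part of the annotated revision.]
// archive_mem_allocate() above bump-allocates within min_region_size'd chunks; when
// a request does not fit, or would leave a tail smaller than the minimum filler
// object, the rest of the chunk is padded and allocation continues in the next
// chunk (possibly in a freshly allocated region). A simplified stand-alone model of
// that chunking policy, with hypothetical names and sizes in words; it assumes every
// request fits in a single chunk, as the real allocator guarantees:

#include <cstddef>

class ChunkedBumpAllocator {
  std::size_t _top;         // current allocation pointer (word index)
  std::size_t _chunk_end;   // end of the current chunk (_max in the real code)
  std::size_t _chunk_size;  // HeapRegion::min_region_size_in_words() analogue
  std::size_t _min_fill;    // smallest representable filler object
  std::size_t _filler;      // total padding inserted, for accounting

public:
  ChunkedBumpAllocator(std::size_t chunk_size, std::size_t min_fill)
    : _top(0), _chunk_end(chunk_size), _chunk_size(chunk_size),
      _min_fill(min_fill), _filler(0) {}

  std::size_t allocate(std::size_t word_size) {
    std::size_t new_top = _top + word_size;
    if (new_top > _chunk_end ||
        (new_top < _chunk_end && _chunk_end - new_top < _min_fill)) {
      // Pad out the remainder of this chunk and start the next one.
      _filler += _chunk_end - _top;
      _top = _chunk_end;
      _chunk_end = _top + _chunk_size;
      new_top = _top + word_size;
    }
    std::size_t result = _top;
    _top = new_top;
    return result;
  }

  std::size_t filler_words() const { return _filler; }
};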
jiangli@31346 432
jiangli@31346 433 void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
jiangli@31346 434 size_t end_alignment_in_bytes) {
jiangli@31346 435 assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
david@33105 436 "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
stefank@46619 437 assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
david@33105 438 "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
jiangli@31346 439
jiangli@31346 440 // If we've allocated nothing, simply return.
jiangli@31346 441 if (_allocation_region == NULL) {
jiangli@31346 442 return;
jiangli@31346 443 }
jiangli@31346 444
jiangli@31346 445 // If an end alignment was requested, insert filler objects.
jiangli@31346 446 if (end_alignment_in_bytes != 0) {
jiangli@31346 447 HeapWord* currtop = _allocation_region->top();
stefank@46619 448 HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
jiangli@31346 449 size_t fill_size = pointer_delta(newtop, currtop);
jiangli@31346 450 if (fill_size != 0) {
jiangli@31346 451 if (fill_size < CollectedHeap::min_fill_size()) {
jiangli@31346 452 // If the required fill is smaller than we can represent,
jiangli@31346 453 // bump up to the next aligned address. We know we won't exceed the current
jiangli@31346 454 // region boundary because the max supported alignment is smaller than the min
jiangli@31346 455 // region size, and because the allocation code never leaves space smaller than
jiangli@31346 456 // the min_fill_size at the top of the current allocation region.
stefank@46619 457 newtop = align_up(currtop + CollectedHeap::min_fill_size(),
stefank@46619 458 end_alignment_in_bytes);
jiangli@31346 459 fill_size = pointer_delta(newtop, currtop);
jiangli@31346 460 }
jiangli@31346 461 HeapWord* fill = archive_mem_allocate(fill_size);
jiangli@31346 462 CollectedHeap::fill_with_objects(fill, fill_size);
jiangli@31346 463 }
jiangli@31346 464 }
jiangli@31346 465
jiangli@31346 466 // Loop through the allocated regions, and create MemRegions summarizing
jiangli@31346 467 // the allocated address range, combining contiguous ranges. Add the
jiangli@31346 468 // MemRegions to the GrowableArray provided by the caller.
jiangli@31346 469 int index = _allocated_regions.length() - 1;
jiangli@31346 470 assert(_allocated_regions.at(index) == _allocation_region,
david@33105 471 "expected region %u at end of array, found %u",
david@33105 472 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
jiangli@31346 473 HeapWord* base_address = _allocation_region->bottom();
jiangli@31346 474 HeapWord* top = base_address;
jiangli@31346 475
jiangli@31346 476 while (index >= 0) {
jiangli@31346 477 HeapRegion* next = _allocated_regions.at(index);
jiangli@31346 478 HeapWord* new_base = next->bottom();
jiangli@31346 479 HeapWord* new_top = next->top();
jiangli@31346 480 if (new_base != top) {
jiangli@31346 481 ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
jiangli@31346 482 base_address = new_base;
jiangli@31346 483 }
jiangli@31346 484 top = new_top;
jiangli@31346 485 index = index - 1;
jiangli@31346 486 }
jiangli@31346 487
david@33105 488 assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
jiangli@31346 489 ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
jiangli@31346 490 _allocated_regions.clear();
jiangli@31346 491 _allocation_region = NULL;
jiangli@31346 492 }