annotate src/hotspot/share/gc/g1/g1CollectedHeap.cpp @ 51970:d56dd9798d54

8208611: Refactor SATBMarkQueue filtering to allow GC-specific filters Summary: Add SATBMarkQueueFilter. Reviewed-by: tschatzl, eosterlund, rkennke
author kbarrett
date Wed, 01 Aug 2018 19:14:04 -0400
parents bd2e3c3b4547
children c25572739e7c
rev   line source
ysr@1374 1 /*
tschatzl@48951 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
ysr@1374 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@1374 4 *
ysr@1374 5 * This code is free software; you can redistribute it and/or modify it
ysr@1374 6 * under the terms of the GNU General Public License version 2 only, as
ysr@1374 7 * published by the Free Software Foundation.
ysr@1374 8 *
ysr@1374 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@1374 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@1374 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@1374 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@1374 13 * accompanied this code).
ysr@1374 14 *
ysr@1374 15 * You should have received a copy of the GNU General Public License version
ysr@1374 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@1374 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@1374 18 *
trims@5547 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@5547 20 * or visit www.oracle.com if you need additional information or have any
trims@5547 21 * questions.
ysr@1374 22 *
ysr@1374 23 */
ysr@1374 24
stefank@7397 25 #include "precompiled.hpp"
stefank@27247 26 #include "classfile/metadataOnStackMark.hpp"
gziemski@24426 27 #include "classfile/stringTable.hpp"
mgronlun@34666 28 #include "classfile/symbolTable.hpp"
johnc@19339 29 #include "code/codeCache.hpp"
stefank@7397 30 #include "code/icBuffer.hpp"
tschatzl@32185 31 #include "gc/g1/g1Allocator.inline.hpp"
pliden@50246 32 #include "gc/g1/g1BarrierSet.hpp"
pliden@30764 33 #include "gc/g1/g1CollectedHeap.inline.hpp"
mgerdin@37039 34 #include "gc/g1/g1CollectionSet.hpp"
pliden@30764 35 #include "gc/g1/g1CollectorPolicy.hpp"
drwhite@31331 36 #include "gc/g1/g1CollectorState.hpp"
tschatzl@47970 37 #include "gc/g1/g1ConcurrentRefine.hpp"
tschatzl@47970 38 #include "gc/g1/g1ConcurrentRefineThread.hpp"
lkorinth@50154 39 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
coleenp@34230 40 #include "gc/g1/g1EvacStats.inline.hpp"
sjohanss@48073 41 #include "gc/g1/g1FullCollector.hpp"
pliden@30764 42 #include "gc/g1/g1GCPhaseTimes.hpp"
mgerdin@37144 43 #include "gc/g1/g1HeapSizingPolicy.hpp"
brutisso@35909 44 #include "gc/g1/g1HeapTransition.hpp"
david@35851 45 #include "gc/g1/g1HeapVerifier.hpp"
kbarrett@38172 46 #include "gc/g1/g1HotCardCache.hpp"
rkennke@48384 47 #include "gc/g1/g1MemoryPool.hpp"
pliden@30764 48 #include "gc/g1/g1OopClosures.inline.hpp"
pliden@30764 49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
mgerdin@37985 50 #include "gc/g1/g1Policy.hpp"
pliden@30764 51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
sjohanss@48073 52 #include "gc/g1/g1RemSet.hpp"
mgerdin@33213 53 #include "gc/g1/g1RootClosures.hpp"
pliden@30764 54 #include "gc/g1/g1RootProcessor.hpp"
kbarrett@51970 55 #include "gc/g1/g1SATBMarkQueueFilter.hpp"
pliden@30764 56 #include "gc/g1/g1StringDedup.hpp"
pliden@50247 57 #include "gc/g1/g1ThreadLocalData.hpp"
pliden@30764 58 #include "gc/g1/g1YCTypes.hpp"
tschatzl@47971 59 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
pliden@30764 60 #include "gc/g1/heapRegion.inline.hpp"
pliden@30764 61 #include "gc/g1/heapRegionRemSet.hpp"
pliden@30764 62 #include "gc/g1/heapRegionSet.inline.hpp"
pliden@30764 63 #include "gc/g1/vm_operations_g1.hpp"
stefank@49479 64 #include "gc/shared/adaptiveSizePolicy.hpp"
pliden@30764 65 #include "gc/shared/gcHeapSummary.hpp"
brutisso@33107 66 #include "gc/shared/gcId.hpp"
stefank@50089 67 #include "gc/shared/gcLocker.hpp"
pliden@30764 68 #include "gc/shared/gcTimer.hpp"
pliden@30764 69 #include "gc/shared/gcTrace.hpp"
brutisso@35061 70 #include "gc/shared/gcTraceTime.inline.hpp"
pliden@30764 71 #include "gc/shared/generationSpec.hpp"
pliden@30764 72 #include "gc/shared/isGCActiveMark.hpp"
rehn@51049 73 #include "gc/shared/oopStorageParState.hpp"
tonyp@38081 74 #include "gc/shared/preservedMarks.inline.hpp"
rkennke@47802 75 #include "gc/shared/suspendibleThreadSet.hpp"
goetz@35862 76 #include "gc/shared/referenceProcessor.inline.hpp"
pliden@30764 77 #include "gc/shared/taskqueue.inline.hpp"
stefank@47803 78 #include "gc/shared/weakProcessor.hpp"
brutisso@35061 79 #include "logging/log.hpp"
stefank@25492 80 #include "memory/allocation.hpp"
tschatzl@22547 81 #include "memory/iterator.hpp"
kbarrett@51145 82 #include "memory/metaspaceShared.hpp"
jprovino@37248 83 #include "memory/resourceArea.hpp"
stefank@50087 84 #include "oops/access.inline.hpp"
stefank@50087 85 #include "oops/compressedOops.inline.hpp"
stefank@7397 86 #include "oops/oop.inline.hpp"
coleenp@46505 87 #include "prims/resolvedMethodTable.hpp"
dholmes@40655 88 #include "runtime/atomic.hpp"
gziemski@50413 89 #include "runtime/flags/flagSetting.hpp"
stefank@49826 90 #include "runtime/handles.inline.hpp"
jiangli@32589 91 #include "runtime/init.hpp"
coleenp@51033 92 #include "runtime/orderAccess.hpp"
dcubed@48321 93 #include "runtime/threadSMR.hpp"
stefank@7397 94 #include "runtime/vmThread.hpp"
stefank@46625 95 #include "utilities/align.hpp"
stefank@24098 96 #include "utilities/globalDefinitions.hpp"
stefank@30175 97 #include "utilities/stack.inline.hpp"
ysr@1374 98
tonyp@3697 99 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
tonyp@3697 100
ysr@1374 101 // INVARIANTS/NOTES
ysr@1374 102 //
ysr@1374 103 // All allocation activity covered by the G1CollectedHeap interface is
tonyp@7398 104 // serialized by acquiring the Heap_lock. This happens in mem_allocate
tonyp@7398 105 // and allocate_new_tlab, which are the "entry" points to the
tonyp@7398 106 // allocation code from the rest of the JVM. (Note that this does not
tonyp@7398 107 // apply to allocation within TLABs, which is not part of this interface:
tonyp@7398 108 // it is done by clients of this interface.)
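// A minimal sketch of the locking pattern the slow paths below rely on
// (illustrative only; the real flow is in attempt_allocation_slow()):
//
//   {
//     MutexLockerEx x(Heap_lock);
//     result = _allocator->attempt_allocation_locked(word_size);
//   }
//   // on failure: decide between scheduling a collection pause and
//   // stalling on the GCLocker, then retry.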
ysr@1374 109
tschatzl@24105 110 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
tschatzl@24105 111 private:
ehelin@34613 112 size_t _num_dirtied;
ehelin@34613 113 G1CollectedHeap* _g1h;
eosterlund@49595 114 G1CardTable* _g1_ct;
ehelin@34613 115
ehelin@34613 116 HeapRegion* region_for_card(jbyte* card_ptr) const {
eosterlund@49595 117 return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
ehelin@34613 118 }
ehelin@34613 119
ehelin@34613 120 bool will_become_free(HeapRegion* hr) const {
ehelin@34613 121 // A region will be freed by free_collection_set if the region is in the
ehelin@34613 122 // collection set and has not had an evacuation failure.
ehelin@34613 123 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
ehelin@34613 124 }
tschatzl@24105 125
tschatzl@24105 126 public:
ehelin@34613 127 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
eosterlund@49595 128 _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
mgerdin@20309 129
vkempik@23855 130 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
ehelin@34613 131 HeapRegion* hr = region_for_card(card_ptr);
ehelin@34613 132
ehelin@34613 133 // Should only dirty cards in regions that won't be freed.
ehelin@34613 134 if (!will_become_free(hr)) {
eosterlund@49595 135 *card_ptr = G1CardTable::dirty_card_val();
ehelin@34613 136 _num_dirtied++;
ehelin@34613 137 }
ehelin@34613 138
ysr@1374 139 return true;
ysr@1374 140 }
tschatzl@24105 141
ehelin@34613 142 size_t num_dirtied() const { return _num_dirtied; }
ysr@1374 143 };
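// The closure above is driven by the dirty card queue machinery during the
// "Redirty Cards" phase: do_card_ptr() is invoked once per logged card (returning
// true keeps the iteration going), and num_dirtied() afterwards feeds the phase
// statistics. Rough usage sketch (the driver step is paraphrased, not the real API):
//
//   RedirtyLoggedCardTableEntryClosure cl(g1h);
//   // ...the queue set applies cl.do_card_ptr(card_ptr, worker_id) to each logged card...
//   log_debug(gc)("Redirtied " SIZE_FORMAT " cards", cl.num_dirtied());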
ysr@1374 144
ysr@1374 145
tschatzl@26160 146 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
tschatzl@28030 147 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
tschatzl@26160 148 }
tschatzl@26160 149
tschatzl@27149 150 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
tschatzl@27149 151 // The from card cache is not the memory that is actually committed. So we cannot
tschatzl@27149 152 // take advantage of the zero_filled parameter.
tschatzl@26160 153 reset_from_card_cache(start_idx, num_regions);
tschatzl@26160 154 }
tschatzl@26160 155
sjohanss@48073 156
sjohanss@48073 157 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
sjohanss@48073 158 MemRegion mr) {
sjohanss@48073 159 return new HeapRegion(hrs_index, bot(), mr);
sjohanss@48073 160 }
sjohanss@48073 161
ysr@1374 162 // Private methods.
ysr@1374 163
jwilhelm@23471 164 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
tonyp@26846 165 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
tonyp@7923 166 "the only time we use this to allocate a humongous region is "
tonyp@7923 167 "when we are allocating a single humongous region");
tonyp@7923 168
tschatzl@50127 169 HeapRegion* res = _hrm.allocate_free_region(is_old);
tschatzl@50127 170
tonyp@11449 171 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
tonyp@11449 172 // Currently, only attempts to allocate GC alloc regions set
tonyp@11449 173 // do_expand to true. So, we should only reach here during a
tonyp@11449 174 // safepoint. If this assumption changes we might have to
tonyp@11449 175 // reconsider the use of _expand_heap_after_alloc_failure.
tonyp@11449 176 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
tonyp@11449 177
brutisso@35061 178 log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
brutisso@35061 179 word_size * HeapWordSize);
brutisso@35061 180
johnc@8103 181 if (expand(word_size * HeapWordSize)) {
tonyp@11449 182 // Given that expand() succeeded in expanding the heap, and we
tonyp@11449 183 // always expand the heap by an amount aligned to the heap
jwilhelm@23471 184 // region size, the free list should in theory not be empty.
tschatzl@26157 185 // In either case allocate_free_region() will check for NULL.
tschatzl@26316 186 res = _hrm.allocate_free_region(is_old);
tonyp@11449 187 } else {
tonyp@11449 188 _expand_heap_after_alloc_failure = false;
johnc@8103 189 }
ysr@1374 190 }
ysr@1374 191 return res;
ysr@1374 192 }
ysr@1374 193
tonyp@8680 194 HeapWord*
tonyp@12381 195 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
tonyp@12381 196 uint num_regions,
sjohanss@49788 197 size_t word_size) {
tschatzl@26316 198 assert(first != G1_NO_HRM_INDEX, "pre-condition");
tonyp@26846 199 assert(is_humongous(word_size), "word_size should be humongous");
tonyp@8680 200 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
tonyp@8680 201
david@34291 202 // Index of last region in the series.
david@34291 203 uint last = first + num_regions - 1;
tonyp@8680 204
tonyp@8680 205 // We need to initialize the region(s) we just discovered. This is
tonyp@8680 206 // a bit tricky given that it can happen concurrently with
tonyp@8680 207 // refinement threads refining cards on these regions and
tonyp@8680 208 // potentially wanting to refine the BOT as they are scanning
tonyp@8680 209 // those cards (this can happen shortly after a cleanup; see CR
tonyp@8680 210 // 6991377). So we have to set up the region(s) carefully and in
tonyp@8680 211 // a specific order.
tonyp@8680 212
tonyp@8680 213 // The word size sum of all the regions we will allocate.
tonyp@12381 214 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
tonyp@8680 215 assert(word_size <= word_size_sum, "sanity");
tonyp@8680 216
tonyp@8680 217 // This will be the "starts humongous" region.
tonyp@9989 218 HeapRegion* first_hr = region_at(first);
tonyp@8680 219 // The header of the new object will be placed at the bottom of
tonyp@8680 220 // the first region.
tonyp@8680 221 HeapWord* new_obj = first_hr->bottom();
david@33786 222 // This will be the new top of the new object.
david@33786 223 HeapWord* obj_top = new_obj + word_size;
tonyp@8680 224
tonyp@8680 225 // First, we need to zero the header of the space that we will be
tonyp@8680 226 // allocating. When we update top further down, some refinement
tonyp@8680 227 // threads might try to scan the region. By zeroing the header we
tonyp@8680 228 // ensure that any thread that will try to scan the region will
tonyp@8680 229 // come across the zero klass word and bail out.
tonyp@8680 230 //
tonyp@8680 231 // NOTE: It would not have been correct to have used
tonyp@8680 232 // CollectedHeap::fill_with_object() and make the space look like
tonyp@8680 233 // an int array. The thread that is doing the allocation will
tonyp@8680 234 // later update the object header to a potentially different array
tonyp@8680 235 // type and, for a very short period of time, the klass and length
tonyp@8680 236 // fields will be inconsistent. This could cause a refinement
tonyp@8680 237 // thread to calculate the object size incorrectly.
tonyp@8680 238 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
tonyp@8680 239
kbarrett@42589 240 // Next, pad out the unused tail of the last region with filler
kbarrett@42589 241 // objects, for improved usage accounting.
david@34291 242 // How many words we use for filler objects.
david@34291 243 size_t word_fill_size = word_size_sum - word_size;
david@34291 244
david@34291 245 // How many words of memory we "waste" because they cannot hold a filler object.
david@34291 246 size_t words_not_fillable = 0;
david@34291 247
david@34291 248 if (word_fill_size >= min_fill_size()) {
david@34291 249 fill_with_objects(obj_top, word_fill_size);
david@34291 250 } else if (word_fill_size > 0) {
david@34291 251 // We have space to fill, but we cannot fit an object there.
david@34291 252 words_not_fillable = word_fill_size;
david@34291 253 word_fill_size = 0;
david@34249 254 }
david@34249 255
tonyp@8680 256 // We will set up the first region as "starts humongous". This
tonyp@8680 257 // will also update the BOT covering all the regions to reflect
tonyp@8680 258 // that there is a single object that starts at the bottom of the
tonyp@8680 259 // first region.
david@34291 260 first_hr->set_starts_humongous(obj_top, word_fill_size);
tschatzl@50102 261 _g1_policy->remset_tracker()->update_at_allocate(first_hr);
tonyp@8680 262 // Then, if there are any, we will set up the "continues
tonyp@8680 263 // humongous" regions.
tonyp@8680 264 HeapRegion* hr = NULL;
david@34291 265 for (uint i = first + 1; i <= last; ++i) {
tonyp@9989 266 hr = region_at(i);
tonyp@26846 267 hr->set_continues_humongous(first_hr);
tschatzl@50102 268 _g1_policy->remset_tracker()->update_at_allocate(hr);
tonyp@8680 269 }
tonyp@8680 270
tonyp@8680 271 // Up to this point no concurrent thread would have been able to
tonyp@8680 272 // do any scanning on any region in this series. All the top
tonyp@8680 273 // fields still point to bottom, so the intersection between
tonyp@8680 274 // [bottom,top] and [card_start,card_end] will be empty. Before we
tonyp@8680 275 // update the top fields, we'll do a storestore to make sure that
tonyp@8680 276 // no thread sees the update to top before the zeroing of the
tonyp@8680 277 // object header and the BOT initialization.
tonyp@8680 278 OrderAccess::storestore();
tonyp@8680 279
tonyp@8680 280 // Now, we will update the top fields of the "continues humongous"
david@34291 281 // regions except the last one.
david@34291 282 for (uint i = first; i < last; ++i) {
tonyp@9989 283 hr = region_at(i);
david@34249 284 hr->set_top(hr->end());
david@34291 285 }
david@34291 286
david@34291 287 hr = region_at(last);
david@34291 288 // If we cannot fit a filler object, we must set top to the end
david@34291 289 // of the humongous object, otherwise we cannot iterate the heap
david@34291 290 // and the BOT will not be complete.
david@34291 291 hr->set_top(hr->end() - words_not_fillable);
david@34291 292
david@34291 293 assert(hr->bottom() < obj_top && obj_top <= hr->end(),
david@34291 294 "obj_top should be in last region");
david@34291 295
david@35851 296 _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
david@34291 297
david@34291 298 assert(words_not_fillable == 0 ||
david@34291 299 first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
david@34291 300 "Miscalculation in humongous allocation");
david@34291 301
david@34291 302 increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
david@34291 303
david@34291 304 for (uint i = first; i <= last; ++i) {
david@34291 305 hr = region_at(i);
david@34291 306 _humongous_set.add(hr);
david@35079 307 _hr_printer.alloc(hr);
tonyp@8680 308 }
david@34249 309
tonyp@8680 310 return new_obj;
tonyp@8680 311 }
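// To summarize the publication order above: zero the object header, set up the
// "starts humongous" / "continues humongous" metadata, issue the storestore, and
// only then raise the regions' top fields, so a concurrent refinement thread can
// never observe a non-empty [bottom, top) range with an uninitialized header.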
tonyp@8680 312
tschatzl@34298 313 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
tschatzl@34298 314 assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
stefank@46622 315 return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
tschatzl@34298 316 }
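// For example, assuming 1 MB regions on a 64-bit VM (HeapRegion::GrainWords == 131072),
// a 300000-word object rounds up to align_up(300000, 131072) == 393216 words, i.e.
// humongous_obj_size_in_regions() returns 3.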
tschatzl@34298 317
ysr@1374 318 // If the allocation could fit into free regions w/o expansion, try that.
ysr@1374 319 // Otherwise, if we can expand the heap, do so.
ysr@1374 320 // Otherwise, if using ex regions might help, try with ex given back.
sjohanss@49788 321 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
tonyp@7923 322 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
tonyp@7923 323
david@35851 324 _verifier->verify_region_sets_optional();
ysr@1374 325
tschatzl@26316 326 uint first = G1_NO_HRM_INDEX;
tschatzl@34298 327 uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
tschatzl@26157 328
tschatzl@26157 329 if (obj_regions == 1) {
tschatzl@26157 330 // Only one region to allocate, try to use a fast path by directly allocating
tschatzl@26157 331 // from the free lists. Do not try to expand here, we will potentially do that
tschatzl@26157 332 // later.
tschatzl@26157 333 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
tschatzl@26157 334 if (hr != NULL) {
tschatzl@26316 335 first = hr->hrm_index();
tschatzl@26157 336 }
tschatzl@26157 337 } else {
tschatzl@26157 338 // Policy: Try only empty regions (i.e. already committed first). Maybe we
tschatzl@26157 339 // are lucky enough to find some.
tschatzl@26316 340 first = _hrm.find_contiguous_only_empty(obj_regions);
tschatzl@26316 341 if (first != G1_NO_HRM_INDEX) {
tschatzl@26316 342 _hrm.allocate_free_regions_starting_at(first, obj_regions);
tschatzl@26316 343 }
tschatzl@26316 344 }
tschatzl@26316 345
tschatzl@26316 346 if (first == G1_NO_HRM_INDEX) {
tschatzl@26157 347 // Policy: We could not find enough regions for the humongous object in the
tschatzl@26157 348 // free list. Look through the heap to find a mix of free and uncommitted regions.
tschatzl@26157 349 // If so, try expansion.
tschatzl@26316 350 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
tschatzl@26316 351 if (first != G1_NO_HRM_INDEX) {
tschatzl@26157 352 // We found something. Make sure these regions are committed, i.e. expand
tschatzl@26157 353 // the heap. Alternatively we could do a defragmentation GC.
brutisso@35061 354 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
brutisso@35061 355 word_size * HeapWordSize);
brutisso@35061 356
tschatzl@42595 357 _hrm.expand_at(first, obj_regions, workers());
tschatzl@26157 358 g1_policy()->record_new_heap_size(num_regions());
tschatzl@26157 359
tschatzl@26157 360 #ifdef ASSERT
tschatzl@26157 361 for (uint i = first; i < first + obj_regions; ++i) {
tschatzl@26157 362 HeapRegion* hr = region_at(i);
tonyp@26696 363 assert(hr->is_free(), "sanity");
tschatzl@26157 364 assert(hr->is_empty(), "sanity");
tschatzl@26157 365 assert(is_on_master_free_list(hr), "sanity");
johnc@8103 366 }
tschatzl@26157 367 #endif
tschatzl@26316 368 _hrm.allocate_free_regions_starting_at(first, obj_regions);
tschatzl@26157 369 } else {
tschatzl@26157 370 // Policy: Potentially trigger a defragmentation GC.
tonyp@7923 371 }
tonyp@7923 372 }
tonyp@7923 373
tonyp@8680 374 HeapWord* result = NULL;
tschatzl@26316 375 if (first != G1_NO_HRM_INDEX) {
sjohanss@49788 376 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
tonyp@8680 377 assert(result != NULL, "it should always return a valid result");
tonyp@10671 378
tonyp@10671 379 // A successful humongous object allocation changes the used space
tonyp@10671 380 // information of the old generation so we need to recalculate the
tonyp@10671 381 // sizes and update the jstat counters here.
tonyp@10671 382 g1mm()->update_sizes();
tonyp@7923 383 }
tonyp@7923 384
david@35851 385 _verifier->verify_region_sets_optional();
tonyp@8680 386
tonyp@8680 387 return result;
ysr@1374 388 }
ysr@1374 389
sjohanss@50470 390 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
sjohanss@50470 391 size_t requested_size,
sjohanss@50470 392 size_t* actual_size) {
tonyp@7398 393 assert_heap_not_locked_and_not_at_safepoint();
sjohanss@50470 394 assert(!is_humongous(requested_size), "we do not allow humongous TLABs");
sjohanss@50470 395
sjohanss@50470 396 return attempt_allocation(min_size, requested_size, actual_size);
ysr@1374 397 }
ysr@1374 398
ysr@1374 399 HeapWord*
ysr@1374 400 G1CollectedHeap::mem_allocate(size_t word_size,
tonyp@7398 401 bool* gc_overhead_limit_was_exceeded) {
tonyp@7398 402 assert_heap_not_locked_and_not_at_safepoint();
ysr@1374 403
tschatzl@48951 404 if (is_humongous(word_size)) {
tschatzl@48951 405 return attempt_allocation_humongous(word_size);
ysr@1374 406 }
sjohanss@50470 407 size_t dummy = 0;
sjohanss@50470 408 return attempt_allocation(word_size, word_size, &dummy);
ysr@1374 409 }
ysr@1374 410
sjohanss@49788 411 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
tschatzl@48951 412 ResourceMark rm; // For retrieving the thread names in log messages.
tschatzl@48951 413
tonyp@8928 414 // Make sure you read the note in attempt_allocation_humongous().
tonyp@8928 415
tonyp@8928 416 assert_heap_not_locked_and_not_at_safepoint();
tonyp@26846 417 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
tonyp@8928 418 "be called for humongous allocation requests");
tonyp@8928 419
tonyp@8928 420 // We should only get here after the first-level allocation attempt
tonyp@8928 421 // (attempt_allocation()) failed to allocate.
tonyp@8928 422
tonyp@8928 423 // We will loop until a) we manage to successfully perform the
tonyp@8928 424 // allocation or b) we successfully schedule a collection which
tonyp@8928 425 // fails to perform the allocation. b) is the only case when we'll
tonyp@8928 426 // return NULL.
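// In outline, each iteration of the loop below:
//   1. takes the Heap_lock and retries the allocation;
//   2. if the GCLocker is active and bound for a GC, optionally force-expands the
//      young gen instead of waiting;
//   3. otherwise either schedules a collection pause (and retries afterwards) or
//      stalls on the GCLocker, warning if we appear to loop forever.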
tonyp@8928 427 HeapWord* result = NULL;
tschatzl@48951 428 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
tonyp@8928 429 bool should_try_gc;
mlarsson@29078 430 uint gc_count_before;
tonyp@8928 431
tonyp@8928 432 {
tonyp@8928 433 MutexLockerEx x(Heap_lock);
sjohanss@49788 434 result = _allocator->attempt_allocation_locked(word_size);
tonyp@8928 435 if (result != NULL) {
tonyp@8928 436 return result;
tonyp@8928 437 }
tonyp@8928 438
tschatzl@48951 439 // If the GCLocker is active and we are bound for a GC, try expanding young gen.
tschatzl@48951 440 // This is different from when only GCLocker::needs_gc() is set: we try to avoid
tschatzl@48951 441 // waiting on the active GCLocker for too long by expanding the young gen instead.
tschatzl@48951 442 if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
tschatzl@48951 443 // No need for an ergo message here, can_expand_young_list() does this when
tschatzl@48951 444 // it returns true.
sjohanss@49788 445 result = _allocator->attempt_allocation_force(word_size);
tschatzl@48951 446 if (result != NULL) {
tschatzl@48951 447 return result;
johnc@12778 448 }
tonyp@8928 449 }
tschatzl@48951 450 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
tschatzl@48951 451 // the GCLocker initiated GC has been performed and then retry. This includes
tschatzl@48951 452 // the case when the GCLocker is no longer active but its GC has not yet been performed.
tschatzl@48951 453 should_try_gc = !GCLocker::needs_gc();
tschatzl@48951 454 // Read the GC count while still holding the Heap_lock.
tschatzl@48951 455 gc_count_before = total_collections();
tonyp@8928 456 }
tonyp@8928 457
tonyp@8928 458 if (should_try_gc) {
tonyp@8928 459 bool succeeded;
brutisso@19549 460 result = do_collection_pause(word_size, gc_count_before, &succeeded,
mlarsson@29078 461 GCCause::_g1_inc_collection_pause);
tonyp@8928 462 if (result != NULL) {
tonyp@8928 463 assert(succeeded, "only way to get back a non-NULL result");
tschatzl@48951 464 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
tschatzl@48951 465 Thread::current()->name(), p2i(result));
tonyp@8928 466 return result;
tonyp@8928 467 }
tonyp@8928 468
tonyp@8928 469 if (succeeded) {
tschatzl@48951 470 // We successfully scheduled a collection which failed to allocate. No
tschatzl@48951 471 // point in trying to allocate further. We'll just return NULL.
tschatzl@48951 472 log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
tschatzl@48951 473 SIZE_FORMAT " words", Thread::current()->name(), word_size);
tonyp@8928 474 return NULL;
tonyp@8928 475 }
tschatzl@48951 476 log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
tschatzl@48951 477 Thread::current()->name(), word_size);
tonyp@8928 478 } else {
tschatzl@48951 479 // Failed to schedule a collection.
tschatzl@48951 480 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
tschatzl@48951 481 log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
tschatzl@48951 482 SIZE_FORMAT " words", Thread::current()->name(), word_size);
mgerdin@16604 483 return NULL;
mgerdin@16604 484 }
tschatzl@48951 485 log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
johnc@12778 486 // The GCLocker is either active or the GCLocker initiated
johnc@12778 487 // GC has not yet been performed. Stall until it is and
johnc@12778 488 // then retry the allocation.
david@35492 489 GCLocker::stall_until_clear();
tschatzl@48951 490 gclocker_retry_count += 1;
tonyp@8928 491 }
tonyp@8928 492
sla@18025 493 // We can reach here if we were unsuccessful in scheduling a
tonyp@8928 494 // collection (because another thread beat us to it) or if we were
tonyp@8928 495 // stalled due to the GC locker. In either case we should retry the
tonyp@8928 496 // allocation attempt in case another thread successfully
tonyp@8928 497 // performed a collection and reclaimed enough space. We do the
tonyp@8928 498 // first attempt (without holding the Heap_lock) here and the
tonyp@8928 499 // follow-on attempt will be at the start of the next loop
tonyp@8928 500 // iteration (after taking the Heap_lock).
sjohanss@50470 501 size_t dummy = 0;
sjohanss@50470 502 result = _allocator->attempt_allocation(word_size, word_size, &dummy);
johnc@12227 503 if (result != NULL) {
tonyp@8928 504 return result;
tonyp@8928 505 }
tonyp@8928 506
tonyp@8928 507 // Give a warning if we seem to be looping forever.
tonyp@8928 508 if ((QueuedAllocationWarningCount > 0) &&
tonyp@8928 509 (try_count % QueuedAllocationWarningCount == 0)) {
tschatzl@48951 510 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
tschatzl@48951 511 Thread::current()->name(), try_count, word_size);
tonyp@8928 512 }
tonyp@8928 513 }
tonyp@8928 514
tonyp@8928 515 ShouldNotReachHere();
tonyp@8928 516 return NULL;
tonyp@8928 517 }
tonyp@8928 518
jiangli@46810 519 void G1CollectedHeap::begin_archive_alloc_range(bool open) {
kbarrett@49798 520 assert_at_safepoint_on_vm_thread();
jiangli@31346 521 if (_archive_allocator == NULL) {
jiangli@46810 522 _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
jiangli@31346 523 }
jiangli@31346 524 }
jiangli@31346 525
jiangli@31346 526 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
jiangli@31346 527 // Allocations in archive regions cannot be of a size that would be considered
jiangli@31346 528 // humongous even for a minimum-sized region, because G1 region sizes/boundaries
jiangli@31346 529 // may be different at archive-restore time.
jiangli@31346 530 return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
jiangli@31346 531 }
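// For example, assuming the usual G1 humongous threshold of half a region and a
// minimum region size of 1 MB (131072 words), any archive allocation of 65536
// words (512 KB) or more is rejected here.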
jiangli@31346 532
jiangli@31346 533 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
kbarrett@49798 534 assert_at_safepoint_on_vm_thread();
jiangli@31346 535 assert(_archive_allocator != NULL, "_archive_allocator not initialized");
jiangli@31346 536 if (is_archive_alloc_too_large(word_size)) {
jiangli@31346 537 return NULL;
jiangli@31346 538 }
jiangli@31346 539 return _archive_allocator->archive_mem_allocate(word_size);
jiangli@31346 540 }
jiangli@31346 541
jiangli@31346 542 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
jiangli@31346 543 size_t end_alignment_in_bytes) {
kbarrett@49798 544 assert_at_safepoint_on_vm_thread();
jiangli@31346 545 assert(_archive_allocator != NULL, "_archive_allocator not initialized");
jiangli@31346 546
jiangli@31346 547 // Call complete_archive to do the real work, filling in the MemRegion
jiangli@31346 548 // array with the archive regions.
jiangli@31346 549 _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
jiangli@31346 550 delete _archive_allocator;
jiangli@31346 551 _archive_allocator = NULL;
jiangli@31346 552 }
jiangli@31346 553
jiangli@31346 554 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
jiangli@31346 555 assert(ranges != NULL, "MemRegion array NULL");
jiangli@31346 556 assert(count != 0, "No MemRegions provided");
jiangli@31346 557 MemRegion reserved = _hrm.reserved();
jiangli@31346 558 for (size_t i = 0; i < count; i++) {
jiangli@31346 559 if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
jiangli@31346 560 return false;
jiangli@31346 561 }
jiangli@31346 562 }
jiangli@31346 563 return true;
jiangli@31346 564 }
jiangli@31346 565
jiangli@46810 566 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
jiangli@46810 567 size_t count,
jiangli@46810 568 bool open) {
jiangli@32589 569 assert(!is_init_completed(), "Expect to be called at JVM init time");
jiangli@31346 570 assert(ranges != NULL, "MemRegion array NULL");
jiangli@31346 571 assert(count != 0, "No MemRegions provided");
jiangli@31346 572 MutexLockerEx x(Heap_lock);
jiangli@31346 573
jiangli@31346 574 MemRegion reserved = _hrm.reserved();
jiangli@31346 575 HeapWord* prev_last_addr = NULL;
jiangli@31346 576 HeapRegion* prev_last_region = NULL;
jiangli@31346 577
jiangli@31346 578 // Temporarily disable pretouching of heap pages. This interface is used
jiangli@31346 579 // when mmap'ing archived heap data in, so pre-touching is wasted.
jiangli@31346 580 FlagSetting fs(AlwaysPreTouch, false);
jiangli@31346 581
sjohanss@46285 582 // Enable archive object checking used by G1MarkSweep. We have to let it know
jiangli@31346 583 // about each archive range, so that objects in those ranges aren't marked.
sjohanss@46285 584 G1ArchiveAllocator::enable_archive_object_check();
jiangli@31346 585
jiangli@31346 586 // For each specified MemRegion range, allocate the corresponding G1
jiangli@46810 587 // regions and mark them as archive regions. We expect the ranges
jiangli@46810 588 // to be in ascending starting address order, without overlap.
jiangli@31346 589 for (size_t i = 0; i < count; i++) {
jiangli@31346 590 MemRegion curr_range = ranges[i];
jiangli@31346 591 HeapWord* start_address = curr_range.start();
jiangli@31346 592 size_t word_size = curr_range.word_size();
jiangli@31346 593 HeapWord* last_address = curr_range.last();
jiangli@31346 594 size_t commits = 0;
jiangli@31346 595
jiangli@31346 596 guarantee(reserved.contains(start_address) && reserved.contains(last_address),
david@33105 597 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
david@33105 598 p2i(start_address), p2i(last_address));
jiangli@31346 599 guarantee(start_address > prev_last_addr,
david@33105 600 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
david@33105 601 p2i(start_address), p2i(prev_last_addr));
jiangli@31346 602 prev_last_addr = last_address;
jiangli@31346 603
jiangli@31346 604 // Check for ranges that start in the same G1 region in which the previous
jiangli@31346 605 // range ended, and adjust the start address so we don't try to allocate
jiangli@31346 606 // the same region again. If the current range is entirely within that
jiangli@31346 607 // region, skip it, just adjusting the recorded top.
jiangli@31346 608 HeapRegion* start_region = _hrm.addr_to_region(start_address);
jiangli@31346 609 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
jiangli@31346 610 start_address = start_region->end();
jiangli@31346 611 if (start_address > last_address) {
ehelin@31975 612 increase_used(word_size * HeapWordSize);
jiangli@31346 613 start_region->set_top(last_address + 1);
jiangli@31346 614 continue;
jiangli@31346 615 }
jiangli@31346 616 start_region->set_top(start_address);
jiangli@31346 617 curr_range = MemRegion(start_address, last_address + 1);
jiangli@31346 618 start_region = _hrm.addr_to_region(start_address);
jiangli@31346 619 }
jiangli@31346 620
jiangli@31346 621 // Perform the actual region allocation, exiting if it fails.
jiangli@31346 622 // Then note how much new space we have allocated.
tschatzl@42595 623 if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
jiangli@31346 624 return false;
jiangli@31346 625 }
ehelin@31975 626 increase_used(word_size * HeapWordSize);
jiangli@31346 627 if (commits != 0) {
brutisso@35061 628 log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
brutisso@35061 629 HeapRegion::GrainWords * HeapWordSize * commits);
brutisso@35061 630
jiangli@31346 631 }
jiangli@31346 632
jiangli@46810 633 // Mark each G1 region touched by the range as archive, add it to
sjohanss@49788 634 // the old set, and set top.
jiangli@31346 635 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
jiangli@31346 636 HeapRegion* last_region = _hrm.addr_to_region(last_address);
jiangli@31346 637 prev_last_region = last_region;
jiangli@31346 638
jiangli@31346 639 while (curr_region != NULL) {
jiangli@31346 640 assert(curr_region->is_empty() && !curr_region->is_pinned(),
david@33105 641 "Region already in use (index %u)", curr_region->hrm_index());
jiangli@46810 642 if (open) {
jiangli@46810 643 curr_region->set_open_archive();
jiangli@46810 644 } else {
jiangli@46810 645 curr_region->set_closed_archive();
jiangli@46810 646 }
david@35079 647 _hr_printer.alloc(curr_region);
jiangli@31346 648 _old_set.add(curr_region);
jiangli@46810 649 HeapWord* top;
jiangli@46810 650 HeapRegion* next_region;
jiangli@31346 651 if (curr_region != last_region) {
jiangli@46810 652 top = curr_region->end();
jiangli@46810 653 next_region = _hrm.next_region_in_heap(curr_region);
jiangli@31346 654 } else {
jiangli@46810 655 top = last_address + 1;
jiangli@46810 656 next_region = NULL;
jiangli@31346 657 }
jiangli@46810 658 curr_region->set_top(top);
jiangli@46810 659 curr_region->set_first_dead(top);
jiangli@46810 660 curr_region->set_end_of_live(top);
jiangli@46810 661 curr_region = next_region;
jiangli@31346 662 }
jiangli@31346 663
jiangli@46810 664 // Notify mark-sweep of the archive
jiangli@46810 665 G1ArchiveAllocator::set_range_archive(curr_range, open);
jiangli@31346 666 }
jiangli@31346 667 return true;
jiangli@31346 668 }
jiangli@31346 669
jiangli@31346 670 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
jiangli@32589 671 assert(!is_init_completed(), "Expect to be called at JVM init time");
jiangli@31346 672 assert(ranges != NULL, "MemRegion array NULL");
jiangli@31346 673 assert(count != 0, "No MemRegions provided");
jiangli@31346 674 MemRegion reserved = _hrm.reserved();
jiangli@31346 675 HeapWord *prev_last_addr = NULL;
jiangli@31346 676 HeapRegion* prev_last_region = NULL;
jiangli@31346 677
jiangli@31346 678 // For each MemRegion, create filler objects, if needed, in the G1 regions
jiangli@31346 679 // that contain the address range. The address range actually within the
jiangli@31346 680 // MemRegion will not be modified. That is assumed to have been initialized
jiangli@31346 681 // elsewhere, probably via an mmap of archived heap data.
jiangli@31346 682 MutexLockerEx x(Heap_lock);
jiangli@31346 683 for (size_t i = 0; i < count; i++) {
jiangli@31346 684 HeapWord* start_address = ranges[i].start();
jiangli@31346 685 HeapWord* last_address = ranges[i].last();
jiangli@31346 686
jiangli@31346 687 assert(reserved.contains(start_address) && reserved.contains(last_address),
david@33105 688 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
david@33105 689 p2i(start_address), p2i(last_address));
jiangli@31346 690 assert(start_address > prev_last_addr,
david@33105 691 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
david@33105 692 p2i(start_address), p2i(prev_last_addr));
jiangli@31346 693
jiangli@31346 694 HeapRegion* start_region = _hrm.addr_to_region(start_address);
jiangli@31346 695 HeapRegion* last_region = _hrm.addr_to_region(last_address);
jiangli@31346 696 HeapWord* bottom_address = start_region->bottom();
jiangli@31346 697
jiangli@31346 698 // Check for a range beginning in the same region in which the
jiangli@31346 699 // previous one ended.
jiangli@31346 700 if (start_region == prev_last_region) {
jiangli@31346 701 bottom_address = prev_last_addr + 1;
jiangli@31346 702 }
jiangli@31346 703
jiangli@31346 704 // Verify that the regions were all marked as archive regions by
jiangli@31346 705 // alloc_archive_regions.
jiangli@31346 706 HeapRegion* curr_region = start_region;
jiangli@31346 707 while (curr_region != NULL) {
jiangli@31346 708 guarantee(curr_region->is_archive(),
david@33105 709 "Expected archive region at index %u", curr_region->hrm_index());
jiangli@31346 710 if (curr_region != last_region) {
jiangli@31346 711 curr_region = _hrm.next_region_in_heap(curr_region);
jiangli@31346 712 } else {
jiangli@31346 713 curr_region = NULL;
jiangli@31346 714 }
jiangli@31346 715 }
jiangli@31346 716
jiangli@31346 717 prev_last_addr = last_address;
jiangli@31346 718 prev_last_region = last_region;
jiangli@31346 719
jiangli@31346 720 // Fill the memory below the allocated range with dummy object(s),
jiangli@31346 721 // if the region bottom does not match the range start, or if the previous
jiangli@31346 722 // range ended within the same G1 region, and there is a gap.
jiangli@31346 723 if (start_address != bottom_address) {
jiangli@31346 724 size_t fill_size = pointer_delta(start_address, bottom_address);
jiangli@31346 725 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
ehelin@31975 726 increase_used(fill_size * HeapWordSize);
jiangli@31346 727 }
jiangli@31346 728 }
jiangli@31346 729 }
jiangli@31346 730
sjohanss@50470 731 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
sjohanss@50470 732 size_t desired_word_size,
sjohanss@50470 733 size_t* actual_word_size) {
tschatzl@32185 734 assert_heap_not_locked_and_not_at_safepoint();
sjohanss@50470 735 assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
tschatzl@32185 736 "be called for humongous allocation requests");
tschatzl@32185 737
sjohanss@50470 738 HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
tschatzl@32185 739
tschatzl@32185 740 if (result == NULL) {
sjohanss@50470 741 *actual_word_size = desired_word_size;
sjohanss@50470 742 result = attempt_allocation_slow(desired_word_size);
tschatzl@32185 743 }
sjohanss@50470 744
tschatzl@32185 745 assert_heap_not_locked();
tschatzl@32185 746 if (result != NULL) {
sjohanss@50470 747 assert(*actual_word_size != 0, "Actual size must have been set here");
sjohanss@50470 748 dirty_young_block(result, *actual_word_size);
sjohanss@50470 749 } else {
sjohanss@50470 750 *actual_word_size = 0;
tschatzl@32185 751 }
sjohanss@50470 752
tschatzl@32185 753 return result;
tschatzl@32185 754 }
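// Callers illustrate the min/desired/actual contract above: allocate_new_tlab()
// passes (min_size, requested_size, &actual_size) and lets the allocator return
// anything in that range, while mem_allocate() and the slow-path retry pass
// word_size for both bounds and discard the actual size through a dummy out-parameter.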
tschatzl@32185 755
jiangli@32589 756 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
jiangli@32589 757 assert(!is_init_completed(), "Expect to be called at JVM init time");
jiangli@32589 758 assert(ranges != NULL, "MemRegion array NULL");
jiangli@32589 759 assert(count != 0, "No MemRegions provided");
jiangli@32589 760 MemRegion reserved = _hrm.reserved();
jiangli@32589 761 HeapWord* prev_last_addr = NULL;
jiangli@32589 762 HeapRegion* prev_last_region = NULL;
jiangli@32589 763 size_t size_used = 0;
jiangli@32589 764 size_t uncommitted_regions = 0;
jiangli@32589 765
jiangli@32589 766 // For each MemRegion, free the G1 regions that constitute it, and
jiangli@32589 767 // notify mark-sweep that the range is no longer to be considered 'archive.'
jiangli@32589 768 MutexLockerEx x(Heap_lock);
jiangli@32589 769 for (size_t i = 0; i < count; i++) {
jiangli@32589 770 HeapWord* start_address = ranges[i].start();
jiangli@32589 771 HeapWord* last_address = ranges[i].last();
jiangli@32589 772
jiangli@32589 773 assert(reserved.contains(start_address) && reserved.contains(last_address),
david@33105 774 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
david@33105 775 p2i(start_address), p2i(last_address));
jiangli@32589 776 assert(start_address > prev_last_addr,
david@33105 777 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
david@33105 778 p2i(start_address), p2i(prev_last_addr));
jiangli@32589 779 size_used += ranges[i].byte_size();
jiangli@32589 780 prev_last_addr = last_address;
jiangli@32589 781
jiangli@32589 782 HeapRegion* start_region = _hrm.addr_to_region(start_address);
jiangli@32589 783 HeapRegion* last_region = _hrm.addr_to_region(last_address);
jiangli@32589 784
jiangli@32589 785 // Check for ranges that start in the same G1 region in which the previous
jiangli@32589 786 // range ended, and adjust the start address so we don't try to free
jiangli@32589 787 // the same region again. If the current range is entirely within that
jiangli@32589 788 // region, skip it.
jiangli@32589 789 if (start_region == prev_last_region) {
jiangli@32589 790 start_address = start_region->end();
jiangli@32589 791 if (start_address > last_address) {
jiangli@32589 792 continue;
jiangli@32589 793 }
jiangli@32589 794 start_region = _hrm.addr_to_region(start_address);
jiangli@32589 795 }
jiangli@32589 796 prev_last_region = last_region;
jiangli@32589 797
jiangli@32589 798 // After verifying that each region was marked as an archive region by
jiangli@32589 799 // alloc_archive_regions, set it free and empty and uncommit it.
jiangli@32589 800 HeapRegion* curr_region = start_region;
jiangli@32589 801 while (curr_region != NULL) {
jiangli@32589 802 guarantee(curr_region->is_archive(),
david@33105 803 "Expected archive region at index %u", curr_region->hrm_index());
jiangli@32589 804 uint curr_index = curr_region->hrm_index();
jiangli@32589 805 _old_set.remove(curr_region);
jiangli@32589 806 curr_region->set_free();
jiangli@32589 807 curr_region->set_top(curr_region->bottom());
jiangli@32589 808 if (curr_region != last_region) {
jiangli@32589 809 curr_region = _hrm.next_region_in_heap(curr_region);
jiangli@32589 810 } else {
jiangli@32589 811 curr_region = NULL;
jiangli@32589 812 }
jiangli@32589 813 _hrm.shrink_at(curr_index, 1);
jiangli@32589 814 uncommitted_regions++;
jiangli@32589 815 }
jiangli@32589 816
jiangli@32589 817 // Notify mark-sweep that this is no longer an archive range.
sjohanss@46285 818 G1ArchiveAllocator::set_range_archive(ranges[i], false);
jiangli@32589 819 }
jiangli@32589 820
jiangli@32589 821 if (uncommitted_regions != 0) {
brutisso@35061 822 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
brutisso@35061 823 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
jiangli@32589 824 }
jiangli@32589 825 decrease_used(size_used);
jiangli@32589 826 }
jiangli@32589 827
kbarrett@51145 828 oop G1CollectedHeap::materialize_archived_object(oop obj) {
kbarrett@51145 829 assert(obj != NULL, "archived obj is NULL");
kbarrett@51145 830 assert(MetaspaceShared::is_archive_object(obj), "must be archived object");
kbarrett@51145 831
kbarrett@51145 832 // Loading an archived object makes it strongly reachable. If it is
kbarrett@51145 833 // loaded during concurrent marking, it must be enqueued to the SATB
kbarrett@51145 834 // queue, shading the previously white object gray.
kbarrett@51145 835 G1BarrierSet::enqueue(obj);
kbarrett@51145 836
kbarrett@51145 837 return obj;
kbarrett@51145 838 }
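// Conceptually, the enqueue above is the SATB pre-barrier applied by hand: when a
// concurrent marking cycle is active, the reference is recorded in the current
// thread's SATB mark queue so the marker treats the archived object as live;
// outside of marking the call is effectively a no-op.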
kbarrett@51145 839
tschatzl@48951 840 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
tschatzl@48951 841 ResourceMark rm; // For retrieving the thread names in log messages.
tschatzl@48951 842
tonyp@8928 843 // The structure of this method has a lot of similarities to
tonyp@8928 844 // attempt_allocation_slow(). The reason these two were not merged
tonyp@8928 845 // into a single one is that such a method would require several "if
tonyp@8928 846 // allocation is not humongous do this, otherwise do that"
tonyp@8928 847 // conditional paths which would obscure its flow. In fact, an early
tonyp@8928 848 // version of this code did use a unified method which was harder to
tonyp@8928 849 // follow and, as a result, it had subtle bugs that were hard to
tonyp@8928 850 // track down. So keeping these two methods separate allows each to
tonyp@8928 851 // be more readable. It will be good to keep these two in sync as
tonyp@8928 852 // much as possible.
tonyp@8928 853
tonyp@8928 854 assert_heap_not_locked_and_not_at_safepoint();
tonyp@26846 855 assert(is_humongous(word_size), "attempt_allocation_humongous() "
tonyp@8928 856 "should only be called for humongous allocations");
tonyp@8928 857
brutisso@11581 858 // Humongous objects can exhaust the heap quickly, so we should check if we
brutisso@11581 859 // need to start a marking cycle at each humongous object allocation. We do
brutisso@11581 860 // the check before we do the actual allocation. The reason for doing it
brutisso@11581 861 // before the allocation is that we avoid having to keep track of the newly
brutisso@11581 862 // allocated memory while we do a GC.
tonyp@11754 863 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
tonyp@11754 864 word_size)) {
brutisso@11581 865 collect(GCCause::_g1_humongous_allocation);
brutisso@11581 866 }
brutisso@11581 867
tonyp@8928 868 // We will loop until a) we manage to successfully perform the
tonyp@8928 869 // allocation or b) we successfully schedule a collection which
tonyp@8928 870 // fails to perform the allocation. b) is the only case when we'll
tonyp@8928 871 // return NULL.
tonyp@8928 872 HeapWord* result = NULL;
tschatzl@48951 873 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
tonyp@8928 874 bool should_try_gc;
mlarsson@29078 875 uint gc_count_before;
tonyp@8928 876
tschatzl@48951 877
tonyp@8928 878 {
tonyp@8928 879 MutexLockerEx x(Heap_lock);
tonyp@8928 880
tonyp@8928 881 // Given that humongous objects are not allocated in young
tonyp@8928 882 // regions, we'll first try to do the allocation without doing a
tonyp@8928 883 // collection hoping that there's enough space in the heap.
sjohanss@49788 884 result = humongous_obj_allocate(word_size);
tonyp@8928 885 if (result != NULL) {
tschatzl@34298 886 size_t size_in_regions = humongous_obj_size_in_regions(word_size);
tschatzl@34298 887 g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
tonyp@8928 888 return result;
tonyp@8928 889 }
tonyp@8928 890
tschatzl@48951 891 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
tschatzl@48951 892 // the GCLocker initiated GC has been performed and then retry. This includes
tschatzl@48951 893 // the case when the GCLocker is no longer active but its GC has not yet been performed.
tschatzl@48951 894 should_try_gc = !GCLocker::needs_gc();
tschatzl@48951 895 // Read the GC count while still holding the Heap_lock.
tschatzl@48951 896 gc_count_before = total_collections();
tonyp@8928 897 }
tonyp@8928 898
tonyp@8928 899 if (should_try_gc) {
tonyp@8928 900 bool succeeded;
brutisso@19549 901 result = do_collection_pause(word_size, gc_count_before, &succeeded,
mlarsson@29078 902 GCCause::_g1_humongous_allocation);
tonyp@8928 903 if (result != NULL) {
tonyp@8928 904 assert(succeeded, "only way to get back a non-NULL result");
tschatzl@48951 905 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
tschatzl@48951 906 Thread::current()->name(), p2i(result));
tonyp@8928 907 return result;
tonyp@8928 908 }
tonyp@8928 909
tonyp@8928 910 if (succeeded) {
tschatzl@48951 911 // We successfully scheduled a collection which failed to allocate. No
tschatzl@48951 912 // point in trying to allocate further. We'll just return NULL.
tschatzl@48951 913 log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
tschatzl@48951 914 SIZE_FORMAT " words", Thread::current()->name(), word_size);
tonyp@8928 915 return NULL;
tonyp@8928 916 }
tschatzl@48951 917 log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
tschatzl@48951 918 Thread::current()->name(), word_size);
tonyp@8928 919 } else {
tschatzl@48951 920 // Failed to schedule a collection.
tschatzl@48951 921 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
tschatzl@48951 922 log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
tschatzl@48951 923 SIZE_FORMAT " words", Thread::current()->name(), word_size);
mgerdin@16604 924 return NULL;
mgerdin@16604 925 }
tschatzl@48951 926 log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
johnc@12778 927 // The GCLocker is either active or the GCLocker initiated
johnc@12778 928 // GC has not yet been performed. Stall until it is and
johnc@12778 929 // then retry the allocation.
david@35492 930 GCLocker::stall_until_clear();
tschatzl@48951 931 gclocker_retry_count += 1;
tonyp@8928 932 }
tonyp@8928 933
tschatzl@48951 934
sla@18025 935 // We can reach here if we were unsuccessful in scheduling a
tonyp@8928 936 // collection (because another thread beat us to it) or if we were
tonyp@8928 937 // stalled due to the GC locker. In either case we should retry the
tonyp@8928 938 // allocation attempt in case another thread successfully
tschatzl@48951 939 // performed a collection and reclaimed enough space.
tschatzl@48951 940 // Humongous object allocation always needs a lock, so we wait for the retry
tschatzl@48951 941 // in the next iteration of the loop, unlike in the regular (non-humongous) allocation case.
tschatzl@48951 942 // Give a warning if we seem to be looping forever.
tonyp@8928 943
tonyp@8928 944 if ((QueuedAllocationWarningCount > 0) &&
tonyp@8928 945 (try_count % QueuedAllocationWarningCount == 0)) {
tschatzl@48951 946 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
tschatzl@48951 947 Thread::current()->name(), try_count, word_size);
tonyp@8928 948 }
tonyp@8928 949 }
tonyp@8928 950
tonyp@8928 951 ShouldNotReachHere();
tonyp@8928 952 return NULL;
tonyp@8928 953 }
tonyp@8928 954
tonyp@8928 955 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
sjohanss@26837 956 bool expect_null_mutator_alloc_region) {
kbarrett@49798 957 assert_at_safepoint_on_vm_thread();
sjohanss@49788 958 assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
tonyp@8928 959 "the current alloc region was unexpectedly found to be non-NULL");
tonyp@8928 960
tonyp@26846 961 if (!is_humongous(word_size)) {
sjohanss@49788 962 return _allocator->attempt_allocation_locked(word_size);
tonyp@8928 963 } else {
sjohanss@49788 964 HeapWord* result = humongous_obj_allocate(word_size);
brutisso@11576 965 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
drwhite@31331 966 collector_state()->set_initiate_conc_mark_if_possible(true);
brutisso@11576 967 }
brutisso@11576 968 return result;
tonyp@8928 969 }
tonyp@8928 970
tonyp@8928 971 ShouldNotReachHere();
ysr@1374 972 }
ysr@1374 973
tonyp@10001 974 class PostCompactionPrinterClosure: public HeapRegionClosure {
tonyp@10001 975 private:
tonyp@10001 976 G1HRPrinter* _hr_printer;
tonyp@10001 977 public:
tschatzl@49381 978 bool do_heap_region(HeapRegion* hr) {
tonyp@10001 979 assert(!hr->is_young(), "not expecting to find young regions");
david@35079 980 _hr_printer->post_compaction(hr);
tonyp@10001 981 return false;
tonyp@10001 982 }
tonyp@10001 983
tonyp@10001 984 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
tonyp@10001 985 : _hr_printer(hr_printer) { }
tonyp@10001 986 };
tonyp@10001 987
tschatzl@26316 988 void G1CollectedHeap::print_hrm_post_compaction() {
brutisso@35061 989 if (_hr_printer.is_active()) {
brutisso@35061 990 PostCompactionPrinterClosure cl(hr_printer());
brutisso@35061 991 heap_region_iterate(&cl);
brutisso@35061 992 }
tonyp@13336 993 }
tonyp@13336 994
sjohanss@46828 995 void G1CollectedHeap::abort_concurrent_cycle() {
sjohanss@46828 996 // If we start the compaction before the CM threads finish
sjohanss@46828 997 // scanning the root regions we might trip them over as we'll
sjohanss@46828 998 // be moving objects / updating references. So let's wait until
sjohanss@46828 999 // they are done. By telling them to abort, they should complete
sjohanss@46828 1000 // early.
sjohanss@46828 1001 _cm->root_regions()->abort();
sjohanss@46828 1002 _cm->root_regions()->wait_until_scan_finished();
sjohanss@46828 1003
sjohanss@46828 1004 // Disable discovery and empty the discovered lists
sjohanss@46828 1005 // for the CM ref processor.
tschatzl@50490 1006 _ref_processor_cm->disable_discovery();
tschatzl@50490 1007 _ref_processor_cm->abandon_partial_discovery();
tschatzl@50490 1008 _ref_processor_cm->verify_no_references_recorded();
sjohanss@46828 1009
sjohanss@46828 1010 // Abandon current iterations of concurrent marking and concurrent
sjohanss@46828 1011 // refinement, if any are in progress.
tschatzl@50139 1012 concurrent_mark()->concurrent_cycle_abort();
sjohanss@46828 1013 }
sjohanss@46828 1014
sjohanss@46828 1015 void G1CollectedHeap::prepare_heap_for_full_collection() {
sjohanss@46828 1016 // Make sure we'll choose a new allocation region afterwards.
sjohanss@46828 1017 _allocator->release_mutator_alloc_region();
sjohanss@46828 1018 _allocator->abandon_gc_alloc_regions();
sjohanss@46828 1019 g1_rem_set()->cleanupHRRS();
sjohanss@46828 1020
sjohanss@46828 1021 // We may have added regions to the current incremental collection
sjohanss@46828 1022 // set between the last GC or pause and now. We need to clear the
sjohanss@46828 1023 // incremental collection set and then start rebuilding it afresh
sjohanss@46828 1024 // after this full GC.
sjohanss@46828 1025 abandon_collection_set(collection_set());
sjohanss@46828 1026
sjohanss@46828 1027 tear_down_region_sets(false /* free_list_only */);
sjohanss@46828 1028 }
sjohanss@46828 1029
sjohanss@46828 1030 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
sjohanss@46828 1031 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
sjohanss@46828 1032 assert(used() == recalculate_used(), "Should be equal");
sjohanss@46828 1033 _verifier->verify_region_sets_optional();
sjohanss@48395 1034 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
sjohanss@46828 1035 _verifier->check_bitmaps("Full GC Start");
sjohanss@46828 1036 }
sjohanss@46828 1037
sjohanss@46828 1038 void G1CollectedHeap::prepare_heap_for_mutators() {
sjohanss@46828 1039 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
sjohanss@46828 1040 ClassLoaderDataGraph::purge();
stuefe@49854 1041 MetaspaceUtils::verify_metrics();
sjohanss@46828 1042
sjohanss@46828 1043 // Prepare heap for normal collections.
sjohanss@46828 1044 assert(num_free_regions() == 0, "we should not have added any free regions");
sjohanss@46828 1045 rebuild_region_sets(false /* free_list_only */);
sjohanss@46828 1046 abort_refinement();
sjohanss@46828 1047 resize_if_necessary_after_full_collection();
sjohanss@46828 1048
sjohanss@46828 1049 // Rebuild the strong code root lists for each region
sjohanss@46828 1050 rebuild_strong_code_roots();
sjohanss@46828 1051
sjohanss@46828 1052 // Start a new incremental collection set for the next pause
sjohanss@46828 1053 start_new_collection_set();
sjohanss@46828 1054
sjohanss@46828 1055 _allocator->init_mutator_alloc_region();
sjohanss@46828 1056
sjohanss@46828 1057 // Post collection state updates.
sjohanss@46828 1058 MetaspaceGC::compute_new_size();
sjohanss@46828 1059 }
sjohanss@46828 1060
sjohanss@46828 1061 void G1CollectedHeap::abort_refinement() {
sjohanss@46828 1062 if (_hot_card_cache->use_cache()) {
sjohanss@46828 1063 _hot_card_cache->reset_hot_cache();
sjohanss@46828 1064 }
sjohanss@46828 1065
sjohanss@46828 1066 // Discard all remembered set updates.
pliden@50246 1067 G1BarrierSet::dirty_card_queue_set().abandon_logs();
sjohanss@46828 1068 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
sjohanss@46828 1069 }
sjohanss@46828 1070
sjohanss@46828 1071 void G1CollectedHeap::verify_after_full_collection() {
sjohanss@46828 1072 _hrm.verify_optional();
sjohanss@46828 1073 _verifier->verify_region_sets_optional();
sjohanss@48395 1074 _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
sjohanss@46828 1075 // Clear the previous marking bitmap, if needed for bitmap verification.
sjohanss@46828 1076 // Note we cannot do this when we clear the next marking bitmap in
sjohanss@46828 1077 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
sjohanss@46828 1078 // objects marked during a full GC against the previous bitmap.
sjohanss@46828 1079 // But we need to clear it before calling check_bitmaps below since
sjohanss@46828 1080 // the full GC has compacted objects and updated TAMS but not updated
sjohanss@46828 1081 // the prev bitmap.
sjohanss@46828 1082 if (G1VerifyBitmaps) {
sjohanss@46828 1083 GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
sjohanss@46828 1084 _cm->clear_prev_bitmap(workers());
sjohanss@46828 1085 }
sjohanss@46828 1086 _verifier->check_bitmaps("Full GC End");
sjohanss@46828 1087
sjohanss@46828 1088 // At this point there should be no regions in the
sjohanss@46828 1089 // entire heap tagged as young.
sjohanss@46828 1090 assert(check_young_list_empty(), "young list should be empty at this point");
sjohanss@46828 1091
sjohanss@46828 1092 // Note: since we've just done a full GC, concurrent
sjohanss@46828 1093 // marking is no longer active. Therefore we need not
sjohanss@46828 1094 // re-enable reference discovery for the CM ref processor.
sjohanss@46828 1095 // That will be done at the start of the next marking cycle.
sjohanss@46828 1096 // We also know that the STW processor should no longer
sjohanss@46828 1097 // discover any new references.
tschatzl@50490 1098 assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
tschatzl@50490 1099 assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
tschatzl@50490 1100 _ref_processor_stw->verify_no_references_recorded();
tschatzl@50490 1101 _ref_processor_cm->verify_no_references_recorded();
sjohanss@46828 1102 }
sjohanss@46828 1103
sjohanss@46828 1104 void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
sjohanss@48073 1105 // Post collection logging.
sjohanss@48073 1106 // We should do this after we potentially resize the heap so
sjohanss@48073 1107 // that all the COMMIT / UNCOMMIT events are generated before
sjohanss@48073 1108 // the compaction events.
sjohanss@46828 1109 print_hrm_post_compaction();
sjohanss@46828 1110 heap_transition->print();
sjohanss@46828 1111 print_heap_after_gc();
sjohanss@46828 1112 print_heap_regions();
sjohanss@46828 1113 #ifdef TRACESPINNING
sjohanss@46828 1114 ParallelTaskTerminator::print_termination_counts();
sjohanss@46828 1115 #endif
sjohanss@46828 1116 }
sjohanss@46828 1117
drwhite@34241 1118 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
drwhite@34241 1119 bool clear_all_soft_refs) {
kbarrett@49798 1120 assert_at_safepoint_on_vm_thread();
tonyp@7923 1121
david@35492 1122 if (GCLocker::check_active_before_gc()) {
sjohanss@46828 1123 // Full GC was not completed.
tonyp@7398 1124 return false;
tonyp@5243 1125 }
tonyp@5243 1126
jmasa@5343 1127 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
stefank@49478 1128 soft_ref_policy()->should_clear_all_soft_refs();
sjohanss@46828 1129
rkennke@48384 1130 G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
sjohanss@48316 1131 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
sjohanss@48316 1132
sjohanss@48316 1133 collector.prepare_collection();
sjohanss@48316 1134 collector.collect();
sjohanss@48316 1135 collector.complete_collection();
sjohanss@46828 1136
sjohanss@46828 1137 // Full collection was successfully completed.
tonyp@7398 1138 return true;
ysr@1374 1139 }
ysr@1374 1140
ysr@1374 1141 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
drwhite@34241 1142 // Currently, there is no facility in the do_full_collection(bool) API to notify
drwhite@34241 1143 // the caller that the collection did not succeed (e.g., because it was locked
drwhite@34241 1144 // out by the GC locker). So, right now, we'll ignore the return value.
drwhite@34241 1145 bool dummy = do_full_collection(true, /* explicit_gc */
drwhite@34241 1146 clear_all_soft_refs);
drwhite@34241 1147 }
drwhite@34241 1148
drwhite@34241 1149 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
sjohanss@48073 1150 // Capacity, free and used after the GC are counted as full regions to
sjohanss@48073 1151 // include the waste in the following calculations.
ysr@1374 1152 const size_t capacity_after_gc = capacity();
sjohanss@48073 1153 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
ysr@1374 1154
tonyp@6259 1155 // This is enforced in arguments.cpp.
tonyp@6259 1156 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
tonyp@6259 1157 "otherwise the code below doesn't make sense");
tonyp@6259 1158
ysr@1374 1159 // We don't have floating point command-line arguments
tonyp@6259 1160 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
ysr@1374 1161 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
tonyp@6259 1162 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
ysr@1374 1163 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
ysr@1374 1164
tonyp@6259 1165 const size_t min_heap_size = collector_policy()->min_heap_byte_size();
tonyp@6259 1166 const size_t max_heap_size = collector_policy()->max_heap_byte_size();
tonyp@6259 1167
tonyp@6259 1168 // We have to be careful here as these two calculations can overflow
tonyp@6259 1169 // 32-bit size_t's.
tonyp@6259 1170 double used_after_gc_d = (double) used_after_gc;
tonyp@6259 1171 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
tonyp@6259 1172 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
tonyp@6259 1173
tonyp@6259 1174 // Let's make sure that they are both under the max heap size, which
tonyp@6259 1175 // by default will make them fit into a size_t.
tonyp@6259 1176 double desired_capacity_upper_bound = (double) max_heap_size;
tonyp@6259 1177 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
tonyp@6259 1178 desired_capacity_upper_bound);
tonyp@6259 1179 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
tonyp@6259 1180 desired_capacity_upper_bound);
tonyp@6259 1181
tonyp@6259 1182 // We can now safely turn them into size_t's.
tonyp@6259 1183 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
tonyp@6259 1184 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
tonyp@6259 1185
tonyp@6259 1186 // This assert only makes sense here, before we adjust them
tonyp@6259 1187 // with respect to the min and max heap size.
tonyp@6259 1188 assert(minimum_desired_capacity <= maximum_desired_capacity,
david@33105 1189 "minimum_desired_capacity = " SIZE_FORMAT ", "
david@33105 1190 "maximum_desired_capacity = " SIZE_FORMAT,
david@33105 1191 minimum_desired_capacity, maximum_desired_capacity);
tonyp@6259 1192
tonyp@6259 1193 // Should not be greater than the heap max size. No need to adjust
tonyp@6259 1194 // it with respect to the heap min size as it's a lower bound (i.e.,
tonyp@6259 1195 // we'll try to make the capacity larger than it, not smaller).
tonyp@6259 1196 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
tonyp@6259 1197 // Should not be less than the heap min size. No need to adjust it
tonyp@6259 1198 // with respect to the heap max size as it's an upper bound (i.e.,
tonyp@6259 1199 // we'll try to make the capacity smaller than it, not greater).
tonyp@6259 1200 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
ysr@1374 1201
tonyp@6259 1202 if (capacity_after_gc < minimum_desired_capacity) {
ysr@1374 1203 // Don't expand unless it's significant
ysr@1374 1204 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
brutisso@35061 1205
brutisso@35061 1206 log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
sjohanss@48073 1207 "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
sjohanss@48073 1208 "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
sjohanss@48073 1209 capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
brutisso@35061 1210
tschatzl@41178 1211 expand(expand_bytes, _workers);
ysr@1374 1212
ysr@1374 1213 // No expansion, now see if we want to shrink
tonyp@6259 1214 } else if (capacity_after_gc > maximum_desired_capacity) {
ysr@1374 1215 // Capacity too large, compute shrinking size
ysr@1374 1216 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
brutisso@35061 1217
brutisso@35061 1218 log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
sjohanss@48073 1219 "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
sjohanss@48073 1220 "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
sjohanss@48073 1221 capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
brutisso@35061 1222
ysr@1374 1223 shrink(shrink_bytes);
ysr@1374 1224 }
ysr@1374 1225 }
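
The sizing logic above is easiest to follow with concrete numbers. Below is a small, self-contained C++ sketch (not part of this file) that redoes the same arithmetic with example flag values and heap sizes; all names and constants are local to the example.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t used_after_gc     = 700 * M;   // live data plus waste after the full GC
      const size_t capacity_after_gc = 800 * M;   // committed capacity after the full GC
      const size_t min_heap_size     = 256 * M;   // example -Xms
      const size_t max_heap_size     = 2048 * M;  // example -Xmx
      const unsigned MinHeapFreeRatio = 30;       // example flag values
      const unsigned MaxHeapFreeRatio = 70;

      const double maximum_used_percentage = 1.0 - MinHeapFreeRatio / 100.0;
      const double minimum_used_percentage = 1.0 - MaxHeapFreeRatio / 100.0;

      // Divide in double first (the ratio math can overflow a 32-bit size_t),
      // cap at the maximum heap size, then convert back to size_t.
      size_t minimum_desired_capacity =
          (size_t)std::min((double)used_after_gc / maximum_used_percentage, (double)max_heap_size);
      size_t maximum_desired_capacity =
          (size_t)std::min((double)used_after_gc / minimum_used_percentage, (double)max_heap_size);
      minimum_desired_capacity = std::min(minimum_desired_capacity, max_heap_size);
      maximum_desired_capacity = std::max(maximum_desired_capacity, min_heap_size);

      if (capacity_after_gc < minimum_desired_capacity) {
        // 700M / 0.70 = 1000M desired, 800M committed -> expand by 200M.
        printf("expand by %zu MB\n", (minimum_desired_capacity - capacity_after_gc) / M);
      } else if (capacity_after_gc > maximum_desired_capacity) {
        printf("shrink by %zu MB\n", (capacity_after_gc - maximum_desired_capacity) / M);
      }
      return 0;
    }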
ysr@1374 1226
aharlap@33108 1227 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
aharlap@33108 1228 bool do_gc,
aharlap@33108 1229 bool clear_all_soft_refs,
aharlap@33108 1230 bool expect_null_mutator_alloc_region,
aharlap@33108 1231 bool* gc_succeeded) {
aharlap@33108 1232 *gc_succeeded = true;
tonyp@7398 1233 // Let's attempt the allocation first.
tonyp@8928 1234 HeapWord* result =
tonyp@8928 1235 attempt_allocation_at_safepoint(word_size,
aharlap@33108 1236 expect_null_mutator_alloc_region);
tonyp@7398 1237 if (result != NULL) {
tonyp@7398 1238 return result;
tonyp@7398 1239 }
ysr@1374 1240
ysr@1374 1241 // In a G1 heap, we're supposed to keep allocation from failing by
ysr@1374 1242 // incremental pauses. Therefore, at least for now, we'll favor
ysr@1374 1243 // expansion over collection. (This might change in the future if we can
ysr@1374 1244 // do something smarter than full collection to satisfy a failed alloc.)
sjohanss@49788 1245 result = expand_and_allocate(word_size);
ysr@1374 1246 if (result != NULL) {
ysr@1374 1247 return result;
ysr@1374 1248 }
ysr@1374 1249
aharlap@33108 1250 if (do_gc) {
aharlap@33108 1251 // Expansion didn't work, we'll try to do a Full GC.
drwhite@34241 1252 *gc_succeeded = do_full_collection(false, /* explicit_gc */
drwhite@34241 1253 clear_all_soft_refs);
aharlap@33108 1254 }
aharlap@33108 1255
aharlap@33108 1256 return NULL;
aharlap@33108 1257 }
aharlap@33108 1258
aharlap@33108 1259 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
aharlap@33108 1260 bool* succeeded) {
kbarrett@49798 1261 assert_at_safepoint_on_vm_thread();
aharlap@33108 1262
aharlap@33108 1263 // Attempts to allocate followed by Full GC.
aharlap@33108 1264 HeapWord* result =
aharlap@33108 1265 satisfy_failed_allocation_helper(word_size,
aharlap@33108 1266 true, /* do_gc */
aharlap@33108 1267 false, /* clear_all_soft_refs */
aharlap@33108 1268 false, /* expect_null_mutator_alloc_region */
aharlap@33108 1269 succeeded);
aharlap@33108 1270
aharlap@33108 1271 if (result != NULL || !*succeeded) {
ysr@1374 1272 return result;
ysr@1374 1273 }
ysr@1374 1274
aharlap@33108 1275 // Attempts to allocate followed by Full GC that will collect all soft references.
aharlap@33108 1276 result = satisfy_failed_allocation_helper(word_size,
aharlap@33108 1277 true, /* do_gc */
aharlap@33108 1278 true, /* clear_all_soft_refs */
aharlap@33108 1279 true, /* expect_null_mutator_alloc_region */
aharlap@33108 1280 succeeded);
aharlap@33108 1281
aharlap@33108 1282 if (result != NULL || !*succeeded) {
aharlap@33108 1283 return result;
aharlap@33108 1284 }
aharlap@33108 1285
aharlap@33108 1286 // Attempts to allocate, no GC
aharlap@33108 1287 result = satisfy_failed_allocation_helper(word_size,
aharlap@33108 1288 false, /* do_gc */
aharlap@33108 1289 false, /* clear_all_soft_refs */
aharlap@33108 1290 true, /* expect_null_mutator_alloc_region */
aharlap@33108 1291 succeeded);
aharlap@33108 1292
ysr@1374 1293 if (result != NULL) {
ysr@1374 1294 return result;
ysr@1374 1295 }
ysr@1374 1296
stefank@49478 1297 assert(!soft_ref_policy()->should_clear_all_soft_refs(),
tonyp@7398 1298 "Flag should have been handled and cleared prior to this point");
jmasa@5343 1299
ysr@1374 1300 // What else? We might try synchronous finalization later. If the total
ysr@1374 1301 // space available is large enough for the allocation, then a more
ysr@1374 1302 // complete compaction phase than we've tried so far might be
ysr@1374 1303 // appropriate.
ysr@1374 1304 return NULL;
ysr@1374 1305 }
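
As a rough illustration of the escalation order implemented by satisfy_failed_allocation() and its helper above, here is a condensed, standalone sketch; the attempt/expand/GC functions are stand-ins for the real calls and only model the control flow.

    #include <cstdio>

    // Stand-ins: each returns true when it "succeeded".
    static bool attempt_allocation()          { return false; }
    static bool expand_heap()                 { return false; }
    static bool full_gc(bool clear_soft_refs) { (void)clear_soft_refs; return true; }

    // Mirrors satisfy_failed_allocation_helper(): allocate, expand-and-allocate,
    // then (optionally) fall back to a full GC.
    static bool try_allocate(bool do_gc, bool clear_soft_refs, bool* gc_succeeded) {
      *gc_succeeded = true;
      if (attempt_allocation())                  return true;  // 1. plain allocation at the safepoint
      if (expand_heap() && attempt_allocation()) return true;  // 2. expand the heap, then retry
      if (do_gc) {
        *gc_succeeded = full_gc(clear_soft_refs);              // 3. full GC (may be blocked by the GC locker)
      }
      return false;
    }

    int main() {
      bool gc_ok;
      // Round 1: allocate / expand / ordinary full GC.
      if (try_allocate(true,  false, &gc_ok) || !gc_ok) return 0;
      // Round 2: the same, but the full GC also clears soft references.
      if (try_allocate(true,  true,  &gc_ok) || !gc_ok) return 0;
      // Round 3: one last allocation attempt, with no further GC.
      if (try_allocate(false, false, &gc_ok)) return 0;
      printf("allocation failed even after clearing soft references\n");
      return 0;
    }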
ysr@1374 1306
ysr@1374 1307 // Attempt to expand the heap sufficiently
ysr@1374 1308 // to support an allocation of the given "word_size". If
ysr@1374 1309 // successful, perform the allocation and return the address of the
ysr@1374 1310 // allocated block, or else return "NULL".
ysr@1374 1311
sjohanss@49788 1312 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
kbarrett@49798 1313 assert_at_safepoint_on_vm_thread();
tonyp@7923 1314
david@35851 1315 _verifier->verify_region_sets_optional();
tonyp@7398 1316
johnc@8103 1317 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
brutisso@35061 1318 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
brutisso@35061 1319 word_size * HeapWordSize);
brutisso@35061 1320
brutisso@35061 1321
tschatzl@41178 1322 if (expand(expand_bytes, _workers)) {
tschatzl@26316 1323 _hrm.verify_optional();
david@35851 1324 _verifier->verify_region_sets_optional();
johnc@8103 1325 return attempt_allocation_at_safepoint(word_size,
sjohanss@26837 1326 false /* expect_null_mutator_alloc_region */);
johnc@8103 1327 }
johnc@8103 1328 return NULL;
ysr@1374 1329 }
ysr@1374 1330
tschatzl@41178 1331 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
johnc@8103 1332 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
stefank@46619 1333 aligned_expand_bytes = align_up(aligned_expand_bytes,
ysr@1374 1334 HeapRegion::GrainBytes);
brutisso@35061 1335
tschatzl@46456 1336 log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
brutisso@35061 1337 expand_bytes, aligned_expand_bytes);
johnc@8103 1338
tschatzl@26157 1339 if (is_maximal_no_gc()) {
brutisso@35061 1340 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
brutisso@20311 1341 return false;
brutisso@20311 1342 }
brutisso@20311 1343
ecaspole@33577 1344 double expand_heap_start_time_sec = os::elapsedTime();
tschatzl@26157 1345 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
tschatzl@26157 1346 assert(regions_to_expand > 0, "Must expand by at least one region");
tschatzl@26157 1347
tschatzl@41178 1348 uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
ecaspole@33577 1349 if (expand_time_ms != NULL) {
ecaspole@33577 1350 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
ecaspole@33577 1351 }
tschatzl@26157 1352
tschatzl@26157 1353 if (expanded_by > 0) {
tschatzl@26157 1354 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
tonyp@9989 1355 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
tschatzl@26157 1356 g1_policy()->record_new_heap_size(num_regions());
johnc@8103 1357 } else {
brutisso@35061 1358 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
brutisso@35061 1359
johnc@8103 1360 // The expansion of the virtual storage space was unsuccessful.
johnc@8103 1361 // Let's see if it was because we ran out of swap.
johnc@8103 1362 if (G1ExitOnExpansionFailure &&
tschatzl@26316 1363 _hrm.available() >= regions_to_expand) {
johnc@8103 1364 // We had head room...
ccheung@17087 1365 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
ysr@1374 1366 }
ysr@1374 1367 }
tschatzl@26157 1368 return regions_to_expand > 0;
ysr@1374 1369 }
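
The rounding from a byte request to a whole number of regions done in expand() can be illustrated with a tiny standalone sketch; the page and region sizes below are example values and none of this is part of the file.

    #include <cstddef>
    #include <cstdio>

    // Round size up to the next multiple of alignment (alignment is a power of two).
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t page_size   = 4 * 1024;            // example OS page size
      const size_t region_size = 8 * 1024 * 1024;     // example HeapRegion::GrainBytes (8 MB)
      const size_t requested   = 5 * 1024 * 1024 + 3; // e.g. a failed 5 MB allocation request

      size_t aligned = align_size_up(requested, page_size);   // page-align first
      aligned        = align_size_up(aligned, region_size);   // then region-align
      size_t regions_to_expand = aligned / region_size;       // here: one 8 MB region

      printf("request %zu B -> %zu B, %zu region(s)\n", requested, aligned, regions_to_expand);
      return 0;
    }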
ysr@1374 1370
tonyp@9989 1371 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
ysr@1374 1372 size_t aligned_shrink_bytes =
ysr@1374 1373 ReservedSpace::page_align_size_down(shrink_bytes);
stefank@46619 1374 aligned_shrink_bytes = align_down(aligned_shrink_bytes,
ysr@1374 1375 HeapRegion::GrainBytes);
brutisso@17323 1376 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
brutisso@17323 1377
tschatzl@26316 1378 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
brutisso@17323 1379 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
tonyp@10523 1380
brutisso@35061 1381
brutisso@35061 1382 log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
brutisso@35061 1383 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
brutisso@17323 1384 if (num_regions_removed > 0) {
tschatzl@26157 1385 g1_policy()->record_new_heap_size(num_regions());
tonyp@10523 1386 } else {
brutisso@35061 1387 log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
ysr@1374 1388 }
ysr@1374 1389 }
ysr@1374 1390
ysr@1374 1391 void G1CollectedHeap::shrink(size_t shrink_bytes) {
david@35851 1392 _verifier->verify_region_sets_optional();
tonyp@7923 1393
tonyp@10243 1394 // We should only reach here at the end of a Full GC which means we
tonyp@10243 1395 // should not be holding on to any GC alloc regions. The method
tonyp@10243 1396 // below will make sure of that and do any remaining clean up.
sjohanss@26837 1397 _allocator->abandon_gc_alloc_regions();
tonyp@10243 1398
tonyp@7923 1399 // Instead of tearing down / rebuilding the free lists here, we
tonyp@7923 1400 // could instead use the remove_all_pending() method on free_list to
tonyp@7923 1401 // remove only the ones that we need to remove.
tonyp@10996 1402 tear_down_region_sets(true /* free_list_only */);
ysr@1374 1403 shrink_helper(shrink_bytes);
tonyp@10996 1404 rebuild_region_sets(true /* free_list_only */);
tonyp@7923 1405
tschatzl@26316 1406 _hrm.verify_optional();
david@35851 1407 _verifier->verify_region_sets_optional();
ysr@1374 1408 }
ysr@1374 1409
ysr@1374 1410 // Public methods.
ysr@1374 1411
mgerdin@37985 1412 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
brutisso@30154 1413 CollectedHeap(),
tschatzl@47971 1414 _young_gen_sampling_thread(NULL),
mgerdin@37985 1415 _collector_policy(collector_policy),
stefank@49478 1416 _soft_ref_policy(),
eosterlund@49595 1417 _card_table(NULL),
rkennke@48384 1418 _memory_manager("G1 Young Generation", "end of minor GC"),
rkennke@48384 1419 _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
rkennke@48384 1420 _eden_pool(NULL),
rkennke@48384 1421 _survivor_pool(NULL),
rkennke@48384 1422 _old_pool(NULL),
sangheki@46795 1423 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
sangheki@46795 1424 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
ehelin@49840 1425 _g1_policy(new G1Policy(_gc_timer_stw)),
mgerdin@37985 1426 _collection_set(this, _g1_policy),
iveresov@4481 1427 _dirty_card_queue_set(false),
tschatzl@50489 1428 _ref_processor_stw(NULL),
tschatzl@50489 1429 _is_alive_closure_stw(this),
tschatzl@50489 1430 _is_subject_to_discovery_stw(this),
tschatzl@50489 1431 _ref_processor_cm(NULL),
johnc@10670 1432 _is_alive_closure_cm(this),
tschatzl@50489 1433 _is_subject_to_discovery_cm(this),
david@35461 1434 _bot(NULL),
kbarrett@38172 1435 _hot_card_cache(NULL),
kbarrett@38172 1436 _g1_rem_set(NULL),
tschatzl@47970 1437 _cr(NULL),
tonyp@10671 1438 _g1mm(NULL),
tonyp@38081 1439 _preserved_marks_set(true /* in_c_heap */),
brutisso@23450 1440 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
brutisso@23450 1441 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
kbarrett@30182 1442 _humongous_reclaim_candidates(),
tschatzl@25889 1443 _has_humongous_reclaim_candidates(false),
jiangli@31346 1444 _archive_allocator(NULL),
ehelin@31975 1445 _summary_bytes_used(0),
tschatzl@36390 1446 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
tschatzl@36390 1447 _old_evac_stats("Old", OldPLABSize, PLABWeight),
tonyp@11449 1448 _expand_heap_after_alloc_failure(true),
brutisso@12934 1449 _old_marking_cycles_started(0),
brutisso@12934 1450 _old_marking_cycles_completed(0),
sangheki@46795 1451 _in_cset_fast_test() {
sla@18025 1452
stefank@32360 1453 _workers = new WorkGang("GC Thread", ParallelGCThreads,
brutisso@30152 1454 /* are_GC_task_threads */true,
brutisso@30152 1455 /* are_ConcurrentGC_threads */false);
brutisso@30152 1456 _workers->initialize_workers();
david@35851 1457 _verifier = new G1HeapVerifier(this);
brutisso@30152 1458
sjohanss@50208 1459 _allocator = new G1Allocator(this);
mgerdin@37144 1460
mgerdin@37144 1461 _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
mgerdin@37144 1462
jiangli@31346 1463 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
jiangli@31346 1464
jiangli@31346 1465 // Override the default _filler_array_max_size so that no humongous filler
jiangli@31346 1466 // objects are created.
jiangli@31346 1467 _filler_array_max_size = _humongous_object_threshold_in_words;
tonyp@3697 1468
david@31330 1469 uint n_queues = ParallelGCThreads;
ysr@1374 1470 _task_queues = new RefToScanQueueSet(n_queues);
ysr@1374 1471
sla@18025 1472 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
johnc@11248 1473
david@31330 1474 for (uint i = 0; i < n_queues; i++) {
ysr@1374 1475 RefToScanQueue* q = new RefToScanQueue();
ysr@1374 1476 q->initialize();
ysr@1374 1477 _task_queues->register_queue(i, q);
sla@18025 1478 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
sla@18025 1479 }
johnc@11248 1480
johnc@13517 1481 // Initialize the G1EvacuationFailureALot counters and flags.
johnc@13517 1482 NOT_PRODUCT(reset_evacuation_should_fail();)
johnc@13517 1483
ysr@1374 1484 guarantee(_task_queues != NULL, "task_queues allocation failure.");
ysr@1374 1485 }
ysr@1374 1486
tschatzl@30158 1487 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
tschatzl@30158 1488 size_t size,
tschatzl@30158 1489 size_t translation_factor) {
tschatzl@30166 1490 size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
tschatzl@30158 1491 // Allocate a new reserved space, preferring to use large pages.
tschatzl@30166 1492 ReservedSpace rs(size, preferred_page_size);
tschatzl@30158 1493 G1RegionToSpaceMapper* result =
tschatzl@30158 1494 G1RegionToSpaceMapper::create_mapper(rs,
tschatzl@30158 1495 size,
tschatzl@30158 1496 rs.alignment(),
tschatzl@30158 1497 HeapRegion::GrainBytes,
tschatzl@30158 1498 translation_factor,
tschatzl@30158 1499 mtGC);
stefank@37462 1500
stefank@37462 1501 os::trace_page_sizes_for_requested_size(description,
stefank@37462 1502 size,
stefank@37462 1503 preferred_page_size,
stefank@37462 1504 rs.alignment(),
stefank@37462 1505 rs.base(),
stefank@37462 1506 rs.size());
stefank@37462 1507
tschatzl@30158 1508 return result;
tschatzl@30158 1509 }
tschatzl@30158 1510
tschatzl@46652 1511 jint G1CollectedHeap::initialize_concurrent_refinement() {
tschatzl@46652 1512 jint ecode = JNI_OK;
tschatzl@47970 1513 _cr = G1ConcurrentRefine::create(&ecode);
tschatzl@46652 1514 return ecode;
tschatzl@46652 1515 }
tschatzl@46652 1516
tschatzl@47971 1517 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
tschatzl@47971 1518 _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();
tschatzl@47971 1519 if (_young_gen_sampling_thread->osthread() == NULL) {
tschatzl@47971 1520 vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
tschatzl@47971 1521 return JNI_ENOMEM;
tschatzl@47971 1522 }
tschatzl@47971 1523 return JNI_OK;
tschatzl@47971 1524 }
tschatzl@47971 1525
ysr@1374 1526 jint G1CollectedHeap::initialize() {
ysr@1374 1527 os::enable_vtime();
ysr@1374 1528
ysr@1374 1529 // Necessary to satisfy locking discipline assertions.
ysr@1374 1530
ysr@1374 1531 MutexLocker x(Heap_lock);
ysr@1374 1532
ysr@1374 1533 // While there are no constraints in the GC code that HeapWordSize
ysr@1374 1534 // be any particular value, there are multiple other areas in the
ysr@1374 1535 // system which believe this to be true (e.g. oop->object_size in some
ysr@1374 1536 // cases incorrectly returns the size in wordSize units rather than
ysr@1374 1537 // HeapWordSize).
ysr@1374 1538 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
ysr@1374 1539
ysr@1374 1540 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
ysr@1374 1541 size_t max_byte_size = collector_policy()->max_heap_byte_size();
jwilhelm@21561 1542 size_t heap_alignment = collector_policy()->heap_alignment();
ysr@1374 1543
ysr@1374 1544 // Ensure that the sizes are properly aligned.
ysr@1374 1545 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
ysr@1374 1546 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
stefank@19546 1547 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
ysr@1374 1548
ysr@1374 1549 // Reserve the maximum.
kvn@2254 1550
johnc@10237 1551 // When compressed oops are enabled, the preferred heap base
johnc@10237 1552 // is calculated by subtracting the requested size from the
johnc@10237 1553 // 32Gb boundary and using the result as the base address for
johnc@10237 1554 // heap reservation. If the requested size is not aligned to
johnc@10237 1555 // HeapRegion::GrainBytes (i.e. the alignment that is passed
johnc@10237 1556 // into the ReservedHeapSpace constructor) then the actual
johnc@10237 1557 // base of the reserved heap may end up differing from the
johnc@10237 1558 // address that was requested (i.e. the preferred heap base).
johnc@10237 1559 // If this happens then we could end up using a non-optimal
johnc@10237 1560 // compressed oops mode.
johnc@10237 1561
coleenp@13728 1562 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
stefank@19546 1563 heap_alignment);
ysr@1374 1564
mlarsson@26829 1565 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
ysr@1374 1566
kbarrett@28217 1567 // Create the barrier set for the entire reserved region.
eosterlund@49595 1568 G1CardTable* ct = new G1CardTable(reserved_region());
eosterlund@49595 1569 ct->initialize();
eosterlund@49812 1570 G1BarrierSet* bs = new G1BarrierSet(ct);
kbarrett@28217 1571 bs->initialize();
eosterlund@49812 1572 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
pliden@50249 1573 BarrierSet::set_barrier_set(bs);
eosterlund@49595 1574 _card_table = ct;
ysr@1374 1575
kbarrett@38172 1576 // Create the hot card cache.
kbarrett@38172 1577 _hot_card_cache = new G1HotCardCache(this);
kbarrett@38172 1578
ysr@1374 1579 // Carve out the G1 part of the heap.
tschatzl@26157 1580 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
david@30612 1581 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
tschatzl@26160 1582 G1RegionToSpaceMapper* heap_storage =
tschatzl@26160 1583 G1RegionToSpaceMapper::create_mapper(g1_rs,
tschatzl@30158 1584 g1_rs.size(),
david@30612 1585 page_size,
tschatzl@26160 1586 HeapRegion::GrainBytes,
tschatzl@26160 1587 1,
tschatzl@26160 1588 mtJavaHeap);
stefank@37462 1589 os::trace_page_sizes("Heap",
stefank@37462 1590 collector_policy()->min_heap_byte_size(),
stefank@37462 1591 max_byte_size,
stefank@37462 1592 page_size,
david@30612 1593 heap_rs.base(),
david@30612 1594 heap_rs.size());
tschatzl@26160 1595 heap_storage->set_mapping_changed_listener(&_listener);
tschatzl@26160 1596
tschatzl@30158 1597 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
tschatzl@26160 1598 G1RegionToSpaceMapper* bot_storage =
stefank@37462 1599 create_aux_memory_mapper("Block Offset Table",
david@35461 1600 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
david@35461 1601 G1BlockOffsetTable::heap_map_factor());
tschatzl@26160 1602
tschatzl@26160 1603 G1RegionToSpaceMapper* cardtable_storage =
stefank@37462 1604 create_aux_memory_mapper("Card Table",
eosterlund@49595 1605 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
eosterlund@49595 1606 G1CardTable::heap_map_factor());
tschatzl@30158 1607
tschatzl@26160 1608 G1RegionToSpaceMapper* card_counts_storage =
stefank@37462 1609 create_aux_memory_mapper("Card Counts Table",
tschatzl@30565 1610 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
tschatzl@30565 1611 G1CardCounts::heap_map_factor());
tschatzl@30158 1612
ehelin@35943 1613 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
tschatzl@26160 1614 G1RegionToSpaceMapper* prev_bitmap_storage =
ehelin@35943 1615 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
tschatzl@26160 1616 G1RegionToSpaceMapper* next_bitmap_storage =
ehelin@35943 1617 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
tschatzl@26160 1618
tschatzl@26316 1619 _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
eosterlund@49595 1620 _card_table->initialize(cardtable_storage);
kbarrett@38172 1621 // Do later initialization work for concurrent refinement.
kbarrett@38172 1622 _hot_card_cache->initialize(card_counts_storage);
johnc@17327 1623
johnc@2996 1624 // 6843694 - ensure that the maximum region index can fit
johnc@2996 1625 // in the remembered set structures.
tonyp@12381 1626 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
johnc@2996 1627 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
johnc@2996 1628
tschatzl@50865 1629 // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
tschatzl@50865 1630 // start within the first card.
tschatzl@50865 1631 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
tschatzl@46652 1632 // Also create a G1 rem set.
eosterlund@49595 1633 _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
tschatzl@46652 1634 _g1_rem_set->initialize(max_capacity(), max_regions());
tschatzl@35210 1635
johnc@2996 1636 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
tonyp@3697 1637 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
johnc@10677 1638 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
tonyp@3697 1639 "too many cards per region");
johnc@2996 1640
brutisso@23450 1641 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
tonyp@7923 1642
david@35461 1643 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
ysr@1374 1644
kbarrett@30182 1645 {
kbarrett@30182 1646 HeapWord* start = _hrm.reserved().start();
kbarrett@30182 1647 HeapWord* end = _hrm.reserved().end();
kbarrett@30182 1648 size_t granularity = HeapRegion::GrainBytes;
kbarrett@30182 1649
kbarrett@30182 1650 _in_cset_fast_test.initialize(start, end, granularity);
kbarrett@30182 1651 _humongous_reclaim_candidates.initialize(start, end, granularity);
kbarrett@30182 1652 }
johnc@5350 1653
ehelin@35943 1654 // Create the G1ConcurrentMark data structure and thread.
ysr@1374 1655 // (Must do this late, so that "max_regions" is defined.)
ehelin@35943 1656 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
johnc@14740 1657 if (_cm == NULL || !_cm->completed_initialization()) {
ehelin@35943 1658 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
johnc@14740 1659 return JNI_ENOMEM;
johnc@14740 1660 }
lkorinth@50214 1661 _cm_thread = _cm->cm_thread();
ysr@1374 1662
apetrusenko@2344 1663 // Now expand into the initial heap size.
tschatzl@41178 1664 if (!expand(init_byte_size, _workers)) {
johnc@14740 1665 vm_shutdown_during_initialization("Failed to allocate initial heap.");
johnc@8103 1666 return JNI_ENOMEM;
johnc@8103 1667 }
ysr@1374 1668
ysr@1374 1669 // Perform any initialization actions delegated to the policy.
mgerdin@38013 1670 g1_policy()->init(this, &_collection_set);
ysr@1374 1671
kbarrett@51970 1672 G1SATBMarkQueueFilter* satb_filter = new G1SATBMarkQueueFilter(this);
kbarrett@51970 1673 G1BarrierSet::satb_mark_queue_set().initialize(satb_filter,
kbarrett@51970 1674 SATB_Q_CBL_mon,
pliden@50246 1675 SATB_Q_FL_lock,
pliden@50246 1676 G1SATBProcessCompletedThreshold,
pliden@50246 1677 Shared_SATB_Q_lock);
iveresov@2881 1678
tschatzl@46652 1679 jint ecode = initialize_concurrent_refinement();
tschatzl@46652 1680 if (ecode != JNI_OK) {
tschatzl@46652 1681 return ecode;
tschatzl@46652 1682 }
tschatzl@46652 1683
tschatzl@47971 1684 ecode = initialize_young_gen_sampling_thread();
tschatzl@47971 1685 if (ecode != JNI_OK) {
tschatzl@47971 1686 return ecode;
tschatzl@47971 1687 }
tschatzl@47971 1688
pliden@50246 1689 G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
pliden@50246 1690 DirtyCardQ_FL_lock,
pliden@50246 1691 (int)concurrent_refine()->yellow_zone(),
pliden@50246 1692 (int)concurrent_refine()->red_zone(),
pliden@50246 1693 Shared_DirtyCardQ_lock,
pliden@50246 1694 NULL, // fl_owner
pliden@50246 1695 true); // init_free_ids
iveresov@2881 1696
tschatzl@46653 1697 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
tschatzl@26701 1698 DirtyCardQ_FL_lock,
tschatzl@26701 1699 -1, // never trigger processing
tschatzl@26701 1700 -1, // no limit on length
tschatzl@26701 1701 Shared_DirtyCardQ_lock,
pliden@50246 1702 &G1BarrierSet::dirty_card_queue_set());
johnc@6247 1703
tschatzl@26157 1704 // Here we allocate the dummy HeapRegion that is required by the
tschatzl@26157 1705 // G1AllocRegion class.
tschatzl@26316 1706 HeapRegion* dummy_region = _hrm.get_dummy_region();
tschatzl@26160 1707
tonyp@8928 1708 // We'll re-use the same region whether the alloc region will
tonyp@8928 1709 // require BOT updates or not and, if it doesn't, then a non-young
tonyp@8928 1710 // region will complain that it cannot support allocations without
tonyp@26696 1711 // BOT updates. So we'll tag the dummy region as eden to avoid that.
tonyp@26696 1712 dummy_region->set_eden();
tonyp@8928 1713 // Make sure it's full.
tonyp@8928 1714 dummy_region->set_top(dummy_region->end());
tonyp@8928 1715 G1AllocRegion::setup(this, dummy_region);
tonyp@8928 1716
sjohanss@26837 1717 _allocator->init_mutator_alloc_region();
tonyp@8928 1718
jmasa@9338 1719 // Create the monitoring and management support so that
jmasa@9338 1720 // values in the heap have been properly initialized.
tonyp@10671 1721 _g1mm = new G1MonitoringSupport(this);
jmasa@9338 1722
pliden@23472 1723 G1StringDedup::initialize();
pliden@23472 1724
tonyp@38081 1725 _preserved_marks_set.init(ParallelGCThreads);
tschatzl@31976 1726
tschatzl@39698 1727 _collection_set.initialize(max_regions());
tschatzl@39698 1728
ysr@1374 1729 return JNI_OK;
ysr@1374 1730 }
ysr@1374 1731
rkennke@48384 1732 void G1CollectedHeap::initialize_serviceability() {
rkennke@48384 1733 _eden_pool = new G1EdenPool(this);
rkennke@48384 1734 _survivor_pool = new G1SurvivorPool(this);
rkennke@48384 1735 _old_pool = new G1OldGenPool(this);
rkennke@48384 1736
rkennke@48384 1737 _full_gc_memory_manager.add_pool(_eden_pool);
rkennke@48384 1738 _full_gc_memory_manager.add_pool(_survivor_pool);
rkennke@48384 1739 _full_gc_memory_manager.add_pool(_old_pool);
rkennke@48384 1740
rkennke@48384 1741 _memory_manager.add_pool(_eden_pool);
rkennke@48384 1742 _memory_manager.add_pool(_survivor_pool);
phh@51269 1743 _memory_manager.add_pool(_old_pool, false /* always_affected_by_gc */);
rkennke@48384 1744 }
rkennke@48384 1745
pliden@24093 1746 void G1CollectedHeap::stop() {
pliden@25070 1747 // Stop all concurrent threads. We do this to make sure these threads
brutisso@35061 1748 // do not continue to execute and access resources (e.g. logging)
pliden@24848 1749 // that are destroyed during shutdown.
tschatzl@47970 1750 _cr->stop();
tschatzl@47971 1751 _young_gen_sampling_thread->stop();
lkorinth@50214 1752 _cm_thread->stop();
pliden@25070 1753 if (G1StringDedup::is_enabled()) {
pliden@25070 1754 G1StringDedup::stop();
pliden@25070 1755 }
pliden@24093 1756 }
pliden@24093 1757
eosterlund@47791 1758 void G1CollectedHeap::safepoint_synchronize_begin() {
eosterlund@47791 1759 SuspendibleThreadSet::synchronize();
eosterlund@47791 1760 }
eosterlund@47791 1761
eosterlund@47791 1762 void G1CollectedHeap::safepoint_synchronize_end() {
eosterlund@47791 1763 SuspendibleThreadSet::desynchronize();
eosterlund@47791 1764 }
eosterlund@47791 1765
tschatzl@19986 1766 size_t G1CollectedHeap::conservative_max_heap_alignment() {
tschatzl@19986 1767 return HeapRegion::max_region_size();
tschatzl@19986 1768 }
tschatzl@19986 1769
brutisso@30152 1770 void G1CollectedHeap::post_initialize() {
rkennke@48384 1771 CollectedHeap::post_initialize();
brutisso@30152 1772 ref_processing_init();
brutisso@30152 1773 }
brutisso@30152 1774
ysr@1374 1775 void G1CollectedHeap::ref_processing_init() {
johnc@7399 1776 // Reference processing in G1 currently works as follows:
johnc@7399 1777 //
johnc@10670 1778 // * There are two reference processor instances. One is
johnc@10670 1779 // used to record and process discovered references
johnc@10670 1780 // during concurrent marking; the other is used to
johnc@10670 1781 // record and process references during STW pauses
johnc@10670 1782 // (both full and incremental).
johnc@10670 1783 // * Both ref processors need to 'span' the entire heap as
johnc@10670 1784 // the regions in the collection set may be dotted around.
johnc@10670 1785 //
johnc@10670 1786 // * For the concurrent marking ref processor:
johnc@10670 1787 // * Reference discovery is enabled at initial marking.
johnc@10670 1788 // * Reference discovery is disabled and the discovered
johnc@10670 1789 // references processed etc during remarking.
johnc@10670 1790 // * Reference discovery is MT (see below).
johnc@10670 1791 // * Reference discovery requires a barrier (see below).
johnc@10670 1792 // * Reference processing may or may not be MT
johnc@10670 1793 // (depending on the value of ParallelRefProcEnabled
johnc@10670 1794 // and ParallelGCThreads).
johnc@10670 1795 // * A full GC disables reference discovery by the CM
johnc@10670 1796 // ref processor and abandons any entries on its
johnc@10670 1797 // discovered lists.
johnc@10670 1798 //
johnc@10670 1799 // * For the STW processor:
johnc@10670 1800 // * Non MT discovery is enabled at the start of a full GC.
johnc@10670 1801 // * Processing and enqueueing during a full GC is non-MT.
johnc@10670 1802 // * During a full GC, references are processed after marking.
johnc@10670 1803 //
johnc@10670 1804 // * Discovery (may or may not be MT) is enabled at the start
johnc@10670 1805 // of an incremental evacuation pause.
johnc@10670 1806 // * References are processed near the end of a STW evacuation pause.
johnc@10670 1807 // * For both types of GC:
johnc@10670 1808 // * Discovery is atomic - i.e. not concurrent.
johnc@10670 1809 // * Reference discovery will not need a barrier.
johnc@7399 1810
sangheki@46795 1811 bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
sangheki@46795 1812
johnc@10670 1813 // Concurrent Mark ref processor
johnc@10670 1814 _ref_processor_cm =
tschatzl@50489 1815 new ReferenceProcessor(&_is_subject_to_discovery_cm,
tschatzl@50489 1816 mt_processing, // mt processing
tschatzl@50489 1817 ParallelGCThreads, // degree of mt processing
tschatzl@50489 1818 (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
tschatzl@50489 1819 MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
tschatzl@50489 1820 false, // Reference discovery is not atomic
tschatzl@51240 1821 &_is_alive_closure_cm, // is alive closure
tschatzl@51240 1822 true); // allow changes to number of processing threads
johnc@10670 1823
johnc@10670 1824 // STW ref processor
johnc@10670 1825 _ref_processor_stw =
tschatzl@50489 1826 new ReferenceProcessor(&_is_subject_to_discovery_stw,
tschatzl@50489 1827 mt_processing, // mt processing
tschatzl@50489 1828 ParallelGCThreads, // degree of mt processing
tschatzl@50489 1829 (ParallelGCThreads > 1), // mt discovery
tschatzl@50489 1830 ParallelGCThreads, // degree of mt discovery
tschatzl@50489 1831 true, // Reference discovery is atomic
tschatzl@51240 1832 &_is_alive_closure_stw, // is alive closure
tschatzl@51240 1833 true); // allow changes to number of processing threads
ysr@1374 1834 }
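
The comment block at the top of ref_processing_init() boils down to two differently configured reference processors. The standalone table below simply restates that configuration as data for quick reference; the struct and the flag values are illustrative only (they assume ParallelRefProcEnabled with more than one GC thread) and are not HotSpot types.

    #include <cstdio>

    struct RefProcConfig {
      const char* user;
      bool        mt_processing;    // ParallelRefProcEnabled && ParallelGCThreads > 1
      bool        mt_discovery;
      bool        atomic_discovery;
    };

    int main() {
      const RefProcConfig configs[] = {
        // Concurrent marking: discovery spans the whole marking cycle, so it is not atomic.
        { "concurrent marking", true, true, false },
        // STW pauses (full and incremental): discovery happens inside the pause, so it is atomic.
        { "STW pauses",         true, true, true  },
      };
      for (const RefProcConfig& c : configs) {
        printf("%-18s mt_processing=%d mt_discovery=%d atomic_discovery=%d\n",
               c.user, c.mt_processing, c.mt_discovery, c.atomic_discovery);
      }
      return 0;
    }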
ysr@1374 1835
kbarrett@32735 1836 CollectorPolicy* G1CollectedHeap::collector_policy() const {
mgerdin@37985 1837 return _collector_policy;
kbarrett@32735 1838 }
kbarrett@32735 1839
stefank@49478 1840 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
stefank@49478 1841 return &_soft_ref_policy;
stefank@49478 1842 }
stefank@49478 1843
ysr@1374 1844 size_t G1CollectedHeap::capacity() const {
tschatzl@26316 1845 return _hrm.length() * HeapRegion::GrainBytes;
ysr@1374 1846 }
ysr@1374 1847
sjohanss@48073 1848 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
sjohanss@48073 1849 return _hrm.total_free_bytes();
sjohanss@48073 1850 }
sjohanss@48073 1851
tschatzl@33204 1852 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
kbarrett@38172 1853 _hot_card_cache->drain(cl, worker_i);
tschatzl@33204 1854 }
tschatzl@33204 1855
tschatzl@33204 1856 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
pliden@50246 1857 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
brutisso@29680 1858 size_t n_completed_buffers = 0;
tschatzl@46653 1859 while (dcqs.apply_closure_during_gc(cl, worker_i)) {
ysr@1374 1860 n_completed_buffers++;
ysr@1374 1861 }
tschatzl@50436 1862 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
ysr@1374 1863 dcqs.clear_n_completed_buffers();
ysr@1374 1864 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
ysr@1374 1865 }
ysr@1374 1866
ysr@1374 1867 // Computes the sum of the storage used by the various regions.
ysr@1374 1868 size_t G1CollectedHeap::used() const {
ehelin@31975 1869 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
jiangli@31346 1870 if (_archive_allocator != NULL) {
jiangli@31346 1871 result += _archive_allocator->used();
jiangli@31346 1872 }
jiangli@31346 1873 return result;
ysr@1374 1874 }
ysr@1374 1875
tonyp@3263 1876 size_t G1CollectedHeap::used_unlocked() const {
ehelin@31975 1877 return _summary_bytes_used;
tonyp@3263 1878 }
tonyp@3263 1879
ysr@1374 1880 class SumUsedClosure: public HeapRegionClosure {
ysr@1374 1881 size_t _used;
ysr@1374 1882 public:
ysr@1374 1883 SumUsedClosure() : _used(0) {}
tschatzl@49381 1884 bool do_heap_region(HeapRegion* r) {
david@33786 1885 _used += r->used();
ysr@1374 1886 return false;
ysr@1374 1887 }
ysr@1374 1888 size_t result() { return _used; }
ysr@1374 1889 };
ysr@1374 1890
ysr@1374 1891 size_t G1CollectedHeap::recalculate_used() const {
tschatzl@23455 1892 double recalculate_used_start = os::elapsedTime();
tschatzl@23455 1893
ysr@1374 1894 SumUsedClosure blk;
tonyp@9989 1895 heap_region_iterate(&blk);
tschatzl@23455 1896
tschatzl@23455 1897 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
ysr@1374 1898 return blk.result();
ysr@1374 1899 }
ysr@1374 1900
sjohanss@34619 1901 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
sjohanss@34619 1902 switch (cause) {
sjohanss@34619 1903 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
sjohanss@34619 1904 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
sjohanss@34619 1905 case GCCause::_wb_conc_mark: return true;
sjohanss@34619 1906 default : return false;
sjohanss@34619 1907 }
sjohanss@34619 1908 }
sjohanss@34619 1909
tonyp@6058 1910 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
tonyp@11754 1911 switch (cause) {
tonyp@11754 1912 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
tonyp@11754 1913 case GCCause::_g1_humongous_allocation: return true;
sjohanss@34619 1914 default: return is_user_requested_concurrent_full_gc(cause);
tonyp@11754 1915 }
tonyp@6058 1916 }
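
The cause-to-decision mapping in the two functions above can be restated as a single standalone switch; the enum and the flag parameters below are illustrative stand-ins, not HotSpot types.

    #include <cstdio>

    enum class Cause { SystemGC, DCmdGCRun, WbConcMark, GCLocker, HumongousAllocation, Other };

    static bool starts_concurrent_cycle(Cause cause,
                                        bool explicit_gc_invokes_concurrent,  // ExplicitGCInvokesConcurrent
                                        bool gc_locker_invokes_concurrent) {  // GCLockerInvokesConcurrent
      switch (cause) {
        case Cause::GCLocker:            return gc_locker_invokes_concurrent;
        case Cause::HumongousAllocation: return true;
        // User-requested causes:
        case Cause::SystemGC:
        case Cause::DCmdGCRun:           return explicit_gc_invokes_concurrent;
        case Cause::WbConcMark:          return true;
        default:                         return false;
      }
    }

    int main() {
      // System.gc() schedules an initial-mark pause only with -XX:+ExplicitGCInvokesConcurrent.
      printf("System.gc(): %d\n", starts_concurrent_cycle(Cause::SystemGC, true, false));
      printf("humongous allocation: %d\n", starts_concurrent_cycle(Cause::HumongousAllocation, false, false));
      return 0;
    }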
tonyp@6058 1917
tonyp@9334 1918 #ifndef PRODUCT
tonyp@9334 1919 void G1CollectedHeap::allocate_dummy_regions() {
tonyp@9334 1920 // Let's fill up most of the region
tonyp@9334 1921 size_t word_size = HeapRegion::GrainWords - 1024;
tonyp@9334 1922 // And as a result the region we'll allocate will be humongous.
tonyp@26846 1923 guarantee(is_humongous(word_size), "sanity");
tonyp@9334 1924
sangheki@34136 1925 // _filler_array_max_size is set to humongous object threshold
sangheki@34136 1926 // but temporarily change it to use CollectedHeap::fill_with_object().
sangheki@34136 1927 SizeTFlagSetting fs(_filler_array_max_size, word_size);
sangheki@34136 1928
tonyp@9334 1929 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
tonyp@9334 1930 // Let's use the existing mechanism for the allocation
sjohanss@49788 1931 HeapWord* dummy_obj = humongous_obj_allocate(word_size);
tonyp@9334 1932 if (dummy_obj != NULL) {
tonyp@9334 1933 MemRegion mr(dummy_obj, word_size);
tonyp@9334 1934 CollectedHeap::fill_with_object(mr);
tonyp@9334 1935 } else {
tonyp@9334 1936 // If we can't allocate once, we probably cannot allocate
tonyp@9334 1937 // again. Let's get out of the loop.
tonyp@9334 1938 break;
tonyp@9334 1939 }
tonyp@9334 1940 }
tonyp@9334 1941 }
tonyp@9334 1942 #endif // !PRODUCT
tonyp@9334 1943
brutisso@12934 1944 void G1CollectedHeap::increment_old_marking_cycles_started() {
brutisso@12934 1945 assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
david@33105 1946 _old_marking_cycles_started == _old_marking_cycles_completed + 1,
david@33105 1947 "Wrong marking cycle count (started: %d, completed: %d)",
david@33105 1948 _old_marking_cycles_started, _old_marking_cycles_completed);
brutisso@12934 1949
brutisso@12934 1950 _old_marking_cycles_started++;
brutisso@12934 1951 }
brutisso@12934 1952
brutisso@12934 1953 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
tonyp@6058 1954 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
tonyp@6058 1955
tonyp@7455 1956 // We assume that if concurrent == true, then the caller is a
tonyp@7455 1957 // concurrent thread that has joined the Suspendible Thread
tonyp@7455 1958 // Set. If there's ever a cheap way to check this, we should add an
tonyp@7455 1959 // assert here.
tonyp@7455 1960
tonyp@6058 1961 // Given that this method is called at the end of a Full GC or of a
tonyp@6058 1962 // concurrent cycle, and those can be nested (i.e., a Full GC can
tonyp@6058 1963 // interrupt a concurrent cycle), the number of full collections
tonyp@6058 1964 // completed should be either one (in the case where there was no
tonyp@6058 1965 // nesting) or two (when a Full GC interrupted a concurrent cycle)
tonyp@6058 1966 // behind the number of full collections started.
tonyp@6058 1967
tonyp@6058 1968 // This is the case for the inner caller, i.e. a Full GC.
tonyp@7455 1969 assert(concurrent ||
brutisso@12934 1970 (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
brutisso@12934 1971 (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
david@33105 1972 "for inner caller (Full GC): _old_marking_cycles_started = %u "
david@33105 1973 "is inconsistent with _old_marking_cycles_completed = %u",
david@33105 1974 _old_marking_cycles_started, _old_marking_cycles_completed);
tonyp@6058 1975
tonyp@6058 1976 // This is the case for the outer caller, i.e. the concurrent cycle.
tonyp@7455 1977 assert(!concurrent ||
brutisso@12934 1978 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
david@33105 1979 "for outer caller (concurrent cycle): "
david@33105 1980 "_old_marking_cycles_started = %u "
david@33105 1981 "is inconsistent with _old_marking_cycles_completed = %u",
david@33105 1982 _old_marking_cycles_started, _old_marking_cycles_completed);
brutisso@12934 1983
brutisso@12934 1984 _old_marking_cycles_completed += 1;
tonyp@6058 1985
johnc@6766 1986 // We need to clear the "in_progress" flag in the CM thread before
johnc@6766 1987 // we wake up any waiters (especially when ExplicitInvokesConcurrent
johnc@6766 1988 // is set) so that if a waiter requests another System.gc() it doesn't
sla@18025 1989 // incorrectly see that a marking cycle is still in progress.
tonyp@7455 1990 if (concurrent) {
lkorinth@50214 1991 _cm_thread->set_idle();
johnc@6766 1992 }
johnc@6766 1993
tonyp@6058 1994 // This notify_all() will ensure that a thread that called
tonyp@6058 1995 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
tonyp@6058 1996 // and it's waiting for a full GC to finish will be woken up. It is
tschatzl@48951 1997 // waiting in VM_G1CollectForAllocation::doit_epilogue().
tonyp@6058 1998 FullGCCount_lock->notify_all();
tonyp@6058 1999 }
tonyp@6058 2000
ysr@4458 2001 void G1CollectedHeap::collect(GCCause::Cause cause) {
tonyp@11754 2002 assert_heap_not_locked();
ysr@4458 2003
mlarsson@29078 2004 uint gc_count_before;
mlarsson@29078 2005 uint old_marking_count_before;
mlarsson@29078 2006 uint full_gc_count_before;
tonyp@11754 2007 bool retry_gc;
tonyp@11754 2008
tonyp@11754 2009 do {
tonyp@11754 2010 retry_gc = false;
tonyp@11754 2011
tonyp@11754 2012 {
tonyp@11754 2013 MutexLocker ml(Heap_lock);
tonyp@11754 2014
tonyp@11754 2015 // Read the GC count while holding the Heap_lock
tonyp@11754 2016 gc_count_before = total_collections();
sjohanss@27250 2017 full_gc_count_before = total_full_collections();
brutisso@12934 2018 old_marking_count_before = _old_marking_cycles_started;
tonyp@11754 2019 }
tonyp@11754 2020
tonyp@11754 2021 if (should_do_concurrent_full_gc(cause)) {
tonyp@11754 2022 // Schedule an initial-mark evacuation pause that will start a
tonyp@11754 2023 // concurrent cycle. We're setting word_size to 0 which means that
tonyp@11754 2024 // we are not requesting a post-GC allocation.
tschatzl@48951 2025 VM_G1CollectForAllocation op(0, /* word_size */
tschatzl@48951 2026 gc_count_before,
tschatzl@48951 2027 cause,
tschatzl@48951 2028 true, /* should_initiate_conc_mark */
sjohanss@49788 2029 g1_policy()->max_pause_time_ms());
ysr@4458 2030 VMThread::execute(&op);
tonyp@11754 2031 if (!op.pause_succeeded()) {
brutisso@12934 2032 if (old_marking_count_before == _old_marking_cycles_started) {
johnc@12227 2033 retry_gc = op.should_retry_gc();
tonyp@11754 2034 } else {
tonyp@11754 2035 // A Full GC happened while we were trying to schedule the
tonyp@11754 2036 // initial-mark GC. No point in starting a new cycle given
tonyp@11754 2037 // that the whole heap was collected anyway.
tonyp@11754 2038 }
johnc@12227 2039
johnc@12227 2040 if (retry_gc) {
david@35492 2041 if (GCLocker::is_active_and_needs_gc()) {
david@35492 2042 GCLocker::stall_until_clear();
johnc@12227 2043 }
johnc@12227 2044 }
tonyp@11754 2045 }
tonyp@6058 2046 } else {
tschatzl@26183 2047 if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
tonyp@11754 2048 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
tonyp@11754 2049
tonyp@11754 2050 // Schedule a standard evacuation pause. We're setting word_size
tonyp@11754 2051 // to 0 which means that we are not requesting a post-GC allocation.
tschatzl@48951 2052 VM_G1CollectForAllocation op(0, /* word_size */
tschatzl@48951 2053 gc_count_before,
tschatzl@48951 2054 cause,
tschatzl@48951 2055 false, /* should_initiate_conc_mark */
sjohanss@49788 2056 g1_policy()->max_pause_time_ms());
tonyp@11754 2057 VMThread::execute(&op);
tonyp@11754 2058 } else {
tonyp@11754 2059 // Schedule a Full GC.
sjohanss@27250 2060 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
tonyp@11754 2061 VMThread::execute(&op);
tonyp@11754 2062 }
ysr@4458 2063 }
tonyp@11754 2064 } while (retry_gc);
ysr@1374 2065 }
ysr@1374 2066
ysr@1374 2067 bool G1CollectedHeap::is_in(const void* p) const {
tschatzl@26316 2068 if (_hrm.reserved().contains(p)) {
tschatzl@26160 2069 // Given that we know that p is in the reserved space,
david@33786 2070 // heap_region_containing() should successfully
stefank@11247 2071 // return the containing region.
david@33786 2072 HeapRegion* hr = heap_region_containing(p);
ysr@1374 2073 return hr->is_in(p);
ysr@1374 2074 } else {
coleenp@13728 2075 return false;
ysr@1374 2076 }
ysr@1374 2077 }
ysr@1374 2078
tschatzl@26160 2079 #ifdef ASSERT
tschatzl@26160 2080 bool G1CollectedHeap::is_in_exact(const void* p) const {
tschatzl@26160 2081 bool contains = reserved_region().contains(p);
tschatzl@26316 2082 bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
tschatzl@26160 2083 if (contains && available) {
tschatzl@26160 2084 return true;
tschatzl@26160 2085 } else {
tschatzl@26160 2086 return false;
tschatzl@26160 2087 }
tschatzl@26160 2088 }
tschatzl@26160 2089 #endif
tschatzl@26160 2090
ysr@1374 2091 // Iteration functions.
ysr@1374 2092
ysr@1374 2093 // Iterates an ObjectClosure over all objects within a HeapRegion.
ysr@1374 2094
ysr@1374 2095 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
ysr@1374 2096 ObjectClosure* _cl;
ysr@1374 2097 public:
ysr@1374 2098 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
tschatzl@49381 2099 bool do_heap_region(HeapRegion* r) {
tonyp@26846 2100 if (!r->is_continues_humongous()) {
ysr@1374 2101 r->object_iterate(_cl);
ysr@1374 2102 }
ysr@1374 2103 return false;
ysr@1374 2104 }
ysr@1374 2105 };
ysr@1374 2106
coleenp@13728 2107 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
ysr@1374 2108 IterateObjectClosureRegionClosure blk(cl);
tonyp@9989 2109 heap_region_iterate(&blk);
ysr@1374 2110 }
ysr@1374 2111
tonyp@9989 2112 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
tschatzl@26316 2113 _hrm.iterate(cl);
ysr@1374 2114 }
ysr@1374 2115
sjohanss@48073 2116 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
sjohanss@48073 2117 HeapRegionClaimer *hrclaimer,
sjohanss@48073 2118 uint worker_id) const {
sjohanss@48073 2119 _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
sjohanss@48073 2120 }
sjohanss@48073 2121
sjohanss@48073 2122 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
sjohanss@48073 2123 HeapRegionClaimer *hrclaimer) const {
sjohanss@48073 2124 _hrm.par_iterate(cl, hrclaimer, 0);
mlarsson@27009 2125 }
ysr@1374 2126
tschatzl@39698 2127 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
tschatzl@39698 2128 _collection_set.iterate(cl);
johnc@11248 2129 }
johnc@11248 2130
tschatzl@39698 2131 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
tschatzl@39698 2132 _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
ysr@1374 2133 }
ysr@1374 2134
ysr@1374 2135 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
brutisso@30152 2136 HeapRegion* hr = heap_region_containing(addr);
brutisso@30152 2137 return hr->block_start(addr);
ysr@1374 2138 }
ysr@1374 2139
ysr@1374 2140 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
brutisso@30152 2141 HeapRegion* hr = heap_region_containing(addr);
brutisso@30152 2142 return hr->block_size(addr);
ysr@1374 2143 }
ysr@1374 2144
ysr@1374 2145 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
brutisso@30152 2146 HeapRegion* hr = heap_region_containing(addr);
brutisso@30152 2147 return hr->block_is_obj(addr);
ysr@1374 2148 }
ysr@1374 2149
ysr@1374 2150 bool G1CollectedHeap::supports_tlab_allocation() const {
ysr@1374 2151 return true;
ysr@1374 2152 }
ysr@1374 2153
ysr@1374 2154 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
mgerdin@38183 2155 return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
brutisso@22552 2156 }
brutisso@22552 2157
brutisso@22552 2158 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
mgerdin@38183 2159 return _eden.length() * HeapRegion::GrainBytes;
brutisso@22552 2160 }
brutisso@22552 2161
brutisso@22552 2162 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
tschatzl@32616 2163 // must be equal to the humongous object limit.
brutisso@22552 2164 size_t G1CollectedHeap::max_tlab_size() const {
stefank@46619 2165 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
ysr@1374 2166 }
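// Worked example (sketch; assumes the humongous threshold is half a region, as set up
// during heap initialization): with 4M regions on a 64-bit VM, HeapRegion::GrainWords
// is 512K words, _humongous_object_threshold_in_words is 256K words, and
// max_tlab_size() therefore returns 256K words (2M), which is already aligned to
// MinObjAlignment.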
ysr@1374 2167
ysr@1374 2168 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
sjohanss@49788 2169 return _allocator->unsafe_max_tlab_alloc();
ysr@1374 2170 }
ysr@1374 2171
ysr@1374 2172 size_t G1CollectedHeap::max_capacity() const {
tschatzl@26316 2173 return _hrm.reserved().byte_size();
ysr@1374 2174 }
ysr@1374 2175
ysr@1374 2176 jlong G1CollectedHeap::millis_since_last_gc() {
jprovino@41284 2177 // See the notes in GenCollectedHeap::millis_since_last_gc()
jprovino@41284 2178 // for more information about the implementation.
jprovino@41284 2179 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
jprovino@41284 2180 _g1_policy->collection_pause_end_millis();
jprovino@41284 2181 if (ret_val < 0) {
jprovino@41284 2182 log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
jprovino@41284 2183 ". Returning zero instead.", ret_val);
jprovino@41284 2184 return 0;
jprovino@41284 2185 }
jprovino@41284 2186 return ret_val;
ysr@1374 2187 }
ysr@1374 2188
stefank@50593 2189 void G1CollectedHeap::deduplicate_string(oop str) {
stefank@50593 2190 assert(java_lang_String::is_instance(str), "invariant");
stefank@50593 2191
stefank@50593 2192 if (G1StringDedup::is_enabled()) {
stefank@50593 2193 G1StringDedup::deduplicate(str);
stefank@50593 2194 }
stefank@50593 2195 }
stefank@50593 2196
ysr@1374 2197 void G1CollectedHeap::prepare_for_verify() {
david@35851 2198 _verifier->prepare_for_verify();
ysr@1374 2199 }
ysr@1374 2200
brutisso@35061 2201 void G1CollectedHeap::verify(VerifyOption vo) {
david@35851 2202 _verifier->verify(vo);
johnc@19339 2203 }
johnc@19339 2204
kbarrett@46384 2205 bool G1CollectedHeap::supports_concurrent_phase_control() const {
kbarrett@46384 2206 return true;
kbarrett@46384 2207 }
kbarrett@46384 2208
kbarrett@46384 2209 const char* const* G1CollectedHeap::concurrent_phases() const {
lkorinth@50214 2210 return _cm_thread->concurrent_phases();
kbarrett@46384 2211 }
kbarrett@46384 2212
kbarrett@46384 2213 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
lkorinth@50214 2214 return _cm_thread->request_concurrent_phase(phase);
kbarrett@46384 2215 }
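// Explanatory note (not part of the upstream sources): these three hooks expose G1's
// concurrent-cycle phase control. The list of phase names and the request mechanism
// are owned by the concurrent mark thread, so both calls simply forward to it. The
// interface exists mainly for test support (e.g. the WhiteBox API) rather than for
// normal collection logic.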
kbarrett@46384 2216
ysr@1374 2217 class PrintRegionClosure: public HeapRegionClosure {
ysr@1374 2218 outputStream* _st;
ysr@1374 2219 public:
ysr@1374 2220 PrintRegionClosure(outputStream* st) : _st(st) {}
tschatzl@49381 2221 bool do_heap_region(HeapRegion* r) {
ysr@1374 2222 r->print_on(_st);
ysr@1374 2223 return false;
ysr@1374 2224 }
ysr@1374 2225 };
ysr@1374 2226
goetz@23543 2227 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
goetz@23543 2228 const HeapRegion* hr,
goetz@23543 2229 const VerifyOption vo) const {
goetz@23543 2230 switch (vo) {
goetz@23543 2231 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
goetz@23543 2232 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
sjohanss@48073 2233 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
goetz@23543 2234 default: ShouldNotReachHere();
goetz@23543 2235 }
goetz@23543 2236 return false; // keep some compilers happy
goetz@23543 2237 }
goetz@23543 2238
goetz@23543 2239 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
goetz@23543 2240 const VerifyOption vo) const {
goetz@23543 2241 switch (vo) {
goetz@23543 2242 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
goetz@23543 2243 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
sjohanss@48073 2244 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);
goetz@23543 2245 default: ShouldNotReachHere();
goetz@23543 2246 }
goetz@23543 2247 return false; // keep some compilers happy
goetz@23543 2248 }
goetz@23543 2249
sjohanss@37085 2250 void G1CollectedHeap::print_heap_regions() const {
stuefe@46701 2251 LogTarget(Trace, gc, heap, region) lt;
stuefe@46701 2252 if (lt.is_enabled()) {
stuefe@46701 2253 LogStream ls(lt);
stuefe@46701 2254 print_regions_on(&ls);
sjohanss@37085 2255 }
sjohanss@37085 2256 }
sjohanss@37085 2257
ysr@1374 2258 void G1CollectedHeap::print_on(outputStream* st) const {
tonyp@3191 2259 st->print(" %-20s", "garbage-first heap");
tonyp@3191 2260 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
tonyp@3263 2261 capacity()/K, used_unlocked()/K);
sjohanss@47941 2262 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
stefank@30272 2263 p2i(_hrm.reserved().start()),
stefank@30272 2264 p2i(_hrm.reserved().end()));
tonyp@3191 2265 st->cr();
johnc@10677 2266 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
mgerdin@38183 2267 uint young_regions = young_regions_count();
tonyp@12381 2268 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
tonyp@12381 2269 (size_t) young_regions * HeapRegion::GrainBytes / K);
mgerdin@38183 2270 uint survivor_regions = survivor_regions_count();
tonyp@12381 2271 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
tonyp@12381 2272 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
tonyp@3191 2273 st->cr();
stuefe@49854 2274 MetaspaceUtils::print_on(st);
tonyp@10997 2275 }
tonyp@10997 2276
sjohanss@37085 2277 void G1CollectedHeap::print_regions_on(outputStream* st) const {
david@34671 2278 st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
tonyp@12381 2279 "HS=humongous(starts), HC=humongous(continues), "
tschatzl@50130 2280 "CS=collection set, F=free, A=archive, "
david@34671 2281 "TAMS=top-at-mark-start (previous, next)");
ysr@1374 2282 PrintRegionClosure blk(st);
tonyp@9989 2283 heap_region_iterate(&blk);
ysr@1374 2284 }
ysr@1374 2285
sjohanss@37085 2286 void G1CollectedHeap::print_extended_on(outputStream* st) const {
sjohanss@37085 2287 print_on(st);
sjohanss@37085 2288
sjohanss@37085 2289 // Print the per-region information.
sjohanss@37085 2290 print_regions_on(st);
sjohanss@37085 2291 }
sjohanss@37085 2292
stefank@16685 2293 void G1CollectedHeap::print_on_error(outputStream* st) const {
stefank@16685 2294 this->CollectedHeap::print_on_error(st);
stefank@16685 2295
stefank@16685 2296 if (_cm != NULL) {
stefank@16685 2297 st->cr();
stefank@16685 2298 _cm->print_on_error(st);
stefank@16685 2299 }
stefank@16685 2300 }
stefank@16685 2301
ysr@1374 2302 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
mlarsson@27251 2303 workers()->print_worker_threads_on(st);
lkorinth@50214 2304 _cm_thread->print_on(st);
ysr@1374 2305 st->cr();
tonyp@4022 2306 _cm->print_worker_threads_on(st);
tschatzl@47971 2307 _cr->print_threads_on(st);
tschatzl@47971 2308 _young_gen_sampling_thread->print_on(st);
pliden@23472 2309 if (G1StringDedup::is_enabled()) {
pliden@23472 2310 G1StringDedup::print_worker_threads_on(st);
pliden@23472 2311 }
ysr@1374 2312 }
ysr@1374 2313
ysr@1374 2314 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
mlarsson@27251 2315 workers()->threads_do(tc);
lkorinth@50214 2316 tc->do_thread(_cm_thread);
drwhite@37474 2317 _cm->threads_do(tc);
tschatzl@47971 2318 _cr->threads_do(tc);
tschatzl@47971 2319 tc->do_thread(_young_gen_sampling_thread);
pliden@23472 2320 if (G1StringDedup::is_enabled()) {
pliden@23472 2321 G1StringDedup::threads_do(tc);
pliden@23472 2322 }
ysr@1374 2323 }
ysr@1374 2324
ysr@1374 2325 void G1CollectedHeap::print_tracing_info() const {
brutisso@35061 2326 g1_rem_set()->print_summary_info();
brutisso@35061 2327 concurrent_mark()->print_summary_info();
ysr@1374 2328 }
ysr@1374 2329
tonyp@10000 2330 #ifndef PRODUCT
tonyp@10000 2331 // Helpful for debugging RSet issues.
tonyp@10000 2332
tonyp@10000 2333 class PrintRSetsClosure : public HeapRegionClosure {
tonyp@10000 2334 private:
tonyp@10000 2335 const char* _msg;
tonyp@10000 2336 size_t _occupied_sum;
tonyp@10000 2337
tonyp@10000 2338 public:
tschatzl@49381 2339 bool do_heap_region(HeapRegion* r) {
tonyp@10000 2340 HeapRegionRemSet* hrrs = r->rem_set();
tonyp@10000 2341 size_t occupied = hrrs->occupied();
tonyp@10000 2342 _occupied_sum += occupied;
tonyp@10000 2343
brutisso@35061 2344 tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
tonyp@10000 2345 if (occupied == 0) {
brutisso@35061 2346 tty->print_cr(" RSet is empty");
tonyp@10000 2347 } else {
tonyp@10000 2348 hrrs->print();
tonyp@10000 2349 }
brutisso@35061 2350 tty->print_cr("----------");
tonyp@10000 2351 return false;
tonyp@10000 2352 }
tonyp@10000 2353
tonyp@10000 2354 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
brutisso@35061 2355 tty->cr();
brutisso@35061 2356 tty->print_cr("========================================");
brutisso@35061 2357 tty->print_cr("%s", msg);
brutisso@35061 2358 tty->cr();
tonyp@10000 2359 }
tonyp@10000 2360
tonyp@10000 2361 ~PrintRSetsClosure() {
brutisso@35061 2362 tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
brutisso@35061 2363 tty->print_cr("========================================");
brutisso@35061 2364 tty->cr();
tonyp@10000 2365 }
tonyp@10000 2366 };
tonyp@10000 2367
tonyp@10000 2368 void G1CollectedHeap::print_cset_rsets() {
tonyp@10000 2369 PrintRSetsClosure cl("Printing CSet RSets");
tonyp@10000 2370 collection_set_iterate(&cl);
tonyp@10000 2371 }
tonyp@10000 2372
tonyp@10000 2373 void G1CollectedHeap::print_all_rsets() {
tonyp@10000 2374 PrintRSetsClosure cl("Printing All RSets");
tonyp@10000 2375 heap_region_iterate(&cl);
tonyp@10000 2376 }
tonyp@10000 2377 #endif // PRODUCT
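// Note (usage sketch, not part of the upstream sources): the RSet printers above are
// only compiled into non-product builds and are meant to be invoked ad hoc, e.g. from
// a debugger or from temporary instrumentation:
//
//   G1CollectedHeap::heap()->print_all_rsets();   // every region's remembered set
//   G1CollectedHeap::heap()->print_cset_rsets();  // only regions in the collection set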
tonyp@10000 2378
david@31344 2379 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
mgerdin@38183 2380
mgerdin@38183 2381 size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
mgerdin@38183 2382 size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
sangheki@37137 2383 size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
david@31344 2384
david@31344 2385 size_t eden_capacity_bytes =
david@31344 2386 (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
david@31344 2387
david@31344 2388 VirtualSpaceSummary heap_summary = create_heap_space_summary();
sangheki@37137 2389 return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
sangheki@37137 2390 eden_capacity_bytes, survivor_used_bytes, num_regions());
david@31344 2391 }
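// Worked example (sketch, not part of the upstream sources): with 4M regions, a young
// list target length of 100 regions and 10 survivor regions, eden_capacity_bytes is
// 100 * 4M - 10 * 4M = 360M, i.e. the space the policy currently intends to give to
// eden before the next young pause.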
david@31344 2392
tschatzl@32380 2393 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
tschatzl@32380 2394 return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
tschatzl@32380 2395 stats->unused(), stats->used(), stats->region_end_waste(),
tschatzl@32380 2396 stats->regions_filled(), stats->direct_allocated(),
tschatzl@32380 2397 stats->failure_used(), stats->failure_waste());
tschatzl@32380 2398 }
tschatzl@32380 2399
david@31344 2400 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
david@31344 2401 const G1HeapSummary& heap_summary = create_g1_heap_summary();
david@31344 2402 gc_tracer->report_gc_heap_summary(when, heap_summary);
david@31344 2403
david@31344 2404 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
david@31344 2405 gc_tracer->report_metaspace_summary(when, metaspace_summary);
david@31344 2406 }
david@31344 2407
ysr@1374 2408 G1CollectedHeap* G1CollectedHeap::heap() {
pliden@30258 2409 CollectedHeap* heap = Universe::heap();
pliden@30258 2410 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
pliden@50123 2411 assert(heap->kind() == CollectedHeap::G1, "Invalid name");
pliden@30258 2412 return (G1CollectedHeap*)heap;
ysr@1374 2413 }
ysr@1374 2414
sjohanss@46828 2415 void G1CollectedHeap::gc_prologue(bool full) {
ysr@4886 2416 // always_do_update_barrier = false;
ysr@1374 2417 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
tschatzl@46330 2418
sjohanss@46828 2419 // This summary needs to be printed before incrementing total collections.
sjohanss@46828 2420 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
sjohanss@46828 2421
sjohanss@46828 2422 // Update common counters.
sjohanss@46828 2423 increment_total_collections(full /* full gc */);
sjohanss@46828 2424 if (full) {
sjohanss@46828 2425 increment_old_marking_cycles_started();
sjohanss@46828 2426 }
sjohanss@46828 2427
sjohanss@46828 2428 // Fill TLAB's and such
tschatzl@46330 2429 double start = os::elapsedTime();
brutisso@22552 2430 accumulate_statistics_all_tlabs();
ysr@1374 2431 ensure_parsability(true);
tschatzl@46330 2432 g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
ysr@1374 2433 }
ysr@1374 2434
jcoomes@26841 2435 void G1CollectedHeap::gc_epilogue(bool full) {
sjohanss@46828 2436 // Update common counters.
sjohanss@46828 2437 if (full) {
sjohanss@46828 2438 // Update the number of full collections that have been completed.
sjohanss@46828 2439 increment_old_marking_cycles_completed(false /* concurrent */);
sjohanss@46828 2440 }
sjohanss@46828 2441
sjohanss@46828 2442 // We are at the end of the GC. Total collections has already been increased.
brutisso@35061 2443 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
tschatzl@17854 2444
ysr@1374 2445 // FIXME: what is this about?
ysr@1374 2446 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
ysr@1374 2447 // is set.
jcm@47980 2448 #if COMPILER2_OR_JVMCI
twisti@33160 2449 assert(DerivedPointerTable::is_empty(), "derived pointer present");
twisti@33160 2450 #endif
ysr@4886 2451 // always_do_update_barrier = true;
johnc@10670 2452
tschatzl@46330 2453 double start = os::elapsedTime();
brutisso@22552 2454 resize_all_tlabs();
tschatzl@46330 2455 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
tschatzl@46330 2456
sjohanss@46828 2457 MemoryService::track_memory_usage();
johnc@10670 2458 // We have just completed a GC. Update the soft reference
johnc@10670 2459 // policy with the new heap occupancy
johnc@10670 2460 Universe::update_heap_info_at_gc();
ysr@1374 2461 }
ysr@1374 2462
tonyp@7398 2463 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
mlarsson@29078 2464 uint gc_count_before,
brutisso@19549 2465 bool* succeeded,
brutisso@19549 2466 GCCause::Cause gc_cause) {
tonyp@7398 2467 assert_heap_not_locked_and_not_at_safepoint();
tschatzl@48951 2468 VM_G1CollectForAllocation op(word_size,
tschatzl@48951 2469 gc_count_before,
tschatzl@48951 2470 gc_cause,
tschatzl@48951 2471 false, /* should_initiate_conc_mark */
sjohanss@49788 2472 g1_policy()->max_pause_time_ms());
tonyp@7398 2473 VMThread::execute(&op);
tonyp@7398 2474
tonyp@7398 2475 HeapWord* result = op.result();
tonyp@7398 2476 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
tonyp@7398 2477 assert(result == NULL || ret_succeeded,
tonyp@7398 2478 "the result should be NULL if the VM did not succeed");
tonyp@7398 2479 *succeeded = ret_succeeded;
tonyp@7398 2480
tonyp@7398 2481 assert_heap_not_locked();
tonyp@7398 2482 return result;
ysr@1374 2483 }
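// Explanatory note (not part of the upstream sources): the pause above runs as a VM
// operation. The requesting Java thread blocks in VMThread::execute() while the VM
// thread brings the VM to a safepoint and performs the evacuation pause; the
// allocation result (possibly NULL) and the success flags are handed back through the
// VM_G1CollectForAllocation instance.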
ysr@1374 2484
tschatzl@50139 2485 void G1CollectedHeap::do_concurrent_mark() {
ysr@3262 2486 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
lkorinth@50214 2487 if (!_cm_thread->in_progress()) {
lkorinth@50214 2488 _cm_thread->set_started();
ysr@3262 2489 CGC_lock->notify();
ysr@1374 2490 }
ysr@1374 2491 }
ysr@1374 2492
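// Explanatory note (not part of the upstream sources): pending_card_num() estimates
// the number of dirty cards still waiting to be refined: the entries sitting in each
// Java thread's local dirty card queue, plus one full buffer's worth of cards for
// every completed buffer already handed to the global DirtyCardQueueSet.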
ysr@1374 2493 size_t G1CollectedHeap::pending_card_num() {
ysr@1374 2494 size_t extra_cards = 0;
dcubed@48321 2495 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
pliden@50247 2496 DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);
ysr@1374 2497 extra_cards += dcq.size();
ysr@1374 2498 }
pliden@50246 2499 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
ysr@1374 2500 size_t buffer_size = dcqs.buffer_size();
ysr@1374 2501 size_t buffer_num = dcqs.completed_buffers_num();
johnc@13482 2502
kbarrett@46443 2503 return buffer_size * buffer_num + extra_cards;
ysr@1374 2504 }
ysr@1374 2505
tschatzl@50099 2506 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
tschatzl@50099 2507 // We don't nominate objects with many remembered set entries, on
tschatzl@50099 2508 // the assumption that such objects are likely still live.
tschatzl@50099 2509 HeapRegionRemSet* rem_set = r->rem_set();
tschatzl@50099 2510
tschatzl@50099 2511 return G1EagerReclaimHumongousObjectsWithStaleRefs ?
tschatzl@50099 2512 rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
tschatzl@50099 2513 G1EagerReclaimHumongousObjects && rem_set->is_empty();
tschatzl@50099 2514 }
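// In other words (explanatory note, not part of the upstream sources): with
// -XX:+G1EagerReclaimHumongousObjectsWithStaleRefs a region remains a candidate as
// long as its remembered set occupancy does not exceed G1RSetSparseRegionEntries;
// otherwise a candidate must have a completely empty remembered set, and eager
// reclaim must be enabled at all via G1EagerReclaimHumongousObjects.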
tschatzl@50099 2515
tschatzl@25889 2516 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
tschatzl@25889 2517 private:
tschatzl@25889 2518 size_t _total_humongous;
tschatzl@25889 2519 size_t _candidate_humongous;
tschatzl@28379 2520
tschatzl@28379 2521 DirtyCardQueue _dcq;
tschatzl@28379 2522
tschatzl@50099 2523 bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
tschatzl@28379 2524 assert(region->is_starts_humongous(), "Must start a humongous object");
kbarrett@30182 2525
mgerdin@37471 2526 oop obj = oop(region->bottom());
mgerdin@37471 2527
mgerdin@37471 2528 // Dead objects cannot be eager reclaim candidates. Due to class
mgerdin@37471 2529 // unloading it is unsafe to query their classes so we return early.
tschatzl@50099 2530 if (g1h->is_obj_dead(obj, region)) {
mgerdin@37471 2531 return false;
mgerdin@37471 2532 }
mgerdin@37471 2533
tschatzl@50102 2534 // If we do not have a complete remembered set for the region, then we cannot
tschatzl@50102 2535 // be sure that we have all references to it.
tschatzl@50102 2536 if (!region->rem_set()->is_complete()) {
tschatzl@50102 2537 return false;
tschatzl@50102 2538 }
kbarrett@30182 2539 // Candidate selection must satisfy the following constraints
kbarrett@30182 2540 // while concurrent marking is in progress:
kbarrett@30182 2541 //
kbarrett@30182 2542 // * In order to maintain SATB invariants, an object must not be
kbarrett@30182 2543 // reclaimed if it was allocated before the start of marking and
kbarrett@30182 2544 // has not had its references scanned. Such an object must have
kbarrett@30182 2545 // its references (including type metadata) scanned to ensure no
kbarrett@30182 2546 // live objects are missed by the marking process. Objects
kbarrett@30182 2547 // allocated after the start of concurrent marking don't need to
kbarrett@30182 2548 // be scanned.
kbarrett@30182 2549 //
kbarrett@30182 2550 // * An object must not be reclaimed if it is on the concurrent
kbarrett@30182 2551 // mark stack. Objects allocated after the start of concurrent
kbarrett@30182 2552 // marking are never pushed on the mark stack.
kbarrett@30182 2553 //
kbarrett@30182 2554 // Nominating only objects allocated after the start of concurrent
kbarrett@30182 2555 // marking is sufficient to meet both constraints. This may miss
kbarrett@30182 2556 // some objects that satisfy the constraints, but the marking data
kbarrett@30182 2557 // structures don't support efficiently performing the needed
kbarrett@30182 2558 // additional tests or scrubbing of the mark stack.
kbarrett@30182 2559 //
kbarrett@30182 2560 // However, we presently only nominate is_typeArray() objects.
kbarrett@30182 2561 // A humongous object containing references induces remembered
kbarrett@30182 2562 // set entries on other regions. In order to reclaim such an
kbarrett@30182 2563 // object, those remembered sets would need to be cleaned up.
kbarrett@30182 2564 //
kbarrett@30182 2565 // We also treat is_typeArray() objects specially, allowing them
kbarrett@30182 2566 // to be reclaimed even if allocated before the start of
kbarrett@30182 2567 // concurrent mark. For this we rely on mark stack insertion to
kbarrett@30182 2568 // exclude is_typeArray() objects, preventing reclaiming an object
kbarrett@30182 2569 // that is in the mark stack. We also rely on the metadata for
kbarrett@30182 2570 // such objects to be built-in and so ensured to be kept live.
kbarrett@30182 2571 // Frequent allocation and drop of large binary blobs is an
kbarrett@30182 2572 // important use case for eager reclaim, and this special handling
kbarrett@30182 2573 // may reduce needed headroom.
kbarrett@30182 2574
tschatzl@50099 2575 return obj->is_typeArray() &&
tschatzl@50099 2576 g1h->is_potential_eager_reclaim_candidate(region);
tschatzl@28379 2577 }
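// Summarizing the predicate above (explanatory note, not part of the upstream
// sources): a starts-humongous region is nominated only if the humongous object is
// still live, the region's remembered set is complete, the object is a typeArray
// (so it contains no references that would need scanning), and
// is_potential_eager_reclaim_candidate() accepts its remembered set occupancy.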
tschatzl@28379 2578
tschatzl@25889 2579 public:
tschatzl@28379 2580 RegisterHumongousWithInCSetFastTestClosure()
tschatzl@28379 2581 : _total_humongous(0),
tschatzl@28379 2582 _candidate_humongous(0),
pliden@50246 2583 _dcq(&G1BarrierSet::dirty_card_queue_set()) {
tschatzl@25889 2584 }
tschatzl@25889 2585
tschatzl@49381 2586 virtual bool do_heap_region(HeapRegion* r) {
tonyp@26846 2587 if (!r->is_starts_humongous()) {
tschatzl@25889 2588 return false;
tschatzl@25889 2589 }
tschatzl@25889 2590 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tschatzl@25889 2591
kbarrett@30182 2592 bool is_candidate = humongous_region_is_candidate(g1h, r);
kbarrett@30182 2593 uint rindex = r->hrm_index();
kbarrett@30182 2594 g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
tschatzl@25889 2595 if (is_candidate) {
kbarrett@30182 2596 _candidate_humongous++;
kbarrett@30182 2597 g1h->register_humongous_region_with_cset(rindex);
kbarrett@30182 2598 // Is_candidate already filters out humongous objects with large remembered sets.
kbarrett@30182 2599 // If we have a humongous object with a few remembered set entries, we simply flush these
kbarrett@30182 2600 // remembered set entries into the DCQS. That will result in automatic
kbarrett@30182 2601 // re-evaluation of their remembered set entries during the following evacuation
kbarrett@30182 2602 // phase.
tschatzl@28379 2603 if (!r->rem_set()->is_empty()) {
tschatzl@28379 2604 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
tschatzl@28379 2605 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
eosterlund@49595 2606 G1CardTable* ct = g1h->card_table();
tschatzl@28379 2607 HeapRegionRemSetIterator hrrs(r->rem_set());
tschatzl@28379 2608 size_t card_index;
tschatzl@28379 2609 while (hrrs.has_next(card_index)) {
eosterlund@49595 2610 jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
sjohanss@28835 2611 // The remembered set might contain references to already freed
sjohanss@28835 2612 // regions. Filter out such entries to avoid failing card table
sjohanss@28835 2613 // verification.
eosterlund@49595 2614 if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
eosterlund@49595 2615 if (*card_ptr != G1CardTable::dirty_card_val()) {
eosterlund@49595 2616 *card_ptr = G1CardTable::dirty_card_val();
sjohanss@28835 2617 _dcq.enqueue(card_ptr);
sjohanss@28835 2618 }
tschatzl@28379 2619 }
tschatzl@28379 2620 }
sjohanss@34135 2621 assert(hrrs.n_yielded() == r->rem_set()->occupied(),
sjohanss@34135 2622 "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
sjohanss@34135 2623 hrrs.n_yielded(), r->rem_set()->occupied());
tschatzl@50102 2624 // We should only clear the card-based remembered set here as we will not
tschatzl@50102 2625 // implicitly rebuild anything else during eager reclaim. Note that at the moment
tschatzl@50102 2626 // (and probably never) we do not enter this path if there are other kinds of
tschatzl@50102 2627 // remembered sets for this region.
tschatzl@50102 2628 r->rem_set()->clear_locked(true /* only_cardset */);
tschatzl@50102 2629 // clear_locked() above sets the state to Empty. However, we want to continue
tschatzl@50102 2630 // collecting remembered set entries for humongous regions that were not
tschatzl@50102 2631 // reclaimed.
tschatzl@50102 2632 r->rem_set()->set_state_complete();
tschatzl@28379 2633 }
tschatzl@28379 2634 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
tschatzl@25889 2635 }
tschatzl@25889 2636 _total_humongous++;
tschatzl@25889 2637
tschatzl@25889 2638 return false;
tschatzl@25889 2639 }
tschatzl@25889 2640
tschatzl@25889 2641 size_t total_humongous() const { return _total_humongous; }
tschatzl@25889 2642 size_t candidate_humongous() const { return _candidate_humongous; }
tschatzl@28379 2643
tschatzl@28379 2644 void flush_rem_set_entries() { _dcq.flush(); }
tschatzl@25889 2645 };
tschatzl@25889 2646
ehelin@29470 2647 void G1CollectedHeap::register_humongous_regions_with_cset() {
tschatzl@28379 2648 if (!G1EagerReclaimHumongousObjects) {
tschatzl@28379 2649 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
tschatzl@25889 2650 return;
tschatzl@25889 2651 }
tschatzl@28379 2652 double time = os::elapsed_counter();
tschatzl@25889 2653
kbarrett@30182 2654 // Collect reclaim candidate information and register candidates with cset.
tschatzl@25889 2655 RegisterHumongousWithInCSetFastTestClosure cl;
tschatzl@25889 2656 heap_region_iterate(&cl);
tschatzl@28379 2657
tschatzl@28379 2658 time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
tschatzl@28379 2659 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
tschatzl@28379 2660 cl.total_humongous(),
tschatzl@25889 2661 cl.candidate_humongous());
tschatzl@25889 2662 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
tschatzl@25889 2663
tschatzl@28379 2664 // Finally, flush all remembered set entries that need re-checking into the global DCQS.
tschatzl@28379 2665 cl.flush_rem_set_entries();
tschatzl@25889 2666 }
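// Explanatory note (not part of the upstream sources): registering a candidate with
// the in-cset fast test makes the evacuation treat references into that humongous
// region much like references into the collection set, so any reference discovered
// during the pause marks the region as live and removes it from candidacy. The sparse
// remembered set entries flushed as dirty cards above are re-examined during the same
// pause. Candidates that end the pause without any discovered references are freed in
// eagerly_reclaim_humongous_regions().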
tschatzl@25889 2667
poonam@35507 2668 class VerifyRegionRemSetClosure : public HeapRegionClosure {
poonam@35507 2669 public:
tschatzl@49381 2670 bool do_heap_region(HeapRegion* hr) {
poonam@35507 2671 if (!hr->is_archive() && !hr->is_continues_humongous()) {
poonam@35507 2672 hr->verify_rem_set();
poonam@35507 2673 }
poonam@35507 2674 return false;
poonam@35507 2675 }
poonam@35507 2676 };
poonam@35507 2677
stefank@30878 2678 uint G1CollectedHeap::num_task_queues() const {
stefank@30878 2679 return _task_queues->size();
stefank@30878 2680 }
stefank@30878 2681
jcoomes@6251 2682 #if TASKQUEUE_STATS
jcoomes@6251 2683 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
jcoomes@6251 2684 st->print_raw_cr("GC Task Stats");
jcoomes@6251 2685 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
jcoomes@6251 2686 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
jcoomes@6251 2687 }
jcoomes@6251 2688
brutisso@35061 2689 void G1CollectedHeap::print_taskqueue_stats() const {
tschatzl@37180 2690 if (!log_is_enabled(Trace, gc, task, stats)) {
brutisso@35061 2691 return;
brutisso@35061 2692 }
stefank@37242 2693 Log(gc, task, stats) log;
brutisso@35061 2694 ResourceMark rm;
stuefe@46701 2695 LogStream ls(log.trace());
stuefe@46701 2696 outputStream* st = &ls;
brutisso@35061 2697
jcoomes@6251 2698 print_taskqueue_stats_hdr(st);
jcoomes@6251 2699
jcoomes@6251 2700 TaskQueueStats totals;
stefank@30878 2701 const uint n = num_task_queues();
eistepan@30585 2702 for (uint i = 0; i < n; ++i) {
eistepan@30585 2703 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
jcoomes@6251 2704 totals += task_queue(i)->stats;
jcoomes@6251 2705 }
jcoomes@6251 2706 st->print_raw("tot "); totals.print(st); st->cr();
jcoomes@6251 2707
jcoomes@6251 2708 DEBUG_ONLY(totals.verify());
jcoomes@6251 2709 }
jcoomes@6251 2710
jcoomes@6251 2711 void G1CollectedHeap::reset_taskqueue_stats() {
stefank@30878 2712 const uint n = num_task_queues();
eistepan@30585 2713 for (uint i = 0; i < n; ++i) {
jcoomes@6251 2714 task_queue(i)->stats.reset();
jcoomes@6251 2715 }
jcoomes@6251 2716 }
jcoomes@6251 2717 #endif // TASKQUEUE_STATS
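// Note (usage sketch, not part of the upstream sources): the task queue statistics
// above are only compiled in when TASKQUEUE_STATS is enabled (typically debug builds)
// and are only printed when the matching unified logging tag set is active, e.g.
//
//   -Xlog:gc+task+stats=trace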
jcoomes@6251 2718
brutisso@31397 2719 void G1CollectedHeap::wait_for_root_region_scanning() {
brutisso@31397 2720 double scan_wait_start = os::elapsedTime();
brutisso@31397 2721 // We have to wait until the CM threads finish scanning the
brutisso@31397 2722 // root regions as it's the only way to ensure that all the
brutisso@31397 2723 // objects on them have been correctly scanned before we start
brutisso@31397 2724 // moving them during the GC.
brutisso@31397 2725 bool waited = _cm->root_regions()->wait_until_scan_finished();
brutisso@31397 2726 double wait_time_ms = 0.0;
brutisso@31397 2727 if (waited) {
brutisso@31397 2728 double scan_wait_end = os::elapsedTime();
brutisso@31397 2729 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
brutisso@31397 2730 }
brutisso@31397 2731 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
brutisso@31397 2732 }
brutisso@31397 2733
tschatzl@39698 2734 class G1PrintCollectionSetClosure : public HeapRegionClosure {
tschatzl@39698 2735 private:
tschatzl@39698 2736 G1HRPrinter* _hr_printer;
tschatzl@39698 2737 public:
tschatzl@39698 2738 G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
tschatzl@39698 2739
tschatzl@49381 2740 virtual bool do_heap_region(HeapRegion* r) {
tschatzl@39698 2741 _hr_printer->cset(r);
tschatzl@39698 2742 return false;
tschatzl@39698 2743 }
tschatzl@39698 2744 };
tschatzl@39698 2745
tschatzl@46330 2746 void G1CollectedHeap::start_new_collection_set() {
tschatzl@46330 2747 collection_set()->start_incremental_building();
tschatzl@46330 2748
tschatzl@46330 2749 clear_cset_fast_test();
tschatzl@46330 2750
tschatzl@46330 2751 guarantee(_eden.length() == 0, "eden should have been cleared");
tschatzl@46330 2752 g1_policy()->transfer_survivors_to_cset(survivor());
tschatzl@46330 2753 }
tschatzl@46330 2754
tonyp@7398 2755 bool
tonyp@6058 2756 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
kbarrett@49798 2757 assert_at_safepoint_on_vm_thread();
tonyp@7923 2758 guarantee(!is_gc_active(), "collection is not reentrant");
tonyp@7923 2759
david@35492 2760 if (GCLocker::check_active_before_gc()) {
tonyp@7398 2761 return false;
tonyp@5243 2762 }
tonyp@5243 2763
mgronlun@21767 2764 _gc_timer_stw->register_gc_start();
sla@18025 2765
brutisso@33107 2766 GCIdMark gc_id_mark;
sla@18025 2767 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
sla@18025 2768
kamg@7896 2769 SvcGCMarker sgcm(SvcGCMarker::MINOR);
tonyp@7659 2770 ResourceMark rm;
tonyp@7659 2771
tschatzl@38167 2772 g1_policy()->note_gc_start();
tschatzl@38167 2773
brutisso@31397 2774 wait_for_root_region_scanning();
brutisso@31397 2775
never@11636 2776 print_heap_before_gc();
sjohanss@37085 2777 print_heap_regions();
sla@18025 2778 trace_heap_before_gc(_gc_tracer_stw);
tonyp@3191 2779
david@35851 2780 _verifier->verify_region_sets_optional();
david@35851 2781 _verifier->verify_dirty_young_regions();
tonyp@7923 2782
brutisso@37120 2783 // We should not be doing initial mark unless the conc mark thread is running
lkorinth@50214 2784 if (!_cm_thread->should_terminate()) {
brutisso@37120 2785 // This call will decide whether this pause is an initial-mark
tschatzl@50138 2786 // pause. If it is, in_initial_mark_gc() will return true
brutisso@37120 2787 // for the duration of this pause.
brutisso@37120 2788 g1_policy()->decide_on_conc_mark_initiation();
brutisso@37120 2789 }
johnc@11578 2790
johnc@11578 2791 // We do not allow initial-mark to be piggy-backed on a mixed GC.
tschatzl@50138 2792 assert(!collector_state()->in_initial_mark_gc() ||
tschatzl@50138 2793 collector_state()->in_young_only_phase(), "sanity");
johnc@11578 2794
johnc@11578 2795 // We also do not allow mixed GCs during marking.
tschatzl@50138 2796 assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
johnc@11578 2797
johnc@11578 2798 // Record whether this pause is an initial mark. When the current
johnc@11578 2799 // thread has completed its logging output and it's safe to signal
johnc@11578 2800 // the CM thread, the flag's value in the policy has been reset.
tschatzl@50138 2801 bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
johnc@11578 2802
johnc@11578 2803 // Inner scope for scope based logging, timers, and stats collection
tonyp@3191 2804 {
sla@18025 2805 EvacuationInfo evacuation_info;
sla@18025 2806
tschatzl@50138 2807 if (collector_state()->in_initial_mark_gc()) {
tonyp@6058 2808 // We are about to start a marking cycle, so we increment the
tonyp@6058 2809 // full collection counter.
brutisso@12934 2810 increment_old_marking_cycles_started();
sangheki@37137 2811 _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
tonyp@6058 2812 }
sla@18025 2813
drwhite@31331 2814 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
sla@18025 2815
brutisso@35061 2816 GCTraceCPUTime tcpu;
brutisso@12511 2817
sjohanss@48395 2818 G1HeapVerifier::G1VerifyType verify_type;
tschatzl@51421 2819 FormatBuffer<> gc_string("Pause Young ");
tschatzl@50138 2820 if (collector_state()->in_initial_mark_gc()) {
tschatzl@51421 2821 gc_string.append("(Concurrent Start)");
tschatzl@51421 2822 verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
tschatzl@50138 2823 } else if (collector_state()->in_young_only_phase()) {
tschatzl@51421 2824 if (collector_state()->in_young_gc_before_mixed()) {
tschatzl@51421 2825 gc_string.append("(Prepare Mixed)");
tschatzl@51421 2826 } else {
tschatzl@51421 2827 gc_string.append("(Normal)");
tschatzl@51421 2828 }
tschatzl@51421 2829 verify_type = G1HeapVerifier::G1VerifyYoungNormal;
brutisso@35061 2830 } else {
tschatzl@51421 2831 gc_string.append("(Mixed)");
sjohanss@48395 2832 verify_type = G1HeapVerifier::G1VerifyMixed;
brutisso@35061 2833 }
brutisso@35061 2834 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
jmasa@29809 2835
brutisso@36201 2836 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
brutisso@36201 2837 workers()->active_workers(),
brutisso@36201 2838 Threads::number_of_non_daemon_threads());
lkorinth@50094 2839 active_workers = workers()->update_active_workers(active_workers);
tschatzl@40922 2840 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
brutisso@36201 2841
jmasa@9338 2842 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
phh@51269 2843 TraceMemoryManagerStats tms(&_memory_manager, gc_cause(),
phh@51269 2844 collector_state()->yc_type() == Mixed /* allMemoryPoolsAffected */);
tonyp@4459 2845
brutisso@35909 2846 G1HeapTransition heap_transition(this);
brutisso@35909 2847 size_t heap_used_bytes_before_gc = used();
brutisso@35909 2848
jmasa@11174 2849 // Don't dynamically change the number of GC threads this early. A value of
jmasa@11174 2850 // 0 is used to indicate serial work. When parallel work is done,
jmasa@11174 2851 // it will be set.
jmasa@11174 2852
tonyp@3191 2853 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
tonyp@3191 2854 IsGCActiveMark x;
tonyp@3191 2855
tonyp@3191 2856 gc_prologue(false);
ysr@1374 2857
poonam@35507 2858 if (VerifyRememberedSets) {
poonam@35507 2859 log_info(gc, verify)("[Verifying RemSets before GC]");
poonam@35507 2860 VerifyRegionRemSetClosure v_cl;
poonam@35507 2861 heap_region_iterate(&v_cl);
poonam@35507 2862 }
poonam@35507 2863
sjohanss@48395 2864 _verifier->verify_before_gc(verify_type);
david@35851 2865
david@35851 2866 _verifier->check_bitmaps("GC Start");
tonyp@3191 2867
jcm@47980 2868 #if COMPILER2_OR_JVMCI
twisti@33160 2869 DerivedPointerTable::clear();
twisti@33160 2870 #endif
tonyp@3191 2871
johnc@10670 2872 // Please see comment in g1CollectedHeap.hpp and
johnc@10670 2873 // G1CollectedHeap::ref_processing_init() to see how
johnc@10670 2874 // reference processing currently works in G1.
johnc@10670 2875
johnc@10670 2876 // Enable discovery in the STW reference processor
tschatzl@50490 2877 _ref_processor_stw->enable_discovery();
johnc@10670 2878
johnc@10670 2879 {
johnc@10670 2880 // We want to temporarily turn off discovery by the
johnc@10670 2881 // CM ref processor, if necessary, and turn it back on
johnc@10670 2882 // again later if we do. Using a scoped
johnc@10670 2883 // NoRefDiscovery object will do this.
tschatzl@50490 2884 NoRefDiscovery no_cm_discovery(_ref_processor_cm);
johnc@10670 2885
johnc@10670 2886 // Forget the current alloc region (we might even choose it to be part
johnc@10670 2887 // of the collection set!).
sjohanss@26837 2888 _allocator->release_mutator_alloc_region();
johnc@10670 2889
brutisso@13288 2890 // This timing is only used by the ergonomics to handle our pause target.
brutisso@13288 2891 // It is unclear why this should not include the full pause. We will
brutisso@13288 2892 // investigate this in CR 7178365.
brutisso@13288 2893 //
brutisso@13288 2894 // Preserving the old comment here if that helps the investigation:
brutisso@13288 2895 //
johnc@10670 2896 // The elapsed time induced by the start time below deliberately elides
johnc@10670 2897 // the possible verification above.
brutisso@13288 2898 double sample_start_time_sec = os::elapsedTime();
tonyp@3191 2899
johnc@16993 2900 g1_policy()->record_collection_pause_start(sample_start_time_sec);
tonyp@3191 2901
tschatzl@50138 2902 if (collector_state()->in_initial_mark_gc()) {
tschatzl@50139 2903 concurrent_mark()->pre_initial_mark();
johnc@10670 2904 }
johnc@10670 2905
mgerdin@38183 2906 g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
mgerdin@37039 2907
mgerdin@37039 2908 evacuation_info.set_collectionset_regions(collection_set()->region_length());
johnc@10670 2909
sjohanss@34135 2910 // Make sure the remembered sets are up to date. This needs to be
sjohanss@34135 2911 // done before register_humongous_regions_with_cset(), because the
sjohanss@34135 2912 // remembered sets are used there to choose eager reclaim candidates.
sjohanss@34135 2913 // If the remembered sets are not up to date we might miss some
sjohanss@34135 2914 // entries that need to be handled.
sjohanss@34135 2915 g1_rem_set()->cleanupHRRS();
sjohanss@34135 2916
ehelin@29470 2917 register_humongous_regions_with_cset();
tschatzl@25889 2918
david@35851 2919 assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
tschatzl@28213 2920
kbarrett@30279 2921 // We call this after finalize_collection_set() to
tonyp@11455 2922 // ensure that the collection set has been finalized.
kbarrett@30279 2923 _cm->verify_no_cset_oops();
tonyp@11455 2924
johnc@10670 2925 if (_hr_printer.is_active()) {
tschatzl@39698 2926 G1PrintCollectionSetClosure cl(&_hr_printer);
tschatzl@39698 2927 _collection_set.iterate(&cl);
brutisso@10529 2928 }
johnc@10670 2929
johnc@10670 2930 // Initialize the GC alloc regions.
sjohanss@26837 2931 _allocator->init_gc_alloc_regions(evacuation_info);
johnc@10670 2932
mgerdin@37039 2933 G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
mgerdin@33609 2934 pre_evacuate_collection_set();
mgerdin@33609 2935
johnc@10670 2936 // Actually do the work...
ehelin@49792 2937 evacuate_collection_set(&per_thread_states);
johnc@10670 2938
mgerdin@33609 2939 post_evacuate_collection_set(evacuation_info, &per_thread_states);
mgerdin@33609 2940
mgerdin@32737 2941 const size_t* surviving_young_words = per_thread_states.surviving_young_words();
tschatzl@39698 2942 free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
tschatzl@25889 2943
tschatzl@25889 2944 eagerly_reclaim_humongous_regions();
tschatzl@25889 2945
tschatzl@36389 2946 record_obj_copy_mem_stats();
tschatzl@36389 2947 _survivor_evac_stats.adjust_desired_plab_sz();
tschatzl@36389 2948 _old_evac_stats.adjust_desired_plab_sz();
tschatzl@36389 2949
tschatzl@46347 2950 double start = os::elapsedTime();
tschatzl@46330 2951 start_new_collection_set();
tschatzl@46347 2952 g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
johnc@10670 2953
johnc@10670 2954 if (evacuation_failed()) {
ehelin@31975 2955 set_used(recalculate_used());
jiangli@31346 2956 if (_archive_allocator != NULL) {
jiangli@31346 2957 _archive_allocator->clear_used();
jiangli@31346 2958 }
stefank@30876 2959 for (uint i = 0; i < ParallelGCThreads; i++) {
sla@18025 2960 if (_evacuation_failed_info_array[i].has_failed()) {
sla@18025 2961 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
sla@18025 2962 }
sla@18025 2963 }
johnc@10670 2964 } else {
johnc@10670 2965 // The "used" of the the collection set have already been subtracted
johnc@10670 2966 // when they were freed. Add in the bytes evacuated.
ehelin@31975 2967 increase_used(g1_policy()->bytes_copied_during_gc());
johnc@10670