annotate src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 7136:f74dbdd45754

7176220: 'Full GC' events miss date stamp information occasionally
Summary: Move date stamp logic into GCTraceTime
Reviewed-by: brutisso, tschatzl
author aeriksso
date Fri, 17 May 2013 17:24:20 +0200
parents c2844108a708
children 38d6febe66af
rev   line source
duke@0 1 /*
drchase@6245 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #include "precompiled.hpp"
coleenp@3602 26 #include "classfile/classLoaderData.hpp"
stefank@1879 27 #include "classfile/symbolTable.hpp"
stefank@1879 28 #include "classfile/systemDictionary.hpp"
stefank@1879 29 #include "code/codeCache.hpp"
stefank@1879 30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
stefank@1879 31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
stefank@1879 32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
stefank@1879 33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
stefank@1879 34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@1879 35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@1879 36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@1879 37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
stefank@1879 38 #include "gc_implementation/parNew/parNewGeneration.hpp"
stefank@1879 39 #include "gc_implementation/shared/collectorCounters.hpp"
sla@4802 40 #include "gc_implementation/shared/gcTimer.hpp"
sla@4802 41 #include "gc_implementation/shared/gcTrace.hpp"
sla@4802 42 #include "gc_implementation/shared/gcTraceTime.hpp"
stefank@1879 43 #include "gc_implementation/shared/isGCActiveMark.hpp"
stefank@1879 44 #include "gc_interface/collectedHeap.inline.hpp"
sla@4802 45 #include "memory/allocation.hpp"
stefank@1879 46 #include "memory/cardTableRS.hpp"
stefank@1879 47 #include "memory/collectorPolicy.hpp"
stefank@1879 48 #include "memory/gcLocker.inline.hpp"
stefank@1879 49 #include "memory/genCollectedHeap.hpp"
stefank@1879 50 #include "memory/genMarkSweep.hpp"
stefank@1879 51 #include "memory/genOopClosures.inline.hpp"
stefank@6455 52 #include "memory/iterator.inline.hpp"
stefank@5080 53 #include "memory/padded.hpp"
stefank@1879 54 #include "memory/referencePolicy.hpp"
stefank@1879 55 #include "memory/resourceArea.hpp"
jmasa@4465 56 #include "memory/tenuredGeneration.hpp"
stefank@1879 57 #include "oops/oop.inline.hpp"
stefank@1879 58 #include "prims/jvmtiExport.hpp"
stefank@1879 59 #include "runtime/globals_extension.hpp"
stefank@1879 60 #include "runtime/handles.inline.hpp"
stefank@1879 61 #include "runtime/java.hpp"
goetz@6317 62 #include "runtime/orderAccess.inline.hpp"
stefank@1879 63 #include "runtime/vmThread.hpp"
stefank@1879 64 #include "services/memoryService.hpp"
stefank@1879 65 #include "services/runtimeService.hpp"
duke@0 66
drchase@6245 67 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
drchase@6245 68
duke@0 69 // statics
duke@0 70 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
sla@4802 71 bool CMSCollector::_full_gc_requested = false;
sla@4802 72 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
duke@0 73
duke@0 74 //////////////////////////////////////////////////////////////////
duke@0 75 // In support of CMS/VM thread synchronization
duke@0 76 //////////////////////////////////////////////////////////////////
duke@0 77 // We split use of the CGC_lock into 2 "levels".
duke@0 78 // The low-level locking is of the usual CGC_lock monitor. We introduce
duke@0 79 // a higher level "token" (hereafter "CMS token") built on top of the
duke@0 80 // low level monitor (hereafter "CGC lock").
duke@0 81 // The token-passing protocol gives priority to the VM thread. The
duke@0 82 // CMS-lock doesn't provide any fairness guarantees, but clients
duke@0 83 // should ensure that it is only held for very short, bounded
duke@0 84 // durations.
duke@0 85 //
duke@0 86 // When either of the CMS thread or the VM thread is involved in
duke@0 87 // collection operations during which it does not want the other
duke@0 88 // thread to interfere, it obtains the CMS token.
duke@0 89 //
duke@0 90 // If either thread tries to get the token while the other has
duke@0 91 // it, that thread waits. However, if the VM thread and CMS thread
duke@0 92 // both want the token, then the VM thread gets priority while the
duke@0 93 // CMS thread waits. This ensures, for instance, that the "concurrent"
duke@0 94 // phases of the CMS thread's work do not block out the VM thread
duke@0 95 // for long periods of time as the CMS thread continues to hog
duke@0 96 // the token. (See bug 4616232).
duke@0 97 //
duke@0 98 // The baton-passing functions are, however, controlled by the
duke@0 99 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
duke@0 100 // and here the low-level CMS lock, not the high level token,
duke@0 101 // ensures mutual exclusion.
duke@0 102 //
duke@0 103 // Two important conditions that we have to satisfy:
duke@0 104 // 1. if a thread does a low-level wait on the CMS lock, then it
duke@0 105 // relinquishes the CMS token if it were holding that token
duke@0 106 // when it acquired the low-level CMS lock.
duke@0 107 // 2. any low-level notifications on the low-level lock
duke@0 108 // should only be sent when a thread has relinquished the token.
duke@0 109 //
duke@0 110 // In the absence of either property, we'd have potential deadlock.
duke@0 111 //
duke@0 112 // We protect each of the CMS (concurrent and sequential) phases
duke@0 113 // with the CMS _token_, not the CMS _lock_.
duke@0 114 //
duke@0 115 // The only code protected by CMS lock is the token acquisition code
duke@0 116 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
duke@0 117 // baton-passing code.
duke@0 118 //
duke@0 119 // Unfortunately, I couldn't come up with a good abstraction to factor and
duke@0 120 // hide the naked CGC_lock manipulation in the baton-passing code
duke@0 121 // further below. That's something we should try to do. Also, the proof
duke@0 122 // of correctness of this 2-level locking scheme is far from obvious,
duke@0 123 // and potentially quite slippery. We have an uneasy suspicion, for instance,
duke@0 124 // that there may be a theoretical possibility of delay/starvation in the
duke@0 125 // low-level lock/wait/notify scheme used for the baton-passing because of
duke@0 126 // potential interference with the priority scheme embodied in the
duke@0 127 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
duke@0 128 // invocation further below and marked with "XXX 20011219YSR".
duke@0 129 // Indeed, as we note elsewhere, this may become yet more slippery
duke@0 130 // in the presence of multiple CMS and/or multiple VM threads. XXX
duke@0 131
duke@0 132 class CMSTokenSync: public StackObj {
duke@0 133 private:
duke@0 134 bool _is_cms_thread;
duke@0 135 public:
duke@0 136 CMSTokenSync(bool is_cms_thread):
duke@0 137 _is_cms_thread(is_cms_thread) {
duke@0 138 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
duke@0 139 "Incorrect argument to constructor");
duke@0 140 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
duke@0 141 }
duke@0 142
duke@0 143 ~CMSTokenSync() {
duke@0 144 assert(_is_cms_thread ?
duke@0 145 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
duke@0 146 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
duke@0 147 "Incorrect state");
duke@0 148 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
duke@0 149 }
duke@0 150 };
duke@0 151
duke@0 152 // Convenience class that does a CMSTokenSync, and then acquires
duke@0 153 // up to three locks.
duke@0 154 class CMSTokenSyncWithLocks: public CMSTokenSync {
duke@0 155 private:
duke@0 156 // Note: locks are acquired in textual declaration order
duke@0 157 // and released in the opposite order
duke@0 158 MutexLockerEx _locker1, _locker2, _locker3;
duke@0 159 public:
duke@0 160 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
duke@0 161 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
duke@0 162 CMSTokenSync(is_cms_thread),
duke@0 163 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
duke@0 164 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
duke@0 165 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
duke@0 166 { }
duke@0 167 };
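// Illustrative usage sketch, not part of the original file. A CMS phase that
// must exclude the other thread typically wraps its work in one of these
// stack objects; the token and locks are released when the scope ends.
// (bitMapLock() below is just a stand-in for whichever mutexes the phase
// actually needs.)
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is CMS thread */, bitMapLock());
//     // ... work that must not be interleaved with the VM thread ...
//   }   // locks released in reverse declaration order, then the CMS token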
duke@0 168
duke@0 169
duke@0 170 // Wrapper class to temporarily disable icms during a foreground cms collection.
duke@0 171 class ICMSDisabler: public StackObj {
duke@0 172 public:
duke@0 173 // The ctor disables icms and wakes up the thread so it notices the change;
duke@0 174 // the dtor re-enables icms. Note that the CMSCollector methods will check
duke@0 175 // CMSIncrementalMode.
duke@0 176 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
duke@0 177 ~ICMSDisabler() { CMSCollector::enable_icms(); }
duke@0 178 };
duke@0 179
duke@0 180 //////////////////////////////////////////////////////////////////
duke@0 181 // Concurrent Mark-Sweep Generation /////////////////////////////
duke@0 182 //////////////////////////////////////////////////////////////////
duke@0 183
duke@0 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
duke@0 185
duke@0 186 // This struct contains per-thread things necessary to support parallel
duke@0 187 // young-gen collection.
zgu@3465 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
duke@0 189 public:
duke@0 190 CFLS_LAB lab;
duke@0 191 PromotionInfo promo;
duke@0 192
duke@0 193 // Constructor.
duke@0 194 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
duke@0 195 promo.setSpace(cfls);
duke@0 196 }
duke@0 197 };
duke@0 198
duke@0 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
duke@0 200 ReservedSpace rs, size_t initial_byte_size, int level,
duke@0 201 CardTableRS* ct, bool use_adaptive_freelists,
jmasa@3295 202 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
duke@0 203 CardGeneration(rs, initial_byte_size, level, ct),
kvn@1491 204 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
jmasa@4641 205 _debug_collection_type(Concurrent_collection_type),
jmasa@4641 206 _did_compact(false)
duke@0 207 {
duke@0 208 HeapWord* bottom = (HeapWord*) _virtual_space.low();
duke@0 209 HeapWord* end = (HeapWord*) _virtual_space.high();
duke@0 210
duke@0 211 _direct_allocated_words = 0;
duke@0 212 NOT_PRODUCT(
duke@0 213 _numObjectsPromoted = 0;
duke@0 214 _numWordsPromoted = 0;
duke@0 215 _numObjectsAllocated = 0;
duke@0 216 _numWordsAllocated = 0;
duke@0 217 )
duke@0 218
duke@0 219 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
duke@0 220 use_adaptive_freelists,
duke@0 221 dictionaryChoice);
duke@0 222 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
duke@0 223 if (_cmsSpace == NULL) {
duke@0 224 vm_exit_during_initialization(
duke@0 225 "CompactibleFreeListSpace allocation failure");
duke@0 226 }
duke@0 227 _cmsSpace->_gen = this;
duke@0 228
duke@0 229 _gc_stats = new CMSGCStats();
duke@0 230
duke@0 231 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
duke@0 232 // offsets match. The ability to tell free chunks from objects
duke@0 233 // depends on this property.
duke@0 234 debug_only(
duke@0 235 FreeChunk* junk = NULL;
ehelin@5259 236 assert(UseCompressedClassPointers ||
coleenp@187 237 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
duke@0 238 "Offset of FreeChunk::_prev within FreeChunk must match"
duke@0 239 " that of OopDesc::_klass within OopDesc");
duke@0 240 )
jmasa@1753 241 if (CollectedHeap::use_parallel_gc_threads()) {
duke@0 242 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
duke@0 243 _par_gc_thread_states =
zgu@3465 244 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
duke@0 245 if (_par_gc_thread_states == NULL) {
duke@0 246 vm_exit_during_initialization("Could not allocate par gc structs");
duke@0 247 }
duke@0 248 for (uint i = 0; i < ParallelGCThreads; i++) {
duke@0 249 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
duke@0 250 if (_par_gc_thread_states[i] == NULL) {
duke@0 251 vm_exit_during_initialization("Could not allocate par gc structs");
duke@0 252 }
duke@0 253 }
duke@0 254 } else {
duke@0 255 _par_gc_thread_states = NULL;
duke@0 256 }
duke@0 257 _incremental_collection_failed = false;
duke@0 258 // The "dilatation_factor" is the expansion that can occur on
duke@0 259 // account of the fact that the minimum object size in the CMS
duke@0 260 // generation may be larger than that in, say, a contiguous young
duke@0 261 // generation.
duke@0 262 // Ideally, in the calculation below, we'd compute the dilatation
duke@0 263 // factor as: MinChunkSize/(promoting_gen's min object size)
duke@0 264 // Since we do not have such a general query interface for the
duke@0 265 // promoting generation, we'll instead just use the minimum
duke@0 266 // object size (which today is a header's worth of space);
duke@0 267 // note that all arithmetic is in units of HeapWords.
kvn@1491 268 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
duke@0 269 assert(_dilatation_factor >= 1.0, "from previous assert");
duke@0 270 }
duke@0 271
ysr@94 272
ysr@94 273 // The field "_initiating_occupancy" represents the occupancy percentage
ysr@94 274 // at which we trigger a new collection cycle. Unless explicitly specified
coleenp@3602 275 // via CMSInitiatingOccupancyFraction (argument "io" below), it
ysr@94 276 // is calculated by:
ysr@94 277 //
ysr@94 278 // Let "f" be MinHeapFreeRatio in
ysr@94 279 //
ysr@94 280 // _initiating_occupancy = 100-f +
coleenp@3602 281 // f * (CMSTriggerRatio/100)
coleenp@3602 282 // where CMSTriggerRatio is the argument "tr" below.
ysr@94 283 //
ysr@94 284 // That is, if we assume the heap is at its desired maximum occupancy at the
coleenp@3602 285 // end of a collection, we let CMSTriggerRatio percent of the (purported) free
ysr@94 286 // space be allocated before initiating a new collection cycle.
ysr@94 287 //
jwilhelm@4141 288 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
jwilhelm@4141 289 assert(io <= 100 && tr <= 100, "Check the arguments");
ysr@94 290 if (io >= 0) {
ysr@94 291 _initiating_occupancy = (double)io / 100.0;
ysr@94 292 } else {
ysr@94 293 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
ysr@94 294 (double)(tr * MinHeapFreeRatio) / 100.0)
ysr@94 295 / 100.0;
ysr@94 296 }
ysr@94 297 }
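// Worked example (not from the original source), assuming the defaults
// MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 and no explicit
// CMSInitiatingOccupancyFraction (i.e. io < 0):
//
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                         = (60 + 32) / 100.0
//                         = 0.92
//
// i.e. a new CMS cycle is initiated once the old generation reaches
// roughly 92% occupancy.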
ysr@94 298
duke@0 299 void ConcurrentMarkSweepGeneration::ref_processor_init() {
duke@0 300 assert(collector() != NULL, "no collector");
duke@0 301 collector()->ref_processor_init();
duke@0 302 }
duke@0 303
duke@0 304 void CMSCollector::ref_processor_init() {
duke@0 305 if (_ref_processor == NULL) {
duke@0 306 // Allocate and initialize a reference processor
ysr@2216 307 _ref_processor =
ysr@2216 308 new ReferenceProcessor(_span, // span
ysr@2216 309 (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
ysr@2216 310 (int) ParallelGCThreads, // mt processing degree
ysr@2216 311 _cmsGen->refs_discovery_is_mt(), // mt discovery
ysr@2216 312 (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
ysr@2216 313 _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
brutisso@6284 314 &_is_alive_closure); // closure for liveness info
duke@0 315 // Initialize the _ref_processor field of CMSGen
duke@0 316 _cmsGen->set_ref_processor(_ref_processor);
duke@0 317
duke@0 318 }
duke@0 319 }
duke@0 320
duke@0 321 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
duke@0 322 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 323 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
duke@0 324 "Wrong type of heap");
duke@0 325 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
duke@0 326 gch->gen_policy()->size_policy();
duke@0 327 assert(sp->is_gc_cms_adaptive_size_policy(),
duke@0 328 "Wrong type of size policy");
duke@0 329 return sp;
duke@0 330 }
duke@0 331
duke@0 332 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
duke@0 333 CMSGCAdaptivePolicyCounters* results =
duke@0 334 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
duke@0 335 assert(
duke@0 336 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
duke@0 337 "Wrong gc policy counter kind");
duke@0 338 return results;
duke@0 339 }
duke@0 340
duke@0 341
duke@0 342 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
duke@0 343
duke@0 344 const char* gen_name = "old";
duke@0 345
duke@0 346 // Generation Counters - generation 1, 1 subspace
duke@0 347 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
duke@0 348
duke@0 349 _space_counters = new GSpaceCounters(gen_name, 0,
duke@0 350 _virtual_space.reserved_size(),
duke@0 351 this, _gen_counters);
duke@0 352 }
duke@0 353
duke@0 354 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
duke@0 355 _cms_gen(cms_gen)
duke@0 356 {
duke@0 357 assert(alpha <= 100, "bad value");
duke@0 358 _saved_alpha = alpha;
duke@0 359
duke@0 360 // Initialize the alphas to the bootstrap value of 100.
duke@0 361 _gc0_alpha = _cms_alpha = 100;
duke@0 362
duke@0 363 _cms_begin_time.update();
duke@0 364 _cms_end_time.update();
duke@0 365
duke@0 366 _gc0_duration = 0.0;
duke@0 367 _gc0_period = 0.0;
duke@0 368 _gc0_promoted = 0;
duke@0 369
duke@0 370 _cms_duration = 0.0;
duke@0 371 _cms_period = 0.0;
duke@0 372 _cms_allocated = 0;
duke@0 373
duke@0 374 _cms_used_at_gc0_begin = 0;
duke@0 375 _cms_used_at_gc0_end = 0;
duke@0 376 _allow_duty_cycle_reduction = false;
duke@0 377 _valid_bits = 0;
duke@0 378 _icms_duty_cycle = CMSIncrementalDutyCycle;
duke@0 379 }
duke@0 380
ysr@1145 381 double CMSStats::cms_free_adjustment_factor(size_t free) const {
ysr@1145 382 // TBD: CR 6909490
ysr@1145 383 return 1.0;
ysr@1145 384 }
ysr@1145 385
ysr@1145 386 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
ysr@1145 387 }
ysr@1145 388
duke@0 389 // If promotion failure handling is on use
duke@0 390 // the padded average size of the promotion for each
duke@0 391 // young generation collection.
duke@0 392 double CMSStats::time_until_cms_gen_full() const {
duke@0 393 size_t cms_free = _cms_gen->cmsSpace()->free();
duke@0 394 GenCollectedHeap* gch = GenCollectedHeap::heap();
ysr@1808 395 size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
ysr@1808 396 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
duke@0 397 if (cms_free > expected_promotion) {
duke@0 398 // Start a cms collection if there isn't enough space to promote
duke@0 399 // for the next minor collection. Use the padded average as
duke@0 400 // a safety factor.
duke@0 401 cms_free -= expected_promotion;
duke@0 402
duke@0 403 // Adjust by the safety factor.
duke@0 404 double cms_free_dbl = (double)cms_free;
ysr@1145 405 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
ysr@1145 406 // Apply a further correction factor which tries to adjust
ysr@1145 407 // for recent occurrence of concurrent mode failures.
ysr@1145 408 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
ysr@1145 409 cms_free_dbl = cms_free_dbl * cms_adjustment;
duke@0 410
duke@0 411 if (PrintGCDetails && Verbose) {
duke@0 412 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
duke@0 413 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
duke@0 414 cms_free, expected_promotion);
duke@0 415 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
duke@0 416 cms_free_dbl, cms_consumption_rate() + 1.0);
duke@0 417 }
duke@0 418 // Add 1 in case the consumption rate goes to zero.
duke@0 419 return cms_free_dbl / (cms_consumption_rate() + 1.0);
duke@0 420 }
duke@0 421 return 0.0;
duke@0 422 }
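// Illustrative numbers, not from the original source. Suppose cms_free is
// 200M, the padded average promotion is 40M, and CMSIncrementalSafetyFactor
// is taken to be 10. Then
//
//   cms_free       = 200M - 40M       = 160M
//   cms_adjustment = (100 - 10) / 100 = 0.9   (times a correction factor of
//                                              1.0 today, see
//                                              cms_free_adjustment_factor)
//   cms_free_dbl   = 160M * 0.9       = 144M
//
// and the estimated time until the CMS generation fills is
// 144M / (cms_consumption_rate() + 1.0).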
duke@0 423
duke@0 424 // Compare the duration of the cms collection to the
duke@0 425 // time remaining before the cms generation is empty.
duke@0 426 // Note that the time from the start of the cms collection
duke@0 427 // to the start of the cms sweep (less than the total
duke@0 428 // duration of the cms collection) can be used. This
duke@0 429 // has been tried and some applications experienced
duke@0 430 // promotion failures early in execution. This was
duke@0 431 // possibly because the averages were not accurate
duke@0 432 // enough at the beginning.
duke@0 433 double CMSStats::time_until_cms_start() const {
duke@0 434 // We add "gc0_period" to the "work" calculation
duke@0 435 // below because this query is done (mostly) at the
duke@0 436 // end of a scavenge, so we need to conservatively
duke@0 437 // account for that much possible delay
duke@0 438 // in the query so as to avoid concurrent mode failures
duke@0 439 // due to starting the collection just a wee bit too
duke@0 440 // late.
duke@0 441 double work = cms_duration() + gc0_period();
duke@0 442 double deadline = time_until_cms_gen_full();
ysr@1145 443 // If a concurrent mode failure occurred recently, we want to be
ysr@1145 444 // more conservative and halve our expected time_until_cms_gen_full()
duke@0 445 if (work > deadline) {
duke@0 446 if (Verbose && PrintGCDetails) {
duke@0 447 gclog_or_tty->print(
duke@0 448 " CMSCollector: collect because of anticipated promotion "
duke@0 449 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
duke@0 450 gc0_period(), time_until_cms_gen_full());
duke@0 451 }
duke@0 452 return 0.0;
duke@0 453 }
duke@0 454 return work - deadline;
duke@0 455 }
duke@0 456
duke@0 457 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
duke@0 458 // amount of change to prevent wild oscillation.
duke@0 459 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
duke@0 460 unsigned int new_duty_cycle) {
duke@0 461 assert(old_duty_cycle <= 100, "bad input value");
duke@0 462 assert(new_duty_cycle <= 100, "bad input value");
duke@0 463
duke@0 464 // Note: use subtraction with caution since it may underflow (values are
duke@0 465 // unsigned). Addition is safe since we're in the range 0-100.
duke@0 466 unsigned int damped_duty_cycle = new_duty_cycle;
duke@0 467 if (new_duty_cycle < old_duty_cycle) {
duke@0 468 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
duke@0 469 if (new_duty_cycle + largest_delta < old_duty_cycle) {
duke@0 470 damped_duty_cycle = old_duty_cycle - largest_delta;
duke@0 471 }
duke@0 472 } else if (new_duty_cycle > old_duty_cycle) {
duke@0 473 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
duke@0 474 if (new_duty_cycle > old_duty_cycle + largest_delta) {
duke@0 475 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
duke@0 476 }
duke@0 477 }
duke@0 478 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
duke@0 479
duke@0 480 if (CMSTraceIncrementalPacing) {
duke@0 481 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
duke@0 482 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
duke@0 483 }
duke@0 484 return damped_duty_cycle;
duke@0 485 }
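// Worked examples, not from the original source.
// Decreasing: old_duty_cycle = 60, new_duty_cycle = 20. The largest
// permitted downward step is MAX2(60/4, 5U) = 15, and 20 + 15 < 60, so the
// damped result is 60 - 15 = 45 rather than dropping straight to 20.
// Increasing: old_duty_cycle = 20, new_duty_cycle = 80. The largest
// permitted upward step is MAX2(20/4, 15U) = 15, and 80 > 20 + 15, so the
// damped result is MIN2(20 + 15, 100U) = 35.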
duke@0 486
duke@0 487 unsigned int CMSStats::icms_update_duty_cycle_impl() {
duke@0 488 assert(CMSIncrementalPacing && valid(),
duke@0 489 "should be handled in icms_update_duty_cycle()");
duke@0 490
duke@0 491 double cms_time_so_far = cms_timer().seconds();
duke@0 492 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
duke@0 493 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
duke@0 494
duke@0 495 // Avoid division by 0.
duke@0 496 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
duke@0 497 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
duke@0 498
duke@0 499 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
duke@0 500 if (new_duty_cycle > _icms_duty_cycle) {
duke@0 501 // Avoid very small duty cycles (1 or 2); 0 is allowed.
duke@0 502 if (new_duty_cycle > 2) {
duke@0 503 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
duke@0 504 new_duty_cycle);
duke@0 505 }
duke@0 506 } else if (_allow_duty_cycle_reduction) {
duke@0 507 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
duke@0 508 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
duke@0 509 // Respect the minimum duty cycle.
duke@0 510 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
duke@0 511 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
duke@0 512 }
duke@0 513
duke@0 514 if (PrintGCDetails || CMSTraceIncrementalPacing) {
duke@0 515 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
duke@0 516 }
duke@0 517
duke@0 518 _allow_duty_cycle_reduction = false;
duke@0 519 return _icms_duty_cycle;
duke@0 520 }
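// Illustrative numbers, not from the original source. If the scaled
// remaining duration of the current cycle is 2.0s and
// time_until_cms_gen_full() is 8.0s, the raw duty cycle is
// 100.0 * 2.0 / 8.0 = 25%. Whether 25% is actually adopted still goes
// through icms_damped_duty_cycle() above, and a reduction additionally
// respects CMSIncrementalDutyCycleMin.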
duke@0 521
duke@0 522 #ifndef PRODUCT
duke@0 523 void CMSStats::print_on(outputStream *st) const {
duke@0 524 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
duke@0 525 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
duke@0 526 gc0_duration(), gc0_period(), gc0_promoted());
duke@0 527 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
duke@0 528 cms_duration(), cms_duration_per_mb(),
duke@0 529 cms_period(), cms_allocated());
duke@0 530 st->print(",cms_since_beg=%g,cms_since_end=%g",
duke@0 531 cms_time_since_begin(), cms_time_since_end());
duke@0 532 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
duke@0 533 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
duke@0 534 if (CMSIncrementalMode) {
duke@0 535 st->print(",dc=%d", icms_duty_cycle());
duke@0 536 }
duke@0 537
duke@0 538 if (valid()) {
duke@0 539 st->print(",promo_rate=%g,cms_alloc_rate=%g",
duke@0 540 promotion_rate(), cms_allocation_rate());
duke@0 541 st->print(",cms_consumption_rate=%g,time_until_full=%g",
duke@0 542 cms_consumption_rate(), time_until_cms_gen_full());
duke@0 543 }
duke@0 544 st->print(" ");
duke@0 545 }
duke@0 546 #endif // #ifndef PRODUCT
duke@0 547
duke@0 548 CMSCollector::CollectorState CMSCollector::_collectorState =
duke@0 549 CMSCollector::Idling;
duke@0 550 bool CMSCollector::_foregroundGCIsActive = false;
duke@0 551 bool CMSCollector::_foregroundGCShouldWait = false;
duke@0 552
duke@0 553 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
duke@0 554 CardTableRS* ct,
duke@0 555 ConcurrentMarkSweepPolicy* cp):
duke@0 556 _cmsGen(cmsGen),
duke@0 557 _ct(ct),
duke@0 558 _ref_processor(NULL), // will be set later
duke@0 559 _conc_workers(NULL), // may be set later
duke@0 560 _abort_preclean(false),
duke@0 561 _start_sampling(false),
duke@0 562 _between_prologue_and_epilogue(false),
duke@0 563 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
duke@0 564 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
duke@0 565 -1 /* lock-free */, "No_lock" /* dummy */),
duke@0 566 _modUnionClosure(&_modUnionTable),
duke@0 567 _modUnionClosurePar(&_modUnionTable),
coleenp@3602 568 // Adjust my span to cover old (cms) gen
coleenp@3602 569 _span(cmsGen->reserved()),
ysr@143 570 // Construct the is_alive_closure with _span & markBitMap
ysr@143 571 _is_alive_closure(_span, &_markBitMap),
duke@0 572 _restart_addr(NULL),
duke@0 573 _overflow_list(NULL),
duke@0 574 _stats(cmsGen),
jmasa@5024 575 _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
duke@0 576 _eden_chunk_array(NULL), // may be set in ctor body
duke@0 577 _eden_chunk_capacity(0), // -- ditto --
duke@0 578 _eden_chunk_index(0), // -- ditto --
duke@0 579 _survivor_plab_array(NULL), // -- ditto --
duke@0 580 _survivor_chunk_array(NULL), // -- ditto --
duke@0 581 _survivor_chunk_capacity(0), // -- ditto --
duke@0 582 _survivor_chunk_index(0), // -- ditto --
duke@0 583 _ser_pmc_preclean_ovflw(0),
ysr@452 584 _ser_kac_preclean_ovflw(0),
duke@0 585 _ser_pmc_remark_ovflw(0),
duke@0 586 _par_pmc_remark_ovflw(0),
duke@0 587 _ser_kac_ovflw(0),
duke@0 588 _par_kac_ovflw(0),
duke@0 589 #ifndef PRODUCT
duke@0 590 _num_par_pushes(0),
duke@0 591 #endif
duke@0 592 _collection_count_start(0),
duke@0 593 _verifying(false),
duke@0 594 _icms_start_limit(NULL),
duke@0 595 _icms_stop_limit(NULL),
duke@0 596 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
duke@0 597 _completed_initialization(false),
duke@0 598 _collector_policy(cp),
jmasa@5653 599 _should_unload_classes(CMSClassUnloadingEnabled),
ysr@94 600 _concurrent_cycles_since_last_unload(0),
jmasa@5653 601 _roots_scanning_options(SharedHeap::SO_None),
ysr@1145 602 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
sla@4802 603 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
sla@4802 604 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
sla@4802 605 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
sla@4802 606 _cms_start_registered(false)
duke@0 607 {
duke@0 608 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
duke@0 609 ExplicitGCInvokesConcurrent = true;
duke@0 610 }
duke@0 611 // Now expand the span and allocate the collection support structures
duke@0 612 // (MUT, marking bit map etc.) to cover both generations subject to
duke@0 613 // collection.
duke@0 614
duke@0 615 // For use by dirty card to oop closures.
duke@0 616 _cmsGen->cmsSpace()->set_collector(this);
duke@0 617
duke@0 618 // Allocate MUT and marking bit map
duke@0 619 {
duke@0 620 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
duke@0 621 if (!_markBitMap.allocate(_span)) {
duke@0 622 warning("Failed to allocate CMS Bit Map");
duke@0 623 return;
duke@0 624 }
duke@0 625 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
duke@0 626 }
duke@0 627 {
duke@0 628 _modUnionTable.allocate(_span);
duke@0 629 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
duke@0 630 }
duke@0 631
jmasa@1284 632 if (!_markStack.allocate(MarkStackSize)) {
duke@0 633 warning("Failed to allocate CMS Marking Stack");
duke@0 634 return;
duke@0 635 }
duke@0 636
duke@0 637 // Support for multi-threaded concurrent phases
ysr@2216 638 if (CMSConcurrentMTEnabled) {
jmasa@1284 639 if (FLAG_IS_DEFAULT(ConcGCThreads)) {
duke@0 640 // just for now
jmasa@1284 641 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
jmasa@1284 642 }
jmasa@1284 643 if (ConcGCThreads > 1) {
duke@0 644 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
jmasa@1284 645 ConcGCThreads, true);
duke@0 646 if (_conc_workers == NULL) {
duke@0 647 warning("GC/CMS: _conc_workers allocation failure: "
duke@0 648 "forcing -CMSConcurrentMTEnabled");
duke@0 649 CMSConcurrentMTEnabled = false;
jmasa@1753 650 } else {
jmasa@1753 651 _conc_workers->initialize_workers();
duke@0 652 }
duke@0 653 } else {
duke@0 654 CMSConcurrentMTEnabled = false;
duke@0 655 }
duke@0 656 }
duke@0 657 if (!CMSConcurrentMTEnabled) {
jmasa@1284 658 ConcGCThreads = 0;
duke@0 659 } else {
duke@0 660 // Turn off CMSCleanOnEnter optimization temporarily for
duke@0 661 // the MT case where it's not fixed yet; see 6178663.
duke@0 662 CMSCleanOnEnter = false;
duke@0 663 }
jmasa@1284 664 assert((_conc_workers != NULL) == (ConcGCThreads > 1),
duke@0 665 "Inconsistency");
duke@0 666
duke@0 667 // Parallel task queues; these are shared for the
duke@0 668 // concurrent and stop-world phases of CMS, but
duke@0 669 // are not shared with parallel scavenge (ParNew).
duke@0 670 {
duke@0 671 uint i;
jmasa@1284 672 uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
duke@0 673
duke@0 674 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
duke@0 675 || ParallelRefProcEnabled)
duke@0 676 && num_queues > 0) {
duke@0 677 _task_queues = new OopTaskQueueSet(num_queues);
duke@0 678 if (_task_queues == NULL) {
duke@0 679 warning("task_queues allocation failure.");
duke@0 680 return;
duke@0 681 }
zgu@3465 682 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
duke@0 683 if (_hash_seed == NULL) {
duke@0 684 warning("_hash_seed array allocation failure");
duke@0 685 return;
duke@0 686 }
duke@0 687
jcoomes@1585 688 typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
duke@0 689 for (i = 0; i < num_queues; i++) {
jcoomes@1585 690 PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
jcoomes@1585 691 if (q == NULL) {
duke@0 692 warning("work_queue allocation failure.");
duke@0 693 return;
duke@0 694 }
jcoomes@1585 695 _task_queues->register_queue(i, q);
duke@0 696 }
duke@0 697 for (i = 0; i < num_queues; i++) {
duke@0 698 _task_queues->queue(i)->initialize();
duke@0 699 _hash_seed[i] = 17; // copied from ParNew
duke@0 700 }
duke@0 701 }
duke@0 702 }
duke@0 703
ysr@94 704 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
ysr@94 705
duke@0 706 // Clip CMSBootstrapOccupancy between 0 and 100.
tschatzl@4684 707 _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
duke@0 708
duke@0 709 _full_gcs_since_conc_gc = 0;
duke@0 710
duke@0 711 // Now tell CMS generations the identity of their collector
duke@0 712 ConcurrentMarkSweepGeneration::set_collector(this);
duke@0 713
duke@0 714 // Create & start a CMS thread for this CMS collector
duke@0 715 _cmsThread = ConcurrentMarkSweepThread::start(this);
duke@0 716 assert(cmsThread() != NULL, "CMS Thread should have been created");
duke@0 717 assert(cmsThread()->collector() == this,
duke@0 718 "CMS Thread should refer to this gen");
duke@0 719 assert(CGC_lock != NULL, "Where's the CGC_lock?");
duke@0 720
duke@0 721 // Support for parallelizing young gen rescan
duke@0 722 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 723 _young_gen = gch->prev_gen(_cmsGen);
duke@0 724 if (gch->supports_inline_contig_alloc()) {
duke@0 725 _top_addr = gch->top_addr();
duke@0 726 _end_addr = gch->end_addr();
duke@0 727 assert(_young_gen != NULL, "no _young_gen");
duke@0 728 _eden_chunk_index = 0;
duke@0 729 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
zgu@3465 730 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
duke@0 731 if (_eden_chunk_array == NULL) {
duke@0 732 _eden_chunk_capacity = 0;
duke@0 733 warning("GC/CMS: _eden_chunk_array allocation failure");
duke@0 734 }
duke@0 735 }
duke@0 736 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
duke@0 737
duke@0 738 // Support for parallelizing survivor space rescan
jmasa@5026 739 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
jmasa@1289 740 const size_t max_plab_samples =
mgerdin@7032 741 ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
jmasa@1289 742
zgu@3465 743 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
zgu@3465 744 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
zgu@3465 745 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
duke@0 746 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
duke@0 747 || _cursor == NULL) {
duke@0 748 warning("Failed to allocate survivor plab/chunk array");
duke@0 749 if (_survivor_plab_array != NULL) {
zgu@3465 750 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
duke@0 751 _survivor_plab_array = NULL;
duke@0 752 }
duke@0 753 if (_survivor_chunk_array != NULL) {
zgu@3465 754 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
duke@0 755 _survivor_chunk_array = NULL;
duke@0 756 }
duke@0 757 if (_cursor != NULL) {
zgu@3465 758 FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
duke@0 759 _cursor = NULL;
duke@0 760 }
duke@0 761 } else {
duke@0 762 _survivor_chunk_capacity = 2*max_plab_samples;
duke@0 763 for (uint i = 0; i < ParallelGCThreads; i++) {
zgu@3465 764 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
duke@0 765 if (vec == NULL) {
duke@0 766 warning("Failed to allocate survivor plab array");
duke@0 767 for (int j = i; j > 0; j--) {
zgu@3465 768 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
duke@0 769 }
zgu@3465 770 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
zgu@3465 771 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
duke@0 772 _survivor_plab_array = NULL;
duke@0 773 _survivor_chunk_array = NULL;
duke@0 774 _survivor_chunk_capacity = 0;
duke@0 775 break;
duke@0 776 } else {
duke@0 777 ChunkArray* cur =
duke@0 778 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
duke@0 779 max_plab_samples);
duke@0 780 assert(cur->end() == 0, "Should be 0");
duke@0 781 assert(cur->array() == vec, "Should be vec");
duke@0 782 assert(cur->capacity() == max_plab_samples, "Error");
duke@0 783 }
duke@0 784 }
duke@0 785 }
duke@0 786 }
duke@0 787 assert( ( _survivor_plab_array != NULL
duke@0 788 && _survivor_chunk_array != NULL)
duke@0 789 || ( _survivor_chunk_capacity == 0
duke@0 790 && _survivor_chunk_index == 0),
duke@0 791 "Error");
duke@0 792
duke@0 793 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
duke@0 794 _gc_counters = new CollectorCounters("CMS", 1);
duke@0 795 _completed_initialization = true;
ysr@1145 796 _inter_sweep_timer.start(); // start of time
duke@0 797 }
duke@0 798
mgerdin@7032 799 size_t CMSCollector::plab_sample_minimum_size() {
mgerdin@7032 800 // The default value of MinTLABSize is 2k, but there is
mgerdin@7032 801 // no way to get the default value if the flag has been overridden.
mgerdin@7032 802 return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
mgerdin@7032 803 }
mgerdin@7032 804
duke@0 805 const char* ConcurrentMarkSweepGeneration::name() const {
duke@0 806 return "concurrent mark-sweep generation";
duke@0 807 }
duke@0 808 void ConcurrentMarkSweepGeneration::update_counters() {
duke@0 809 if (UsePerfData) {
duke@0 810 _space_counters->update_all();
duke@0 811 _gen_counters->update_all();
duke@0 812 }
duke@0 813 }
duke@0 814
duke@0 815 // this is an optimized version of update_counters(). it takes the
duke@0 816 // used value as a parameter rather than computing it.
duke@0 817 //
duke@0 818 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
duke@0 819 if (UsePerfData) {
duke@0 820 _space_counters->update_used(used);
duke@0 821 _space_counters->update_capacity();
duke@0 822 _gen_counters->update_all();
duke@0 823 }
duke@0 824 }
duke@0 825
duke@0 826 void ConcurrentMarkSweepGeneration::print() const {
duke@0 827 Generation::print();
duke@0 828 cmsSpace()->print();
duke@0 829 }
duke@0 830
duke@0 831 #ifndef PRODUCT
duke@0 832 void ConcurrentMarkSweepGeneration::print_statistics() {
duke@0 833 cmsSpace()->printFLCensus(0);
duke@0 834 }
duke@0 835 #endif
duke@0 836
duke@0 837 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
duke@0 838 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 839 if (PrintGCDetails) {
duke@0 840 if (Verbose) {
jmasa@3956 841 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
duke@0 842 level(), short_name(), s, used(), capacity());
duke@0 843 } else {
jmasa@3956 844 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
duke@0 845 level(), short_name(), s, used() / K, capacity() / K);
duke@0 846 }
duke@0 847 }
duke@0 848 if (Verbose) {
duke@0 849 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
duke@0 850 gch->used(), gch->capacity());
duke@0 851 } else {
duke@0 852 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
duke@0 853 gch->used() / K, gch->capacity() / K);
duke@0 854 }
duke@0 855 }
duke@0 856
duke@0 857 size_t
duke@0 858 ConcurrentMarkSweepGeneration::contiguous_available() const {
duke@0 859 // dld proposes an improvement in precision here. If the committed
duke@0 860 // part of the space ends in a free block we should add that to
duke@0 861 // uncommitted size in the calculation below. Will make this
duke@0 862 // change later, staying with the approximation below for the
duke@0 863 // time being. -- ysr.
duke@0 864 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
duke@0 865 }
duke@0 866
duke@0 867 size_t
duke@0 868 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
duke@0 869 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
duke@0 870 }
duke@0 871
duke@0 872 size_t ConcurrentMarkSweepGeneration::max_available() const {
duke@0 873 return free() + _virtual_space.uncommitted_size();
duke@0 874 }
duke@0 875
ysr@1808 876 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
ysr@1808 877 size_t available = max_available();
ysr@1808 878 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
ysr@1808 879 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
ysr@1901 880 if (Verbose && PrintGCDetails) {
ysr@1808 881 gclog_or_tty->print_cr(
ysr@1808 882 "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
ysr@1808 883 "max_promo("SIZE_FORMAT")",
ysr@1808 884 res? "":" not", available, res? ">=":"<",
ysr@1808 885 av_promo, max_promotion_in_bytes);
ysr@1808 886 }
ysr@1808 887 return res;
duke@0 888 }
duke@0 889
ysr@1145 890 // At a promotion failure dump information on block layout in heap
ysr@1145 891 // (cms old generation).
ysr@1145 892 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
ysr@1145 893 if (CMSDumpAtPromotionFailure) {
ysr@1145 894 cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
ysr@1145 895 }
ysr@1145 896 }
ysr@1145 897
duke@0 898 CompactibleSpace*
duke@0 899 ConcurrentMarkSweepGeneration::first_compaction_space() const {
duke@0 900 return _cmsSpace;
duke@0 901 }
duke@0 902
duke@0 903 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
duke@0 904 // Clear the promotion information. These pointers can be adjusted
duke@0 905 // along with all the other pointers into the heap but
duke@0 906 // compaction is expected to be a rare event with
duke@0 907 // a heap using cms so don't do it without seeing the need.
jmasa@1753 908 if (CollectedHeap::use_parallel_gc_threads()) {
duke@0 909 for (uint i = 0; i < ParallelGCThreads; i++) {
duke@0 910 _par_gc_thread_states[i]->promo.reset();
duke@0 911 }
duke@0 912 }
duke@0 913 }
duke@0 914
duke@0 915 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
duke@0 916 blk->do_space(_cmsSpace);
duke@0 917 }
duke@0 918
duke@0 919 void ConcurrentMarkSweepGeneration::compute_new_size() {
duke@0 920 assert_locked_or_safepoint(Heap_lock);
duke@0 921
duke@0 922 // If incremental collection failed, we just want to expand
duke@0 923 // to the limit.
duke@0 924 if (incremental_collection_failed()) {
duke@0 925 clear_incremental_collection_failed();
duke@0 926 grow_to_reserved();
duke@0 927 return;
duke@0 928 }
duke@0 929
jmasa@4640 930 // The heap has been compacted but not reset yet.
jmasa@4640 931 // Any metric such as free() or used() will be incorrect.
jmasa@4465 932
jmasa@4465 933 CardGeneration::compute_new_size();
jmasa@4465 934
jmasa@4465 935 // Reset again after a possible resizing
jmasa@4641 936 if (did_compact()) {
jmasa@4641 937 cmsSpace()->reset_after_compaction();
jmasa@4641 938 }
jmasa@4465 939 }
jmasa@4465 940
jmasa@4465 941 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
jmasa@4465 942 assert_locked_or_safepoint(Heap_lock);
jmasa@4465 943
jmasa@4465 944 // If incremental collection failed, we just want to expand
jmasa@4465 945 // to the limit.
jmasa@4465 946 if (incremental_collection_failed()) {
jmasa@4465 947 clear_incremental_collection_failed();
jmasa@4465 948 grow_to_reserved();
jmasa@4465 949 return;
jmasa@4465 950 }
jmasa@4465 951
duke@0 952 double free_percentage = ((double) free()) / capacity();
duke@0 953 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
duke@0 954 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
duke@0 955
duke@0 956 // compute expansion delta needed for reaching desired free percentage
duke@0 957 if (free_percentage < desired_free_percentage) {
duke@0 958 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
duke@0 959 assert(desired_capacity >= capacity(), "invalid expansion size");
jmasa@4465 960 size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
duke@0 961 if (PrintGCDetails && Verbose) {
duke@0 962 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
duke@0 963 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
duke@0 964 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
duke@0 965 gclog_or_tty->print_cr(" Desired free fraction %f",
duke@0 966 desired_free_percentage);
duke@0 967 gclog_or_tty->print_cr(" Maximum free fraction %f",
duke@0 968 maximum_free_percentage);
duke@0 969 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
duke@0 970 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
duke@0 971 desired_capacity/1000);
duke@0 972 int prev_level = level() - 1;
duke@0 973 if (prev_level >= 0) {
duke@0 974 size_t prev_size = 0;
duke@0 975 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 976 Generation* prev_gen = gch->_gens[prev_level];
duke@0 977 prev_size = prev_gen->capacity();
duke@0 978 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
duke@0 979 prev_size/1000);
duke@0 980 }
duke@0 981 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
duke@0 982 unsafe_max_alloc_nogc()/1000);
duke@0 983 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
duke@0 984 contiguous_available()/1000);
duke@0 985 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
duke@0 986 expand_bytes);
duke@0 987 }
duke@0 988 // safe if expansion fails
duke@0 989 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
duke@0 990 if (PrintGCDetails && Verbose) {
duke@0 991 gclog_or_tty->print_cr(" Expanded free fraction %f",
duke@0 992 ((double) free()) / capacity());
duke@0 993 }
jmasa@4465 994 } else {
jmasa@4465 995 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
jmasa@4465 996 assert(desired_capacity <= capacity(), "invalid expansion size");
jmasa@4465 997 size_t shrink_bytes = capacity() - desired_capacity;
jmasa@4465 998 // Don't shrink unless the delta is greater than the minimum shrink we want
jmasa@4465 999 if (shrink_bytes >= MinHeapDeltaBytes) {
jmasa@4465 1000 shrink_free_list_by(shrink_bytes);
jmasa@4465 1001 }
duke@0 1002 }
duke@0 1003 }
duke@0 1004
duke@0 1005 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
duke@0 1006 return cmsSpace()->freelistLock();
duke@0 1007 }
duke@0 1008
duke@0 1009 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
duke@0 1010 bool tlab) {
duke@0 1011 CMSSynchronousYieldRequest yr;
duke@0 1012 MutexLockerEx x(freelistLock(),
duke@0 1013 Mutex::_no_safepoint_check_flag);
duke@0 1014 return have_lock_and_allocate(size, tlab);
duke@0 1015 }
duke@0 1016
duke@0 1017 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
ysr@1636 1018 bool tlab /* ignored */) {
duke@0 1019 assert_lock_strong(freelistLock());
duke@0 1020 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
duke@0 1021 HeapWord* res = cmsSpace()->allocate(adjustedSize);
duke@0 1022 // Allocate the object live (grey) if the background collector has
duke@0 1023 // started marking. This is necessary because the marker may
duke@0 1024 // have passed this address and consequently this object will
duke@0 1025 // not otherwise be greyed and would be incorrectly swept up.
duke@0 1026 // Note that if this object contains references, the writing
duke@0 1027 // of those references will dirty the card containing this object
duke@0 1028 // allowing the object to be blackened (and its references scanned)
duke@0 1029 // either during a preclean phase or at the final checkpoint.
duke@0 1030 if (res != NULL) {
ysr@1636 1031 // We may block here with an uninitialized object with
ysr@1636 1032 // its mark-bit or P-bits not yet set. Such objects need
ysr@1636 1033 // to be safely navigable by block_start().
ysr@1636 1034 assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
jmasa@3297 1035 assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
duke@0 1036 collector()->direct_allocated(res, adjustedSize);
duke@0 1037 _direct_allocated_words += adjustedSize;
duke@0 1038 // allocation counters
duke@0 1039 NOT_PRODUCT(
duke@0 1040 _numObjectsAllocated++;
duke@0 1041 _numWordsAllocated += (int)adjustedSize;
duke@0 1042 )
duke@0 1043 }
duke@0 1044 return res;
duke@0 1045 }
duke@0 1046
duke@0 1047 // In the case of direct allocation by mutators in a generation that
duke@0 1048 // is being concurrently collected, the object must be allocated
duke@0 1049 // live (grey) if the background collector has started marking.
duke@0 1050 // This is necessary because the marker may
duke@0 1051 // have passed this address and consequently this object will
duke@0 1052 // not otherwise be greyed and would be incorrectly swept up.
duke@0 1053 // Note that if this object contains references, the writing
duke@0 1054 // of those references will dirty the card containing this object
duke@0 1055 // allowing the object to be blackened (and its references scanned)
duke@0 1056 // either during a preclean phase or at the final checkpoint.
duke@0 1057 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
duke@0 1058 assert(_markBitMap.covers(start, size), "Out of bounds");
duke@0 1059 if (_collectorState >= Marking) {
duke@0 1060 MutexLockerEx y(_markBitMap.lock(),
duke@0 1061 Mutex::_no_safepoint_check_flag);
duke@0 1062 // [see comments preceding SweepClosure::do_blk() below for details]
coleenp@3602 1063 //
coleenp@3602 1064 // Can the P-bits be deleted now? JJJ
coleenp@3602 1065 //
duke@0 1066 // 1. need to mark the object as live so it isn't collected
duke@0 1067 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
ysr@1636 1068 // 3. need to mark the end of the object so marking, precleaning or sweeping
ysr@1636 1069 // can skip over uninitialized or unparsable objects. (An allocated
ysr@1636 1070 // object is considered uninitialized for our purposes as long as
coleenp@3602 1071 // its klass word is NULL. All old gen objects are parsable
ysr@1636 1072 // as soon as they are initialized.)
duke@0 1073 _markBitMap.mark(start); // object is live
duke@0 1074 _markBitMap.mark(start + 1); // object is potentially uninitialized?
duke@0 1075 _markBitMap.mark(start + size - 1);
duke@0 1076 // mark end of object
duke@0 1077 }
duke@0 1078 // check that oop looks uninitialized
coleenp@187 1079 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
duke@0 1080 }
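// Illustrative sketch, not part of the original source. After
// direct_allocated() runs for a block of `size' words at `start' while a
// collection is in progress, the mark bit map holds:
//
//   bit(start)            live-object bit
//   bit(start + 1)        "P-bit": contents may still be uninitialized
//   bit(start + size - 1) end-of-object bit
//
// so block_start(), precleaning and sweeping can step over the block safely
// even before its klass word has been written.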
duke@0 1081
duke@0 1082 void CMSCollector::promoted(bool par, HeapWord* start,
duke@0 1083 bool is_obj_array, size_t obj_size) {
duke@0 1084 assert(_markBitMap.covers(start), "Out of bounds");
duke@0 1085 // See comment in direct_allocated() about when objects should
duke@0 1086 // be allocated live.
duke@0 1087 if (_collectorState >= Marking) {
duke@0 1088 // we already hold the marking bit map lock, taken in
duke@0 1089 // the prologue
duke@0 1090 if (par) {
duke@0 1091 _markBitMap.par_mark(start);
duke@0 1092 } else {
duke@0 1093 _markBitMap.mark(start);
duke@0 1094 }
duke@0 1095 // We don't need to mark the object as uninitialized (as
duke@0 1096 // in direct_allocated above) because this is being done with the
duke@0 1097 // world stopped and the object will be initialized by the
ysr@1636 1098 // time the marking, precleaning or sweeping get to look at it.
ysr@1636 1099 // But see the code for copying objects into the CMS generation,
ysr@1636 1100 // where we need to ensure that concurrent readers of the
ysr@1636 1101 // block offset table are able to safely navigate a block that
ysr@1636 1102 // is in flux from being free to being allocated (and in
ysr@1636 1103 // transition while being copied into) and subsequently
ysr@1636 1104 // becoming a bona-fide object when the copy/promotion is complete.
duke@0 1105 assert(SafepointSynchronize::is_at_safepoint(),
duke@0 1106 "expect promotion only at safepoints");
duke@0 1107
duke@0 1108 if (_collectorState < Sweeping) {
duke@0 1109 // Mark the appropriate cards in the modUnionTable, so that
duke@0 1110 // this object gets scanned before the sweep. If this is
duke@0 1111 // not done, CMS generation references in the object might
duke@0 1112 // not get marked.
duke@0 1113 // For the case of arrays, which are otherwise precisely
duke@0 1114 // marked, we need to dirty the entire array, not just its head.
duke@0 1115 if (is_obj_array) {
duke@0 1116 // The [par_]mark_range() method expects mr.end() below to
duke@0 1117 // be aligned to the granularity of a bit's representation
duke@0 1118 // in the heap. In the case of the MUT below, that's a
duke@0 1119 // card size.
duke@0 1120 MemRegion mr(start,
duke@0 1121 (HeapWord*)round_to((intptr_t)(start + obj_size),
duke@0 1122 CardTableModRefBS::card_size /* bytes */));
duke@0 1123 if (par) {
duke@0 1124 _modUnionTable.par_mark_range(mr);
duke@0 1125 } else {
duke@0 1126 _modUnionTable.mark_range(mr);
duke@0 1127 }
duke@0 1128 } else { // not an obj array; we can just mark the head
duke@0 1129 if (par) {
duke@0 1130 _modUnionTable.par_mark(start);
duke@0 1131 } else {
duke@0 1132 _modUnionTable.mark(start);
duke@0 1133 }
duke@0 1134 }
duke@0 1135 }
duke@0 1136 }
duke@0 1137 }
duke@0 1138
duke@0 1139 static inline size_t percent_of_space(Space* space, HeapWord* addr)
duke@0 1140 {
duke@0 1141 size_t delta = pointer_delta(addr, space->bottom());
duke@0 1142 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
duke@0 1143 }
duke@0 1144
duke@0 1145 void CMSCollector::icms_update_allocation_limits()
duke@0 1146 {
duke@0 1147 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
duke@0 1148 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
duke@0 1149
duke@0 1150 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
duke@0 1151 if (CMSTraceIncrementalPacing) {
duke@0 1152 stats().print();
duke@0 1153 }
duke@0 1154
duke@0 1155 assert(duty_cycle <= 100, "invalid duty cycle");
duke@0 1156 if (duty_cycle != 0) {
duke@0 1157 // The duty_cycle is a percentage between 0 and 100; convert to words and
duke@0 1158 // then compute the offset from the endpoints of the space.
duke@0 1159 size_t free_words = eden->free() / HeapWordSize;
duke@0 1160 double free_words_dbl = (double)free_words;
duke@0 1161 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
duke@0 1162 size_t offset_words = (free_words - duty_cycle_words) / 2;
duke@0 1163
duke@0 1164 _icms_start_limit = eden->top() + offset_words;
duke@0 1165 _icms_stop_limit = eden->end() - offset_words;
duke@0 1166
duke@0 1167 // The limits may be adjusted (shifted to the right) by
duke@0 1168 // CMSIncrementalOffset, to allow the application more mutator time after a
duke@0 1169 // young gen gc (when all mutators were stopped) and before CMS starts and
duke@0 1170 // takes away one or more cpus.
duke@0 1171 if (CMSIncrementalOffset != 0) {
duke@0 1172 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
duke@0 1173 size_t adjustment = (size_t)adjustment_dbl;
duke@0 1174 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
duke@0 1175 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
duke@0 1176 _icms_start_limit += adjustment;
duke@0 1177 _icms_stop_limit = tmp_stop;
duke@0 1178 }
duke@0 1179 }
duke@0 1180 }
duke@0 1181 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
duke@0 1182 _icms_start_limit = _icms_stop_limit = eden->end();
duke@0 1183 }
duke@0 1184
duke@0 1185 // Install the new start limit.
duke@0 1186 eden->set_soft_end(_icms_start_limit);
duke@0 1187
duke@0 1188 if (CMSTraceIncrementalMode) {
duke@0 1189 gclog_or_tty->print(" icms alloc limits: "
duke@0 1190 PTR_FORMAT "," PTR_FORMAT
duke@0 1191 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
drchase@6245 1192 p2i(_icms_start_limit), p2i(_icms_stop_limit),
duke@0 1193 percent_of_space(eden, _icms_start_limit),
duke@0 1194 percent_of_space(eden, _icms_stop_limit));
duke@0 1195 if (Verbose) {
duke@0 1196 gclog_or_tty->print("eden: ");
duke@0 1197 eden->print_on(gclog_or_tty);
duke@0 1198 }
duke@0 1199 }
duke@0 1200 }
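
// A minimal, self-contained sketch (not HotSpot code, guarded out of the
// build) of the duty-cycle arithmetic performed above: the allocation window
// during which iCMS runs is sized to duty_cycle percent of eden's free space,
// centered within that free space, and optionally shifted right by an offset
// percentage. All names below are illustrative only.
#if 0
#include <cassert>
#include <cstddef>

struct IcmsWindow {
  size_t start_offset;  // words past eden top where iCMS is kicked off
  size_t stop_offset;   // words past eden top where iCMS is stopped
};

// free_words: words free in eden; duty_cycle, offset_pct: percentages in [0, 100].
static IcmsWindow compute_icms_window(size_t free_words,
                                      unsigned duty_cycle,
                                      unsigned offset_pct) {
  assert(duty_cycle <= 100 && offset_pct <= 100);
  const size_t duty_cycle_words = (size_t)((double)free_words * duty_cycle / 100.0);
  const size_t offset_words     = (free_words - duty_cycle_words) / 2;
  size_t start = offset_words;               // analogue of eden->top() + offset_words
  size_t stop  = free_words - offset_words;  // analogue of eden->end() - offset_words
  // Shift the window right to give mutators more time after a young GC,
  // as long as the shifted stop limit stays inside eden.
  const size_t adjustment = (size_t)((double)free_words * offset_pct / 100.0);
  if (stop + adjustment > stop && stop + adjustment < free_words) {
    start += adjustment;
    stop  += adjustment;
  }
  if (duty_cycle == 0 || start == stop) {
    start = stop = free_words;               // degenerate window: never start iCMS
  }
  return IcmsWindow{start, stop};
}
#endif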
duke@0 1201
duke@0 1202 // Any changes here should try to maintain the invariant
duke@0 1203 // that if this method is called with _icms_start_limit
duke@0 1204 // and _icms_stop_limit both NULL, then it should return NULL
duke@0 1205 // and not notify the icms thread.
duke@0 1206 HeapWord*
duke@0 1207 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
duke@0 1208 size_t word_size)
duke@0 1209 {
duke@0 1210 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
duke@0 1211 // nop.
duke@0 1212 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
duke@0 1213 if (top <= _icms_start_limit) {
duke@0 1214 if (CMSTraceIncrementalMode) {
duke@0 1215 space->print_on(gclog_or_tty);
duke@0 1216 gclog_or_tty->stamp();
duke@0 1217 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
duke@0 1218 ", new limit=" PTR_FORMAT
duke@0 1219 " (" SIZE_FORMAT "%%)",
drchase@6245 1220 p2i(top), p2i(_icms_stop_limit),
duke@0 1221 percent_of_space(space, _icms_stop_limit));
duke@0 1222 }
duke@0 1223 ConcurrentMarkSweepThread::start_icms();
duke@0 1224 assert(top < _icms_stop_limit, "Tautology");
duke@0 1225 if (word_size < pointer_delta(_icms_stop_limit, top)) {
duke@0 1226 return _icms_stop_limit;
duke@0 1227 }
duke@0 1228
duke@0 1229 // The allocation will cross both the _start and _stop limits, so do the
duke@0 1230 // stop notification also and return end().
duke@0 1231 if (CMSTraceIncrementalMode) {
duke@0 1232 space->print_on(gclog_or_tty);
duke@0 1233 gclog_or_tty->stamp();
duke@0 1234 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
duke@0 1235 ", new limit=" PTR_FORMAT
duke@0 1236 " (" SIZE_FORMAT "%%)",
drchase@6245 1237 p2i(top), p2i(space->end()),
duke@0 1238 percent_of_space(space, space->end()));
duke@0 1239 }
duke@0 1240 ConcurrentMarkSweepThread::stop_icms();
duke@0 1241 return space->end();
duke@0 1242 }
duke@0 1243
duke@0 1244 if (top <= _icms_stop_limit) {
duke@0 1245 if (CMSTraceIncrementalMode) {
duke@0 1246 space->print_on(gclog_or_tty);
duke@0 1247 gclog_or_tty->stamp();
duke@0 1248 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
duke@0 1249 ", new limit=" PTR_FORMAT
duke@0 1250 " (" SIZE_FORMAT "%%)",
duke@0 1251 p2i(top), p2i(space->end()),
duke@0 1252 percent_of_space(space, space->end()));
duke@0 1253 }
duke@0 1254 ConcurrentMarkSweepThread::stop_icms();
duke@0 1255 return space->end();
duke@0 1256 }
duke@0 1257
duke@0 1258 if (CMSTraceIncrementalMode) {
duke@0 1259 space->print_on(gclog_or_tty);
duke@0 1260 gclog_or_tty->stamp();
duke@0 1261 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
duke@0 1262 ", new limit=" PTR_FORMAT,
duke@0 1263 top, NULL);
duke@0 1264 }
duke@0 1265 }
duke@0 1266
duke@0 1267 return NULL;
duke@0 1268 }
duke@0 1269
coleenp@113 1270 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
duke@0 1271 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@0 1272 // allocate, copy and if necessary update promoinfo --
duke@0 1273 // delegate to underlying space.
duke@0 1274 assert_lock_strong(freelistLock());
duke@0 1275
duke@0 1276 #ifndef PRODUCT
duke@0 1277 if (Universe::heap()->promotion_should_fail()) {
duke@0 1278 return NULL;
duke@0 1279 }
duke@0 1280 #endif // #ifndef PRODUCT
duke@0 1281
coleenp@113 1282 oop res = _cmsSpace->promote(obj, obj_size);
duke@0 1283 if (res == NULL) {
duke@0 1284 // expand and retry
duke@0 1285 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
duke@0 1286 expand(s*HeapWordSize, MinHeapDeltaBytes,
duke@0 1287 CMSExpansionCause::_satisfy_promotion);
duke@0 1288 // Since there's currently no next generation, we don't try to promote
duke@0 1289 // into a more senior generation.
duke@0 1290 assert(next_gen() == NULL, "assumption, based upon which no attempt "
duke@0 1291 "is made to pass on a possibly failing "
duke@0 1292 "promotion to next generation");
coleenp@113 1293 res = _cmsSpace->promote(obj, obj_size);
duke@0 1294 }
duke@0 1295 if (res != NULL) {
duke@0 1296 // See comment in allocate() about when objects should
duke@0 1297 // be allocated live.
duke@0 1298 assert(obj->is_oop(), "Will dereference klass pointer below");
duke@0 1299 collector()->promoted(false, // Not parallel
duke@0 1300 (HeapWord*)res, obj->is_objArray(), obj_size);
duke@0 1301 // promotion counters
duke@0 1302 NOT_PRODUCT(
duke@0 1303 _numObjectsPromoted++;
duke@0 1304 _numWordsPromoted +=
duke@0 1305 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
duke@0 1306 )
duke@0 1307 }
duke@0 1308 return res;
duke@0 1309 }
duke@0 1310
duke@0 1311
duke@0 1312 HeapWord*
duke@0 1313 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
duke@0 1314 HeapWord* top,
duke@0 1315 size_t word_sz)
duke@0 1316 {
duke@0 1317 return collector()->allocation_limit_reached(space, top, word_sz);
duke@0 1318 }
duke@0 1319
ysr@1636 1320 // IMPORTANT: Notes on object size recognition in CMS.
ysr@1636 1321 // ---------------------------------------------------
ysr@1636 1322 // A block of storage in the CMS generation is always in
ysr@1636 1323 // one of three states. A free block (FREE), an allocated
ysr@1636 1324 // object (OBJECT) whose size() method reports the correct size,
ysr@1636 1325 // and an intermediate state (TRANSIENT) in which its size cannot
ysr@1636 1326 // be accurately determined.
ysr@1636 1327 // STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
ysr@1636 1328 // -----------------------------------------------------
ysr@1636 1329 // FREE: klass_word & 1 == 1; mark_word holds block size
ysr@1636 1330 //
ysr@1697 1331 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
ysr@1636 1332 // obj->size() computes correct size
ysr@1636 1333 //
ysr@1636 1334 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
ysr@1636 1335 //
ysr@1636 1336 // STATE IDENTIFICATION: (64 bit+COOPS)
ysr@1636 1337 // ------------------------------------
ysr@1636 1338 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
ysr@1636 1339 //
ysr@1636 1340 // OBJECT: klass_word installed; klass_word != 0;
ysr@1636 1341 // obj->size() computes correct size
ysr@1636 1342 //
ysr@1636 1343 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
ysr@1636 1344 //
ysr@1636 1345 //
ysr@1636 1346 // STATE TRANSITION DIAGRAM
ysr@1636 1347 //
ysr@1636 1348 // mut / parnew mut / parnew
ysr@1636 1349 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
ysr@1636 1350 // ^ |
ysr@1636 1351 // |------------------------ DEAD <------------------------------------|
ysr@1636 1352 // sweep mut
ysr@1636 1353 //
ysr@1636 1354 // While a block is in TRANSIENT state its size cannot be determined
ysr@1636 1355 // so readers will either need to come back later or stall until
ysr@1636 1356 // the size can be determined. Note that for the case of direct
ysr@1636 1357 // allocation, P-bits, when available, may be used to determine the
ysr@1636 1358 // size of an object that may not yet have been initialized.
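// A minimal, self-contained sketch (not HotSpot code, guarded out of the
// build) of the 32-bit / non-COOPS state test described above: a set low bit
// in the klass word marks a FREE block (with the size kept in the mark word),
// a zero klass word marks a TRANSIENT block, and anything else is a parsable
// OBJECT. The header layout below is illustrative only.
#if 0
#include <cstddef>
#include <cstdint>

enum BlockState { FREE_BLOCK, TRANSIENT_BLOCK, OBJECT_BLOCK };

struct BlockHeader {     // illustrative stand-in for an object/free-chunk header
  uintptr_t mark_word;   // for FREE blocks, holds the block size in words
  uintptr_t klass_word;  // low bit set => FREE; zero => TRANSIENT; else OBJECT
};

static BlockState classify_block(const BlockHeader* h) {
  if ((h->klass_word & 1) != 0) return FREE_BLOCK;
  if (h->klass_word == 0)       return TRANSIENT_BLOCK;
  return OBJECT_BLOCK;          // obj->size() can be trusted from here on
}

static size_t free_block_size_in_words(const BlockHeader* h) {
  return (size_t)h->mark_word;  // only meaningful when classify_block(h) == FREE_BLOCK
}
#endif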
ysr@1636 1359
duke@0 1360 // Things to support parallel young-gen collection.
duke@0 1361 oop
duke@0 1362 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
duke@0 1363 oop old, markOop m,
duke@0 1364 size_t word_sz) {
duke@0 1365 #ifndef PRODUCT
duke@0 1366 if (Universe::heap()->promotion_should_fail()) {
duke@0 1367 return NULL;
duke@0 1368 }
duke@0 1369 #endif // #ifndef PRODUCT
duke@0 1370
duke@0 1371 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
duke@0 1372 PromotionInfo* promoInfo = &ps->promo;
duke@0 1373 // if we are tracking promotions, then first ensure space for
duke@0 1374 // promotion (including spooling space for saving header if necessary).
duke@0 1375 // then allocate and copy, then track promoted info if needed.
duke@0 1376 // When tracking (see PromotionInfo::track()), the mark word may
duke@0 1377 // be displaced and in this case restoration of the mark word
duke@0 1378 // occurs in the (oop_since_save_marks_)iterate phase.
duke@0 1379 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
duke@0 1380 // Out of space for allocating spooling buffers;
duke@0 1381 // try expanding and allocating spooling buffers.
duke@0 1382 if (!expand_and_ensure_spooling_space(promoInfo)) {
duke@0 1383 return NULL;
duke@0 1384 }
duke@0 1385 }
duke@0 1386 assert(promoInfo->has_spooling_space(), "Control point invariant");
ysr@1636 1387 const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
ysr@1636 1388 HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
duke@0 1389 if (obj_ptr == NULL) {
ysr@1636 1390 obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
duke@0 1391 if (obj_ptr == NULL) {
duke@0 1392 return NULL;
duke@0 1393 }
duke@0 1394 }
duke@0 1395 oop obj = oop(obj_ptr);
ysr@1636 1396 OrderAccess::storestore();
coleenp@187 1397 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
jmasa@3297 1398 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
ysr@1636 1399 // IMPORTANT: See note on object initialization for CMS above.
duke@0 1400 // Otherwise, copy the object. Here we must be careful to insert the
duke@0 1401 // klass pointer last, since this marks the block as an allocated object.
coleenp@187 1402 // Except with compressed oops it's the mark word.
duke@0 1403 HeapWord* old_ptr = (HeapWord*)old;
ysr@1636 1404 // Restore the mark word copied above.
ysr@1636 1405 obj->set_mark(m);
ysr@1636 1406 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
jmasa@3297 1407 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
ysr@1636 1408 OrderAccess::storestore();
ysr@1636 1409
ehelin@5259 1410 if (UseCompressedClassPointers) {
ysr@1636 1411 // Copy gap missed by (aligned) header size calculation below
ysr@1636 1412 obj->set_klass_gap(old->klass_gap());
ysr@1636 1413 }
duke@0 1414 if (word_sz > (size_t)oopDesc::header_size()) {
duke@0 1415 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
duke@0 1416 obj_ptr + oopDesc::header_size(),
duke@0 1417 word_sz - oopDesc::header_size());
duke@0 1418 }
coleenp@187 1419
duke@0 1420 // Now we can track the promoted object, if necessary. We take care
ysr@1441 1421 // to delay the transition from uninitialized to full object
duke@0 1422 // (i.e., insertion of klass pointer) until after, so that it
duke@0 1423 // atomically becomes a promoted object.
duke@0 1424 if (promoInfo->tracking()) {
duke@0 1425 promoInfo->track((PromotedObject*)obj, old->klass());
duke@0 1426 }
ysr@1636 1427 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
jmasa@3297 1428 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
ysr@1636 1429 assert(old->is_oop(), "Will use and dereference old klass ptr below");
coleenp@187 1430
coleenp@187 1431 // Finally, install the klass pointer (this should be volatile).
ysr@1636 1432 OrderAccess::storestore();
duke@0 1433 obj->set_klass(old->klass());
ysr@1636 1434 // We should now be able to calculate the right size for this object
ysr@1636 1435 assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
ysr@1636 1436
duke@0 1437 collector()->promoted(true, // parallel
duke@0 1438 obj_ptr, old->is_objArray(), word_sz);
duke@0 1439
duke@0 1440 NOT_PRODUCT(
ysr@1636 1441 Atomic::inc_ptr(&_numObjectsPromoted);
ysr@1636 1442 Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
duke@0 1443 )
duke@0 1444
duke@0 1445 return obj;
duke@0 1446 }
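
// A minimal, self-contained sketch (not HotSpot code, guarded out of the
// build) of the publication idiom used in par_promote() above: the object
// body is copied first and the "klass" word is installed last with release
// ordering (the analogue of the storestore barriers), so a concurrent reader
// that sees a non-null klass also sees a fully copied body. Names and the
// fixed payload size are illustrative only.
#if 0
#include <atomic>
#include <cstddef>
#include <cstring>

struct Block {
  std::atomic<const void*> klass{nullptr};  // null => TRANSIENT, not yet parsable
  unsigned char payload[64];                // stand-in for the object body
};

// Promoting thread: copy the body, then publish by installing klass last.
static void publish(Block* dst, const void* klass, const void* body, size_t len) {
  std::memcpy(dst->payload, body, len);
  dst->klass.store(klass, std::memory_order_release);
}

// Concurrent reader (e.g., a sweeper sizing blocks): only parse the body
// once the klass word is visible.
static bool is_parsable(const Block* b) {
  return b->klass.load(std::memory_order_acquire) != nullptr;
}
#endif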
duke@0 1447
duke@0 1448 void
duke@0 1449 ConcurrentMarkSweepGeneration::
duke@0 1450 par_promote_alloc_undo(int thread_num,
duke@0 1451 HeapWord* obj, size_t word_sz) {
duke@0 1452 // CMS does not support promotion undo.
duke@0 1453 ShouldNotReachHere();
duke@0 1454 }
duke@0 1455
duke@0 1456 void
duke@0 1457 ConcurrentMarkSweepGeneration::
duke@0 1458 par_promote_alloc_done(int thread_num) {
duke@0 1459 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
ysr@1145 1460 ps->lab.retire(thread_num);
duke@0 1461 }
duke@0 1462
duke@0 1463 void
duke@0 1464 ConcurrentMarkSweepGeneration::
duke@0 1465 par_oop_since_save_marks_iterate_done(int thread_num) {
duke@0 1466 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
duke@0 1467 ParScanWithoutBarrierClosure* dummy_cl = NULL;
duke@0 1468 ps->promo.promoted_oops_iterate_nv(dummy_cl);
duke@0 1469 }
duke@0 1470
duke@0 1471 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
duke@0 1472 size_t size,
duke@0 1473 bool tlab)
duke@0 1474 {
duke@0 1475 // We allow a STW collection only if a full
duke@0 1476 // collection was requested.
duke@0 1477 return full || should_allocate(size, tlab); // FIX ME !!!
duke@0 1478 // This and promotion failure handling are connected at the
duke@0 1479 // hip and should be fixed by untying them.
duke@0 1480 }
duke@0 1481
duke@0 1482 bool CMSCollector::shouldConcurrentCollect() {
duke@0 1483 if (_full_gc_requested) {
duke@0 1484 if (Verbose && PrintGCDetails) {
duke@0 1485 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
ysr@1440 1486 "gc request (or gc_locker)");
duke@0 1487 }
duke@0 1488 return true;
duke@0 1489 }
duke@0 1490
duke@0 1491 // For debugging purposes, change the type of collection.
duke@0 1492 // If the rotation is not on the concurrent collection
duke@0 1493 // type, don't start a concurrent collection.
duke@0 1494 NOT_PRODUCT(
duke@0 1495 if (RotateCMSCollectionTypes &&
duke@0 1496 (_cmsGen->debug_collection_type() !=
duke@0 1497 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
duke@0 1498 assert(_cmsGen->debug_collection_type() !=
duke@0 1499 ConcurrentMarkSweepGeneration::Unknown_collection_type,
duke@0 1500 "Bad cms collection type");
duke@0 1501 return false;
duke@0 1502 }
duke@0 1503 )
duke@0 1504
duke@0 1505 FreelistLocker x(this);
duke@0 1506 // ------------------------------------------------------------------
duke@0 1507 // Print out lots of information which affects the initiation of
duke@0 1508 // a collection.
duke@0 1509 if (PrintCMSInitiationStatistics && stats().valid()) {
duke@0 1510 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
duke@0 1511 gclog_or_tty->stamp();
drchase@6245 1512 gclog_or_tty->cr();
duke@0 1513 stats().print_on(gclog_or_tty);
duke@0 1514 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
duke@0 1515 stats().time_until_cms_gen_full());
duke@0 1516 gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
duke@0 1517 gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
duke@0 1518 _cmsGen->contiguous_available());
duke@0 1519 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
duke@0 1520 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
duke@0 1521 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
ysr@94 1522 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
brutisso@6528 1523 gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
brutisso@6528 1524 gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
coleenp@3602 1525 gclog_or_tty->print_cr("metadata initialized %d",
coleenp@3602 1526 MetaspaceGC::should_concurrent_collect());
duke@0 1527 }
duke@0 1528 // ------------------------------------------------------------------
duke@0 1529
duke@0 1530 // If the estimated time to complete a cms collection (cms_duration())
duke@0 1531 // is less than the estimated time remaining until the cms generation
duke@0 1532 // is full, start a collection.
duke@0 1533 if (!UseCMSInitiatingOccupancyOnly) {
duke@0 1534 if (stats().valid()) {
duke@0 1535 if (stats().time_until_cms_start() == 0.0) {
duke@0 1536 return true;
duke@0 1537 }
duke@0 1538 } else {
duke@0 1539 // We want to conservatively collect somewhat early in order
duke@0 1540 // to try and "bootstrap" our CMS/promotion statistics;
duke@0 1541 // this branch will not fire after the first successful CMS
duke@0 1542 // collection because the stats should then be valid.
duke@0 1543 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
duke@0 1544 if (Verbose && PrintGCDetails) {
duke@0 1545 gclog_or_tty->print_cr(
duke@0 1546 " CMSCollector: collect for bootstrapping statistics:"
duke@0 1547 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
duke@0 1548 _bootstrap_occupancy);
duke@0 1549 }
duke@0 1550 return true;
duke@0 1551 }
duke@0 1552 }
duke@0 1553 }
duke@0 1554
coleenp@3602 1555 // Otherwise, we start a collection cycle if
duke@0 1556 // the old gen wants a collection cycle started. It may use
duke@0 1557 // an appropriate criterion for making this decision.
duke@0 1558 // XXX We need to make sure that the gen expansion
ysr@94 1559 // criterion dovetails well with this. XXX NEED TO FIX THIS
ysr@94 1560 if (_cmsGen->should_concurrent_collect()) {
duke@0 1561 if (Verbose && PrintGCDetails) {
duke@0 1562 gclog_or_tty->print_cr("CMS old gen initiated");
duke@0 1563 }
duke@0 1564 return true;
duke@0 1565 }
duke@0 1566
ysr@94 1567 // We start a collection if we believe an incremental collection may fail;
ysr@94 1568 // this is not likely to be productive in practice because it's probably too
ysr@94 1569 // late anyway.
duke@0 1570 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 1571 assert(gch->collector_policy()->is_two_generation_policy(),
duke@0 1572 "You may want to check the correctness of the following");
ysr@1901 1573 if (gch->incremental_collection_will_fail(true /* consult_young */)) {
ysr@1901 1574 if (Verbose && PrintGCDetails) {
ysr@94 1575 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
ysr@94 1576 }
ysr@94 1577 return true;
ysr@94 1578 }
ysr@94 1579
coleenp@3602 1580 if (MetaspaceGC::should_concurrent_collect()) {
stefank@6465 1581 if (Verbose && PrintGCDetails) {
brutisso@6284 1582 gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
stefank@6465 1583 }
stefank@6465 1584 return true;
stefank@6465 1585 }
brutisso@6284 1586
brutisso@6528 1587 // CMSTriggerInterval starts a CMS cycle if enough time has passed.
brutisso@6528 1588 if (CMSTriggerInterval >= 0) {
brutisso@6528 1589 if (CMSTriggerInterval == 0) {
brutisso@6528 1590 // Trigger always
brutisso@6528 1591 return true;
brutisso@6528 1592 }
brutisso@6528 1593
brutisso@6528 1594 // Check the CMS time since begin (we do not check the stats validity
brutisso@6528 1595 // as we want to be able to trigger the first CMS cycle as well)
brutisso@6528 1596 if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
ysr@94 1597 if (Verbose && PrintGCDetails) {
brutisso@6528 1598 if (stats().valid()) {
brutisso@6528 1599 gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
brutisso@6528 1600 stats().cms_time_since_begin());
brutisso@6528 1601 } else {
brutisso@6528 1602 gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
brutisso@6528 1603 }
ysr@94 1604 }
ysr@94 1605 return true;
ysr@94 1606 }
brutisso@6528 1607 }
coleenp@3602 1608
ysr@94 1609 return false;
ysr@94 1610 }
ysr@94 1611
jmasa@4641 1612 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
jmasa@4641 1613
ysr@94 1614 // Clear _expansion_cause fields of constituent generations
ysr@94 1615 void CMSCollector::clear_expansion_cause() {
ysr@94 1616 _cmsGen->clear_expansion_cause();
ysr@94 1617 }
ysr@94 1618
ysr@94 1619 // We should be conservative in starting a collection cycle. To
ysr@94 1620 // start too eagerly runs the risk of collecting too often in the
ysr@94 1621 // extreme. To collect too rarely falls back on full collections,
ysr@94 1622 // which works, even if not optimum in terms of concurrent work.
ysr@94 1623 // As a work around for too eagerly collecting, use the flag
ysr@94 1624 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
ysr@94 1625 // giving the user an easily understandable way of controlling the
ysr@94 1626 // collections.
ysr@94 1627 // We want to start a new collection cycle if any of the following
ysr@94 1628 // conditions hold:
ysr@94 1629 // . our current occupancy exceeds the configured initiating occupancy
ysr@94 1630 // for this generation, or
ysr@94 1631 // . we recently needed to expand this space and have not, since that
ysr@94 1632 // expansion, done a collection of this generation, or
ysr@94 1633 // . the underlying space believes that it may be a good idea to initiate
ysr@94 1634 // a concurrent collection (this may be based on criteria such as the
ysr@94 1635 // following: the space uses linear allocation and linear allocation is
ysr@94 1636 // going to fail, or there is believed to be excessive fragmentation in
ysr@94 1637 // the generation, etc... or ...
ysr@94 1638 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
coleenp@3602 1639 // the case of the old generation; see CR 6543076):
ysr@94 1640 // we may be approaching a point at which allocation requests may fail because
ysr@94 1641 // we will be out of sufficient free space given allocation rate estimates.]
ysr@94 1642 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
ysr@94 1643
ysr@94 1644 assert_lock_strong(freelistLock());
ysr@94 1645 if (occupancy() > initiating_occupancy()) {
ysr@94 1646 if (PrintGCDetails && Verbose) {
ysr@94 1647 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
ysr@94 1648 short_name(), occupancy(), initiating_occupancy());
ysr@94 1649 }
ysr@94 1650 return true;
ysr@94 1651 }
ysr@94 1652 if (UseCMSInitiatingOccupancyOnly) {
ysr@94 1653 return false;
ysr@94 1654 }
ysr@94 1655 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
ysr@94 1656 if (PrintGCDetails && Verbose) {
ysr@94 1657 gclog_or_tty->print(" %s: collect because expanded for allocation ",
duke@0 1658 short_name());
duke@0 1659 }
duke@0 1660 return true;
duke@0 1661 }
ysr@94 1662 if (_cmsSpace->should_concurrent_collect()) {
duke@0 1663 if (PrintGCDetails && Verbose) {
ysr@94 1664 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
duke@0 1665 short_name());
duke@0 1666 }
duke@0 1667 return true;
duke@0 1668 }
duke@0 1669 return false;
duke@0 1670 }
duke@0 1671
duke@0 1672 void ConcurrentMarkSweepGeneration::collect(bool full,
duke@0 1673 bool clear_all_soft_refs,
duke@0 1674 size_t size,
duke@0 1675 bool tlab)
duke@0 1676 {
duke@0 1677 collector()->collect(full, clear_all_soft_refs, size, tlab);
duke@0 1678 }
duke@0 1679
duke@0 1680 void CMSCollector::collect(bool full,
duke@0 1681 bool clear_all_soft_refs,
duke@0 1682 size_t size,
duke@0 1683 bool tlab)
duke@0 1684 {
duke@0 1685 if (!UseCMSCollectionPassing && _collectorState > Idling) {
duke@0 1686 // For debugging purposes skip the collection if the state
duke@0 1687 // is not currently idle
duke@0 1688 if (TraceCMSState) {
duke@0 1689 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
duke@0 1690 Thread::current(), full, _collectorState);
duke@0 1691 }
duke@0 1692 return;
duke@0 1693 }
duke@0 1694
duke@0 1695 // The following "if" branch is present for defensive reasons.
duke@0 1696 // In the current uses of this interface, it can be replaced with:
duke@0 1697 // assert(!GC_locker.is_active(), "Can't be called otherwise");
duke@0 1698 // But I am not placing that assert here to allow future
duke@0 1699 // generality in invoking this interface.
duke@0 1700 if (GC_locker::is_active()) {
duke@0 1701 // A consistency test for GC_locker
duke@0 1702 assert(GC_locker::needs_gc(), "Should have been set already");
duke@0 1703 // Skip this foreground collection, instead
duke@0 1704 // expanding the heap if necessary.
duke@0 1705 // Need the free list locks for the call to free() in compute_new_size()
duke@0 1706 compute_new_size();
duke@0 1707 return;
duke@0 1708 }
duke@0 1709 acquire_control_and_collect(full, clear_all_soft_refs);
duke@0 1710 _full_gcs_since_conc_gc++;
duke@0 1711 }
duke@0 1712
sla@4802 1713 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
duke@0 1714 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 1715 unsigned int gc_count = gch->total_full_collections();
duke@0 1716 if (gc_count == full_gc_count) {
duke@0 1717 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
duke@0 1718 _full_gc_requested = true;
sla@4802 1719 _full_gc_cause = cause;
duke@0 1720 CGC_lock->notify(); // nudge CMS thread
ysr@2212 1721 } else {
ysr@2212 1722 assert(gc_count > full_gc_count, "Error: causal loop");
duke@0 1723 }
duke@0 1724 }
duke@0 1725
sla@4802 1726 bool CMSCollector::is_external_interruption() {
sla@4802 1727 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
sla@4802 1728 return GCCause::is_user_requested_gc(cause) ||
sla@4802 1729 GCCause::is_serviceability_requested_gc(cause);
sla@4802 1730 }
sla@4802 1731
sla@4802 1732 void CMSCollector::report_concurrent_mode_interruption() {
sla@4802 1733 if (is_external_interruption()) {
sla@4802 1734 if (PrintGCDetails) {
sla@4802 1735 gclog_or_tty->print(" (concurrent mode interrupted)");
sla@4802 1736 }
sla@4802 1737 } else {
sla@4802 1738 if (PrintGCDetails) {
sla@4802 1739 gclog_or_tty->print(" (concurrent mode failure)");
sla@4802 1740 }
sla@4802 1741 _gc_tracer_cm->report_concurrent_mode_failure();
sla@4802 1742 }
sla@4802 1743 }
sla@4802 1744
duke@0 1745
duke@0 1746 // The foreground and background collectors need to coordinate in order
duke@0 1747 // to make sure that they do not mutually interfere with CMS collections.
duke@0 1748 // When a background collection is active,
duke@0 1749 // the foreground collector may need to take over (preempt) and
duke@0 1750 // synchronously complete an ongoing collection. Depending on the
duke@0 1751 // frequency of the background collections and the heap usage
duke@0 1752 // of the application, this preemption can be seldom or frequent.
duke@0 1753 // There are only certain
duke@0 1754 // points in the background collection at which the "collection-baton"
duke@0 1755 // can be passed to the foreground collector.
duke@0 1756 //
duke@0 1757 // The foreground collector will wait for the baton before
duke@0 1758 // starting any part of the collection. The foreground collector
duke@0 1759 // will only wait at one location.
duke@0 1760 //
duke@0 1761 // The background collector will yield the baton before starting a new
duke@0 1762 // phase of the collection (e.g., before initial marking, marking from roots,
duke@0 1763 // precleaning, final re-mark, sweep etc.) This is normally done at the head
duke@0 1764 // of the loop which switches the phases. The background collector does some
duke@0 1765 // of the phases (initial mark, final re-mark) with the world stopped.
duke@0 1766 // Because of locking involved in stopping the world,
duke@0 1767 // the foreground collector should not block waiting for the background
duke@0 1768 // collector when it is doing a stop-the-world phase. The background
duke@0 1769 // collector will yield the baton at an additional point just before
duke@0 1770 // it enters a stop-the-world phase. Once the world is stopped, the
duke@0 1771 // background collector checks the phase of the collection. If the
duke@0 1772 // phase has not changed, it proceeds with the collection. If the
duke@0 1773 // phase has changed, it skips that phase of the collection. See
duke@0 1774 // the comments on the use of the Heap_lock in collect_in_background().
duke@0 1775 //
duke@0 1776 // Variable used in baton passing.
duke@0 1777 // _foregroundGCIsActive - Set to true by the foreground collector when
duke@0 1778 // it wants the baton. The foreground clears it when it has finished
duke@0 1779 // the collection.
duke@0 1780 // _foregroundGCShouldWait - Set to true by the background collector
duke@0 1781 // when it is running. The foreground collector waits while
duke@0 1782 // _foregroundGCShouldWait is true.
duke@0 1783 // CGC_lock - monitor used to protect access to the above variables
duke@0 1784 // and to notify the foreground and background collectors.
duke@0 1785 // _collectorState - current state of the CMS collection.
duke@0 1786 //
duke@0 1787 // The foreground collector
duke@0 1788 // acquires the CGC_lock
duke@0 1789 // sets _foregroundGCIsActive
duke@0 1790 // waits on the CGC_lock for _foregroundGCShouldWait to be false
duke@0 1791 // various locks acquired in preparation for the collection
duke@0 1792 // are released so as not to block the background collector
duke@0 1793 // that is in the midst of a collection
duke@0 1794 // proceeds with the collection
duke@0 1795 // clears _foregroundGCIsActive
duke@0 1796 // returns
duke@0 1797 //
duke@0 1798 // The background collector in a loop iterating on the phases of the
duke@0 1799 // collection
duke@0 1800 // acquires the CGC_lock
duke@0 1801 // sets _foregroundGCShouldWait
duke@0 1802 // if _foregroundGCIsActive is set
duke@0 1803 // clears _foregroundGCShouldWait, notifies _CGC_lock
duke@0 1804 // waits on _CGC_lock for _foregroundGCIsActive to become false
duke@0 1805 // and exits the loop.
duke@0 1806 // otherwise
duke@0 1807 // proceed with that phase of the collection
duke@0 1808 // if the phase is a stop-the-world phase,
duke@0 1809 // yield the baton once more just before enqueueing
duke@0 1810 // the stop-world CMS operation (executed by the VM thread).
duke@0 1811 // returns after all phases of the collection are done
duke@0 1812 //
duke@0 1813
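// A minimal, self-contained sketch (not HotSpot code, guarded out of the
// build) of the two-flag handshake described above, using standard-library
// primitives in place of CGC_lock. It models only _foregroundGCIsActive,
// _foregroundGCShouldWait and the wait/notify pattern; the real protocol also
// involves the CMS token and the Heap_lock.
#if 0
#include <condition_variable>
#include <mutex>

class BatonProtocol {
  std::mutex              _lock;                 // stands in for CGC_lock
  std::condition_variable _cv;
  bool _foregroundGCIsActive   = false;
  bool _foregroundGCShouldWait = false;

 public:
  // Foreground (VM thread): announce the collection, then wait until the
  // background collector yields the baton.
  void foreground_acquire() {
    std::unique_lock<std::mutex> l(_lock);
    _foregroundGCIsActive = true;
    _cv.notify_all();
    _cv.wait(l, [this] { return !_foregroundGCShouldWait; });
  }

  // Foreground: collection finished, hand control back.
  void foreground_release() {
    std::lock_guard<std::mutex> l(_lock);
    _foregroundGCIsActive = false;
    _cv.notify_all();
  }

  // Background (CMS thread): called at the head of each phase while holding
  // the baton. Returns true if the baton was passed to the foreground
  // collector, in which case this background cycle is abandoned.
  bool background_yield_point() {
    std::unique_lock<std::mutex> l(_lock);
    if (_foregroundGCIsActive) {
      _foregroundGCShouldWait = false;           // let the foreground proceed
      _cv.notify_all();
      _cv.wait(l, [this] { return !_foregroundGCIsActive; });
      return true;
    }
    _foregroundGCShouldWait = true;              // block the foreground while we run
    return false;
  }
};
#endif
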
duke@0 1814 void CMSCollector::acquire_control_and_collect(bool full,
duke@0 1815 bool clear_all_soft_refs) {
duke@0 1816 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@0 1817 assert(!Thread::current()->is_ConcurrentGC_thread(),
duke@0 1818 "shouldn't try to acquire control from self!");
duke@0 1819
duke@0 1820 // Start the protocol for acquiring control of the
duke@0 1821 // collection from the background collector (aka CMS thread).
duke@0 1822 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
duke@0 1823 "VM thread should have CMS token");
duke@0 1824 // Remember the possibly interrupted state of an ongoing
duke@0 1825 // concurrent collection
duke@0 1826 CollectorState first_state = _collectorState;
duke@0 1827
duke@0 1828 // Signal to a possibly ongoing concurrent collection that
duke@0 1829 // we want to do a foreground collection.
duke@0 1830 _foregroundGCIsActive = true;
duke@0 1831
duke@0 1832 // Disable incremental mode during a foreground collection.
duke@0 1833 ICMSDisabler icms_disabler;
duke@0 1834
duke@0 1835 // release locks and wait for a notify from the background collector
duke@0 1836 // releasing the locks is only necessary for phases which
duke@0 1837 // do yields to improve the granularity of the collection.
duke@0 1838 assert_lock_strong(bitMapLock());
duke@0 1839 // We need to lock the Free list lock for the space that we are
duke@0 1840 // currently collecting.
duke@0 1841 assert(haveFreelistLocks(), "Must be holding free list locks");
duke@0 1842 bitMapLock()->unlock();
duke@0 1843 releaseFreelistLocks();
duke@0 1844 {
duke@0 1845 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
duke@0 1846 if (_foregroundGCShouldWait) {
duke@0 1847 // We are going to be waiting for action for the CMS thread;
duke@0 1848 // it had better not be gone (for instance at shutdown)!
duke@0 1849 assert(ConcurrentMarkSweepThread::cmst() != NULL,
duke@0 1850 "CMS thread must be running");
duke@0 1851 // Wait here until the background collector gives us the go-ahead
duke@0 1852 ConcurrentMarkSweepThread::clear_CMS_flag(
duke@0 1853 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
duke@0 1854 // Get a possibly blocked CMS thread going:
duke@0 1855 // Note that we set _foregroundGCIsActive true above,
duke@0 1856 // without protection of the CGC_lock.
duke@0 1857 CGC_lock->notify();
duke@0 1858 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
duke@0 1859 "Possible deadlock");
duke@0 1860 while (_foregroundGCShouldWait) {
duke@0 1861 // wait for notification
duke@0 1862 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
duke@0 1863 // Possibility of delay/starvation here, since CMS token does
duke@0 1864 // not know to give priority to VM thread? Actually, I think
duke@0 1865 // there wouldn't be any delay/starvation, but the proof of
duke@0 1866 // that "fact" (?) appears non-trivial. XXX 20011219YSR
duke@0 1867 }
duke@0 1868 ConcurrentMarkSweepThread::set_CMS_flag(
duke@0 1869 ConcurrentMarkSweepThread::CMS_vm_has_token);
duke@0 1870 }
duke@0 1871 }
duke@0 1872 // The CMS_token is already held. Get back the other locks.
duke@0 1873 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
duke@0 1874 "VM thread should have CMS token");
duke@0 1875 getFreelistLocks();
duke@0 1876 bitMapLock()->lock_without_safepoint_check();
duke@0 1877 if (TraceCMSState) {
duke@0 1878 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
duke@0 1879 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
duke@0 1880 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
duke@0 1881 }
duke@0 1882
duke@0 1883 // Check if we need to do a compaction, or if not, whether
duke@0 1884 // we need to start the mark-sweep from scratch.
duke@0 1885 bool should_compact = false;
duke@0 1886 bool should_start_over = false;
duke@0 1887 decide_foreground_collection_type(clear_all_soft_refs,
duke@0 1888 &should_compact, &should_start_over);
duke@0 1889
duke@0 1890 NOT_PRODUCT(
duke@0 1891 if (RotateCMSCollectionTypes) {
duke@0 1892 if (_cmsGen->debug_collection_type() ==
duke@0 1893 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
duke@0 1894 should_compact = true;
duke@0 1895 } else if (_cmsGen->debug_collection_type() ==
duke@0 1896 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
duke@0 1897 should_compact = false;
duke@0 1898 }
duke@0 1899 }
duke@0 1900 )
duke@0 1901
sla@4802 1902 if (first_state > Idling) {
sla@4802 1903 report_concurrent_mode_interruption();
duke@0 1904 }
duke@0 1905
jmasa@4641 1906 set_did_compact(should_compact);
duke@0 1907 if (should_compact) {
duke@0 1908 // If the collection is being acquired from the background
duke@0 1909 // collector, there may be references on the discovered
duke@0 1910 // references lists that have NULL referents (being those
duke@0 1911 // that were concurrently cleared by a mutator) or
duke@0 1912 // that are no longer active (having been enqueued concurrently
duke@0 1913 // by the mutator).
duke@0 1914 // Scrub the list of those references because Mark-Sweep-Compact
duke@0 1915 // code assumes referents are not NULL and that all discovered
duke@0 1916 // Reference objects are active.
duke@0 1917 ref_processor()->clean_up_discovered_references();
duke@0 1918
sla@4802 1919 if (first_state > Idling) {
sla@4802 1920 save_heap_summary();
sla@4802 1921 }
sla@4802 1922
duke@0 1923 do_compaction_work(clear_all_soft_refs);
duke@0 1924
duke@0 1925 // Has the GC time limit been exceeded?
jmasa@1387 1926 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
jmasa@1387 1927 size_t max_eden_size = young_gen->max_capacity() -
jmasa@1387 1928 young_gen->to()->capacity() -
jmasa@1387 1929 young_gen->from()->capacity();
jmasa@1387 1930 GenCollectedHeap* gch = GenCollectedHeap::heap();
jmasa@1387 1931 GCCause::Cause gc_cause = gch->gc_cause();
jmasa@1387 1932 size_policy()->check_gc_overhead_limit(_young_gen->used(),
jmasa@1387 1933 young_gen->eden()->used(),
jmasa@1387 1934 _cmsGen->max_capacity(),
jmasa@1387 1935 max_eden_size,
jmasa@1387 1936 full,
jmasa@1387 1937 gc_cause,
jmasa@1387 1938 gch->collector_policy());
duke@0 1939 } else {
duke@0 1940 do_mark_sweep_work(clear_all_soft_refs, first_state,
duke@0 1941 should_start_over);
duke@0 1942 }
duke@0 1943 // Reset the expansion cause, now that we just completed
duke@0 1944 // a collection cycle.
duke@0 1945 clear_expansion_cause();
duke@0 1946 _foregroundGCIsActive = false;
duke@0 1947 return;
duke@0 1948 }
duke@0 1949
coleenp@3602 1950 // Resize the tenured generation
duke@0 1951 // after obtaining the free list locks for the
duke@0 1952 // two generations.
duke@0 1953 void CMSCollector::compute_new_size() {
duke@0 1954 assert_locked_or_safepoint(Heap_lock);
duke@0 1955 FreelistLocker z(this);
coleenp@3602 1956 MetaspaceGC::compute_new_size();
jmasa@4465 1957 _cmsGen->compute_new_size_free_list();
duke@0 1958 }
duke@0 1959
duke@0 1960 // A work method used by foreground collection to determine
duke@0 1961 // what type of collection (compacting or not, continuing or fresh)
duke@0 1962 // it should do.
duke@0 1963 // NOTE: the intent is to make UseCMSCompactAtFullCollection
duke@0 1964 // and CMSCompactWhenClearAllSoftRefs the default in the future
duke@0 1965 // and do away with the flags after a suitable period.
duke@0 1966 void CMSCollector::decide_foreground_collection_type(
duke@0 1967 bool clear_all_soft_refs, bool* should_compact,
duke@0 1968 bool* should_start_over) {
duke@0 1969 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
duke@0 1970 // flag is set, and we have either requested a System.gc() or
duke@0 1971 // the number of full gc's since the last concurrent cycle
duke@0 1972 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
duke@0 1973 // or if an incremental collection has failed
duke@0 1974 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 1975 assert(gch->collector_policy()->is_two_generation_policy(),
duke@0 1976 "You may want to check the correctness of the following");
duke@0 1977 // Inform cms gen if this was due to partial collection failing.
duke@0 1978 // The CMS gen may use this fact to determine its expansion policy.
ysr@1901 1979 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
duke@0 1980 assert(!_cmsGen->incremental_collection_failed(),
duke@0 1981 "Should have been noticed, reacted to and cleared");
duke@0 1982 _cmsGen->set_incremental_collection_failed();
duke@0 1983 }
duke@0 1984 *should_compact =
duke@0 1985 UseCMSCompactAtFullCollection &&
duke@0 1986 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
duke@0 1987 GCCause::is_user_requested_gc(gch->gc_cause()) ||
ysr@1901 1988 gch->incremental_collection_will_fail(true /* consult_young */));
duke@0 1989 *should_start_over = false;
duke@0 1990 if (clear_all_soft_refs && !*should_compact) {
duke@0 1991 // We are about to do a last ditch collection attempt
duke@0 1992 // so it would normally make sense to do a compaction
duke@0 1993 // to reclaim as much space as possible.
duke@0 1994 if (CMSCompactWhenClearAllSoftRefs) {
duke@0 1995 // Default: The rationale is that in this case either
duke@0 1996 // we are past the final marking phase, in which case
duke@0 1997 // we'd have to start over, or so little has been done
duke@0 1998 // that there's little point in saving that work. Compaction
duke@0 1999 // appears to be the sensible choice in either case.
duke@0 2000 *should_compact = true;
duke@0 2001 } else {
duke@0 2002 // We have been asked to clear all soft refs, but not to
duke@0 2003 // compact. Make sure that we aren't past the final checkpoint
duke@0 2004 // phase, for that is where we process soft refs. If we are already
duke@0 2005 // past that phase, we'll need to redo the refs discovery phase and
duke@0 2006 // if necessary clear soft refs that weren't previously
duke@0 2007 // cleared. We do so by remembering the phase in which
duke@0 2008 // we came in, and if we are past the refs processing
duke@0 2009 // phase, we'll choose to just redo the mark-sweep
duke@0 2010 // collection from scratch.
duke@0 2011 if (_collectorState > FinalMarking) {
duke@0 2012 // We are past the refs processing phase;
duke@0 2013 // start over and do a fresh synchronous CMS cycle
duke@0 2014 _collectorState = Resetting; // skip to reset to start new cycle
duke@0 2015 reset(false /* == !asynch */);
duke@0 2016 *should_start_over = true;
duke@0 2017 } // else we can continue a possibly ongoing current cycle
duke@0 2018 }
duke@0 2019 }
duke@0 2020 }
duke@0 2021
duke@0 2022 // A work method used by the foreground collector to do
duke@0 2023 // a mark-sweep-compact.
duke@0 2024 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
duke@0 2025 GenCollectedHeap* gch = GenCollectedHeap::heap();
sla@4802 2026
sla@4802 2027 STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
mgronlun@5696 2028 gc_timer->register_gc_start();
sla@4802 2029
sla@4802 2030 SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
sla@4802 2031 gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
sla@4802 2032
brutisso@6310 2033 GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
duke@0 2034 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
duke@0 2035 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
duke@0 2036 "collections passed to foreground collector", _full_gcs_since_conc_gc);
duke@0 2037 }
duke@0 2038
duke@0 2039 // Sample collection interval time and reset for collection pause.
duke@0 2040 if (UseAdaptiveSizePolicy) {
duke@0 2041 size_policy()->msc_collection_begin();
duke@0 2042 }
duke@0 2043
duke@0 2044 // Temporarily widen the span of the weak reference processing to
duke@0 2045 // the entire heap.
duke@0 2046 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
ysr@2216 2047 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
duke@0 2048 // Temporarily, clear the "is_alive_non_header" field of the
duke@0 2049 // reference processor.
ysr@2216 2050 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
duke@0 2051 // Temporarily make reference _processing_ single threaded (non-MT).
ysr@2216 2052 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
duke@0 2053 // Temporarily make refs discovery atomic
ysr@2216 2054 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
ysr@2216 2055 // Temporarily make reference _discovery_ single threaded (non-MT)
ysr@2216 2056 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
duke@0 2057
duke@0 2058 ref_processor()->set_enqueuing_is_done(false);
johnc@2740 2059 ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
ysr@457 2060 ref_processor()->setup_policy(clear_all_soft_refs);
duke@0 2061 // If an asynchronous collection finishes, the _modUnionTable is
duke@0 2062 // all clear. If we are assuming the collection from an asynchronous
duke@0 2063 // collection, clear the _modUnionTable.
duke@0 2064 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
duke@0 2065 "_modUnionTable should be clear if the baton was not passed");
duke@0 2066 _modUnionTable.clear_all();
coleenp@3602 2067 assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
coleenp@3602 2068 "mod union for klasses should be clear if the baton was passed");
coleenp@3602 2069 _ct->klass_rem_set()->clear_mod_union();
duke@0 2070
duke@0 2071 // We must adjust the allocation statistics being maintained
duke@0 2072 // in the free list space. We do so by reading and clearing
duke@0 2073 // the sweep timer and updating the block flux rate estimates below.
ysr@1145 2074 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
ysr@1145 2075 if (_inter_sweep_timer.is_active()) {
ysr@1145 2076 _inter_sweep_timer.stop();
ysr@1145 2077 // Note that we do not use this sample to update the _inter_sweep_estimate.
ysr@1145 2078 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
ysr@1145 2079 _inter_sweep_estimate.padded_average(),
ysr@1145 2080 _intra_sweep_estimate.padded_average());
ysr@1145 2081 }
duke@0 2082
duke@0 2083 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
duke@0 2084 ref_processor(), clear_all_soft_refs);
duke@0 2085 #ifdef ASSERT
duke@0 2086 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
duke@0 2087 size_t free_size = cms_space->free();
duke@0 2088 assert(free_size ==
duke@0 2089 pointer_delta(cms_space->end(), cms_space->compaction_top())
duke@0 2090 * HeapWordSize,
duke@0 2091 "All the free space should be compacted into one chunk at top");
jmasa@3297 2092 assert(cms_space->dictionary()->total_chunk_size(
duke@0 2093 debug_only(cms_space->freelistLock())) == 0 ||
duke@0 2094 cms_space->totalSizeInIndexedFreeLists() == 0,
duke@0 2095 "All the free space should be in a single chunk");
duke@0 2096 size_t num = cms_space->totalCount();
duke@0 2097 assert((free_size == 0 && num == 0) ||
duke@0 2098 (free_size > 0 && (num == 1 || num == 2)),
duke@0 2099 "There should be at most 2 free chunks after compaction");
duke@0 2100 #endif // ASSERT
duke@0 2101 _collectorState = Resetting;
duke@0 2102 assert(_restart_addr == NULL,
duke@0 2103 "Should have been NULL'd before baton was passed");
duke@0 2104 reset(false /* == !asynch */);
duke@0 2105 _cmsGen->reset_after_compaction();
ysr@94 2106 _concurrent_cycles_since_last_unload = 0;
ysr@94 2107
duke@0 2108 // Clear any data recorded in the PLAB chunk arrays.
duke@0 2109 if (_survivor_plab_array != NULL) {
duke@0 2110 reset_survivor_plab_arrays();
duke@0 2111 }
duke@0 2112
duke@0 2113 // Adjust the per-size allocation stats for the next epoch.
ysr@1145 2114 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
ysr@1145 2115 // Restart the "inter sweep timer" for the next epoch.
ysr@1145 2116 _inter_sweep_timer.reset();
ysr@1145 2117 _inter_sweep_timer.start();
duke@0 2118
duke@0 2119 // Sample collection pause time and reset for collection interval.
duke@0 2120 if (UseAdaptiveSizePolicy) {
duke@0 2121 size_policy()->msc_collection_end(gch->gc_cause());
duke@0 2122 }
duke@0 2123
mgronlun@5696 2124 gc_timer->register_gc_end();
sla@4802 2125
sla@4802 2126 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
sla@4802 2127
duke@0 2128 // For a mark-sweep-compact, compute_new_size() will be called
duke@0 2129 // in the heap's do_collection() method.
duke@0 2130 }
duke@0 2131
duke@0 2132 // A work method used by the foreground collector to do
duke@0 2133 // a mark-sweep, after taking over from a possibly on-going
duke@0 2134 // concurrent mark-sweep collection.
duke@0 2135 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
duke@0 2136 CollectorState first_state, bool should_start_over) {
duke@0 2137 if (PrintGC && Verbose) {
duke@0 2138 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
duke@0 2139 "collector with count %d",
duke@0 2140 _full_gcs_since_conc_gc);
duke@0 2141 }
duke@0 2142 switch (_collectorState) {
duke@0 2143 case Idling:
duke@0 2144 if (first_state == Idling || should_start_over) {
duke@0 2145 // The background GC was not active, or should
duke@0 2146 // be restarted from scratch; start the cycle.
duke@0 2147 _collectorState = InitialMarking;
duke@0 2148 }
duke@0 2149 // If first_state was not Idling, then a background GC
duke@0 2150 // was in progress and has now finished. No need to do it
duke@0 2151 // again. Leave the state as Idling.
duke@0 2152 break;
duke@0 2153 case Precleaning:
duke@0 2154 // In the foreground case don't do the precleaning since
duke@0 2155 // it is not done concurrently and there is extra work
duke@0 2156 // required.
duke@0 2157 _collectorState = FinalMarking;
duke@0 2158 }
sla@4802 2159 collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
duke@0 2160
duke@0 2161 // For a mark-sweep, compute_new_size() will be called
duke@0 2162 // in the heap's do_collection() method.
duke@0 2163 }
duke@0 2164
duke@0 2165
jmasa@5024 2166 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
jmasa@5024 2167 DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
jmasa@5024 2168 EdenSpace* eden_space = dng->eden();
jmasa@5024 2169 ContiguousSpace* from_space = dng->from();
jmasa@5024 2170 ContiguousSpace* to_space = dng->to();
jmasa@5024 2171 // Eden
jmasa@5024 2172 if (_eden_chunk_array != NULL) {
jmasa@5024 2173 gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
jmasa@5024 2174 eden_space->bottom(), eden_space->top(),
jmasa@5024 2175 eden_space->end(), eden_space->capacity());
jmasa@5024 2176 gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
jmasa@5024 2177 "_eden_chunk_capacity=" SIZE_FORMAT,
jmasa@5024 2178 _eden_chunk_index, _eden_chunk_capacity);
jmasa@5024 2179 for (size_t i = 0; i < _eden_chunk_index; i++) {
jmasa@5024 2180 gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
jmasa@5024 2181 i, _eden_chunk_array[i]);
jmasa@5024 2182 }
jmasa@5024 2183 }
jmasa@5024 2184 // Survivor
jmasa@5024 2185 if (_survivor_chunk_array != NULL) {
jmasa@5024 2186 gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
jmasa@5024 2187 from_space->bottom(), from_space->top(),
jmasa@5024 2188 from_space->end(), from_space->capacity());
jmasa@5024 2189 gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
jmasa@5024 2190 "_survivor_chunk_capacity=" SIZE_FORMAT,
jmasa@5024 2191 _survivor_chunk_index, _survivor_chunk_capacity);
jmasa@5024 2192 for (size_t i = 0; i < _survivor_chunk_index; i++) {
jmasa@5024 2193 gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
jmasa@5024 2194 i, _survivor_chunk_array[i]);
jmasa@5024 2195 }
jmasa@5024 2196 }
jmasa@5024 2197 }
jmasa@5024 2198
duke@0 2199 void CMSCollector::getFreelistLocks() const {
duke@0 2200 // Get locks for all free lists in all generations that this
duke@0 2201 // collector is responsible for
duke@0 2202 _cmsGen->freelistLock()->lock_without_safepoint_check();
duke@0 2203 }
duke@0 2204
duke@0 2205 void CMSCollector::releaseFreelistLocks() const {
duke@0 2206 // Release locks for all free lists in all generations that this
duke@0 2207 // collector is responsible for
duke@0 2208 _cmsGen->freelistLock()->unlock();
duke@0 2209 }
duke@0 2210
duke@0 2211 bool CMSCollector::haveFreelistLocks() const {
duke@0 2212 // Check locks for all free lists in all generations that this
duke@0 2213 // collector is responsible for
duke@0 2214 assert_lock_strong(_cmsGen->freelistLock());
duke@0 2215 PRODUCT_ONLY(ShouldNotReachHere());
duke@0 2216 return true;
duke@0 2217 }
duke@0 2218
duke@0 2219 // A utility class that is used by the CMS collector to
duke@0 2220 // temporarily "release" the foreground collector from its
duke@0 2221 // usual obligation to wait for the background collector to
duke@0 2222 // complete an ongoing phase before proceeding.
duke@0 2223 class ReleaseForegroundGC: public StackObj {
duke@0 2224 private:
duke@0 2225 CMSCollector* _c;
duke@0 2226 public:
duke@0 2227 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
duke@0 2228 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
duke@0 2229 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
duke@0 2230 // allow a potentially blocked foreground collector to proceed
duke@0 2231 _c->_foregroundGCShouldWait = false;
duke@0 2232 if (_c->_foregroundGCIsActive) {
duke@0 2233 CGC_lock->notify();
duke@0 2234 }
duke@0 2235 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
duke@0 2236 "Possible deadlock");
duke@0 2237 }
duke@0 2238
duke@0 2239 ~ReleaseForegroundGC() {
duke@0 2240 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
duke@0 2241 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
duke@0 2242 _c->_foregroundGCShouldWait = true;
duke@0 2243 }
duke@0 2244 };
duke@0 2245
duke@0 2246 // There are separate collect_in_background and collect_in_foreground because of
duke@0 2247 // the different locking requirements of the background collector and the
duke@0 2248 // foreground collector. There was originally an attempt to share
duke@0 2249 // one "collect" method between the background collector and the foreground
duke@0 2250 // collector but the if-then-else required made it cleaner to have
duke@0 2251 // separate methods.
sla@4802 2252 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
duke@0 2253 assert(Thread::current()->is_ConcurrentGC_thread(),
duke@0 2254 "A CMS asynchronous collection is only allowed on a CMS thread.");
duke@0 2255
duke@0 2256 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 2257 {
duke@0 2258 bool safepoint_check = Mutex::_no_safepoint_check_flag;
duke@0 2259 MutexLockerEx hl(Heap_lock, safepoint_check);
ysr@94 2260 FreelistLocker fll(this);
duke@0 2261 MutexLockerEx x(CGC_lock, safepoint_check);
duke@0 2262 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
duke@0 2263 // The foreground collector is active or we're
duke@0 2264 // not using asynchronous collections. Skip this
duke@0 2265 // background collection.
duke@0 2266 assert(!_foregroundGCShouldWait, "Should be clear");
duke@0 2267 return;
duke@0 2268 } else {
duke@0 2269 assert(_collectorState == Idling, "Should be idling before start.");
duke@0 2270 _collectorState = InitialMarking;
sla@4802 2271 register_gc_start(cause);
duke@0 2272 // Reset the expansion cause, now that we are about to begin
duke@0 2273 // a new cycle.
duke@0 2274 clear_expansion_cause();
coleenp@3602 2275
coleenp@3602 2276 // Clear the MetaspaceGC flag since a concurrent collection
coleenp@3602 2277 // is starting; it is also cleared after the collection.
coleenp@3602 2278 MetaspaceGC::set_should_concurrent_collect(false);
duke@0 2279 }
ysr@94 2280 // Decide if we want to enable class unloading as part of the
ysr@94 2281 // ensuing concurrent GC cycle.
ysr@94 2282 update_should_unload_classes();
duke@0 2283 _full_gc_requested = false; // acks all outstanding full gc requests
sla@4802 2284 _full_gc_cause = GCCause::_no_gc;
duke@0 2285 // Signal that we are about to start a collection
duke@0 2286 gch->increment_total_full_collections(); // ... starting a collection cycle
duke@0 2287 _collection_count_start = gch->total_full_collections();
duke@0 2288 }
duke@0 2289
duke@0 2290 // Used for PrintGC
duke@0 2291 size_t prev_used;
duke@0 2292 if (PrintGC && Verbose) {
duke@0 2293 prev_used = _cmsGen->used(); // XXXPERM
duke@0 2294 }
duke@0 2295
duke@0 2296 // The change of the collection state is normally done at this level;
duke@0 2297 // the exceptions are phases that are executed while the world is
duke@0 2298 // stopped. For those phases the change of state is done while the
duke@0 2299 // world is stopped. For baton passing purposes this allows the
duke@0 2300 // background collector to finish the phase and change state atomically.
duke@0 2301 // The foreground collector cannot wait on a phase that is done
duke@0 2302 // while the world is stopped because the foreground collector already
duke@0 2303 // has the world stopped and would deadlock.
duke@0 2304 while (_collectorState != Idling) {
duke@0 2305 if (TraceCMSState) {
duke@0 2306 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
duke@0 2307 Thread::current(), _collectorState);
duke@0 2308 }
duke@0 2309 // The foreground collector
duke@0 2310 // holds the Heap_lock throughout its collection.
duke@0 2311 // holds the CMS token (but not the lock)
duke@0 2312 // except while it is waiting for the background collector to yield.
duke@0 2313 //
duke@0 2314 // The foreground collector should be blocked (not for long)
duke@0 2315 // if the background collector is about to start a phase
duke@0 2316 // executed with world stopped. If the background
duke@0 2317 // collector has already started such a phase, the
duke@0 2318 // foreground collector is blocked waiting for the
duke@0 2319 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
duke@0 2320 // are executed in the VM thread.
duke@0 2321 //
duke@0 2322 // The locking order is
duke@0 2323 // PendingListLock (PLL) -- if applicable (FinalMarking)
duke@0 2324 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
duke@0 2325 // CMS token (claimed in
duke@0 2326 // stop_world_and_do() -->
duke@0 2327 // safepoint_synchronize() -->
duke@0 2328 // CMSThread::synchronize())
duke@0 2329
duke@0 2330 {
duke@0 2331 // Check if the FG collector wants us to yield.
duke@0 2332 CMSTokenSync x(true); // is cms thread
duke@0 2333 if (waitForForegroundGC()) {
duke@0 2334 // We yielded to a foreground GC, nothing more to be
duke@0 2335 // done this round.
duke@0 2336 assert(_foregroundGCShouldWait == false, "We set it to false in "
duke@0 2337 "waitForForegroundGC()");
duke@0 2338 if (TraceCMSState) {
duke@0 2339 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
duke@0 2340 " exiting collection CMS state %d",
duke@0 2341 Thread::current(), _collectorState);
duke@0 2342 }
duke@0 2343 return;
duke@0 2344 } else {
duke@0 2345 // The background collector can run but check to see if the
duke@0 2346 // foreground collector has done a collection while the
duke@0 2347 // background collector was waiting to get the CGC_lock
duke@0 2348 // above. If yes, break so that _foregroundGCShouldWait
duke@0 2349 // is cleared before returning.
duke@0 2350 if (_collectorState == Idling) {
duke@0 2351 break;
duke@0 2352 }
duke@0 2353 }
duke@0 2354 }
duke@0 2355
duke@0 2356 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
duke@0 2357 "should be waiting");
duke@0 2358
duke@0 2359 switch (_collectorState) {
duke@0 2360 case InitialMarking:
duke@0 2361 {
duke@0 2362 ReleaseForegroundGC x(this);
duke@0 2363 stats().record_cms_begin();
duke@0 2364 VM_CMS_Initial_Mark initial_mark_op(this);
duke@0 2365 VMThread::execute(&initial_mark_op);
duke@0 2366 }
duke@0 2367 // The collector state may be any legal state at this point
duke@0 2368 // since the background collector may have yielded to the
duke@0 2369 // foreground collector.
duke@0 2370 break;
duke@0 2371 case Marking:
duke@0 2372 // initial marking in checkpointRootsInitialWork has been completed
duke@0 2373 if (markFromRoots(true)) { // we were successful
duke@0 2374 assert(_collectorState == Precleaning, "Collector state should "
duke@0 2375 "have changed");
duke@0 2376 } else {
duke@0 2377 assert(_foregroundGCIsActive, "Internal state inconsistency");
duke@0 2378 }
duke@0 2379 break;
duke@0 2380 case Precleaning:
duke@0 2381 if (UseAdaptiveSizePolicy) {
duke@0 2382 size_policy()->concurrent_precleaning_begin();
duke@0 2383 }
duke@0 2384 // marking from roots in markFromRoots has been completed
duke@0 2385 preclean();
duke@0 2386 if (UseAdaptiveSizePolicy) {
duke@0 2387 size_policy()->concurrent_precleaning_end();
duke@0 2388 }
duke@0 2389 assert(_collectorState == AbortablePreclean ||
duke@0 2390 _collectorState == FinalMarking,
duke@0 2391 "Collector state should have changed");
duke@0 2392 break;
duke@0 2393 case AbortablePreclean:
duke@0 2394 if (UseAdaptiveSizePolicy) {
duke@0 2395 size_policy()->concurrent_phases_resume();
duke@0 2396 }
duke@0 2397 abortable_preclean();
duke@0 2398 if (UseAdaptiveSizePolicy) {
duke@0 2399 size_policy()->concurrent_precleaning_end();
duke@0 2400 }
duke@0 2401 assert(_collectorState == FinalMarking, "Collector state should "
duke@0 2402 "have changed");
duke@0 2403 break;
duke@0 2404 case FinalMarking:
duke@0 2405 {
duke@0 2406 ReleaseForegroundGC x(this);
duke@0 2407
duke@0 2408 VM_CMS_Final_Remark final_remark_op(this);
duke@0 2409 VMThread::execute(&final_remark_op);
jmasa@935 2410 }
duke@0 2411 assert(_foregroundGCShouldWait, "block post-condition");
duke@0 2412 break;
duke@0 2413 case Sweeping:
duke@0 2414 if (UseAdaptiveSizePolicy) {
duke@0 2415 size_policy()->concurrent_sweeping_begin();
duke@0 2416 }
duke@0 2417 // final marking in checkpointRootsFinal has been completed
duke@0 2418 sweep(true);
duke@0 2419 assert(_collectorState == Resizing, "Collector state change "
duke@0 2420 "to Resizing must be done under the free_list_lock");
duke@0 2421 _full_gcs_since_conc_gc = 0;
duke@0 2422
duke@0 2423 // Stop the timers for adaptive size policy for the concurrent phases
duke@0 2424 if (UseAdaptiveSizePolicy) {
duke@0 2425 size_policy()->concurrent_sweeping_end();
duke@0 2426 size_policy()->concurrent_phases_end(gch->gc_cause(),
duke@0 2427 gch->prev_gen(_cmsGen)->capacity(),
duke@0 2428 _cmsGen->free());
duke@0 2429 }
duke@0 2430
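// Note: no break here -- sweep(true) has already advanced _collectorState
// to Resizing (asserted above), so control intentionally falls through
// into the Resizing case below.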
duke@0 2431 case Resizing: {
duke@0 2432 // Sweeping has been completed...
duke@0 2433 // At this point the background collection has completed.
duke@0 2434 // Don't move the call to compute_new_size() down
duke@0 2435 // into code that might be executed if the background
duke@0 2436 // collection was preempted.
duke@0 2437 {
duke@0 2438 ReleaseForegroundGC x(this); // unblock FG collection
duke@0 2439 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
duke@0 2440 CMSTokenSync z(true); // not strictly needed.
duke@0 2441 if (_collectorState == Resizing) {
duke@0 2442 compute_new_size();
sla@4802 2443 save_heap_summary();
duke@0 2444 _collectorState = Resetting;
duke@0 2445 } else {
duke@0 2446 assert(_collectorState == Idling, "The state should only change"
duke@0 2447 " because the foreground collector has finished the collection");
duke@0 2448 }
duke@0 2449 }
duke@0 2450 break;
duke@0 2451 }
duke@0 2452 case Resetting:
duke@0 2453 // CMS heap resizing has been completed
duke@0 2454 reset(true);
duke@0 2455 assert(_collectorState == Idling, "Collector state should "
duke@0 2456 "have changed");
coleenp@3602 2457
coleenp@3602 2458 MetaspaceGC::set_should_concurrent_collect(false);
coleenp@3602 2459
duke@0 2460 stats().record_cms_end();
duke@0 2461 // Don't move the concurrent_phases_end() and compute_new_size()
duke@0 2462 // calls to here because a preempted background collection
duke@0 2463 // has its state set to "Resetting".
duke@0 2464 break;
duke@0 2465 case Idling:
duke@0 2466 default:
duke@0 2467 ShouldNotReachHere();
duke@0 2468 break;
duke@0 2469 }
duke@0 2470 if (TraceCMSState) {
duke@0 2471 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
duke@0 2472 Thread::current(), _collectorState);
duke@0 2473 }
duke@0 2474 assert(_foregroundGCShouldWait, "block post-condition");
duke@0 2475 }
duke@0 2476
duke@0 2477 // Should this be in gc_epilogue?
duke@0 2478 collector_policy()->counters()->update_counters();
duke@0 2479
duke@0 2480 {
duke@0 2481 // Clear _foregroundGCShouldWait and, in the event that the
duke@0 2482 // foreground collector is waiting, notify it, before
duke@0 2483 // returning.
duke@0 2484 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
duke@0 2485 _foregroundGCShouldWait = false;
duke@0 2486 if (_foregroundGCIsActive) {
duke@0 2487 CGC_lock->notify();
duke@0 2488 }
duke@0 2489 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
duke@0 2490 "Possible deadlock");
duke@0 2491 }
duke@0 2492 if (TraceCMSState) {
duke@0 2493 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
duke@0 2494 " exiting collection CMS state %d",
duke@0 2495 Thread::current(), _collectorState);
duke@0 2496 }
duke@0 2497 if (PrintGC && Verbose) {
duke@0 2498 _cmsGen->print_heap_change(prev_used);
duke@0 2499 }
duke@0 2500 }
duke@0 2501
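// GC tracing/timing bookkeeping. A background cycle registers its start
// directly via register_gc_start() in collect_in_background(); a foreground
// (synchronous) collection registers lazily via register_foreground_gc_start().
// The _cms_start_registered flag ensures that the tracer's start and end
// events pair up exactly once per cycle.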
sla@4802 2502 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
sla@4802 2503 if (!_cms_start_registered) {
sla@4802 2504 register_gc_start(cause);
sla@4802 2505 }
sla@4802 2506 }
sla@4802 2507
sla@4802 2508 void CMSCollector::register_gc_start(GCCause::Cause cause) {
sla@4802 2509 _cms_start_registered = true;
mgronlun@5696 2510 _gc_timer_cm->register_gc_start();
sla@4802 2511 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
sla@4802 2512 }
sla@4802 2513
sla@4802 2514 void CMSCollector::register_gc_end() {
sla@4802 2515 if (_cms_start_registered) {
sla@4802 2516 report_heap_summary(GCWhen::AfterGC);
sla@4802 2517
mgronlun@5696 2518 _gc_timer_cm->register_gc_end();
sla@4802 2519 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
sla@4802 2520 _cms_start_registered = false;
sla@4802 2521 }
sla@4802 2522 }
sla@4802 2523
sla@4802 2524 void CMSCollector::save_heap_summary() {
sla@4802 2525 GenCollectedHeap* gch = GenCollectedHeap::heap();
sla@4802 2526 _last_heap_summary = gch->create_heap_summary();
sla@4802 2527 _last_metaspace_summary = gch->create_metaspace_summary();
sla@4802 2528 }
sla@4802 2529
sla@4802 2530 void CMSCollector::report_heap_summary(GCWhen::Type when) {
ehelin@5984 2531 _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
ehelin@5984 2532 _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
sla@4802 2533 }
sla@4802 2534
sla@4802 2535 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
duke@0 2536 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
duke@0 2537 "Foreground collector should be waiting, not executing");
duke@0 2538 assert(Thread::current()->is_VM_thread(), "A foreground collection "
duke@0 2539 "may only be done by the VM Thread with the world stopped");
duke@0 2540 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
duke@0 2541 "VM thread should have CMS token");
duke@0 2542
brutisso@6310 2543 // The gc id is created in register_foreground_gc_start if this collection is synchronous
brutisso@6310 2544 const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
sla@4802 2545 NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
brutisso@6310 2546 true, NULL, gc_id);)
duke@0 2547 if (UseAdaptiveSizePolicy) {
duke@0 2548 size_policy()->ms_collection_begin();
duke@0 2549 }
duke@0 2550 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
duke@0 2551
duke@0 2552 HandleMark hm; // Discard invalid handles created during verification
duke@0 2553
duke@0 2554 if (VerifyBeforeGC &&
duke@0 2555 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
johnc@3741 2556 Universe::verify();
duke@0 2557 }
duke@0 2558
ysr@453 2559 // Snapshot the soft reference policy to be used in this collection cycle.
ysr@457 2560 ref_processor()->setup_policy(clear_all_soft_refs);
ysr@453 2561
jmasa@5654 2562 // Decide if class unloading should be done
jmasa@5654 2563 update_should_unload_classes();
jmasa@5654 2564
duke@0 2565 bool init_mark_was_synchronous = false; // until proven otherwise
duke@0 2566 while (_collectorState != Idling) {
duke@0 2567 if (TraceCMSState) {
duke@0 2568 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
duke@0 2569 Thread::current(), _collectorState);
duke@0 2570 }
duke@0 2571 switch (_collectorState) {
duke@0 2572 case InitialMarking:
sla@4802 2573 register_foreground_gc_start(cause);
duke@0 2574 init_mark_was_synchronous = true; // fact to be exploited in re-mark
duke@0 2575 checkpointRootsInitial(false);
duke@0 2576 assert(_collectorState == Marking, "Collector state should have changed"
duke@0 2577 " within checkpointRootsInitial()");
duke@0 2578 break;
duke@0 2579 case Marking:
duke@0 2580 // initial marking in checkpointRootsInitialWork has been completed
duke@0 2581 if (VerifyDuringGC &&
duke@0 2582 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
stefank@4583 2583 Universe::verify("Verify before initial mark: ");
duke@0 2584 }
duke@0 2585 {
duke@0 2586 bool res = markFromRoots(false);
duke@0 2587 assert(res && _collectorState == FinalMarking, "Collector state should "
duke@0 2588 "have changed");
duke@0 2589 break;
duke@0 2590 }
duke@0 2591 case FinalMarking:
duke@0 2592 if (VerifyDuringGC &&
duke@0 2593 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
stefank@4583 2594 Universe::verify("Verify before re-mark: ");
duke@0 2595 }
duke@0 2596 checkpointRootsFinal(false, clear_all_soft_refs,
duke@0 2597 init_mark_was_synchronous);
duke@0 2598 assert(_collectorState == Sweeping, "Collector state should not "
duke@0 2599 "have changed within checkpointRootsFinal()");
duke@0 2600 break;
duke@0 2601 case Sweeping:
duke@0 2602 // final marking in checkpointRootsFinal has been completed
duke@0 2603 if (VerifyDuringGC &&
duke@0 2604 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
stefank@4583 2605 Universe::verify("Verify before sweep: ");
duke@0 2606 }
duke@0 2607 sweep(false);
duke@0 2608 assert(_collectorState == Resizing, "Incorrect state");
duke@0 2609 break;
duke@0 2610 case Resizing: {
duke@0 2611 // Sweeping has been completed; the actual resize in this case
duke@0 2612 // is done separately; nothing to be done in this state.
duke@0 2613 _collectorState = Resetting;
duke@0 2614 break;
duke@0 2615 }
duke@0 2616 case Resetting:
duke@0 2617 // The heap has been resized.
duke@0 2618 if (VerifyDuringGC &&
duke@0 2619 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
stefank@4583 2620 Universe::verify("Verify before reset: ");
duke@0 2621 }
sla@4802 2622 save_heap_summary();
duke@0 2623 reset(false);
duke@0 2624 assert(_collectorState == Idling, "Collector state should "
duke@0 2625 "have changed");
duke@0 2626 break;
duke@0 2627 case Precleaning:
duke@0 2628 case AbortablePreclean:
duke@0 2629 // Elide the preclean phase
duke@0 2630 _collectorState = FinalMarking;
duke@0 2631 break;
duke@0 2632 default:
duke@0 2633 ShouldNotReachHere();
duke@0 2634 }
duke@0 2635 if (TraceCMSState) {
duke@0 2636 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
duke@0 2637 Thread::current(), _collectorState);
duke@0 2638 }
duke@0 2639 }
duke@0 2640
duke@0 2641 if (UseAdaptiveSizePolicy) {
duke@0 2642 GenCollectedHeap* gch = GenCollectedHeap::heap();
duke@0 2643 size_policy()->ms_collection_end(gch->gc_cause());
duke@0 2644 }
duke@0 2645
duke@0 2646 if (VerifyAfterGC &&
duke@0 2647 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
johnc@3741 2648 Universe::verify();
duke@0 2649 }
duke@0 2650 if (TraceCMSState) {
duke@0 2651 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
duke@0 2652 " exiting collection CMS state %d",
duke@0 2653 Thread::current(), _collectorState);
duke@0 2654 }
duke@0 2655 }
duke@0 2656
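// Offer the baton to a competing foreground collection, if one is active.
// Returns true if the background collector yielded to a foreground GC
// (the caller then abandons the remainder of this background cycle),
// false if the background collector may proceed.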
duke@0 2657 bool CMSCollector::waitForForegroundGC() {
duke@0 2658 bool res = false;
duke@0 2659 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
duke@0 2660 "CMS thread should have CMS token");
duke@0 2661 // Block the foreground collector until the
duke@0 2662 // background collector decides whether to
duke@0 2663 // yield.
duke@0 2664 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
duke@0 2665 _foregroundGCShouldWait = true;
duke@0 2666 if (_foregroundGCIsActive) {
duke@0 2667 // The background collector yields to the
duke@0 2668 // foreground collector and returns a value
duke@0 2669 // indicating that it has yielded. The foreground
duke@0 2670 // collector can proceed.
duke@0 2671 res = true;
duke@0 2672 _foregroundGCShouldWait = false;
duke@0 2673 ConcurrentMarkSweepThread::clear_CMS_flag(
duke@0 2674 ConcurrentMarkSweepThread::CMS_cms_has_token);
duke@0 2675 ConcurrentMarkSweepThread::set_CMS_flag(
duke@0 2676 ConcurrentMarkSweepThread::CMS_cms_wants_token);
duke@0 2677 // Get a possibly blocked foreground thread going
duke@0 2678 CGC_lock->notify();
duke@0 2679 if (TraceCMSState) {
duke@0 2680 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
duke@0 2681 Thread::current(), _collectorState);
duke@0 2682 }
duke@0 2683 while (_foregroundGCIsActive) {
duke@0 2684 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
duke@0 2685 }
duke@0 2686 ConcurrentMarkSweepThread::set_CMS_flag(
duke@0 2687 ConcurrentMarkSweepThread::CMS_cms_has_token);
duke@0 2688 ConcurrentMarkSweepThread::clear_CMS_flag(
duke@0 2689 ConcurrentMarkSweepThread::CMS_cms_wants_token);
duke@0 2690 }
duke@0 2691 if (TraceCMSState) {
duke@0 2692 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
duke@0 2693 Thread::current(), _collectorState);
duke@0 2694 }
duke@0 2695 return res;
duke@0 2696 }
duke@0 2697
duke@0 2698 // Because of the need to lock the free lists and other structures in
duke@0 2699 // the collector, common to all the generations that the collector is
duke@0 2700 // collecting, we need the gc_prologues of individual CMS generations
duke@0 2701 // to delegate to their collector. It may have been simpler had the
duke@0 2702 // current infrastructure allowed one to call a prologue on a
duke@0 2703 // collector. In the absence of that we have the generation's
duke@0 2704 // prologue delegate to the collector, which delegates back
duke@0 2705 // some "local" work to a worker method in the individual generations
duke@0 2706 // that it's responsible for collecting, while itself doing any
duke@0 2707 // work common to all generations it's responsible for. A similar
duke@0 2708 // comment applies to the gc_epilogue()'s.
duke@0 2709 // The role of the variable _between_prologue_and_epilogue is to
duke@0 2710 // enforce the invocation protocol.
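// Delegation, in outline:
//   GenCollectedHeap (at a safepoint)
//     --> ConcurrentMarkSweepGeneration::gc_prologue(full)
//       --> CMSCollector::gc_prologue(full)   // claims freelist locks and bitMapLock
//         --> ConcurrentMarkSweepGeneration::gc_prologue_work(full, ...)
// gc_epilogue() mirrors this structure and releases the locks.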
duke@0 2711 void CMSCollector::gc_prologue(bool full) {
coleenp@3602 2712 // Call gc_prologue_work() for the CMSGen
duke@0 2713 // we are responsible for.
duke@0 2714
duke@0 2715 // The following locking discipline assumes that we are only called
duke@0 2716 // when the world is stopped.
duke@0 2717 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
duke@0 2718
duke@0 2719 // The CMSCollector prologue must call the gc_prologues for the
coleenp@3602 2720 // "generations" that it's responsible
duke@0 2721 // for.
duke@0 2722
duke@0 2723 assert( Thread::current()->is_VM_thread()
duke@0 2724 || ( CMSScavengeBeforeRemark
duke@0 2725 && Thread::current()->is_ConcurrentGC_thread()),
duke@0 2726 "Incorrect thread type for prologue execution");
duke@0 2727
duke@0 2728 if (_between_prologue_and_epilogue) {
duke@0 2729 // We have already been invoked; this is a gc_prologue delegation
duke@0 2730 // from yet another CMS generation that we are responsible for, just
duke@0 2731 // ignore it since all relevant work has already been done.
duke@0 2732 return;
duke@0 2733 }
duke@0 2734
duke@0 2735 // set a bit saying prologue has been called; cleared in epilogue
duke@0 2736 _between_prologue_and_epilogue = true;
duke@0 2737 // Claim locks for common data structures, then call gc_prologue_work()
coleenp@3602 2738 // for each CMSGen.
duke@0 2739
duke@0 2740 getFreelistLocks(); // gets free list locks on constituent spaces
duke@0 2741 bitMapLock()->lock_without_safepoint_check();
duke@0 2742
duke@0 2743 // Should call gc_prologue_work() for all cms gens we are responsible for
coleenp@3602 2744 bool duringMarking = _collectorState >= Marking
duke@0 2745 && _collectorState < Sweeping;
coleenp@3602 2746
coleenp@3602 2747 // The young collections clear the modified oops state, which tells if
coleenp@3602 2748 // there are any modified oops in the class. The remark phase also needs
coleenp@3602 2749 // that information. Tell the young collection to save the union of all
coleenp@3602 2750 // modified klasses.
coleenp@3602 2751 if (duringMarking) {
coleenp@3602 2752 _ct->klass_rem_set()->set_accumulate_modified_oops(true);
coleenp@3602 2753 }
coleenp@3602 2754
coleenp@3602 2755 bool registerClosure = duringMarking;
coleenp@3602 2756
jmasa@1753 2757 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
jmasa@1753 2758 &_modUnionClosurePar
duke@0 2759 : &_modUnionClosure;
duke@0 2760 _cmsGen->gc_prologue_work(full, registerClosure, muc);
duke@0 2761
duke@0 2762 if (!full) {
duke@0 2763 stats().record_gc0_begin();
duke@0 2764 }
duke@0 2765 }
duke@0 2766
duke@0 2767 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
jmasa@4465 2768
jmasa@4465 2769 _capacity_at_prologue = capacity();
jmasa@4465 2770 _used_at_prologue = used();
jmasa@4465 2771
duke@0 2772 // Delegate to the CMSCollector, which knows how to coordinate between
duke@0 2773 // this and any other CMS generations that it is responsible for
duke@0 2774 // collecting.
duke@0 2775 collector()->gc_prologue(full);
duke@0 2776 }
duke@0 2777
duke@0 2778 // This is a "private" interface for use by this generation's CMSCollector.
duke@0 2779 // Not to be called directly by any other entity (for instance,
duke@0 2780 // GenCollectedHeap, which calls the "public" gc_prologue method above).
duke@0 2781 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
duke@0 2782 bool registerClosure, ModUnionClosure* modUnionClosure) {
duke@0 2783 assert(!incremental_collection_failed(), "Shouldn't be set yet");
duke@0 2784 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
duke@0 2785 "Should be NULL");
duke@0 2786 if (registerClosure) {
duke@0 2787 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
duke@0 2788 }
duke@0 2789 cmsSpace()->gc_prologue();
duke@0 2790 // Clear stat counters
duke@0 2791 NOT_PRODUCT(
duke@0 2792 assert(_numObjectsPromoted == 0, "check");
duke@0 2793 assert(_numWordsPromoted == 0, "check");
duke@0 2794 if (Verbose && PrintGC) {
duke@0 2795 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
duke@0 2796 SIZE_FORMAT" bytes concurrently",
duke@0 2797 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
duke@0 2798 }
duke@0 2799 _numObjectsAllocated = 0;
duke@0 2800 _numWordsAllocated = 0;
duke@0 2801 )
duke@0 2802 }
duke@0 2803
duke@0 2804 void CMSCollector::gc_epilogue(bool full) {
duke@0 2805 // The following locking discipline assumes that we are only called
duke@0 2806 // when the world is stopped.
duke@0 2807 assert(SafepointSynchronize::is_at_safepoint(),
duke@0 2808 "world is stopped assumption");
duke@0 2809
duke@0 2810 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
duke@0 2811 // if linear allocation blocks need to be appropriately marked to allow the
duke@0 2812 // if linear allocation blocks need to be appropriately marked to allow
duke@0 2813 // CMS collector thread to start a new cycle (if it's not already active).
duke@0 2814 assert( Thread::current()->is_VM_thread()
duke@0 2815 || ( CMSScavengeBeforeRemark
duke@0 2816 && Thread::current()->is_ConcurrentGC_thread()),
duke@0 2817 "Incorrect thread type for epilogue execution");
duke@0 2818
duke@0 2819 if (!_between_prologue_and_epilogue) {
duke@0 2820 // We have already been invoked; this is a gc_epilogue delegation
duke@0 2821 // from yet another CMS generation that we are responsible for, just
duke@0 2822 // ignore it since all relevant work has already been done.
duke@0 2823 return;
duke@0 2824 }
duke@0 2825 assert(haveFreelistLocks(), "must have freelist locks");
duke@0 2826 assert_lock_strong(bitMapLock());
duke@0 2827
coleenp@3602 2828 _ct->klass_rem_set()->set_accumulate_modified_oops(false);
coleenp@3602 2829
duke@0 2830 _cmsGen->gc_epilogue_work(full);
duke@0 2831
duke@0 2832 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
duke@0 2833 // in case sampling was not already enabled, enable it
duke@0 2834 _start_sampling = true;
duke@0 2835 }
duke@0 2836 // reset _eden_chunk_array so sampling starts afresh
duke@0 2837 _eden_chunk_index = 0;
duke@0 2838
duke@0 2839 size_t cms_used = _cmsGen->cmsSpace()->used();
duke@0 2840
duke@0 2841 // update performance counters - this uses a special version of
duke@0 2842 // update_counters() that allows the utilization to be passed as a
duke@0 2843 // parameter, avoiding multiple calls to used().
duke@0 2844 //
duke@0 2845 _cmsGen->update_counters(cms_used);
duke@0 2846
duke@0 2847 if (CMSIncrementalMode) {
duke@0 2848 icms_update_allocation_limits();
duke@0 2849 }
duke@0 2850
duke@0 2851 bitMapLock()->unlock();
duke@0 2852 releaseFreelistLocks();
duke@0 2853
jcoomes@2561 2854 if (!CleanChunkPoolAsync) {
jcoomes@2561 2855 Chunk::clean_chunk_pool();
jcoomes@2561 2856 }
jcoomes@2561 2857
jmasa@4641 2858 set_did_compact(false);
duke@0 2859 _between_prologue_and_epilogue = false; // ready for next cycle
duke@0 2860 }
duke@0 2861
duke@0 2862 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
duke@0 2863 collector()->gc_epilogue(full);
duke@0 2864
duke@0 2865 // Also reset promotion tracking in par gc thread states.
jmasa@1753 2866 if (CollectedHeap::use_parallel_gc_threads()) {
duke@0 2867 for (uint i = 0; i < ParallelGCThreads; i++) {
ysr@1145 2868 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
duke@0 2869 }
duke@0 2870 }
duke@0 2871 }
duke@0 2872
duke@0 2873 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
duke@0 2874 assert(!incremental_collection_failed(), "Should have been cleared");
duke@0 2875 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
duke@0 2876 cmsSpace()->gc_epilogue();
duke@0 2877 // Print stat counters
duke@0 2878 NOT_PRODUCT(
duke@0 2879 assert(_numObjectsAllocated == 0, "check");
duke@0 2880 assert(_numWordsAllocated == 0, "check");
duke@0 2881 if (Verbose && PrintGC) {
duke@0 2882 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
duke@0 2883 SIZE_FORMAT" bytes",
duke@0 2884 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
duke@0 2885 }
duke@0 2886 _numObjectsPromoted = 0;
duke@0 2887 _numWordsPromoted = 0;
duke@0 2888 )
duke@0 2889
duke@0 2890 if (PrintGC && Verbose) {
duke@0 2891 // The call chain under contiguous_available() needs the freelistLock,
duke@0 2892 // so print this out before releasing the freeListLock.
duke@0 2893 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
duke@0 2894 contiguous_available());
duke@0 2895 }
duke@0 2896 }
duke@0 2897
duke@0 2898 #ifndef PRODUCT
duke@0 2899 bool CMSCollector::have_cms_token() {
duke@0 2900 Thread* thr = Thread::current();
duke@0 2901 if (thr->is_VM_thread()) {
duke@0 2902 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
duke@0 2903 } else if (thr->is_ConcurrentGC_thread()) {
duke@0 2904 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
duke@0 2905 } else if (thr->is_GC_task_thread()) {
duke@0 2906 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
duke@0 2907 ParGCRareEvent_lock->owned_by_self();
duke@0 2908 }
duke@0 2909 return false;
duke@0 2910 }
duke@0 2911 #endif
duke@0 2912
duke@0 2913 // Check reachability of the given heap address in CMS generation,
duke@0 2914 // treating all other generations as roots.
duke@0 2915 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
duke@0 2916 // We could "guarantee" below, rather than assert, but I'll
duke@0 2917 // leave these as "asserts" so that an adventurous debugger
duke@0 2918 // could try this in the product build provided some subset of
duke@0 2919 // the conditions were met, they were interested in the
duke@0 2920 // results, and they knew that the computation below wouldn't interfere
duke@0 2921 // with other concurrent computations mutating the structures
duke@0 2922 // being read or written.
duke@0 2923 assert(SafepointSynchronize::is_at_safepoint(),
duke@0 2924 "Else mutations in object graph will make answer suspect");
duke@0 2925 assert(have_cms_token(), "Should hold cms token");
duke@0 2926 assert(haveFreelistLocks(), "must hold free list locks");
duke@0 2927 assert_lock_strong(bitMapLock());
duke@0 2928
duke@0 2929 // Clear the marking bit map array before starting, but, just
duke@0 2930 // for kicks, first report if the given address is already marked
duke@0 2931 gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", addr,
duke@0 2932 _markBitMap.isMarked(addr) ? "" : " not");
duke@0 2933
duke@0 2934 if (verify_after_remark()) {
duke@0 2935 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
duke@0 2936 bool result = verification_mark_bm()->isMarked(addr);
duke@0 2937 gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", addr,
duke@0 2938 result ? "IS" : "is NOT");
duke@0 2939 return result;
duke@0 2940 } else {
duke@0 2941 gclog_or_tty->print_cr("Could not compute result");
duke@0 2942 return false;
duke@0 2943 }
duke@0 2944 }
duke@0 2945
stefank@4469 2946
stefank@4469 2947 void
stefank@4469 2948 CMSCollector::print_on_error(outputStream* st) {
stefank@4469 2949 CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
stefank@4469 2950 if (collector != NULL) {
stefank@4469 2951 CMSBitMap* bitmap = &collector->_markBitMap;
stefank@4469 2952 st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
stefank@4469 2953 bitmap->print_on_error(st, " Bits: ");
stefank@4469 2954
stefank@4469 2955 st->cr();
stefank@4469 2956
stefank@4469 2957 CMSBitMap* mut_bitmap = &collector->_modUnionTable;
stefank@4469 2958 st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
stefank@4469 2959 mut_bitmap->print_on_error(st, " Bits: ");
stefank@4469 2960 }
stefank@4469 2961 }
stefank@4469 2962
duke@0 2963 ////////////////////////////////////////////////////////
duke@0 2964 // CMS Verification Support
duke@0 2965 ////////////////////////////////////////////////////////
duke@0 2966 // Following the remark phase, the following invariant
duke@0 2967 // should hold -- each object in the CMS heap which is
duke@0 2968 // marked in markBitMap() should be marked in the verification_mark_bm().
duke@0 2969
duke@0 2970 class VerifyMarkedClosure: public BitMapClosure {