annotate src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp @ 453:c96030fff130

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author ysr
date Thu, 20 Nov 2008 16:56:09 -0800
parents 850fdf70db2b
children 27a80744a83b
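
The summary above turns on a one-sided test: at marking time the collector can already prove that some soft references will definitely not be cleared, and only the remaining candidates need a decision during reference processing (see the snap_policy() and process_discovered_references() calls in the source below). The following is a minimal sketch of that idea in isolation; the types, names, and the discover_soft_refs helper are hypothetical illustrations of the technique, not the actual HotSpot ReferencePolicy API.

  #include <cstdint>
  #include <vector>

  // Hypothetical stand-ins for illustration only; the real code below
  // uses ReferenceProcessor and its ReferencePolicy objects instead.
  struct SoftRef {
    uint64_t timestamp;   // clock value of the last strong access
    void*    referent;
  };

  // LRU-style policy, snapshotted once at the start of marking: a soft
  // reference may be cleared only if it has gone unused for at least
  // max_interval clock ticks (assumes timestamp <= clock).
  class LRUSoftRefPolicy {
    uint64_t _clock;
    uint64_t _max_interval;
   public:
    LRUSoftRefPolicy(uint64_t clock, uint64_t max_interval)
      : _clock(clock), _max_interval(max_interval) {}

    bool might_clear(const SoftRef& r) const {
      return _clock - r.timestamp >= _max_interval;
    }
  };

  // Marking only needs the negative answer: a reference the policy will
  // certainly keep can have its referent marked through immediately, so
  // only the remaining candidates are handed to the reference-processing
  // phase, where the final clear/keep decision is made.
  template <typename KeepAlive, typename Defer>
  void discover_soft_refs(const std::vector<SoftRef>& refs,
                          const LRUSoftRefPolicy& policy,
                          KeepAlive keep_alive, Defer defer) {
    for (const SoftRef& r : refs) {
      if (!policy.might_clear(r)) {
        keep_alive(r.referent);   // definitely not cleared: treat as strong
      } else {
        defer(r);                 // decision postponed to ref processing
      }
    }
  }

In the file itself, the policy snapshot corresponds to the reference_processor()->snap_policy(false) calls, and the deferred decisions are resolved by process_discovered_references(). The payoff is largest for concurrent collectors, where marking runs concurrently but reference processing typically does not.
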
rev   line source
duke@0 1 /*
xdono@196 2 * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
duke@0 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 * have any questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25
duke@0 26 # include "incls/_precompiled.incl"
duke@0 27 # include "incls/_psScavenge.cpp.incl"
duke@0 28
duke@0 29 HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
duke@0 30 int PSScavenge::_consecutive_skipped_scavenges = 0;
duke@0 31 ReferenceProcessor* PSScavenge::_ref_processor = NULL;
duke@0 32 CardTableExtension* PSScavenge::_card_table = NULL;
duke@0 33 bool PSScavenge::_survivor_overflow = false;
duke@0 34 int PSScavenge::_tenuring_threshold = 0;
duke@0 35 HeapWord* PSScavenge::_young_generation_boundary = NULL;
duke@0 36 elapsedTimer PSScavenge::_accumulated_time;
duke@0 37 GrowableArray<markOop>* PSScavenge::_preserved_mark_stack = NULL;
duke@0 38 GrowableArray<oop>* PSScavenge::_preserved_oop_stack = NULL;
duke@0 39 CollectorCounters* PSScavenge::_counters = NULL;
duke@0 40
duke@0 41 // Define before use
duke@0 42 class PSIsAliveClosure: public BoolObjectClosure {
duke@0 43 public:
duke@0 44 void do_object(oop p) {
duke@0 45 assert(false, "Do not call.");
duke@0 46 }
duke@0 47 bool do_object_b(oop p) {
duke@0 48 return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
duke@0 49 }
duke@0 50 };
duke@0 51
duke@0 52 PSIsAliveClosure PSScavenge::_is_alive_closure;
duke@0 53
duke@0 54 class PSKeepAliveClosure: public OopClosure {
duke@0 55 protected:
duke@0 56 MutableSpace* _to_space;
duke@0 57 PSPromotionManager* _promotion_manager;
duke@0 58
duke@0 59 public:
duke@0 60 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
duke@0 61 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 62 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 63 _to_space = heap->young_gen()->to_space();
duke@0 64
duke@0 65 assert(_promotion_manager != NULL, "Sanity");
duke@0 66 }
duke@0 67
coleenp@113 68 template <class T> void do_oop_work(T* p) {
coleenp@113 69 assert (!oopDesc::is_null(*p), "expected non-null ref");
coleenp@113 70 assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
coleenp@113 71 "expected an oop while scanning weak refs");
duke@0 72
duke@0 73 // Weak refs may be visited more than once.
coleenp@113 74 if (PSScavenge::should_scavenge(p, _to_space)) {
duke@0 75 PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
duke@0 76 }
duke@0 77 }
coleenp@113 78 virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
coleenp@113 79 virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
duke@0 80 };
duke@0 81
duke@0 82 class PSEvacuateFollowersClosure: public VoidClosure {
duke@0 83 private:
duke@0 84 PSPromotionManager* _promotion_manager;
duke@0 85 public:
duke@0 86 PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
duke@0 87
coleenp@113 88 virtual void do_void() {
duke@0 89 assert(_promotion_manager != NULL, "Sanity");
duke@0 90 _promotion_manager->drain_stacks(true);
duke@0 91 guarantee(_promotion_manager->stacks_empty(),
duke@0 92 "stacks should be empty at this point");
duke@0 93 }
duke@0 94 };
duke@0 95
duke@0 96 class PSPromotionFailedClosure : public ObjectClosure {
duke@0 97 virtual void do_object(oop obj) {
duke@0 98 if (obj->is_forwarded()) {
duke@0 99 obj->init_mark();
duke@0 100 }
duke@0 101 }
duke@0 102 };
duke@0 103
duke@0 104 class PSRefProcTaskProxy: public GCTask {
duke@0 105 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
duke@0 106 ProcessTask & _rp_task;
duke@0 107 uint _work_id;
duke@0 108 public:
duke@0 109 PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
duke@0 110 : _rp_task(rp_task),
duke@0 111 _work_id(work_id)
duke@0 112 { }
duke@0 113
duke@0 114 private:
duke@0 115 virtual char* name() { return (char *)"Process referents by policy in parallel"; }
duke@0 116 virtual void do_it(GCTaskManager* manager, uint which);
duke@0 117 };
duke@0 118
duke@0 119 void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
duke@0 120 {
duke@0 121 PSPromotionManager* promotion_manager =
duke@0 122 PSPromotionManager::gc_thread_promotion_manager(which);
duke@0 123 assert(promotion_manager != NULL, "sanity check");
duke@0 124 PSKeepAliveClosure keep_alive(promotion_manager);
duke@0 125 PSEvacuateFollowersClosure evac_followers(promotion_manager);
duke@0 126 PSIsAliveClosure is_alive;
duke@0 127 _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
duke@0 128 }
duke@0 129
duke@0 130 class PSRefEnqueueTaskProxy: public GCTask {
duke@0 131 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
duke@0 132 EnqueueTask& _enq_task;
duke@0 133 uint _work_id;
duke@0 134
duke@0 135 public:
duke@0 136 PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
duke@0 137 : _enq_task(enq_task),
duke@0 138 _work_id(work_id)
duke@0 139 { }
duke@0 140
duke@0 141 virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
duke@0 142 virtual void do_it(GCTaskManager* manager, uint which)
duke@0 143 {
duke@0 144 _enq_task.work(_work_id);
duke@0 145 }
duke@0 146 };
duke@0 147
duke@0 148 class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
duke@0 149 virtual void execute(ProcessTask& task);
duke@0 150 virtual void execute(EnqueueTask& task);
duke@0 151 };
duke@0 152
duke@0 153 void PSRefProcTaskExecutor::execute(ProcessTask& task)
duke@0 154 {
duke@0 155 GCTaskQueue* q = GCTaskQueue::create();
duke@0 156 for(uint i=0; i<ParallelGCThreads; i++) {
duke@0 157 q->enqueue(new PSRefProcTaskProxy(task, i));
duke@0 158 }
duke@0 159 ParallelTaskTerminator terminator(
duke@0 160 ParallelScavengeHeap::gc_task_manager()->workers(),
duke@0 161 UseDepthFirstScavengeOrder ?
duke@0 162 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
duke@0 163 : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
duke@0 164 if (task.marks_oops_alive() && ParallelGCThreads > 1) {
duke@0 165 for (uint j=0; j<ParallelGCThreads; j++) {
duke@0 166 q->enqueue(new StealTask(&terminator));
duke@0 167 }
duke@0 168 }
duke@0 169 ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
duke@0 170 }
duke@0 171
duke@0 172
duke@0 173 void PSRefProcTaskExecutor::execute(EnqueueTask& task)
duke@0 174 {
duke@0 175 GCTaskQueue* q = GCTaskQueue::create();
duke@0 176 for(uint i=0; i<ParallelGCThreads; i++) {
duke@0 177 q->enqueue(new PSRefEnqueueTaskProxy(task, i));
duke@0 178 }
duke@0 179 ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
duke@0 180 }
duke@0 181
duke@0 182 // This method contains all heap-specific policy for invoking scavenge.
duke@0 183 // PSScavenge::invoke_no_policy() will do nothing but attempt to
duke@0 184 // scavenge. It will not clean up after failed promotions, bail out if
duke@0 185 // we've exceeded policy time limits, or perform any other special behavior.
duke@0 186 // All such policy should be placed here.
duke@0 187 //
duke@0 188 // Note that this method should only be called from the vm_thread while
duke@0 189 // at a safepoint!
duke@0 190 void PSScavenge::invoke()
duke@0 191 {
duke@0 192 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@0 193 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
duke@0 194 assert(!Universe::heap()->is_gc_active(), "not reentrant");
duke@0 195
duke@0 196 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 197 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 198
duke@0 199 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@0 200
duke@0 201 // Before each allocation/collection attempt, find out from the
duke@0 202 // policy object if GCs are, on the whole, taking too long. If so,
duke@0 203 // bail out without attempting a collection.
duke@0 204 if (!policy->gc_time_limit_exceeded()) {
duke@0 205 IsGCActiveMark mark;
duke@0 206
duke@0 207 bool scavenge_was_done = PSScavenge::invoke_no_policy();
duke@0 208
duke@0 209 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
duke@0 210 if (UsePerfData)
duke@0 211 counters->update_full_follows_scavenge(0);
duke@0 212 if (!scavenge_was_done ||
duke@0 213 policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
duke@0 214 if (UsePerfData)
duke@0 215 counters->update_full_follows_scavenge(full_follows_scavenge);
duke@0 216
duke@0 217 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
duke@0 218 if (UseParallelOldGC) {
duke@0 219 PSParallelCompact::invoke_no_policy(false);
duke@0 220 } else {
duke@0 221 PSMarkSweep::invoke_no_policy(false);
duke@0 222 }
duke@0 223 }
duke@0 224 }
duke@0 225 }
duke@0 226
duke@0 227 // This method contains no policy. You should probably
duke@0 228 // be calling invoke() instead.
duke@0 229 bool PSScavenge::invoke_no_policy() {
duke@0 230 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@0 231 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
duke@0 232
duke@0 233 TimeStamp scavenge_entry;
duke@0 234 TimeStamp scavenge_midpoint;
duke@0 235 TimeStamp scavenge_exit;
duke@0 236
duke@0 237 scavenge_entry.update();
duke@0 238
duke@0 239 if (GC_locker::check_active_before_gc()) {
duke@0 240 return false;
duke@0 241 }
duke@0 242
duke@0 243 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 244 GCCause::Cause gc_cause = heap->gc_cause();
duke@0 245 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 246
duke@0 247 // Check for potential problems.
duke@0 248 if (!should_attempt_scavenge()) {
duke@0 249 return false;
duke@0 250 }
duke@0 251
duke@0 252 bool promotion_failure_occurred = false;
duke@0 253
duke@0 254 PSYoungGen* young_gen = heap->young_gen();
duke@0 255 PSOldGen* old_gen = heap->old_gen();
duke@0 256 PSPermGen* perm_gen = heap->perm_gen();
duke@0 257 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
duke@0 258 heap->increment_total_collections();
duke@0 259
duke@0 260 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
duke@0 261
duke@0 262 if ((gc_cause != GCCause::_java_lang_system_gc) ||
duke@0 263 UseAdaptiveSizePolicyWithSystemGC) {
duke@0 264 // Gather the feedback data for eden occupancy.
duke@0 265 young_gen->eden_space()->accumulate_statistics();
duke@0 266 }
duke@0 267
jmasa@263 268 if (ZapUnusedHeapArea) {
jmasa@263 269 // Save information needed to minimize mangling
jmasa@263 270 heap->record_gen_tops_before_GC();
jmasa@263 271 }
jmasa@263 272
duke@0 273 if (PrintHeapAtGC) {
duke@0 274 Universe::print_heap_before_gc();
duke@0 275 }
duke@0 276
duke@0 277 assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
duke@0 278 assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
duke@0 279
duke@0 280 size_t prev_used = heap->used();
duke@0 281 assert(promotion_failed() == false, "Sanity");
duke@0 282
duke@0 283 // Fill in TLABs
duke@0 284 heap->accumulate_statistics_all_tlabs();
duke@0 285 heap->ensure_parsability(true); // retire TLABs
duke@0 286
duke@0 287 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
duke@0 288 HandleMark hm; // Discard invalid handles created during verification
duke@0 289 gclog_or_tty->print(" VerifyBeforeGC:");
duke@0 290 Universe::verify(true);
duke@0 291 }
duke@0 292
duke@0 293 {
duke@0 294 ResourceMark rm;
duke@0 295 HandleMark hm;
duke@0 296
duke@0 297 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
duke@0 298 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
duke@0 299 TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
duke@0 300 TraceCollectorStats tcs(counters());
duke@0 301 TraceMemoryManagerStats tms(false /* not full GC */);
duke@0 302
duke@0 303 if (TraceGen0Time) accumulated_time()->start();
duke@0 304
duke@0 305 // Let the size policy know we're starting
duke@0 306 size_policy->minor_collection_begin();
duke@0 307
duke@0 308 // Verify the object start arrays.
duke@0 309 if (VerifyObjectStartArray &&
duke@0 310 VerifyBeforeGC) {
duke@0 311 old_gen->verify_object_start_array();
duke@0 312 perm_gen->verify_object_start_array();
duke@0 313 }
duke@0 314
duke@0 315 // Verify no unmarked old->young roots
duke@0 316 if (VerifyRememberedSets) {
duke@0 317 CardTableExtension::verify_all_young_refs_imprecise();
duke@0 318 }
duke@0 319
duke@0 320 if (!ScavengeWithObjectsInToSpace) {
duke@0 321 assert(young_gen->to_space()->is_empty(),
duke@0 322 "Attempt to scavenge with live objects in to_space");
jmasa@263 323 young_gen->to_space()->clear(SpaceDecorator::Mangle);
duke@0 324 } else if (ZapUnusedHeapArea) {
duke@0 325 young_gen->to_space()->mangle_unused_area();
duke@0 326 }
duke@0 327 save_to_space_top_before_gc();
duke@0 328
duke@0 329 NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
duke@0 330 COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@0 331
duke@0 332 reference_processor()->enable_discovery();
ysr@453 333 reference_processor()->snap_policy(false);
duke@0 334
duke@0 335 // We track how much was promoted to the next generation for
duke@0 336 // the AdaptiveSizePolicy.
duke@0 337 size_t old_gen_used_before = old_gen->used_in_bytes();
duke@0 338
duke@0 339 // For PrintGCDetails
duke@0 340 size_t young_gen_used_before = young_gen->used_in_bytes();
duke@0 341
duke@0 342 // Reset our survivor overflow.
duke@0 343 set_survivor_overflow(false);
duke@0 344
duke@0 345 // We need to save the old/perm top values before
duke@0 346 // creating the promotion_manager. We pass the top
duke@0 347 // values to the card_table, to prevent it from
duke@0 348 // straying into the promotion labs.
duke@0 349 HeapWord* old_top = old_gen->object_space()->top();
duke@0 350 HeapWord* perm_top = perm_gen->object_space()->top();
duke@0 351
duke@0 352 // Release all previously held resources
duke@0 353 gc_task_manager()->release_all_resources();
duke@0 354
duke@0 355 PSPromotionManager::pre_scavenge();
duke@0 356
duke@0 357 // We'll use the promotion manager again later.
duke@0 358 PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
duke@0 359 {
duke@0 360 // TraceTime("Roots");
duke@0 361
duke@0 362 GCTaskQueue* q = GCTaskQueue::create();
duke@0 363
duke@0 364 for(uint i=0; i<ParallelGCThreads; i++) {
duke@0 365 q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
duke@0 366 }
duke@0 367
duke@0 368 q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));
duke@0 369
duke@0 370 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
duke@0 371 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
duke@0 372 // We scan the thread roots in parallel
duke@0 373 Threads::create_thread_roots_tasks(q);
duke@0 374 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
duke@0 375 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
duke@0 376 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
duke@0 377 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
duke@0 378 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
duke@0 379
duke@0 380 ParallelTaskTerminator terminator(
duke@0 381 gc_task_manager()->workers(),
duke@0 382 promotion_manager->depth_first() ?
duke@0 383 (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
duke@0 384 : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
duke@0 385 if (ParallelGCThreads>1) {
duke@0 386 for (uint j=0; j<ParallelGCThreads; j++) {
duke@0 387 q->enqueue(new StealTask(&terminator));
duke@0 388 }
duke@0 389 }
duke@0 390
duke@0 391 gc_task_manager()->execute_and_wait(q);
duke@0 392 }
duke@0 393
duke@0 394 scavenge_midpoint.update();
duke@0 395
duke@0 396 // Process reference objects discovered during scavenge
duke@0 397 {
ysr@453 398 reference_processor()->snap_policy(false); // not always_clear
duke@0 399 PSKeepAliveClosure keep_alive(promotion_manager);
duke@0 400 PSEvacuateFollowersClosure evac_followers(promotion_manager);
duke@0 401 if (reference_processor()->processing_is_mt()) {
duke@0 402 PSRefProcTaskExecutor task_executor;
duke@0 403 reference_processor()->process_discovered_references(
ysr@453 404 &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
duke@0 405 } else {
duke@0 406 reference_processor()->process_discovered_references(
ysr@453 407 &_is_alive_closure, &keep_alive, &evac_followers, NULL);
duke@0 408 }
duke@0 409 }
duke@0 410
duke@0 411 // Enqueue reference objects discovered during scavenge.
duke@0 412 if (reference_processor()->processing_is_mt()) {
duke@0 413 PSRefProcTaskExecutor task_executor;
duke@0 414 reference_processor()->enqueue_discovered_references(&task_executor);
duke@0 415 } else {
duke@0 416 reference_processor()->enqueue_discovered_references(NULL);
duke@0 417 }
duke@0 418
duke@0 419 // Finally, flush the promotion_manager's labs, and deallocate its stacks.
duke@0 420 assert(promotion_manager->claimed_stack_empty(), "Sanity");
duke@0 421 PSPromotionManager::post_scavenge();
duke@0 422
duke@0 423 promotion_failure_occurred = promotion_failed();
duke@0 424 if (promotion_failure_occurred) {
duke@0 425 clean_up_failed_promotion();
duke@0 426 if (PrintGC) {
duke@0 427 gclog_or_tty->print("--");
duke@0 428 }
duke@0 429 }
duke@0 430
duke@0 431 // Let the size policy know we're done. Note that we count promotion
duke@0 432 // failure cleanup time as part of the collection (otherwise, we're
duke@0 433 // implicitly saying it's mutator time).
duke@0 434 size_policy->minor_collection_end(gc_cause);
duke@0 435
duke@0 436 if (!promotion_failure_occurred) {
duke@0 437 // Swap the survivor spaces.
jmasa@263 438
jmasa@263 439
jmasa@263 440 young_gen->eden_space()->clear(SpaceDecorator::Mangle);
jmasa@263 441 young_gen->from_space()->clear(SpaceDecorator::Mangle);
duke@0 442 young_gen->swap_spaces();
duke@0 443
duke@0 444 size_t survived = young_gen->from_space()->used_in_bytes();
duke@0 445 size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
duke@0 446 size_policy->update_averages(_survivor_overflow, survived, promoted);
duke@0 447
duke@0 448 if (UseAdaptiveSizePolicy) {
duke@0 449 // Calculate the new survivor size and tenuring threshold
duke@0 450
duke@0 451 if (PrintAdaptiveSizePolicy) {
duke@0 452 gclog_or_tty->print("AdaptiveSizeStart: ");
duke@0 453 gclog_or_tty->stamp();
duke@0 454 gclog_or_tty->print_cr(" collection: %d ",
duke@0 455 heap->total_collections());
duke@0 456
duke@0 457 if (Verbose) {
duke@0 458 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
duke@0 459 " perm_gen_capacity: %d ",
duke@0 460 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
duke@0 461 perm_gen->capacity_in_bytes());
duke@0 462 }
duke@0 463 }
duke@0 464
duke@0 465
duke@0 466 if (UsePerfData) {
duke@0 467 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
duke@0 468 counters->update_old_eden_size(
duke@0 469 size_policy->calculated_eden_size_in_bytes());
duke@0 470 counters->update_old_promo_size(
duke@0 471 size_policy->calculated_promo_size_in_bytes());
duke@0 472 counters->update_old_capacity(old_gen->capacity_in_bytes());
duke@0 473 counters->update_young_capacity(young_gen->capacity_in_bytes());
duke@0 474 counters->update_survived(survived);
duke@0 475 counters->update_promoted(promoted);
duke@0 476 counters->update_survivor_overflowed(_survivor_overflow);
duke@0 477 }
duke@0 478
duke@0 479 size_t survivor_limit =
duke@0 480 size_policy->max_survivor_size(young_gen->max_size());
duke@0 481 _tenuring_threshold =
duke@0 482 size_policy->compute_survivor_space_size_and_threshold(
duke@0 483 _survivor_overflow,
duke@0 484 _tenuring_threshold,
duke@0 485 survivor_limit);
duke@0 486
duke@0 487 if (PrintTenuringDistribution) {
duke@0 488 gclog_or_tty->cr();
duke@0 489 gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
duke@0 490 size_policy->calculated_survivor_size_in_bytes(),
duke@0 491 _tenuring_threshold, MaxTenuringThreshold);
duke@0 492 }
duke@0 493
duke@0 494 if (UsePerfData) {
duke@0 495 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
duke@0 496 counters->update_tenuring_threshold(_tenuring_threshold);
duke@0 497 counters->update_survivor_size_counters();
duke@0 498 }
duke@0 499
duke@0 500 // Do call at minor collections?
duke@0 501 // Don't check if the size_policy is ready at this
duke@0 502 // level. Let the size_policy check that internally.
duke@0 503 if (UseAdaptiveSizePolicy &&
duke@0 504 UseAdaptiveGenerationSizePolicyAtMinorCollection &&
duke@0 505 ((gc_cause != GCCause::_java_lang_system_gc) ||
duke@0 506 UseAdaptiveSizePolicyWithSystemGC)) {
duke@0 507
duke@0 508 // Calculate optimal free space amounts
duke@0 509 assert(young_gen->max_size() >
duke@0 510 young_gen->from_space()->capacity_in_bytes() +
duke@0 511 young_gen->to_space()->capacity_in_bytes(),
duke@0 512 "Sizes of space in young gen are out-of-bounds");
duke@0 513 size_t max_eden_size = young_gen->max_size() -
duke@0 514 young_gen->from_space()->capacity_in_bytes() -
duke@0 515 young_gen->to_space()->capacity_in_bytes();
duke@0 516 size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
duke@0 517 young_gen->eden_space()->used_in_bytes(),
duke@0 518 old_gen->used_in_bytes(),
duke@0 519 perm_gen->used_in_bytes(),
duke@0 520 young_gen->eden_space()->capacity_in_bytes(),
duke@0 521 old_gen->max_gen_size(),
duke@0 522 max_eden_size,
duke@0 523 false /* full gc*/,
duke@0 524 gc_cause);
duke@0 525
duke@0 526 }
duke@0 527 // Resize the young generation at every collection
duke@0 528 // even if new sizes have not been calculated. This is
duke@0 529 // to allow resizes that may have been inhibited by the
duke@0 530 // relative location of the "to" and "from" spaces.
duke@0 531
duke@0 532 // Resizing the old gen at minor collects can cause increases
duke@0 533 // that don't feed back to the generation sizing policy until
duke@0 534 // a major collection. Don't resize the old gen here.
duke@0 535
duke@0 536 heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
duke@0 537 size_policy->calculated_survivor_size_in_bytes());
duke@0 538
duke@0 539 if (PrintAdaptiveSizePolicy) {
duke@0 540 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
duke@0 541 heap->total_collections());
duke@0 542 }
duke@0 543 }
duke@0 544
duke@0 545 // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining
duke@0 546 // can change the heap layout. Make sure eden is reshaped if that is the case.
duke@0 547 // Also, update() will cause adaptive NUMA chunk resizing.
duke@0 548 assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
duke@0 549 young_gen->eden_space()->update();
duke@0 550
duke@0 551 heap->gc_policy_counters()->update_counters();
duke@0 552
duke@0 553 heap->resize_all_tlabs();
duke@0 554
duke@0 555 assert(young_gen->to_space()->is_empty(), "to space should be empty now");
duke@0 556 }
duke@0 557
duke@0 558 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
duke@0 559
duke@0 560 NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
duke@0 561
duke@0 562 // Re-verify object start arrays
duke@0 563 if (VerifyObjectStartArray &&
duke@0 564 VerifyAfterGC) {
duke@0 565 old_gen->verify_object_start_array();
duke@0 566 perm_gen->verify_object_start_array();
duke@0 567 }
duke@0 568
duke@0 569 // Verify all old -> young cards are now precise
duke@0 570 if (VerifyRememberedSets) {
duke@0 571 // Precise verification will give false positives. Until this is fixed,
duke@0 572 // use imprecise verification.
duke@0 573 // CardTableExtension::verify_all_young_refs_precise();
duke@0 574 CardTableExtension::verify_all_young_refs_imprecise();
duke@0 575 }
duke@0 576
duke@0 577 if (TraceGen0Time) accumulated_time()->stop();
duke@0 578
duke@0 579 if (PrintGC) {
duke@0 580 if (PrintGCDetails) {
duke@0 581 // Don't print a GC timestamp here. This is after the GC so
duke@0 582 // would be confusing.
duke@0 583 young_gen->print_used_change(young_gen_used_before);
duke@0 584 }
duke@0 585 heap->print_heap_change(prev_used);
duke@0 586 }
duke@0 587
duke@0 588 // Track memory usage and detect low memory
duke@0 589 MemoryService::track_memory_usage();
duke@0 590 heap->update_counters();
duke@0 591 }
duke@0 592
duke@0 593 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
duke@0 594 HandleMark hm; // Discard invalid handles created during verification
duke@0 595 gclog_or_tty->print(" VerifyAfterGC:");
duke@0 596 Universe::verify(false);
duke@0 597 }
duke@0 598
duke@0 599 if (PrintHeapAtGC) {
duke@0 600 Universe::print_heap_after_gc();
duke@0 601 }
duke@0 602
jmasa@263 603 if (ZapUnusedHeapArea) {
jmasa@263 604 young_gen->eden_space()->check_mangled_unused_area_complete();
jmasa@263 605 young_gen->from_space()->check_mangled_unused_area_complete();
jmasa@263 606 young_gen->to_space()->check_mangled_unused_area_complete();
jmasa@263 607 }
jmasa@263 608
duke@0 609 scavenge_exit.update();
duke@0 610
duke@0 611 if (PrintGCTaskTimeStamps) {
duke@0 612 tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
duke@0 613 scavenge_entry.ticks(), scavenge_midpoint.ticks(),
duke@0 614 scavenge_exit.ticks());
duke@0 615 gc_task_manager()->print_task_time_stamps();
duke@0 616 }
duke@0 617
duke@0 618 return !promotion_failure_occurred;
duke@0 619 }
duke@0 620
duke@0 621 // This method iterates over all objects in the young generation,
duke@0 622 // unforwarding markOops. It then restores any preserved mark oops,
duke@0 623 // and clears the _preserved_mark_stack.
duke@0 624 void PSScavenge::clean_up_failed_promotion() {
duke@0 625 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 626 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 627 assert(promotion_failed(), "Sanity");
duke@0 628
duke@0 629 PSYoungGen* young_gen = heap->young_gen();
duke@0 630
duke@0 631 {
duke@0 632 ResourceMark rm;
duke@0 633
duke@0 634 // Unforward all pointers in the young gen.
duke@0 635 PSPromotionFailedClosure unforward_closure;
duke@0 636 young_gen->object_iterate(&unforward_closure);
duke@0 637
duke@0 638 if (PrintGC && Verbose) {
duke@0 639 gclog_or_tty->print_cr("Restoring %d marks",
duke@0 640 _preserved_oop_stack->length());
duke@0 641 }
duke@0 642
duke@0 643 // Restore any saved marks.
duke@0 644 for (int i=0; i < _preserved_oop_stack->length(); i++) {
duke@0 645 oop obj = _preserved_oop_stack->at(i);
duke@0 646 markOop mark = _preserved_mark_stack->at(i);
duke@0 647 obj->set_mark(mark);
duke@0 648 }
duke@0 649
duke@0 650 // Deallocate the preserved mark and oop stacks.
duke@0 651 // The stacks were allocated as CHeap objects, so
duke@0 652 // we must call delete to prevent memory leaks.
duke@0 653 delete _preserved_mark_stack;
duke@0 654 _preserved_mark_stack = NULL;
duke@0 655 delete _preserved_oop_stack;
duke@0 656 _preserved_oop_stack = NULL;
duke@0 657 }
duke@0 658
duke@0 659 // Reset the PromotionFailureALot counters.
duke@0 660 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
duke@0 661 }
duke@0 662
duke@0 663 // This method is called whenever an attempt to promote an object
duke@0 664 // fails. Some markOops will need preserving, some will not. Note
duke@0 665 // that the entire eden is traversed after a failed promotion, with
duke@0 666 // all forwarded headers replaced by the default markOop. This means
duke@0 667 // it is not necessary to preserve most markOops.
duke@0 668 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
duke@0 669 if (_preserved_mark_stack == NULL) {
duke@0 670 ThreadCritical tc; // Lock and retest
duke@0 671 if (_preserved_mark_stack == NULL) {
duke@0 672 assert(_preserved_oop_stack == NULL, "Sanity");
duke@0 673 _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
duke@0 674 _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
duke@0 675 }
duke@0 676 }
duke@0 677
duke@0 678 // Because we must hold the ThreadCritical lock before using
duke@0 679 // the stacks, we should be safe from observing partial allocations,
duke@0 680 // which are also guarded by the ThreadCritical lock.
duke@0 681 if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
duke@0 682 ThreadCritical tc;
duke@0 683 _preserved_oop_stack->push(obj);
duke@0 684 _preserved_mark_stack->push(obj_mark);
duke@0 685 }
duke@0 686 }
duke@0 687
duke@0 688 bool PSScavenge::should_attempt_scavenge() {
duke@0 689 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 690 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 691 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
duke@0 692
duke@0 693 if (UsePerfData) {
duke@0 694 counters->update_scavenge_skipped(not_skipped);
duke@0 695 }
duke@0 696
duke@0 697 PSYoungGen* young_gen = heap->young_gen();
duke@0 698 PSOldGen* old_gen = heap->old_gen();
duke@0 699
duke@0 700 if (!ScavengeWithObjectsInToSpace) {
duke@0 701 // Do not attempt to promote unless to_space is empty
duke@0 702 if (!young_gen->to_space()->is_empty()) {
duke@0 703 _consecutive_skipped_scavenges++;
duke@0 704 if (UsePerfData) {
duke@0 705 counters->update_scavenge_skipped(to_space_not_empty);
duke@0 706 }
duke@0 707 return false;
duke@0 708 }
duke@0 709 }
duke@0 710
duke@0 711 // Test to see if the scavenge will likely fail.
duke@0 712 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@0 713
duke@0 714 // A similar test is done in the policy's should_full_GC(). If this is
duke@0 715 // changed, decide if that test should also be changed.
duke@0 716 size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
duke@0 717 size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
duke@0 718 bool result = promotion_estimate < old_gen->free_in_bytes();
duke@0 719
duke@0 720 if (PrintGCDetails && Verbose) {
duke@0 721 gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
duke@0 722 gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
duke@0 723 " padded_average_promoted " SIZE_FORMAT
duke@0 724 " free in old gen " SIZE_FORMAT,
duke@0 725 (size_t) policy->average_promoted_in_bytes(),
duke@0 726 (size_t) policy->padded_average_promoted_in_bytes(),
duke@0 727 old_gen->free_in_bytes());
duke@0 728 if (young_gen->used_in_bytes() <
duke@0 729 (size_t) policy->padded_average_promoted_in_bytes()) {
duke@0 730 gclog_or_tty->print_cr(" padded_promoted_average is greater"
duke@0 731 " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
duke@0 732 }
duke@0 733 }
duke@0 734
duke@0 735 if (result) {
duke@0 736 _consecutive_skipped_scavenges = 0;
duke@0 737 } else {
duke@0 738 _consecutive_skipped_scavenges++;
duke@0 739 if (UsePerfData) {
duke@0 740 counters->update_scavenge_skipped(promoted_too_large);
duke@0 741 }
duke@0 742 }
duke@0 743 return result;
duke@0 744 }
duke@0 745
duke@0 746 // Used to add tasks
duke@0 747 GCTaskManager* const PSScavenge::gc_task_manager() {
duke@0 748 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
duke@0 749 "shouldn't return NULL");
duke@0 750 return ParallelScavengeHeap::gc_task_manager();
duke@0 751 }
duke@0 752
duke@0 753 void PSScavenge::initialize() {
duke@0 754 // Arguments must have been parsed
duke@0 755
duke@0 756 if (AlwaysTenure) {
duke@0 757 _tenuring_threshold = 0;
duke@0 758 } else if (NeverTenure) {
duke@0 759 _tenuring_threshold = markOopDesc::max_age + 1;
duke@0 760 } else {
duke@0 761 // We want to smooth out our startup times for the AdaptiveSizePolicy
duke@0 762 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
duke@0 763 MaxTenuringThreshold;
duke@0 764 }
duke@0 765
duke@0 766 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@0 767 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@0 768
duke@0 769 PSYoungGen* young_gen = heap->young_gen();
duke@0 770 PSOldGen* old_gen = heap->old_gen();
duke@0 771 PSPermGen* perm_gen = heap->perm_gen();
duke@0 772
duke@0 773 // Set boundary between young_gen and old_gen
duke@0 774 assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
duke@0 775 "perm above old");
duke@0 776 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
duke@0 777 "old above young");
duke@0 778 _young_generation_boundary = young_gen->eden_space()->bottom();
duke@0 779
duke@0 780 // Initialize ref handling object for scavenging.
duke@0 781 MemRegion mr = young_gen->reserved();
duke@0 782 _ref_processor = ReferenceProcessor::create_ref_processor(
duke@0 783 mr, // span
duke@0 784 true, // atomic_discovery
duke@0 785 true, // mt_discovery
duke@0 786 NULL, // is_alive_non_header
duke@0 787 ParallelGCThreads,
duke@0 788 ParallelRefProcEnabled);
duke@0 789
duke@0 790 // Cache the cardtable
duke@0 791 BarrierSet* bs = Universe::heap()->barrier_set();
duke@0 792 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
duke@0 793 _card_table = (CardTableExtension*)bs;
duke@0 794
duke@0 795 _counters = new CollectorCounters("PSScavenge", 0);
duke@0 796 }