hotspot/src/share/vm/gc_interface/collectedHeap.cpp @ 1388:3677f5f3d66b

Merge
author   tonyp
date     Thu, 21 Aug 2008 23:36:31 -0400
parents  4c24294029a9 ddf3e9583f2f
children 8ec481b8f514

/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectedHeap.cpp.incl"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

// Memory state functions.

CollectedHeap::CollectedHeap() :
  _reserved(), _barrier_set(NULL), _is_gc_active(false),
  _total_collections(0), _total_full_collections(0),
  _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
}
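
// Note: SUN_GC names the "sun.gc" jvmstat counter namespace, so the two
// counters created above surface as sun.gc.cause and sun.gc.lastCause in
// the jvmstat shared-memory instrumentation, where external monitoring
// tools that consume it (e.g. jstat) can read them.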

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT
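
// Background on the two checks above: in debug builds, ZapUnusedHeapArea
// fills free heap space with the badHeapWordVal pattern. The first check
// runs after an allocation and asserts the returned storage no longer
// carries the pattern (i.e. it was properly cleared); the second runs
// before handing out storage and asserts it still carries the pattern
// (i.e. nothing wrote to it while it was free).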

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

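// This slow path is reached when the bump-pointer fast path in the current
// thread's TLAB cannot satisfy the request. A sketch of the expected
// caller (illustrative only; the actual fast path lives in the allocation
// inline code, not in this file):
//
//   HeapWord* obj = thread->tlab().allocate(size);  // fast path: bump pointer
//   if (obj == NULL) {
//     obj = allocate_from_tlab_slow(thread, size);  // may refill the TLAB
//   }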
HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
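
// A worked example of the retain-vs-refill heuristic above (numbers are
// hypothetical): suppose a TLAB has 2 KB still free and its computed
// refill_waste_limit() is 1 KB. Since 2 KB > 1 KB, retiring the TLAB now
// would waste too much space, so we keep it, record the slow allocation,
// and return NULL -- the caller then allocates this one object directly
// from the shared heap. With only 512 bytes free, the TLAB would instead
// be discarded and replaced by a fresh one sized by compute_size().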

oop CollectedHeap::new_store_barrier(oop new_obj) {
  // %%% This needs refactoring. (It was imported from the server compiler.)
  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
  BarrierSet* bs = this->barrier_set();
  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
  int new_size = new_obj->size();
  bs->write_region(MemRegion((HeapWord*)new_obj, new_size));
  return new_obj;
}
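
// Rationale (as we read it): when the heap can elide the per-field store
// barriers for stores that initialize a freshly allocated object, the
// barrier work is done here in bulk instead -- a single write_region()
// call dirties every card covering the new object, so the remembered set
// still accounts for any potential cross-generation pointers in it.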

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}
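
// This base implementation deliberately fails: a concrete heap that
// supports TLAB allocation is expected to override allocate_new_tlab()
// (and to advertise that support through its TLAB-capability query) so
// that the slow path above never lands here.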

void CollectedHeap::fill_all_tlabs(bool retire) {
  assert(UseTLAB, "should not reach here");
  // See note in ensure_parsability() below.
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only fill tlabs at safepoint");
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    thread->tlab().make_parsable(retire);
  }
}
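
// make_parsable() plugs the unused tail of each thread's TLAB with a
// dummy filler object, so the space between the TLAB's top and end can be
// walked like any other object; with retire == true the TLAB is also
// given up (and its statistics flushed) rather than merely made walkable.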

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created.  Callers should be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  if (UseTLAB) {
    fill_all_tlabs(retire_tlabs);
  }
}
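
// "Parsable" here means the heap can be scanned linearly, object by
// object; live TLAB tails would otherwise appear as unformatted gaps.
// Typical callers are safepoint operations that are about to walk the
// heap -- GC prologues, verification, and heap inspection (this is our
// reading of the call sites; the exact set varies by collector).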

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}
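
// The statistics gathered here before a GC (allocation volume, refill
// waste, and the like) are presumably what drives the per-thread size
// recomputation performed by resize_all_tlabs() below.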

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
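
// Usage note: the two routines above are GC-time companions --
// accumulate_statistics_all_tlabs() runs before the collection and
// resize_all_tlabs() after it, letting each thread's desired TLAB size
// adapt to its observed allocation behavior (resizing is, if we recall
// correctly, gated by the ResizeTLAB flag inside ThreadLocalAllocBuffer).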