annotate src/share/vm/gc_interface/collectedHeap.inline.hpp @ 1995:b1a2afa37ec4

7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
Summary: Track allocated bytes in Threads, update on TLAB retirement and direct allocation in Eden and tenured, add JNI methods for ThreadMXBean.
Reviewed-by: coleenp, kvn, dholmes, ysr
author phh
date Fri, 07 Jan 2011 10:42:32 -0500
parents f95d63e2154a
children
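
The per-thread tracking added by this changeset appears in this file as a single new call, THREAD->incr_allocated_bytes(size * HeapWordSize), in common_mem_allocate_noinit() below. Here is a minimal sketch of the Thread-side counter that call relies on; the real declaration lives in runtime/thread.hpp, and the field and accessor spellings here are assumptions based on the changeset summary, not copies of that file:

    // Hypothetical sketch only -- not the actual thread.hpp declaration.
    class Thread {
      // ...
      jlong _allocated_bytes;   // cumulative Java heap bytes allocated by this thread
     public:
      jlong allocated_bytes() const          { return _allocated_bytes; }
      void  incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
    };

Bumping this counter on every allocation that bypasses the TLAB, plus a bulk credit when a TLAB is retired, is what lets the new ThreadMXBean methods report per-thread totals without walking the heap.
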
rev   line source
duke@0 1 /*
phh@1995 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1885 25 #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
stefank@1885 26 #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
stefank@1885 27
stefank@1885 28 #include "gc_interface/collectedHeap.hpp"
stefank@1885 29 #include "memory/threadLocalAllocBuffer.inline.hpp"
stefank@1885 30 #include "memory/universe.hpp"
stefank@1885 31 #include "oops/arrayOop.hpp"
stefank@1885 32 #include "prims/jvmtiExport.hpp"
stefank@1885 33 #include "runtime/sharedRuntime.hpp"
stefank@1885 34 #include "runtime/thread.hpp"
stefank@1885 35 #include "services/lowMemoryDetector.hpp"
stefank@1885 36 #include "utilities/copy.hpp"
stefank@1885 37 #ifdef TARGET_OS_FAMILY_linux
stefank@1885 38 # include "thread_linux.inline.hpp"
stefank@1885 39 #endif
stefank@1885 40 #ifdef TARGET_OS_FAMILY_solaris
stefank@1885 41 # include "thread_solaris.inline.hpp"
stefank@1885 42 #endif
stefank@1885 43 #ifdef TARGET_OS_FAMILY_windows
stefank@1885 44 # include "thread_windows.inline.hpp"
stefank@1885 45 #endif
stefank@1885 46
duke@0 47 // Inline allocation implementations.
duke@0 48
duke@0 49 void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
duke@0 50 HeapWord* obj,
duke@0 51 size_t size) {
duke@0 52 post_allocation_setup_no_klass_install(klass, obj, size);
duke@0 53 post_allocation_install_obj_klass(klass, oop(obj), (int) size);
duke@0 54 }
duke@0 55
duke@0 56 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
duke@0 57 HeapWord* objPtr,
duke@0 58 size_t size) {
duke@0 59 oop obj = (oop)objPtr;
duke@0 60
duke@0 61 assert(obj != NULL, "NULL object pointer");
duke@0 62 if (UseBiasedLocking && (klass() != NULL)) {
duke@0 63 obj->set_mark(klass->prototype_header());
duke@0 64 } else {
duke@0 65 // May be bootstrapping
duke@0 66 obj->set_mark(markOopDesc::prototype());
duke@0 67 }
duke@0 68 }
duke@0 69
duke@0 70 void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
duke@0 71 oop obj,
duke@0 72 int size) {
duke@0 73 // These asserts are kind of complicated because of klassKlass
duke@0 74 // and the beginning of the world.
duke@0 75 assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
duke@0 76 assert(klass() == NULL || klass()->is_klass(), "not a klass");
duke@0 77 assert(klass() == NULL || klass()->klass_part() != NULL, "not a klass");
duke@0 78 assert(obj != NULL, "NULL object pointer");
duke@0 79 obj->set_klass(klass());
duke@0 80 assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
duke@0 81 "missing blueprint");
coleenp@113 82 }
duke@0 83
coleenp@113 84 // Support for jvmti and dtrace
coleenp@113 85 inline void post_allocation_notify(KlassHandle klass, oop obj) {
jcoomes@481 86 // support low memory notifications (no-op if not enabled)
jcoomes@481 87 LowMemoryDetector::detect_low_memory_for_collected_pools();
jcoomes@481 88
duke@0 89 // support for JVMTI VMObjectAlloc event (no-op if not enabled)
duke@0 90 JvmtiExport::vm_object_alloc_event_collector(obj);
duke@0 91
duke@0 92 if (DTraceAllocProbes) {
duke@0 93 // support for DTrace object alloc event (no-op most of the time)
duke@0 94 if (klass() != NULL && klass()->klass_part()->name() != NULL) {
duke@0 95 SharedRuntime::dtrace_object_alloc(obj);
duke@0 96 }
duke@0 97 }
duke@0 98 }
duke@0 99
duke@0 100 void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
duke@0 101 HeapWord* obj,
duke@0 102 size_t size) {
duke@0 103 post_allocation_setup_common(klass, obj, size);
duke@0 104 assert(Universe::is_bootstrapping() ||
duke@0 105 !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
coleenp@113 106 // notify jvmti and dtrace
coleenp@113 107 post_allocation_notify(klass, (oop)obj);
duke@0 108 }
duke@0 109
duke@0 110 void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
duke@0 111 HeapWord* obj,
duke@0 112 size_t size,
duke@0 113 int length) {
coleenp@167 114 // Set array length before setting the _klass field
coleenp@167 115 // in post_allocation_setup_common() because the klass field
coleenp@167 116 // indicates that the object is parsable by concurrent GC.
duke@0 117 assert(length >= 0, "length should be non-negative");
coleenp@167 118 ((arrayOop)obj)->set_length(length);
coleenp@113 119 post_allocation_setup_common(klass, obj, size);
duke@0 120 assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
coleenp@113 121 // notify jvmti and dtrace (must be after length is set for dtrace)
coleenp@113 122 post_allocation_notify(klass, (oop)obj);
duke@0 123 }
duke@0 124
duke@0 125 HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
duke@0 126
duke@0 127 // Clear unhandled oops for memory allocation. Memory allocation might
duke@0 128 // not take out a lock if it is satisfied from the TLAB, so clear them here.
duke@0 129 CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
duke@0 130
duke@0 131 if (HAS_PENDING_EXCEPTION) {
duke@0 132 NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
duke@0 133 return NULL; // caller does a CHECK_0 too
duke@0 134 }
duke@0 135
duke@0 136 // We may want to revisit this: is_noref objects might not be allocated in TLABs.
duke@0 137 HeapWord* result = NULL;
duke@0 138 if (UseTLAB) {
duke@0 139 result = CollectedHeap::allocate_from_tlab(THREAD, size);
duke@0 140 if (result != NULL) {
duke@0 141 assert(!HAS_PENDING_EXCEPTION,
duke@0 142 "Unexpected exception, will result in uninitialized storage");
duke@0 143 return result;
duke@0 144 }
duke@0 145 }
ysr@342 146 bool gc_overhead_limit_was_exceeded = false;
duke@0 147 result = Universe::heap()->mem_allocate(size,
duke@0 148 is_noref,
duke@0 149 false,
duke@0 150 &gc_overhead_limit_was_exceeded);
duke@0 151 if (result != NULL) {
duke@0 152 NOT_PRODUCT(Universe::heap()->
duke@0 153 check_for_non_bad_heap_word_value(result, size));
duke@0 154 assert(!HAS_PENDING_EXCEPTION,
duke@0 155 "Unexpected exception, will result in uninitialized storage");
phh@1995 156 THREAD->incr_allocated_bytes(size * HeapWordSize);
duke@0 157 return result;
duke@0 158 }
duke@0 159
duke@0 160
duke@0 161 if (!gc_overhead_limit_was_exceeded) {
duke@0 162 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
duke@0 163 report_java_out_of_memory("Java heap space");
duke@0 164
duke@0 165 if (JvmtiExport::should_post_resource_exhausted()) {
duke@0 166 JvmtiExport::post_resource_exhausted(
duke@0 167 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
duke@0 168 "Java heap space");
duke@0 169 }
duke@0 170
duke@0 171 THROW_OOP_0(Universe::out_of_memory_error_java_heap());
duke@0 172 } else {
duke@0 173 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
duke@0 174 report_java_out_of_memory("GC overhead limit exceeded");
duke@0 175
duke@0 176 if (JvmtiExport::should_post_resource_exhausted()) {
duke@0 177 JvmtiExport::post_resource_exhausted(
duke@0 178 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
duke@0 179 "GC overhead limit exceeded");
duke@0 180 }
duke@0 181
duke@0 182 THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
duke@0 183 }
duke@0 184 }
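
Editorial note: the incr_allocated_bytes() call above runs only on the shared-heap path, so a thread's stored total excludes whatever currently sits in its unretired TLAB; those bytes are credited in bulk at retirement (see the changeset summary). A hedged sketch of how an up-to-date figure could be assembled, assuming the allocated_bytes() accessor sketched in the note above and ThreadLocalAllocBuffer::used(), which returns HeapWords:

    // Illustrative helper, not part of the changeset: approximate the
    // thread's live allocation total, including the unretired TLAB.
    static inline jlong estimated_allocated_bytes(Thread* thr) {
      jlong total = thr->allocated_bytes();            // retired TLABs + direct heap allocations
      if (UseTLAB) {
        total += thr->tlab().used() * HeapWordSize;    // live TLAB contents, not yet credited
      }
      return total;
    }
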
duke@0 185
duke@0 186 HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
duke@0 187 HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
duke@0 188 init_obj(obj, size);
duke@0 189 return obj;
duke@0 190 }
duke@0 191
duke@0 192 // Need to investigate: do we really want to throw an OOM exception here?
duke@0 193 HeapWord* CollectedHeap::common_permanent_mem_allocate_noinit(size_t size, TRAPS) {
duke@0 194 if (HAS_PENDING_EXCEPTION) {
duke@0 195 NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
duke@0 196 return NULL; // caller does a CHECK_NULL too
duke@0 197 }
duke@0 198
duke@0 199 #ifdef ASSERT
duke@0 200 if (CIFireOOMAt > 0 && THREAD->is_Compiler_thread() &&
duke@0 201 ++_fire_out_of_memory_count >= CIFireOOMAt) {
duke@0 202 // For testing of OOM handling in the CI, throw an OOM and see how
duke@0 203 // it is handled. Historically, improper handling of these has resulted
duke@0 204 // in crashes which we really don't want to have in the CI.
duke@0 205 THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
duke@0 206 }
duke@0 207 #endif
duke@0 208
duke@0 209 HeapWord* result = Universe::heap()->permanent_mem_allocate(size);
duke@0 210 if (result != NULL) {
duke@0 211 NOT_PRODUCT(Universe::heap()->
duke@0 212 check_for_non_bad_heap_word_value(result, size));
duke@0 213 assert(!HAS_PENDING_EXCEPTION,
duke@0 214 "Unexpected exception, will result in uninitialized storage");
duke@0 215 return result;
duke@0 216 }
duke@0 217 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
duke@0 218 report_java_out_of_memory("PermGen space");
duke@0 219
duke@0 220 if (JvmtiExport::should_post_resource_exhausted()) {
duke@0 221 JvmtiExport::post_resource_exhausted(
duke@0 222 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
duke@0 223 "PermGen space");
duke@0 224 }
duke@0 225
duke@0 226 THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
duke@0 227 }
duke@0 228
duke@0 229 HeapWord* CollectedHeap::common_permanent_mem_allocate_init(size_t size, TRAPS) {
duke@0 230 HeapWord* obj = common_permanent_mem_allocate_noinit(size, CHECK_NULL);
duke@0 231 init_obj(obj, size);
duke@0 232 return obj;
duke@0 233 }
duke@0 234
duke@0 235 HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
duke@0 236 assert(UseTLAB, "should use UseTLAB");
duke@0 237
duke@0 238 HeapWord* obj = thread->tlab().allocate(size);
duke@0 239 if (obj != NULL) {
duke@0 240 return obj;
duke@0 241 }
duke@0 242 // Otherwise...
duke@0 243 return allocate_from_tlab_slow(thread, size);
duke@0 244 }
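
The TLAB fast path above deliberately does not touch the new counter; per the changeset summary, TLAB allocations are accounted for only when the TLAB is retired. Conceptually, the retirement-time credit looks something like the sketch below. Thread::tlab(), ThreadLocalAllocBuffer::used() (in HeapWords) and incr_allocated_bytes() are existing or newly added interfaces; the helper itself and its placement are illustrative assumptions, since the real hook lives in memory/threadLocalAllocBuffer.*:

    // Illustrative only: credit the bytes of a TLAB that is about to be
    // retired to its owning thread's cumulative counter.
    static inline void credit_retired_tlab(Thread* thr) {
      thr->incr_allocated_bytes(thr->tlab().used() * HeapWordSize);
      // The real retirement code then resets start/top/end and fills the
      // unused tail so the heap remains parsable.
    }
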
duke@0 245
duke@0 246 void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
duke@0 247 assert(obj != NULL, "cannot initialize NULL object");
duke@0 248 const size_t hs = oopDesc::header_size();
duke@0 249 assert(size >= hs, "unexpected object size");
coleenp@167 250 ((oop)obj)->set_klass_gap(0);
duke@0 251 Copy::fill_to_aligned_words(obj + hs, size - hs);
duke@0 252 }
duke@0 253
duke@0 254 oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
duke@0 255 debug_only(check_for_valid_allocation_state());
duke@0 256 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
duke@0 257 assert(size >= 0, "int won't convert to size_t");
duke@0 258 HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
duke@0 259 post_allocation_setup_obj(klass, obj, size);
duke@0 260 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
duke@0 261 return (oop)obj;
duke@0 262 }
duke@0 263
duke@0 264 oop CollectedHeap::array_allocate(KlassHandle klass,
duke@0 265 int size,
duke@0 266 int length,
duke@0 267 TRAPS) {
duke@0 268 debug_only(check_for_valid_allocation_state());
duke@0 269 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
duke@0 270 assert(size >= 0, "int won't convert to size_t");
duke@0 271 HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
duke@0 272 post_allocation_setup_array(klass, obj, size, length);
duke@0 273 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
duke@0 274 return (oop)obj;
duke@0 275 }
duke@0 276
duke@0 277 oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
duke@0 278 int size,
duke@0 279 int length,
duke@0 280 TRAPS) {
duke@0 281 debug_only(check_for_valid_allocation_state());
duke@0 282 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
duke@0 283 assert(size >= 0, "int won't convert to size_t");
duke@0 284 HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
duke@0 285 post_allocation_setup_array(klass, obj, size, length);
duke@0 286 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
duke@0 287 return (oop)obj;
duke@0 288 }
duke@0 289
duke@0 290 oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
duke@0 291 oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
duke@0 292 post_allocation_install_obj_klass(klass, obj, size);
duke@0 293 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
duke@0 294 size));
duke@0 295 return obj;
duke@0 296 }
duke@0 297
duke@0 298 oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
duke@0 299 int size,
duke@0 300 TRAPS) {
duke@0 301 debug_only(check_for_valid_allocation_state());
duke@0 302 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
duke@0 303 assert(size >= 0, "int won't convert to size_t");
duke@0 304 HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
duke@0 305 post_allocation_setup_no_klass_install(klass, obj, size);
duke@0 306 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
duke@0 307 return (oop)obj;
duke@0 308 }
duke@0 309
duke@0 310 oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
duke@0 311 int size,
duke@0 312 int length,
duke@0 313 TRAPS) {
duke@0 314 debug_only(check_for_valid_allocation_state());
duke@0 315 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
duke@0 316 assert(size >= 0, "int won't convert to size_t");
duke@0 317 HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
duke@0 318 post_allocation_setup_array(klass, obj, size, length);
duke@0 319 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
duke@0 320 return (oop)obj;
duke@0 321 }
duke@0 322
duke@0 323 // Returns "TRUE" if "p" is a method oop in the
duke@0 324 // current heap with high probability. NOTE: The main
duke@0 325 // current consumers of this interface are Forte::
duke@0 326 // and ThreadProfiler::. In these cases, the
duke@0 327 // interpreter frame from which "p" came may be
duke@0 328 // under construction when sampled asynchronously, so
duke@0 329 // the clients want to check that it represents a
duke@0 330 // valid method before using it. Nonetheless, since
duke@0 331 // the clients do not typically lock out GC, the
duke@0 332 // predicate is_valid_method() is not stable, so
duke@0 333 // it is possible that by the time "p" is used, it
duke@0 334 // is no longer valid.
duke@0 335 inline bool CollectedHeap::is_valid_method(oop p) const {
duke@0 336 return
duke@0 337 p != NULL &&
duke@0 338
duke@0 339 // Check whether it is aligned at a HeapWord boundary.
duke@0 340 Space::is_aligned(p) &&
duke@0 341
duke@0 342 // Check whether "method" is in the allocated part of the
duke@0 343 // permanent generation -- this needs to be checked before
duke@0 344 // p->klass() below to avoid a SEGV (but see below
duke@0 345 // for a potential window of vulnerability).
duke@0 346 is_permanent((void*)p) &&
duke@0 347
duke@0 348 // See if GC is active; however, there is still an
duke@0 349 // apparently unavoidable window after this call
duke@0 350 // and before the client of this interface uses "p".
duke@0 351 // If the client chooses not to lock out GC, then
duke@0 352 // it's a risk the client must accept.
duke@0 353 !is_gc_active() &&
duke@0 354
duke@0 355 // Check that p is a methodOop.
duke@0 356 p->klass() == Universe::methodKlassObj();
duke@0 357 }
duke@0 358
duke@0 359
duke@0 360 #ifndef PRODUCT
duke@0 361
duke@0 362 inline bool
duke@0 363 CollectedHeap::promotion_should_fail(volatile size_t* count) {
duke@0 364 // Access to count is not atomic; the value does not have to be exact.
duke@0 365 if (PromotionFailureALot) {
duke@0 366 const size_t gc_num = total_collections();
duke@0 367 const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
duke@0 368 if (elapsed_gcs >= PromotionFailureALotInterval) {
duke@0 369 // Test for unsigned arithmetic wrap-around.
duke@0 370 if (++*count >= PromotionFailureALotCount) {
duke@0 371 *count = 0;
duke@0 372 return true;
duke@0 373 }
duke@0 374 }
duke@0 375 }
duke@0 376 return false;
duke@0 377 }
duke@0 378
duke@0 379 inline bool CollectedHeap::promotion_should_fail() {
duke@0 380 return promotion_should_fail(&_promotion_failure_alot_count);
duke@0 381 }
duke@0 382
duke@0 383 inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
duke@0 384 if (PromotionFailureALot) {
duke@0 385 _promotion_failure_alot_gc_number = total_collections();
duke@0 386 *count = 0;
duke@0 387 }
duke@0 388 }
duke@0 389
duke@0 390 inline void CollectedHeap::reset_promotion_should_fail() {
duke@0 391 reset_promotion_should_fail(&_promotion_failure_alot_count);
duke@0 392 }
duke@0 393 #endif // #ifndef PRODUCT
stefank@1885 394
stefank@1885 395 #endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP