annotate src/hotspot/share/code/codeCache.cpp @ 49268:74db2b7cec75

8146201: [AOT] Class static initializers that are not pure should not be executed during static compilation
Reviewed-by: kvn
author dlong
date Tue, 20 Mar 2018 10:23:14 -0700
parents c1b46afab3ba
children 898ef81cbc0e
rev   line source
duke@1 1 /*
coleenp@46329 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
duke@1 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@1 4 *
duke@1 5 * This code is free software; you can redistribute it and/or modify it
duke@1 6 * under the terms of the GNU General Public License version 2 only, as
duke@1 7 * published by the Free Software Foundation.
duke@1 8 *
duke@1 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@1 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@1 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@1 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@1 13 * accompanied this code).
duke@1 14 *
duke@1 15 * You should have received a copy of the GNU General Public License version
duke@1 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@1 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@1 18 *
trims@5547 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@5547 20 * or visit www.oracle.com if you need additional information or have any
trims@5547 21 * questions.
duke@1 22 *
duke@1 23 */
duke@1 24
stefank@7397 25 #include "precompiled.hpp"
kvn@42650 26 #include "aot/aotLoader.hpp"
stefank@7397 27 #include "code/codeBlob.hpp"
stefank@7397 28 #include "code/codeCache.hpp"
coleenp@13728 29 #include "code/compiledIC.hpp"
stefank@7397 30 #include "code/dependencies.hpp"
coleenp@13728 31 #include "code/icBuffer.hpp"
stefank@7397 32 #include "code/nmethod.hpp"
stefank@7397 33 #include "code/pcDesc.hpp"
vladidan@15201 34 #include "compiler/compileBroker.hpp"
pliden@30764 35 #include "gc/shared/gcLocker.hpp"
stefank@7397 36 #include "memory/allocation.inline.hpp"
stefank@7397 37 #include "memory/iterator.hpp"
stefank@7397 38 #include "memory/resourceArea.hpp"
hseigel@49176 39 #include "oops/method.inline.hpp"
stefank@7397 40 #include "oops/objArrayOop.hpp"
stefank@7397 41 #include "oops/oop.inline.hpp"
stefank@29084 42 #include "oops/verifyOopClosure.hpp"
pliden@30764 43 #include "runtime/arguments.hpp"
pliden@30764 44 #include "runtime/compilationPolicy.hpp"
pliden@30764 45 #include "runtime/deoptimization.hpp"
stefank@7397 46 #include "runtime/handles.inline.hpp"
stefank@7397 47 #include "runtime/icache.hpp"
stefank@7397 48 #include "runtime/java.hpp"
stefank@7397 49 #include "runtime/mutexLocker.hpp"
anoll@27420 50 #include "runtime/sweeper.hpp"
stefank@7397 51 #include "services/memoryService.hpp"
sla@18025 52 #include "trace/tracing.hpp"
stefank@46625 53 #include "utilities/align.hpp"
coleenp@46589 54 #include "utilities/vmError.hpp"
stefank@7397 55 #include "utilities/xmlstream.hpp"
thartmann@26796 56 #ifdef COMPILER1
thartmann@26796 57 #include "c1/c1_Compilation.hpp"
thartmann@26796 58 #include "c1/c1_Compiler.hpp"
thartmann@26796 59 #endif
thartmann@26796 60 #ifdef COMPILER2
thartmann@26796 61 #include "opto/c2compiler.hpp"
thartmann@26796 62 #include "opto/compile.hpp"
goetz@26805 63 #include "opto/node.hpp"
thartmann@26796 64 #endif
duke@1 65
duke@1 66 // Helper class for printing in CodeCache
duke@1 67 class CodeBlob_sizes {
duke@1 68 private:
duke@1 69 int count;
duke@1 70 int total_size;
duke@1 71 int header_size;
duke@1 72 int code_size;
duke@1 73 int stub_size;
duke@1 74 int relocation_size;
duke@1 75 int scopes_oop_size;
coleenp@13728 76 int scopes_metadata_size;
duke@1 77 int scopes_data_size;
duke@1 78 int scopes_pcs_size;
duke@1 79
duke@1 80 public:
duke@1 81 CodeBlob_sizes() {
duke@1 82 count = 0;
duke@1 83 total_size = 0;
duke@1 84 header_size = 0;
duke@1 85 code_size = 0;
duke@1 86 stub_size = 0;
duke@1 87 relocation_size = 0;
duke@1 88 scopes_oop_size = 0;
coleenp@13728 89 scopes_metadata_size = 0;
duke@1 90 scopes_data_size = 0;
duke@1 91 scopes_pcs_size = 0;
duke@1 92 }
duke@1 93
duke@1 94 int total() { return total_size; }
duke@1 95 bool is_empty() { return count == 0; }
duke@1 96
duke@1 97 void print(const char* title) {
drchase@24424 98 tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
duke@1 99 count,
duke@1 100 title,
drchase@24424 101 (int)(total() / K),
duke@1 102 header_size * 100 / total_size,
duke@1 103 relocation_size * 100 / total_size,
duke@1 104 code_size * 100 / total_size,
duke@1 105 stub_size * 100 / total_size,
duke@1 106 scopes_oop_size * 100 / total_size,
coleenp@13728 107 scopes_metadata_size * 100 / total_size,
duke@1 108 scopes_data_size * 100 / total_size,
duke@1 109 scopes_pcs_size * 100 / total_size);
duke@1 110 }
duke@1 111
duke@1 112 void add(CodeBlob* cb) {
duke@1 113 count++;
duke@1 114 total_size += cb->size();
duke@1 115 header_size += cb->header_size();
duke@1 116 relocation_size += cb->relocation_size();
duke@1 117 if (cb->is_nmethod()) {
twisti@5686 118 nmethod* nm = cb->as_nmethod_or_null();
twisti@6418 119 code_size += nm->insts_size();
duke@1 120 stub_size += nm->stub_size();
duke@1 121
twisti@5686 122 scopes_oop_size += nm->oops_size();
coleenp@13728 123 scopes_metadata_size += nm->metadata_size();
duke@1 124 scopes_data_size += nm->scopes_data_size();
duke@1 125 scopes_pcs_size += nm->scopes_pcs_size();
duke@1 126 } else {
twisti@6418 127 code_size += cb->code_size();
duke@1 128 }
duke@1 129 }
duke@1 130 };
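// Typical use of the helper above (illustrative sketch only; 'live' and the
// traversal shown are hypothetical, not taken from this file):
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(cb, *heap) { if (cb->is_alive()) live.add(cb); }
//   if (!live.is_empty()) live.print("live blobs");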
duke@1 131
thartmann@26796 132 // Iterate over all CodeHeaps
thartmann@26796 133 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
kvn@42650 134 #define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
dlong@45622 135 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
kvn@42650 136
thartmann@26796 137 // Iterate over all CodeBlobs (cb) on the given CodeHeap
thartmann@26796 138 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
duke@1 139
thartmann@26796 140 address CodeCache::_low_bound = 0;
thartmann@26796 141 address CodeCache::_high_bound = 0;
duke@1 142 int CodeCache::_number_of_nmethods_with_dependencies = 0;
duke@1 143 bool CodeCache::_needs_cache_clean = false;
jrose@3908 144 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
duke@1 145
dlong@45622 146 // Initialize arrays of CodeHeap subsets
thartmann@26796 147 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
kvn@42650 148 GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
kvn@42650 149 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
dlong@45622 150 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
thartmann@26796 151
thartmann@34182 152 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
thartmann@34182 153 size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
thartmann@34182 154 // Prepare error message
thartmann@34182 155 const char* error = "Invalid code heap sizes";
goetz@43407 156 err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
goetz@43407 157 " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
thartmann@34182 158 non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
thartmann@34182 159
thartmann@34182 160 if (total_size > cache_size) {
thartmann@34182 161 // Some code heap sizes were explicitly set: total_size must be <= cache_size
goetz@43407 162 message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
thartmann@34182 163 vm_exit_during_initialization(error, message);
thartmann@34182 164 } else if (all_set && total_size != cache_size) {
thartmann@34182 165 // All code heap sizes were explicitly set: total_size must equal cache_size
goetz@43407 166 message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
thartmann@34182 167 vm_exit_during_initialization(error, message);
thartmann@34182 168 }
thartmann@34182 169 }
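// Worked example for the check above (illustrative, hypothetical flag values):
// with ReservedCodeCacheSize=100M, setting NonNMethodCodeHeapSize=10M,
// ProfiledCodeHeapSize=50M and NonProfiledCodeHeapSize=50M is rejected because
// 10M + 50M + 50M = 110M is greater than the 100M reserved for the code cache.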
thartmann@34182 170
thartmann@26796 171 void CodeCache::initialize_heaps() {
thartmann@34182 172 bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
thartmann@34182 173 bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
thartmann@34182 174 bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
thartmann@34182 175 size_t min_size = os::vm_page_size();
thartmann@34182 176 size_t cache_size = ReservedCodeCacheSize;
thartmann@34182 177 size_t non_nmethod_size = NonNMethodCodeHeapSize;
thartmann@34182 178 size_t profiled_size = ProfiledCodeHeapSize;
thartmann@34182 179 size_t non_profiled_size = NonProfiledCodeHeapSize;
thartmann@34182 180 // Check if total size set via command line flags exceeds the reserved size
thartmann@34182 181 check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
thartmann@34182 182 (profiled_set ? profiled_size : min_size),
thartmann@34182 183 (non_profiled_set ? non_profiled_size : min_size),
thartmann@34182 184 cache_size,
thartmann@34182 185 non_nmethod_set && profiled_set && non_profiled_set);
thartmann@34182 186
thartmann@26796 187 // Determine size of compiler buffers
thartmann@26796 188 size_t code_buffers_size = 0;
thartmann@26796 189 #ifdef COMPILER1
thartmann@26796 190 // C1 temporary code buffers (see Compiler::init_buffer_blob())
thartmann@26796 191 const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
thartmann@26796 192 code_buffers_size += c1_count * Compiler::code_buffer_size();
thartmann@26796 193 #endif
thartmann@26796 194 #ifdef COMPILER2
thartmann@26796 195 // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
thartmann@26796 196 const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
thartmann@26796 197 // Initial size of constant table (this may be increased if a compiled method needs more space)
thartmann@26796 198 code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
thartmann@26796 199 #endif
thartmann@26796 200
thartmann@34182 201 // Increase default non_nmethod_size to account for compiler buffers
thartmann@34182 202 if (!non_nmethod_set) {
thartmann@34182 203 non_nmethod_size += code_buffers_size;
thartmann@34182 204 }
thartmann@26796 205 // Calculate default CodeHeap sizes if not set by user
thartmann@34182 206 if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
thartmann@26919 207 // Check if we have enough space for the non-nmethod code heap
thartmann@34182 208 if (cache_size > non_nmethod_size) {
thartmann@34182 209 // Use the default value for non_nmethod_size and one half of the
thartmann@34182 210 // remaining size for non-profiled and one half for profiled methods
thartmann@34182 211 size_t remaining_size = cache_size - non_nmethod_size;
thartmann@34182 212 profiled_size = remaining_size / 2;
thartmann@34182 213 non_profiled_size = remaining_size - profiled_size;
thartmann@26796 214 } else {
thartmann@26919 215 // Use all space for the non-nmethod heap and set other heaps to minimal size
thartmann@34182 216 non_nmethod_size = cache_size - 2 * min_size;
thartmann@34182 217 profiled_size = min_size;
thartmann@34182 218 non_profiled_size = min_size;
thartmann@34182 219 }
thartmann@34182 220 } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
thartmann@34182 221 // The user explicitly set some code heap sizes. Increase or decrease the (default)
thartmann@34182 222 // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
thartmann@34182 223 // code heap sizes and then only change non-nmethod code heap size if still necessary.
thartmann@34182 224 intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
thartmann@34182 225 if (non_profiled_set) {
thartmann@34182 226 if (!profiled_set) {
thartmann@34182 227 // Adapt size of profiled code heap
thartmann@34182 228 if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
thartmann@34182 229 // Not enough space available, set to minimum size
thartmann@34182 230 diff_size += profiled_size - min_size;
thartmann@34182 231 profiled_size = min_size;
thartmann@34182 232 } else {
thartmann@34182 233 profiled_size += diff_size;
thartmann@34182 234 diff_size = 0;
thartmann@34182 235 }
thartmann@34182 236 }
thartmann@34182 237 } else if (profiled_set) {
thartmann@34182 238 // Adapt size of non-profiled code heap
thartmann@34182 239 if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
thartmann@34182 240 // Not enough space available, set to minimum size
thartmann@34182 241 diff_size += non_profiled_size - min_size;
thartmann@34182 242 non_profiled_size = min_size;
thartmann@34182 243 } else {
thartmann@34182 244 non_profiled_size += diff_size;
thartmann@34182 245 diff_size = 0;
thartmann@34182 246 }
thartmann@34182 247 } else if (non_nmethod_set) {
thartmann@34182 248 // Distribute remaining size between profiled and non-profiled code heaps
thartmann@34182 249 diff_size = cache_size - non_nmethod_size;
thartmann@34182 250 profiled_size = diff_size / 2;
thartmann@34182 251 non_profiled_size = diff_size - profiled_size;
thartmann@34182 252 diff_size = 0;
thartmann@34182 253 }
thartmann@34182 254 if (diff_size != 0) {
thartmann@34182 255 // Use non-nmethod code heap for remaining space requirements
thartmann@34182 256 assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
thartmann@34182 257 non_nmethod_size += diff_size;
thartmann@26796 258 }
thartmann@26796 259 }
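// Sizing example for the all-defaults case above (illustrative, hypothetical
// values): with cache_size = 128M and non_nmethod_size = 8M after adding the
// compiler buffers, the remaining 120M is split evenly, giving
// profiled_size = 60M and non_profiled_size = 60M.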
thartmann@26796 260
thartmann@26796 261 // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
thartmann@47993 262 if (!heap_available(CodeBlobType::MethodProfiled)) {
thartmann@34182 263 non_profiled_size += profiled_size;
thartmann@34182 264 profiled_size = 0;
thartmann@26796 265 }
thartmann@26919 266 // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
thartmann@47993 267 if (!heap_available(CodeBlobType::MethodNonProfiled)) {
thartmann@34182 268 non_nmethod_size += non_profiled_size;
thartmann@34182 269 non_profiled_size = 0;
thartmann@34182 270 }
thartmann@34182 271 // Make sure we have enough space for VM internal code
thartmann@34182 272 uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
thartmann@34182 273 if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
thartmann@34182 274 vm_exit_during_initialization(err_msg(
goetz@43407 275 "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
thartmann@34182 276 non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
thartmann@26796 277 }
thartmann@26796 278
thartmann@34182 279 // Verify sizes and update flag values
thartmann@34182 280 assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
thartmann@34182 281 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
thartmann@34182 282 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
thartmann@34182 283 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
thartmann@26796 284
thartmann@47994 285 // If large page support is enabled, align code heaps according to large
thartmann@47994 286 // page size to make sure that the code cache is covered by large pages.
thartmann@47994 287 const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
stefank@46619 288 non_nmethod_size = align_up(non_nmethod_size, alignment);
thartmann@47994 289 profiled_size = align_down(profiled_size, alignment);
thartmann@26796 290
thartmann@26796 291 // Reserve one contiguous chunk of memory for CodeHeaps and split it into
thartmann@26796 292 // parts for the individual heaps. The memory layout looks like this:
thartmann@26796 293 // ---------- high -----------
thartmann@26796 294 // Non-profiled nmethods
thartmann@26796 295 // Profiled nmethods
thartmann@26919 296 // Non-nmethods
thartmann@26796 297 // ---------- low ------------
thartmann@34182 298 ReservedCodeSpace rs = reserve_heap_memory(cache_size);
thartmann@34182 299 ReservedSpace non_method_space = rs.first_part(non_nmethod_size);
thartmann@34182 300 ReservedSpace rest = rs.last_part(non_nmethod_size);
thartmann@26796 301 ReservedSpace profiled_space = rest.first_part(profiled_size);
thartmann@26796 302 ReservedSpace non_profiled_space = rest.last_part(profiled_size);
thartmann@26796 303
thartmann@26919 304 // Non-nmethods (stubs, adapters, ...)
thartmann@28730 305 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
thartmann@26796 306 // Tier 2 and tier 3 (profiled) methods
thartmann@28730 307 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
thartmann@26796 308 // Tier 1 and tier 4 (non-profiled) methods and native methods
thartmann@28730 309 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
thartmann@28730 310 }
thartmann@28730 311
thartmann@47994 312 size_t CodeCache::page_size(bool aligned) {
thartmann@47994 313 if (os::can_execute_large_page_memory()) {
thartmann@47994 314 return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
thartmann@47994 315 os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
thartmann@47994 316 } else {
thartmann@47994 317 return os::vm_page_size();
thartmann@47994 318 }
duke@1 319 }
duke@1 320
thartmann@26796 321 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
thartmann@47994 322 // Align and reserve space for code cache
thartmann@47994 323 const size_t rs_ps = page_size();
thartmann@47994 324 const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
thartmann@47994 325 const size_t rs_size = align_up(size, rs_align);
thartmann@47994 326 ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
sangheki@35203 327 if (!rs.is_reserved()) {
thartmann@47993 328 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
thartmann@47994 329 rs_size/K));
sangheki@35203 330 }
sangheki@35203 331
thartmann@26796 332 // Initialize bounds
thartmann@26796 333 _low_bound = (address)rs.base();
thartmann@26796 334 _high_bound = _low_bound + rs.size();
thartmann@26796 335 return rs;
duke@1 336 }
duke@1 337
dlong@45622 338 // Heaps available for allocation
thartmann@26796 339 bool CodeCache::heap_available(int code_blob_type) {
thartmann@26796 340 if (!SegmentedCodeCache) {
thartmann@26796 341 // No segmentation: use a single code heap
thartmann@26796 342 return (code_blob_type == CodeBlobType::All);
thartmann@38055 343 } else if (Arguments::is_interpreter_only()) {
thartmann@26796 344 // Interpreter only: we don't need any method code heaps
thartmann@26919 345 return (code_blob_type == CodeBlobType::NonNMethod);
thartmann@26796 346 } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
thartmann@26796 347 // Tiered compilation: use all code heaps
thartmann@26796 348 return (code_blob_type < CodeBlobType::All);
thartmann@26796 349 } else {
thartmann@26919 350 // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
thartmann@26919 351 return (code_blob_type == CodeBlobType::NonNMethod) ||
thartmann@26796 352 (code_blob_type == CodeBlobType::MethodNonProfiled);
thartmann@26796 353 }
duke@1 354 }
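// Summary of the cases above (illustrative):
//   !SegmentedCodeCache            -> only CodeBlobType::All is available
//   interpreter only               -> only CodeBlobType::NonNMethod
//   TieredCompilation > simple     -> all code heaps are available
//   otherwise                      -> CodeBlobType::NonNMethod and CodeBlobType::MethodNonProfiled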
duke@1 355
thartmann@27410 356 const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
thartmann@27410 357 switch(code_blob_type) {
thartmann@27410 358 case CodeBlobType::NonNMethod:
thartmann@27410 359 return "NonNMethodCodeHeapSize";
thartmann@27410 360 break;
thartmann@27410 361 case CodeBlobType::MethodNonProfiled:
thartmann@27410 362 return "NonProfiledCodeHeapSize";
thartmann@27410 363 break;
thartmann@27410 364 case CodeBlobType::MethodProfiled:
thartmann@27410 365 return "ProfiledCodeHeapSize";
thartmann@27410 366 break;
thartmann@27410 367 }
thartmann@27410 368 ShouldNotReachHere();
thartmann@27410 369 return NULL;
thartmann@27410 370 }
thartmann@27410 371
kvn@42650 372 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
kvn@42650 373 if (lhs->code_blob_type() == rhs->code_blob_type()) {
kvn@42650 374 return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
kvn@42650 375 } else {
kvn@42650 376 return lhs->code_blob_type() - rhs->code_blob_type();
kvn@42650 377 }
kvn@42650 378 }
kvn@42650 379
kvn@42650 380 void CodeCache::add_heap(CodeHeap* heap) {
kvn@42650 381 assert(!Universe::is_fully_initialized(), "late heap addition?");
kvn@42650 382
kvn@42650 383 _heaps->insert_sorted<code_heap_compare>(heap);
kvn@42650 384
kvn@42650 385 int type = heap->code_blob_type();
kvn@42650 386 if (code_blob_type_accepts_compiled(type)) {
kvn@42650 387 _compiled_heaps->insert_sorted<code_heap_compare>(heap);
kvn@42650 388 }
kvn@42650 389 if (code_blob_type_accepts_nmethod(type)) {
kvn@42650 390 _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
kvn@42650 391 }
dlong@45622 392 if (code_blob_type_accepts_allocable(type)) {
dlong@45622 393 _allocable_heaps->insert_sorted<code_heap_compare>(heap);
dlong@45622 394 }
kvn@42650 395 }
kvn@42650 396
thartmann@28730 397 void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
thartmann@26796 398 // Check if heap is needed
thartmann@26796 399 if (!heap_available(code_blob_type)) {
thartmann@26796 400 return;
thartmann@26796 401 }
duke@1 402
thartmann@26796 403 // Create CodeHeap
thartmann@26796 404 CodeHeap* heap = new CodeHeap(name, code_blob_type);
kvn@42650 405 add_heap(heap);
thartmann@26796 406
thartmann@26796 407 // Reserve Space
thartmann@28730 408 size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
stefank@46620 409 size_initial = align_up(size_initial, os::vm_page_size());
thartmann@26796 410 if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
thartmann@47993 411 vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
thartmann@47993 412 heap->name(), size_initial/K));
thartmann@26796 413 }
thartmann@26796 414
thartmann@26796 415 // Register the CodeHeap
thartmann@26796 416 MemoryService::add_code_heap_memory_pool(heap, name);
duke@1 417 }
duke@1 418
redestad@46647 419 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
redestad@46647 420 FOR_ALL_HEAPS(heap) {
redestad@46647 421 if ((*heap)->contains(start)) {
redestad@46647 422 return *heap;
redestad@46647 423 }
redestad@46647 424 }
redestad@46647 425 return NULL;
redestad@46647 426 }
redestad@46647 427
iignatyev@27642 428 CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
thartmann@26796 429 assert(cb != NULL, "CodeBlob is null");
thartmann@26796 430 FOR_ALL_HEAPS(heap) {
zmajo@43945 431 if ((*heap)->contains_blob(cb)) {
thartmann@26796 432 return *heap;
thartmann@26796 433 }
never@5533 434 }
thartmann@26796 435 ShouldNotReachHere();
thartmann@26796 436 return NULL;
never@5533 437 }
never@5533 438
thartmann@26796 439 CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
thartmann@26796 440 FOR_ALL_HEAPS(heap) {
thartmann@26796 441 if ((*heap)->accepts(code_blob_type)) {
thartmann@26796 442 return *heap;
thartmann@26796 443 }
never@5533 444 }
thartmann@26796 445 return NULL;
never@5533 446 }
duke@1 447
thartmann@26796 448 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
thartmann@26796 449 assert_locked_or_safepoint(CodeCache_lock);
thartmann@26796 450 assert(heap != NULL, "heap is null");
thartmann@26796 451 return (CodeBlob*)heap->first();
thartmann@26796 452 }
vladidan@15201 453
thartmann@26796 454 CodeBlob* CodeCache::first_blob(int code_blob_type) {
thartmann@26796 455 if (heap_available(code_blob_type)) {
thartmann@26796 456 return first_blob(get_code_heap(code_blob_type));
thartmann@26796 457 } else {
thartmann@26796 458 return NULL;
thartmann@26796 459 }
thartmann@26796 460 }
thartmann@26796 461
thartmann@26796 462 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
thartmann@26796 463 assert_locked_or_safepoint(CodeCache_lock);
thartmann@26796 464 assert(heap != NULL, "heap is null");
thartmann@26796 465 return (CodeBlob*)heap->next(cb);
thartmann@26796 466 }
thartmann@26796 467
anoll@27420 468 /**
anoll@27420 469 * Do not seize the CodeCache lock here--if the caller has not
anoll@27420 470 * already done so, we are going to lose bigtime, since the code
anoll@27420 471 * cache will contain a garbage CodeBlob until the caller can
anoll@27420 472 * run the constructor for the CodeBlob subclass he is busy
anoll@27420 473 * instantiating.
anoll@27420 474 */
thartmann@40863 475 CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
anoll@27420 476 // Possibly wakes up the sweeper thread.
anoll@27420 477 NMethodSweeper::notify(code_blob_type);
duke@1 478 assert_locked_or_safepoint(CodeCache_lock);
david@33105 479 assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
anoll@24439 480 if (size <= 0) {
anoll@24439 481 return NULL;
anoll@24439 482 }
duke@1 483 CodeBlob* cb = NULL;
thartmann@26796 484
thartmann@26796 485 // Get CodeHeap for the given CodeBlobType
zmajo@26942 486 CodeHeap* heap = get_code_heap(code_blob_type);
zmajo@26942 487 assert(heap != NULL, "heap is null");
thartmann@26796 488
duke@1 489 while (true) {
anoll@27420 490 cb = (CodeBlob*)heap->allocate(size);
duke@1 491 if (cb != NULL) break;
thartmann@26796 492 if (!heap->expand_by(CodeCacheExpansionSize)) {
thartmann@40863 493 // Save original type for error reporting
thartmann@40863 494 if (orig_code_blob_type == CodeBlobType::All) {
thartmann@40863 495 orig_code_blob_type = code_blob_type;
thartmann@40863 496 }
duke@1 497 // Expansion failed
thartmann@40863 498 if (SegmentedCodeCache) {
thartmann@29338 499 // Fallback solution: Try to store code in another code heap.
thartmann@40863 500 // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
thartmann@29338 501 // Note that in the sweeper, we check the reverse_free_ratio of the code heap
thartmann@29338 502 // and force stack scanning if less than 10% of the code heap is free.
thartmann@29338 503 int type = code_blob_type;
thartmann@29338 504 switch (type) {
thartmann@29338 505 case CodeBlobType::NonNMethod:
thartmann@29338 506 type = CodeBlobType::MethodNonProfiled;
thartmann@29338 507 break;
thartmann@29338 508 case CodeBlobType::MethodNonProfiled:
thartmann@29338 509 type = CodeBlobType::MethodProfiled;
thartmann@40863 510 break;
thartmann@40863 511 case CodeBlobType::MethodProfiled:
thartmann@40863 512 // Avoid loop if we already tried that code heap
thartmann@40863 513 if (type == orig_code_blob_type) {
thartmann@40863 514 type = CodeBlobType::MethodNonProfiled;
thartmann@40863 515 }
thartmann@29338 516 break;
thartmann@29338 517 }
thartmann@40863 518 if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
thartmann@40863 519 if (PrintCodeCacheExtension) {
thartmann@40863 520 tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
thartmann@40863 521 heap->name(), get_code_heap(type)->name());
thartmann@40863 522 }
thartmann@40863 523 return allocate(size, type, orig_code_blob_type);
thartmann@29338 524 }
thartmann@26796 525 }
anoll@27420 526 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
thartmann@40863 527 CompileBroker::handle_full_code_cache(orig_code_blob_type);
duke@1 528 return NULL;
duke@1 529 }
duke@1 530 if (PrintCodeCacheExtension) {
duke@1 531 ResourceMark rm;
kvn@42650 532 if (_nmethod_heaps->length() >= 1) {
thartmann@26918 533 tty->print("%s", heap->name());
thartmann@26796 534 } else {
thartmann@26919 535 tty->print("CodeCache");
thartmann@26796 536 }
thartmann@26796 537 tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
thartmann@26796 538 (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
thartmann@26796 539 (address)heap->high() - (address)heap->low_boundary());
duke@1 540 }
duke@1 541 }
jrose@3908 542 print_trace("allocation", cb, size);
duke@1 543 return cb;
duke@1 544 }
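// Fallback behavior of allocate() with SegmentedCodeCache (illustrative): if the
// requested code heap cannot be expanded, the allocation is retried in the next
// heap of the chain NonNMethod -> MethodNonProfiled -> MethodProfiled
// (-> MethodNonProfiled). For example, a NonNMethod request may end up being
// served from the non-profiled method heap before
// CompileBroker::handle_full_code_cache() is finally called.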
duke@1 545
duke@1 546 void CodeCache::free(CodeBlob* cb) {
duke@1 547 assert_locked_or_safepoint(CodeCache_lock);
thartmann@34158 548 CodeHeap* heap = get_code_heap(cb);
jrose@3908 549 print_trace("free", cb);
never@5924 550 if (cb->is_nmethod()) {
thartmann@34158 551 heap->set_nmethod_count(heap->nmethod_count() - 1);
never@5924 552 if (((nmethod *)cb)->has_dependencies()) {
never@5924 553 _number_of_nmethods_with_dependencies--;
never@5924 554 }
never@5924 555 }
never@5924 556 if (cb->is_adapter_blob()) {
thartmann@34158 557 heap->set_adapter_count(heap->adapter_count() - 1);
duke@1 558 }
duke@1 559
thartmann@26796 560 // Get heap for given CodeBlob and deallocate
thartmann@26796 561 get_code_heap(cb)->deallocate(cb);
duke@1 562
thartmann@34158 563 assert(heap->blob_count() >= 0, "sanity check");
duke@1 564 }
duke@1 565
simonis@47568 566 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
simonis@47568 567 assert_locked_or_safepoint(CodeCache_lock);
simonis@47568 568 guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
simonis@47568 569 print_trace("free_unused_tail", cb);
simonis@47568 570
simonis@47568 571 // We also have to account for the extra space (i.e. header) used by the CodeBlob
simonis@47568 572 // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
simonis@47568 573 used += CodeBlob::align_code_offset(cb->header_size());
simonis@47568 574
simonis@47568 575 // Get heap for given CodeBlob and deallocate its unused tail
simonis@47568 576 get_code_heap(cb)->deallocate_tail(cb, used);
simonis@47568 577 // Adjust the sizes of the CodeBlob
simonis@47568 578 cb->adjust_size(used);
simonis@47568 579 }
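// Example (illustrative, hypothetical numbers): if the interpreter buffer blob
// was created with 256K and only 200K of it were actually used, 'used' is first
// increased by the aligned header size and the remaining tail is handed back to
// the code heap via deallocate_tail().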
simonis@47568 580
duke@1 581 void CodeCache::commit(CodeBlob* cb) {
duke@1 582 // this is called by nmethod::nmethod, which must already own CodeCache_lock
duke@1 583 assert_locked_or_safepoint(CodeCache_lock);
thartmann@34158 584 CodeHeap* heap = get_code_heap(cb);
never@5924 585 if (cb->is_nmethod()) {
thartmann@34158 586 heap->set_nmethod_count(heap->nmethod_count() + 1);
never@5924 587 if (((nmethod *)cb)->has_dependencies()) {
never@5924 588 _number_of_nmethods_with_dependencies++;
never@5924 589 }
duke@1 590 }
never@5924 591 if (cb->is_adapter_blob()) {
thartmann@34158 592 heap->set_adapter_count(heap->adapter_count() + 1);
never@5924 593 }
never@5924 594
duke@1 595 // flush the hardware I-cache
twisti@6418 596 ICache::invalidate_range(cb->content_begin(), cb->content_size());
duke@1 597 }
duke@1 598
duke@1 599 bool CodeCache::contains(void *p) {
goetz@42062 600 // S390 uses contains() in current_frame(), which is used before
goetz@42062 601 // code cache initialization if NativeMemoryTracking=detail is set.
goetz@42062 602 S390_ONLY(if (_heaps == NULL) return false;)
goetz@42062 603 // It should be ok to call contains without holding a lock.
thartmann@26796 604 FOR_ALL_HEAPS(heap) {
thartmann@26796 605 if ((*heap)->contains(p)) {
thartmann@26796 606 return true;
thartmann@26796 607 }
thartmann@26796 608 }
thartmann@26796 609 return false;
duke@1 610 }
duke@1 611
kvn@42650 612 bool CodeCache::contains(nmethod *nm) {
kvn@42650 613 return contains((void *)nm);
kvn@42650 614 }
kvn@42650 615
thartmann@26796 616 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
thartmann@26796 617 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
duke@1 618 // valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
duke@1 619 CodeBlob* CodeCache::find_blob(void* start) {
duke@1 620 CodeBlob* result = find_blob_unsafe(start);
anoll@23214 621 // We could potentially look up non_entrant methods
coleenp@46589 622 guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
duke@1 623 return result;
duke@1 624 }
duke@1 625
thartmann@26796 626 // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
thartmann@26796 627 // what you are doing)
thartmann@26796 628 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
thartmann@26796 629 // NMT can walk the stack before code cache is created
redestad@46647 630 if (_heaps != NULL) {
redestad@46647 631 CodeHeap* heap = get_code_heap_containing(start);
redestad@46647 632 if (heap != NULL) {
redestad@46647 633 return heap->find_blob_unsafe(start);
thartmann@26796 634 }
thartmann@26796 635 }
thartmann@26796 636 return NULL;
thartmann@26796 637 }
thartmann@26796 638
duke@1 639 nmethod* CodeCache::find_nmethod(void* start) {
thartmann@26796 640 CodeBlob* cb = find_blob(start);
thartmann@26796 641 assert(cb->is_nmethod(), "did not find an nmethod");
duke@1 642 return (nmethod*)cb;
duke@1 643 }
duke@1 644
duke@1 645 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
duke@1 646 assert_locked_or_safepoint(CodeCache_lock);
dlong@45622 647 FOR_ALL_HEAPS(heap) {
thartmann@26796 648 FOR_ALL_BLOBS(cb, *heap) {
thartmann@26796 649 f(cb);
thartmann@26796 650 }
duke@1 651 }
duke@1 652 }
duke@1 653
duke@1 654 void CodeCache::nmethods_do(void f(nmethod* nm)) {
duke@1 655 assert_locked_or_safepoint(CodeCache_lock);
thartmann@26796 656 NMethodIterator iter;
thartmann@26796 657 while(iter.next()) {
thartmann@26796 658 f(iter.method());
duke@1 659 }
duke@1 660 }
duke@1 661
rbackman@38133 662 void CodeCache::metadata_do(void f(Metadata* m)) {
coleenp@13728 663 assert_locked_or_safepoint(CodeCache_lock);
thartmann@26796 664 NMethodIterator iter;
thartmann@26796 665 while(iter.next_alive()) {
rbackman@38133 666 iter.method()->metadata_do(f);
coleenp@13728 667 }
kvn@42650 668 AOTLoader::metadata_do(f);
coleenp@13728 669 }
duke@1 670
duke@1 671 int CodeCache::alignment_unit() {
thartmann@26796 672 return (int)_heaps->first()->alignment_unit();
duke@1 673 }
duke@1 674
duke@1 675 int CodeCache::alignment_offset() {
thartmann@26796 676 return (int)_heaps->first()->alignment_offset();
duke@1 677 }
duke@1 678
thartmann@26796 679 // Mark nmethods for unloading if they contain otherwise unreachable oops.
brutisso@13878 680 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
duke@1 681 assert_locked_or_safepoint(CodeCache_lock);
rbackman@38133 682 CompiledMethodIterator iter;
thartmann@26796 683 while(iter.next_alive()) {
thartmann@26796 684 iter.method()->do_unloading(is_alive, unloading_occurred);
duke@1 685 }
duke@1 686 }
duke@1 687
jrose@3908 688 void CodeCache::blobs_do(CodeBlobClosure* f) {
duke@1 689 assert_locked_or_safepoint(CodeCache_lock);
dlong@45622 690 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 691 FOR_ALL_BLOBS(cb, *heap) {
thartmann@26796 692 if (cb->is_alive()) {
thartmann@26796 693 f->do_code_blob(cb);
jrose@3908 694 #ifdef ASSERT
eosterlund@47421 695 if (cb->is_nmethod()) {
eosterlund@47421 696 Universe::heap()->verify_nmethod((nmethod*)cb);
eosterlund@47421 697 }
jrose@3908 698 #endif //ASSERT
thartmann@26796 699 }
thartmann@26796 700 }
duke@1 701 }
duke@1 702 }
duke@1 703
eosterlund@47421 704 // Walk the list of methods which might contain oops to the java heap.
cvarming@36591 705 void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
jrose@3908 706 assert_locked_or_safepoint(CodeCache_lock);
stefank@25492 707
cvarming@36591 708 const bool fix_relocations = f->fix_relocations();
jrose@3908 709 debug_only(mark_scavenge_root_nmethods());
jrose@3908 710
cvarming@36591 711 nmethod* prev = NULL;
cvarming@36591 712 nmethod* cur = scavenge_root_nmethods();
cvarming@36591 713 while (cur != NULL) {
jrose@3908 714 debug_only(cur->clear_scavenge_root_marked());
jrose@3908 715 assert(cur->scavenge_root_not_marked(), "");
jrose@3908 716 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
jrose@3908 717
jrose@3908 718 bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
jrose@3908 719 if (TraceScavenge) {
jrose@3908 720 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
jrose@3908 721 }
twisti@5247 722 if (is_live) {
jrose@3908 723 // Perform cur->oops_do(f), maybe just once per nmethod.
jrose@3908 724 f->do_code_blob(cur);
twisti@5247 725 }
cvarming@36591 726 nmethod* const next = cur->scavenge_root_link();
cvarming@36591 727 // The scavengable nmethod list must contain all methods with scavengable
cvarming@36591 728 // oops. It is safe to include more nmethods on the list, but we do not
cvarming@36591 729 // expect any live non-scavengable nmethods on the list.
cvarming@36591 730 if (fix_relocations) {
cvarming@36591 731 if (!is_live || !cur->detect_scavenge_root_oops()) {
cvarming@36591 732 unlink_scavenge_root_nmethod(cur, prev);
cvarming@36591 733 } else {
cvarming@36591 734 prev = cur;
cvarming@36591 735 }
cvarming@36591 736 }
cvarming@36591 737 cur = next;
jrose@3908 738 }
jrose@3908 739
jrose@3908 740 // Check for stray marks.
jrose@3908 741 debug_only(verify_perm_nmethods(NULL));
jrose@3908 742 }
jrose@3908 743
eosterlund@47421 744 void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
eosterlund@47421 745 assert_locked_or_safepoint(CodeCache_lock);
eosterlund@47421 746 if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
eosterlund@47421 747 add_scavenge_root_nmethod(nm);
eosterlund@47421 748 }
eosterlund@47421 749 }
eosterlund@47421 750
eosterlund@47421 751 void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
eosterlund@47421 752 nm->verify_scavenge_root_oops();
eosterlund@47421 753 }
eosterlund@47421 754
jrose@3908 755 void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
jrose@3908 756 assert_locked_or_safepoint(CodeCache_lock);
stefank@25492 757
jrose@3908 758 nm->set_on_scavenge_root_list();
jrose@3908 759 nm->set_scavenge_root_link(_scavenge_root_nmethods);
jrose@3908 760 set_scavenge_root_nmethods(nm);
jrose@3908 761 print_trace("add_scavenge_root", nm);
jrose@3908 762 }
jrose@3908 763
cvarming@36591 764 void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
cvarming@36591 765 assert_locked_or_safepoint(CodeCache_lock);
cvarming@36591 766
cvarming@36591 767 assert((prev == NULL && scavenge_root_nmethods() == nm) ||
cvarming@36591 768 (prev != NULL && prev->scavenge_root_link() == nm), "precondition");
cvarming@36591 769
cvarming@36591 770 print_trace("unlink_scavenge_root", nm);
cvarming@36591 771 if (prev == NULL) {
cvarming@36591 772 set_scavenge_root_nmethods(nm->scavenge_root_link());
cvarming@36591 773 } else {
cvarming@36591 774 prev->set_scavenge_root_link(nm->scavenge_root_link());
cvarming@36591 775 }
cvarming@36591 776 nm->set_scavenge_root_link(NULL);
cvarming@36591 777 nm->clear_on_scavenge_root_list();
cvarming@36591 778 }
cvarming@36591 779
jrose@3908 780 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
jrose@3908 781 assert_locked_or_safepoint(CodeCache_lock);
stefank@25492 782
jrose@3908 783 print_trace("drop_scavenge_root", nm);
cvarming@36591 784 nmethod* prev = NULL;
cvarming@36591 785 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
jrose@3908 786 if (cur == nm) {
cvarming@36591 787 unlink_scavenge_root_nmethod(cur, prev);
jrose@3908 788 return;
jrose@3908 789 }
cvarming@36591 790 prev = cur;
jrose@3908 791 }
jrose@3908 792 assert(false, "should have been on list");
jrose@3908 793 }
jrose@3908 794
jrose@3908 795 void CodeCache::prune_scavenge_root_nmethods() {
jrose@3908 796 assert_locked_or_safepoint(CodeCache_lock);
stefank@25492 797
jrose@3908 798 debug_only(mark_scavenge_root_nmethods());
jrose@3908 799
jrose@3908 800 nmethod* last = NULL;
jrose@3908 801 nmethod* cur = scavenge_root_nmethods();
jrose@3908 802 while (cur != NULL) {
jrose@3908 803 nmethod* next = cur->scavenge_root_link();
jrose@3908 804 debug_only(cur->clear_scavenge_root_marked());
jrose@3908 805 assert(cur->scavenge_root_not_marked(), "");
jrose@3908 806 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
jrose@3908 807
jrose@3908 808 if (!cur->is_zombie() && !cur->is_unloaded()
jrose@3908 809 && cur->detect_scavenge_root_oops()) {
jrose@3908 810 // Keep it. Advance 'last' to prevent deletion.
jrose@3908 811 last = cur;
jrose@3908 812 } else {
jrose@3908 813 // Prune it from the list, so we don't have to look at it any more.
jrose@3908 814 print_trace("prune_scavenge_root", cur);
cvarming@36591 815 unlink_scavenge_root_nmethod(cur, last);
jrose@3908 816 }
jrose@3908 817 cur = next;
jrose@3908 818 }
jrose@3908 819
jrose@3908 820 // Check for stray marks.
jrose@3908 821 debug_only(verify_perm_nmethods(NULL));
jrose@3908 822 }
jrose@3908 823
jrose@3908 824 #ifndef PRODUCT
jrose@3908 825 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
jrose@3908 826 // While we are here, verify the integrity of the list.
jrose@3908 827 mark_scavenge_root_nmethods();
jrose@3908 828 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
jrose@3908 829 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
jrose@3908 830 cur->clear_scavenge_root_marked();
jrose@3908 831 }
jrose@3908 832 verify_perm_nmethods(f);
jrose@3908 833 }
jrose@3908 834
eosterlund@47421 835 // Temporarily mark nmethods that are claimed to be on the scavenge list.
jrose@3908 836 void CodeCache::mark_scavenge_root_nmethods() {
thartmann@26796 837 NMethodIterator iter;
thartmann@26796 838 while(iter.next_alive()) {
thartmann@26796 839 nmethod* nm = iter.method();
thartmann@26796 840 assert(nm->scavenge_root_not_marked(), "clean state");
thartmann@26796 841 if (nm->on_scavenge_root_list())
thartmann@26796 842 nm->set_scavenge_root_marked();
jrose@3908 843 }
jrose@3908 844 }
jrose@3908 845
jrose@3908 846 // If the closure is given, run it on the unlisted nmethods.
jrose@3908 847 // Also make sure that the effects of mark_scavenge_root_nmethods are gone.
jrose@3908 848 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
thartmann@26796 849 NMethodIterator iter;
thartmann@26796 850 while(iter.next_alive()) {
thartmann@26796 851 nmethod* nm = iter.method();
jrose@3908 852 bool call_f = (f_or_null != NULL);
thartmann@26796 853 assert(nm->scavenge_root_not_marked(), "must be already processed");
thartmann@26796 854 if (nm->on_scavenge_root_list())
thartmann@26796 855 call_f = false; // don't show this one to the client
eosterlund@47421 856 Universe::heap()->verify_nmethod(nm);
thartmann@26796 857 if (call_f) f_or_null->do_code_blob(nm);
jrose@3908 858 }
jrose@3908 859 }
jrose@3908 860 #endif //PRODUCT
jrose@3908 861
stefank@25492 862 void CodeCache::verify_clean_inline_caches() {
stefank@25492 863 #ifdef ASSERT
thartmann@26796 864 NMethodIterator iter;
thartmann@26796 865 while(iter.next_alive()) {
thartmann@26796 866 nmethod* nm = iter.method();
thartmann@26796 867 assert(!nm->is_unloaded(), "Tautology");
thartmann@26796 868 nm->verify_clean_inline_caches();
thartmann@26796 869 nm->verify();
stefank@25492 870 }
stefank@25492 871 #endif
stefank@25492 872 }
stefank@25492 873
stefank@25492 874 void CodeCache::verify_icholder_relocations() {
stefank@25492 875 #ifdef ASSERT
stefank@25492 876 // make sure that we aren't leaking icholders
stefank@25492 877 int count = 0;
thartmann@26796 878 FOR_ALL_HEAPS(heap) {
thartmann@26796 879 FOR_ALL_BLOBS(cb, *heap) {
kvn@42650 880 CompiledMethod *nm = cb->as_compiled_method_or_null();
kvn@42650 881 if (nm != NULL) {
thartmann@26796 882 count += nm->verify_icholder_relocations();
thartmann@26796 883 }
stefank@25492 884 }
stefank@25492 885 }
stefank@25492 886 assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
stefank@25492 887 CompiledICHolder::live_count(), "must agree");
stefank@25492 888 #endif
stefank@25492 889 }
kvn@4750 890
duke@1 891 void CodeCache::gc_prologue() {
duke@1 892 }
duke@1 893
duke@1 894 void CodeCache::gc_epilogue() {
duke@1 895 assert_locked_or_safepoint(CodeCache_lock);
thartmann@32466 896 NOT_DEBUG(if (needs_cache_clean())) {
rbackman@38133 897 CompiledMethodIterator iter;
thartmann@32466 898 while(iter.next_alive()) {
rbackman@38133 899 CompiledMethod* cm = iter.method();
rbackman@38133 900 assert(!cm->is_unloaded(), "Tautology");
thartmann@32466 901 DEBUG_ONLY(if (needs_cache_clean())) {
rbackman@38133 902 cm->cleanup_inline_caches();
thartmann@32401 903 }
rbackman@38133 904 DEBUG_ONLY(cm->verify());
rbackman@38133 905 DEBUG_ONLY(cm->verify_oop_relocations());
duke@1 906 }
duke@1 907 }
rbackman@38133 908
duke@1 909 set_needs_cache_clean(false);
jrose@3908 910 prune_scavenge_root_nmethods();
coleenp@13728 911
stefank@25492 912 verify_icholder_relocations();
duke@1 913 }
duke@1 914
never@8724 915 void CodeCache::verify_oops() {
never@8724 916 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@8724 917 VerifyOopClosure voc;
thartmann@26796 918 NMethodIterator iter;
thartmann@26796 919 while(iter.next_alive()) {
thartmann@26796 920 nmethod* nm = iter.method();
thartmann@26796 921 nm->oops_do(&voc);
thartmann@26796 922 nm->verify_oop_relocations();
never@8724 923 }
never@8724 924 }
never@8724 925
thartmann@34158 926 int CodeCache::blob_count(int code_blob_type) {
thartmann@34158 927 CodeHeap* heap = get_code_heap(code_blob_type);
thartmann@34158 928 return (heap != NULL) ? heap->blob_count() : 0;
thartmann@34158 929 }
thartmann@34158 930
thartmann@34158 931 int CodeCache::blob_count() {
thartmann@34158 932 int count = 0;
thartmann@34158 933 FOR_ALL_HEAPS(heap) {
thartmann@34158 934 count += (*heap)->blob_count();
thartmann@34158 935 }
thartmann@34158 936 return count;
thartmann@34158 937 }
thartmann@34158 938
thartmann@34158 939 int CodeCache::nmethod_count(int code_blob_type) {
thartmann@34158 940 CodeHeap* heap = get_code_heap(code_blob_type);
thartmann@34158 941 return (heap != NULL) ? heap->nmethod_count() : 0;
thartmann@34158 942 }
thartmann@34158 943
thartmann@34158 944 int CodeCache::nmethod_count() {
thartmann@34158 945 int count = 0;
kvn@42650 946 FOR_ALL_NMETHOD_HEAPS(heap) {
thartmann@34158 947 count += (*heap)->nmethod_count();
thartmann@34158 948 }
thartmann@34158 949 return count;
thartmann@34158 950 }
thartmann@34158 951
thartmann@34158 952 int CodeCache::adapter_count(int code_blob_type) {
thartmann@34158 953 CodeHeap* heap = get_code_heap(code_blob_type);
thartmann@34158 954 return (heap != NULL) ? heap->adapter_count() : 0;
thartmann@34158 955 }
thartmann@34158 956
thartmann@34158 957 int CodeCache::adapter_count() {
thartmann@34158 958 int count = 0;
thartmann@34158 959 FOR_ALL_HEAPS(heap) {
thartmann@34158 960 count += (*heap)->adapter_count();
thartmann@34158 961 }
thartmann@34158 962 return count;
thartmann@34158 963 }
thartmann@34158 964
thartmann@34158 965 address CodeCache::low_bound(int code_blob_type) {
thartmann@34158 966 CodeHeap* heap = get_code_heap(code_blob_type);
thartmann@34158 967 return (heap != NULL) ? (address)heap->low_boundary() : NULL;
thartmann@34158 968 }
thartmann@34158 969
thartmann@34158 970 address CodeCache::high_bound(int code_blob_type) {
thartmann@34158 971 CodeHeap* heap = get_code_heap(code_blob_type);
thartmann@34158 972 return (heap != NULL) ? (address)heap->high_boundary() : NULL;
thartmann@34158 973 }
thartmann@34158 974
thartmann@26796 975 size_t CodeCache::capacity() {
thartmann@26796 976 size_t cap = 0;
dlong@45622 977 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 978 cap += (*heap)->capacity();
thartmann@26796 979 }
thartmann@26796 980 return cap;
duke@1 981 }
duke@1 982
zmajo@26942 983 size_t CodeCache::unallocated_capacity(int code_blob_type) {
zmajo@26942 984 CodeHeap* heap = get_code_heap(code_blob_type);
zmajo@26942 985 return (heap != NULL) ? heap->unallocated_capacity() : 0;
zmajo@26942 986 }
zmajo@26942 987
thartmann@26796 988 size_t CodeCache::unallocated_capacity() {
thartmann@26796 989 size_t unallocated_cap = 0;
dlong@45622 990 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 991 unallocated_cap += (*heap)->unallocated_capacity();
thartmann@26796 992 }
thartmann@26796 993 return unallocated_cap;
thartmann@26796 994 }
duke@1 995
thartmann@26796 996 size_t CodeCache::max_capacity() {
thartmann@26796 997 size_t max_cap = 0;
dlong@45622 998 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 999 max_cap += (*heap)->max_capacity();
thartmann@26796 1000 }
thartmann@26796 1001 return max_cap;
duke@1 1002 }
duke@1 1003
anoll@17617 1004 /**
thartmann@26796 1005 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
anoll@17617 1006 * is free, reverse_free_ratio() returns 4.
anoll@17617 1007 */
thartmann@26796 1008 double CodeCache::reverse_free_ratio(int code_blob_type) {
thartmann@26796 1009 CodeHeap* heap = get_code_heap(code_blob_type);
thartmann@26796 1010 if (heap == NULL) {
thartmann@26796 1011 return 0;
thartmann@26796 1012 }
anoll@27420 1013
anoll@27420 1014 double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
thartmann@26796 1015 double max_capacity = (double)heap->max_capacity();
anoll@27420 1016 double result = max_capacity / unallocated_capacity;
anoll@27420 1017 assert (max_capacity >= unallocated_capacity, "Must be");
david@33105 1018 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
anoll@27420 1019 return result;
anoll@17617 1020 }
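// Example (illustrative numbers): max_capacity = 120M and unallocated_capacity = 12M
// yield reverse_free_ratio = 10, i.e. only one tenth of the code heap is still free.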
duke@1 1021
thartmann@26796 1022 size_t CodeCache::bytes_allocated_in_freelists() {
thartmann@26796 1023 size_t allocated_bytes = 0;
dlong@45622 1024 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 1025 allocated_bytes += (*heap)->allocated_in_freelist();
thartmann@26796 1026 }
thartmann@26796 1027 return allocated_bytes;
thartmann@26796 1028 }
thartmann@26796 1029
thartmann@26796 1030 int CodeCache::allocated_segments() {
thartmann@26796 1031 int number_of_segments = 0;
dlong@45622 1032 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 1033 number_of_segments += (*heap)->allocated_segments();
thartmann@26796 1034 }
thartmann@26796 1035 return number_of_segments;
thartmann@26796 1036 }
thartmann@26796 1037
thartmann@26796 1038 size_t CodeCache::freelists_length() {
thartmann@26796 1039 size_t length = 0;
dlong@45622 1040 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 1041 length += (*heap)->freelist_length();
thartmann@26796 1042 }
thartmann@26796 1043 return length;
thartmann@26796 1044 }
thartmann@26796 1045
duke@1 1046 void icache_init();
duke@1 1047
duke@1 1048 void CodeCache::initialize() {
duke@1 1049 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
duke@1 1050 #ifdef COMPILER2
duke@1 1051 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
duke@1 1052 #endif
duke@1 1053 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
duke@1 1054 // This was originally just a check of the alignment that caused failures; instead, round
duke@1 1055 // the code cache to the page size. In particular, Solaris is moving to a larger
duke@1 1056 // default page size.
stefank@46620 1057 CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
thartmann@26796 1058
thartmann@26796 1059 if (SegmentedCodeCache) {
thartmann@26796 1060 // Use multiple code heaps
thartmann@26796 1061 initialize_heaps();
thartmann@26796 1062 } else {
thartmann@26796 1063 // Use a single code heap
thartmann@34158 1064 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
thartmann@34158 1065 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
thartmann@34158 1066 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
thartmann@26796 1067 ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
thartmann@28730 1068 add_heap(rs, "CodeCache", CodeBlobType::All);
duke@1 1069 }
duke@1 1070
duke@1 1071 // Initialize ICache flush mechanism
duke@1 1072 // This service is needed for os::register_code_area
duke@1 1073 icache_init();
duke@1 1074
duke@1 1075 // Give OS a chance to register generated code area.
duke@1 1076 // This is used on Windows 64 bit platforms to register
duke@1 1077 // Structured Exception Handlers for our generated code.
thartmann@26796 1078 os::register_code_area((char*)low_bound(), (char*)high_bound());
duke@1 1079 }
duke@1 1080
duke@1 1081 void codeCache_init() {
duke@1 1082 CodeCache::initialize();
kvn@42650 1083 // Load AOT libraries and add AOT code heaps.
kvn@42650 1084 AOTLoader::initialize();
duke@1 1085 }
duke@1 1086
duke@1 1087 //------------------------------------------------------------------------------------------------
duke@1 1088
duke@1 1089 int CodeCache::number_of_nmethods_with_dependencies() {
duke@1 1090 return _number_of_nmethods_with_dependencies;
duke@1 1091 }
duke@1 1092
duke@1 1093 void CodeCache::clear_inline_caches() {
duke@1 1094 assert_locked_or_safepoint(CodeCache_lock);
rbackman@38133 1095 CompiledMethodIterator iter;
thartmann@26796 1096 while(iter.next_alive()) {
thartmann@26796 1097 iter.method()->clear_inline_caches();
duke@1 1098 }
duke@1 1099 }
duke@1 1100
jcm@37289 1101 void CodeCache::cleanup_inline_caches() {
jcm@37289 1102 assert_locked_or_safepoint(CodeCache_lock);
jcm@37289 1103 NMethodIterator iter;
jcm@37289 1104 while(iter.next_alive()) {
jcm@37289 1105 iter.method()->cleanup_inline_caches(/*clean_all=*/true);
jcm@37289 1106 }
jcm@37289 1107 }
jcm@37289 1108
anoll@22506 1109 // Keeps track of time spent checking dependencies
anoll@22921 1110 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
duke@1 1111
vlivanov@36300 1112 int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
duke@1 1113 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
duke@1 1114 int number_of_marked_CodeBlobs = 0;
duke@1 1115
duke@1 1116 // search the hierarchy looking for nmethods which are affected by the loading of this class
duke@1 1117
duke@1 1118 // then search the interfaces this class implements looking for nmethods
duke@1 1119 // which might be dependent on the fact that an interface only had one
duke@1 1120 // implementor.
anoll@22506 1121 // nmethod::check_all_dependencies only works correctly if no safepoint
anoll@22506 1122 // can happen
david@35492 1123 NoSafepointVerifier nsv;
anoll@22506 1124 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
anoll@22506 1125 Klass* d = str.klass();
anoll@22506 1126 number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
duke@1 1127 }
duke@1 1128
duke@1 1129 #ifndef PRODUCT
anoll@22506 1130 if (VerifyDependencies) {
anoll@22506 1131 // Object pointers are used as unique identifiers for dependency arguments. This
anoll@22506 1132 // is only possible if no safepoint (i.e., GC) occurs while the verification code runs.
anoll@22506 1133 dependentCheckTime.start();
anoll@22506 1134 nmethod::check_all_dependencies(changes);
anoll@22506 1135 dependentCheckTime.stop();
anoll@22506 1136 }
anoll@22506 1137 #endif
duke@1 1138
duke@1 1139 return number_of_marked_CodeBlobs;
duke@1 1140 }
duke@1 1141
rbackman@38133 1142 CompiledMethod* CodeCache::find_compiled(void* start) {
rbackman@38133 1143 CodeBlob *cb = find_blob(start);
rbackman@38133 1144 assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
rbackman@38133 1145 return (CompiledMethod*)cb;
rbackman@38133 1146 }
duke@1 1147
kvn@42650 1148 bool CodeCache::is_far_target(address target) {
kvn@42650 1149 #if INCLUDE_AOT
kvn@42650 1150 return NativeCall::is_far_call(_low_bound, target) ||
kvn@42650 1151 NativeCall::is_far_call(_high_bound, target);
kvn@42650 1152 #else
kvn@42650 1153 return false;
kvn@42650 1154 #endif
kvn@42650 1155 }
kvn@42650 1156
duke@1 1157 #ifdef HOTSWAP
coleenp@46329 1158 int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
duke@1 1159 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
duke@1 1160 int number_of_marked_CodeBlobs = 0;
duke@1 1161
duke@1 1162 // Deoptimize all methods of the evolving class itself
coleenp@13728 1163 Array<Method*>* old_methods = dependee->methods();
duke@1 1164 for (int i = 0; i < old_methods->length(); i++) {
duke@1 1165 ResourceMark rm;
coleenp@13728 1166 Method* old_method = old_methods->at(i);
rbackman@38133 1167 CompiledMethod* nm = old_method->code();
duke@1 1168 if (nm != NULL) {
duke@1 1169 nm->mark_for_deoptimization();
duke@1 1170 number_of_marked_CodeBlobs++;
duke@1 1171 }
duke@1 1172 }
duke@1 1173
rbackman@38133 1174 CompiledMethodIterator iter;
thartmann@26796 1175 while(iter.next_alive()) {
rbackman@38133 1176 CompiledMethod* nm = iter.method();
duke@1 1177 if (nm->is_marked_for_deoptimization()) {
duke@1 1178 // ...Already marked in the previous pass; don't count it again.
coleenp@46329 1179 } else if (nm->is_evol_dependent_on(dependee)) {
duke@1 1180 ResourceMark rm;
duke@1 1181 nm->mark_for_deoptimization();
duke@1 1182 number_of_marked_CodeBlobs++;
duke@1 1183 } else {
coleenp@13728 1184 // flush caches in case they refer to a redefined Method*
duke@1 1185 nm->clear_inline_caches();
duke@1 1186 }
duke@1 1187 }
duke@1 1188
duke@1 1189 return number_of_marked_CodeBlobs;
duke@1 1190 }
duke@1 1191 #endif // HOTSWAP
duke@1 1192
duke@1 1193
duke@1 1194 // Deoptimize all methods
duke@1 1195 void CodeCache::mark_all_nmethods_for_deoptimization() {
duke@1 1196 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
rbackman@38133 1197 CompiledMethodIterator iter;
thartmann@26796 1198 while(iter.next_alive()) {
rbackman@38133 1199 CompiledMethod* nm = iter.method();
iveresov@26580 1200 if (!nm->method()->is_method_handle_intrinsic()) {
iveresov@26580 1201 nm->mark_for_deoptimization();
iveresov@26580 1202 }
duke@1 1203 }
duke@1 1204 }
duke@1 1205
coleenp@13728 1206 int CodeCache::mark_for_deoptimization(Method* dependee) {
duke@1 1207 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
duke@1 1208 int number_of_marked_CodeBlobs = 0;
duke@1 1209
rbackman@38133 1210 CompiledMethodIterator iter;
thartmann@26796 1211 while(iter.next_alive()) {
rbackman@38133 1212 CompiledMethod* nm = iter.method();
duke@1 1213 if (nm->is_dependent_on_method(dependee)) {
duke@1 1214 ResourceMark rm;
duke@1 1215 nm->mark_for_deoptimization();
duke@1 1216 number_of_marked_CodeBlobs++;
duke@1 1217 }
duke@1 1218 }
duke@1 1219
duke@1 1220 return number_of_marked_CodeBlobs;
duke@1 1221 }
duke@1 1222
duke@1 1223 void CodeCache::make_marked_nmethods_not_entrant() {
duke@1 1224 assert_locked_or_safepoint(CodeCache_lock);
rbackman@38133 1225 CompiledMethodIterator iter;
thartmann@26796 1226 while(iter.next_alive()) {
rbackman@38133 1227 CompiledMethod* nm = iter.method();
vlivanov@43675 1228 if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
duke@1 1229 nm->make_not_entrant();
duke@1 1230 }
duke@1 1231 }
duke@1 1232 }
duke@1 1233
coleenp@28374 1234 // Flushes compiled methods dependent on dependee.
coleenp@46329 1235 void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
coleenp@28374 1236 assert_lock_strong(Compile_lock);
coleenp@28374 1237
coleenp@28374 1238 if (number_of_nmethods_with_dependencies() == 0) return;
coleenp@28374 1239
coleenp@28374 1240 // CodeCache can only be updated by a thread_in_VM and they will all be
coleenp@28374 1241 // stopped during the safepoint so CodeCache will be safe to update without
coleenp@28374 1242 // holding the CodeCache_lock.
coleenp@28374 1243
coleenp@28374 1244 KlassDepChange changes(dependee);
coleenp@28374 1245
coleenp@28374 1246 // Compute the dependent nmethods
coleenp@28374 1247 if (mark_for_deoptimization(changes) > 0) {
coleenp@28374 1248 // At least one nmethod has been marked for deoptimization
coleenp@28374 1249 VM_Deoptimize op;
coleenp@28374 1250 VMThread::execute(&op);
coleenp@28374 1251 }
coleenp@28374 1252 }
coleenp@28374 1253
coleenp@28374 1254 #ifdef HOTSWAP
coleenp@28374 1255 // Flushes compiled methods dependent on dependee in the evolutionary sense
coleenp@46329 1256 void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
coleenp@28374 1257 // --- Compile_lock is not held. However, we are at a safepoint.
coleenp@28374 1258 assert_locked_or_safepoint(Compile_lock);
kvn@42650 1259 if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;
coleenp@28374 1260
coleenp@28374 1261 // CodeCache can only be updated by a thread_in_VM and they will all be
coleenp@28374 1262 // stopped during the safepoint so CodeCache will be safe to update without
coleenp@28374 1263 // holding the CodeCache_lock.
coleenp@28374 1264
coleenp@28374 1265 // Compute the dependent nmethods
coleenp@46329 1266 if (mark_for_evol_deoptimization(ev_k) > 0) {
coleenp@28374 1267 // At least one nmethod has been marked for deoptimization
coleenp@28374 1268
coleenp@28374 1269 // All this already happens inside a VM_Operation, so we'll do all the work here.
coleenp@28374 1270 // Stuff copied from VM_Deoptimize and modified slightly.
coleenp@28374 1271
coleenp@28374 1272 // We do not want any GCs to happen while we are in the middle of this VM operation
coleenp@28374 1273 ResourceMark rm;
coleenp@28374 1274 DeoptimizationMarker dm;
coleenp@28374 1275
coleenp@28374 1276 // Deoptimize all activations depending on marked nmethods
coleenp@28374 1277 Deoptimization::deoptimize_dependents();
coleenp@28374 1278
thartmann@32401 1279 // Make the dependent methods not entrant
coleenp@28374 1280 make_marked_nmethods_not_entrant();
coleenp@28374 1281 }
coleenp@28374 1282 }
coleenp@28374 1283 #endif // HOTSWAP
coleenp@28374 1284
coleenp@28374 1285
coleenp@28374 1286 // Flushes compiled methods dependent on dependee
coleenp@46727 1287 void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
coleenp@28374 1288 // --- Compile_lock is not held. However, we are at a safepoint.
coleenp@28374 1289 assert_locked_or_safepoint(Compile_lock);
coleenp@28374 1290
coleenp@28374 1291 // CodeCache can only be updated by a thread_in_VM and they will all be
coleenp@28374 1292 // stopped during the safepoint so CodeCache will be safe to update without
coleenp@28374 1293 // holding the CodeCache_lock.
coleenp@28374 1294
coleenp@28374 1295 // Compute the dependent nmethods
coleenp@28374 1296 if (mark_for_deoptimization(m_h()) > 0) {
coleenp@28374 1297 // At least one nmethod has been marked for deoptimization
coleenp@28374 1298
coleenp@28374 1299 // All this already happens inside a VM_Operation, so we'll do all the work here.
coleenp@28374 1300 // Stuff copied from VM_Deoptimize and modified slightly.
coleenp@28374 1301
coleenp@28374 1302 // We do not want any GCs to happen while we are in the middle of this VM operation
coleenp@28374 1303 ResourceMark rm;
coleenp@28374 1304 DeoptimizationMarker dm;
coleenp@28374 1305
coleenp@28374 1306 // Deoptimize all activations depending on marked nmethods
coleenp@28374 1307 Deoptimization::deoptimize_dependents();
coleenp@28374 1308
thartmann@32401 1309 // Make the dependent methods not entrant
coleenp@28374 1310 make_marked_nmethods_not_entrant();
coleenp@28374 1311 }
coleenp@28374 1312 }
coleenp@28374 1313
duke@1 1314 void CodeCache::verify() {
thartmann@26796 1315 assert_locked_or_safepoint(CodeCache_lock);
thartmann@26796 1316 FOR_ALL_HEAPS(heap) {
thartmann@26796 1317 (*heap)->verify();
thartmann@26796 1318 FOR_ALL_BLOBS(cb, *heap) {
thartmann@26796 1319 if (cb->is_alive()) {
thartmann@26796 1320 cb->verify();
thartmann@26796 1321 }
thartmann@26796 1322 }
duke@1 1323 }
duke@1 1324 }
duke@1 1325
thartmann@26796 1326 // A CodeHeap is full. Print out warning and report event.
ysuenaga@46269 1327 PRAGMA_DIAG_PUSH
ysuenaga@46269 1328 PRAGMA_FORMAT_NONLITERAL_IGNORED
thartmann@26796 1329 void CodeCache::report_codemem_full(int code_blob_type, bool print) {
thartmann@26796 1330 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
zmajo@26942 1331 CodeHeap* heap = get_code_heap(code_blob_type);
zmajo@26942 1332 assert(heap != NULL, "heap is null");
thartmann@26796 1333
thartmann@34158 1334 if ((heap->full_count() == 0) || print) {
thartmann@26796 1335 // Not yet reported for this heap; report it now
thartmann@26796 1336 if (SegmentedCodeCache) {
ysuenaga@46269 1337 ResourceMark rm;
ysuenaga@46269 1338 stringStream msg1_stream, msg2_stream;
ysuenaga@46269 1339 msg1_stream.print("%s is full. Compiler has been disabled.",
ysuenaga@46269 1340 get_code_heap_name(code_blob_type));
ysuenaga@46269 1341 msg2_stream.print("Try increasing the code heap size using -XX:%s=",
ysuenaga@46269 1342 get_code_heap_flag_name(code_blob_type));
ysuenaga@46269 1343 const char *msg1 = msg1_stream.as_string();
ysuenaga@46269 1344 const char *msg2 = msg2_stream.as_string();
ysuenaga@46269 1345
ysuenaga@46269 1346 log_warning(codecache)(msg1);
ysuenaga@46269 1347 log_warning(codecache)(msg2);
ysuenaga@46269 1348 warning(msg1);
ysuenaga@46269 1349 warning(msg2);
thartmann@26796 1350 } else {
ysuenaga@46269 1351 const char *msg1 = "CodeCache is full. Compiler has been disabled.";
ysuenaga@46269 1352 const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
ysuenaga@46269 1353
ysuenaga@46269 1354 log_warning(codecache)(msg1);
ysuenaga@46269 1355 log_warning(codecache)(msg2);
ysuenaga@46269 1356 warning(msg1);
ysuenaga@46269 1357 warning(msg2);
thartmann@26796 1358 }
thartmann@26796 1359 ResourceMark rm;
thartmann@26796 1360 stringStream s;
thartmann@26796 1361 // Dump code cache into a buffer before locking the tty.
thartmann@26796 1362 {
thartmann@26796 1363 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
thartmann@26796 1364 print_summary(&s);
thartmann@26796 1365 }
thartmann@26796 1366 ttyLocker ttyl;
thartmann@26796 1367 tty->print("%s", s.as_string());
thartmann@26796 1368 }
thartmann@26796 1369
thartmann@34158 1370 heap->report_full();
thartmann@34158 1371
sla@18025 1372 EventCodeCacheFull event;
sla@18025 1373 if (event.should_commit()) {
thartmann@26796 1374 event.set_codeBlobType((u1)code_blob_type);
thartmann@26796 1375 event.set_startAddress((u8)heap->low_boundary());
thartmann@26796 1376 event.set_commitedTopAddress((u8)heap->high());
thartmann@26796 1377 event.set_reservedTopAddress((u8)heap->high_boundary());
thartmann@34158 1378 event.set_entryCount(heap->blob_count());
thartmann@34158 1379 event.set_methodCount(heap->nmethod_count());
thartmann@34158 1380 event.set_adaptorCount(heap->adapter_count());
neliasso@41056 1381 event.set_unallocatedCapacity(heap->unallocated_capacity());
thartmann@34158 1382 event.set_fullCount(heap->full_count());
sla@18025 1383 event.commit();
sla@18025 1384 }
sla@18025 1385 }
ysuenaga@46269 1386 PRAGMA_DIAG_POP
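// For illustration, the flag suggestions printed above correspond to command-line
// options such as the following (sizes are arbitrary examples):
//
//   -XX:ReservedCodeCacheSize=512m     // non-segmented code cache
//   -XX:NonProfiledCodeHeapSize=256m   // one of the per-heap flags named by
//                                      // get_code_heap_flag_name() when
//                                      // -XX:+SegmentedCodeCache is in effect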
sla@18025 1387
anoll@23214 1388 void CodeCache::print_memory_overhead() {
anoll@23214 1389 size_t wasted_bytes = 0;
dlong@45622 1390 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 1391 CodeHeap* curr_heap = *heap;
thartmann@26796 1392 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
thartmann@26796 1393 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
thartmann@26796 1394 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
thartmann@26796 1395 }
anoll@23214 1396 }
anoll@23214 1397 // Print bytes that are allocated in the freelist
anoll@23214 1398 ttyLocker ttl;
thartmann@26796 1399 tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
thartmann@26796 1400 tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
drchase@24424 1401 tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
drchase@24424 1402 tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
anoll@23214 1403 }
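// Worked example for the "Unused bytes" figure above (numbers are illustrative):
// with CodeCacheSegmentSize = 128, a CodeBlob whose HeapBlock spans 8 segments but
// whose size() is 1000 bytes contributes 8 * 128 - 1000 = 24 wasted bytes.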
anoll@23214 1404
duke@1 1405 //------------------------------------------------------------------------------------------------
duke@1 1406 // Non-product version
duke@1 1407
duke@1 1408 #ifndef PRODUCT
duke@1 1409
jrose@3908 1410 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
jrose@3908 1411 if (PrintCodeCache2) { // Need to add a new flag
jrose@3908 1412 ResourceMark rm;
jrose@3908 1413 if (size == 0) size = cb->size();
drchase@24424 1414 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
jrose@3908 1415 }
jrose@3908 1416 }
jrose@3908 1417
duke@1 1418 void CodeCache::print_internals() {
duke@1 1419 int nmethodCount = 0;
duke@1 1420 int runtimeStubCount = 0;
duke@1 1421 int adapterCount = 0;
duke@1 1422 int deoptimizationStubCount = 0;
duke@1 1423 int uncommonTrapStubCount = 0;
duke@1 1424 int bufferBlobCount = 0;
duke@1 1425 int total = 0;
duke@1 1426 int nmethodAlive = 0;
duke@1 1427 int nmethodNotEntrant = 0;
duke@1 1428 int nmethodZombie = 0;
duke@1 1429 int nmethodUnloaded = 0;
duke@1 1430 int nmethodJava = 0;
duke@1 1431 int nmethodNative = 0;
anoll@23214 1432 int max_nm_size = 0;
duke@1 1433 ResourceMark rm;
duke@1 1434
thartmann@26796 1435 int i = 0;
dlong@45622 1436 FOR_ALL_ALLOCABLE_HEAPS(heap) {
kvn@42650 1437 if ((_nmethod_heaps->length() >= 1) && Verbose) {
thartmann@26918 1438 tty->print_cr("-- %s --", (*heap)->name());
thartmann@26796 1439 }
thartmann@26796 1440 FOR_ALL_BLOBS(cb, *heap) {
thartmann@26796 1441 total++;
thartmann@26796 1442 if (cb->is_nmethod()) {
thartmann@26796 1443 nmethod* nm = (nmethod*)cb;
duke@1 1444
thartmann@26796 1445 if (Verbose && nm->method() != NULL) {
thartmann@26796 1446 ResourceMark rm;
thartmann@26796 1447 char *method_name = nm->method()->name_and_sig_as_C_string();
thartmann@26796 1448 tty->print("%s", method_name);
thartmann@26796 1449 if(nm->is_alive()) { tty->print_cr(" alive"); }
thartmann@26796 1450 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
thartmann@26796 1451 if(nm->is_zombie()) { tty->print_cr(" zombie"); }
thartmann@26796 1452 }
thartmann@26796 1453
thartmann@26796 1454 nmethodCount++;
thartmann@26796 1455
thartmann@26796 1456 if(nm->is_alive()) { nmethodAlive++; }
thartmann@26796 1457 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
thartmann@26796 1458 if(nm->is_zombie()) { nmethodZombie++; }
thartmann@26796 1459 if(nm->is_unloaded()) { nmethodUnloaded++; }
thartmann@26796 1460 if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
thartmann@26796 1461
thartmann@26796 1462 if(nm->method() != NULL && nm->is_java_method()) {
thartmann@26796 1463 nmethodJava++;
thartmann@26796 1464 max_nm_size = MAX2(max_nm_size, nm->size());
thartmann@26796 1465 }
thartmann@26796 1466 } else if (cb->is_runtime_stub()) {
thartmann@26796 1467 runtimeStubCount++;
thartmann@26796 1468 } else if (cb->is_deoptimization_stub()) {
thartmann@26796 1469 deoptimizationStubCount++;
thartmann@26796 1470 } else if (cb->is_uncommon_trap_stub()) {
thartmann@26796 1471 uncommonTrapStubCount++;
thartmann@26796 1472 } else if (cb->is_adapter_blob()) {
thartmann@26796 1473 adapterCount++;
thartmann@26796 1474 } else if (cb->is_buffer_blob()) {
thartmann@26796 1475 bufferBlobCount++;
duke@1 1476 }
duke@1 1477 }
duke@1 1478 }
duke@1 1479
duke@1 1480 int bucketSize = 512;
anoll@23214 1481 int bucketLimit = max_nm_size / bucketSize + 1;
zgu@13195 1482 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
anoll@23214 1483 memset(buckets, 0, sizeof(int) * bucketLimit);
duke@1 1484
thartmann@26796 1485 NMethodIterator iter;
thartmann@26796 1486 while(iter.next()) {
thartmann@26796 1487 nmethod* nm = iter.method();
thartmann@26796 1488 if(nm->method() != NULL && nm->is_java_method()) {
thartmann@26796 1489 buckets[nm->size() / bucketSize]++;
duke@1 1490 }
duke@1 1491 }
anoll@23214 1492
duke@1 1493 tty->print_cr("Code Cache Entries (total of %d)",total);
duke@1 1494 tty->print_cr("-------------------------------------------------");
duke@1 1495 tty->print_cr("nmethods: %d",nmethodCount);
duke@1 1496 tty->print_cr("\talive: %d",nmethodAlive);
duke@1 1497 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
duke@1 1498 tty->print_cr("\tzombie: %d",nmethodZombie);
duke@1 1499 tty->print_cr("\tunloaded: %d",nmethodUnloaded);
duke@1 1500 tty->print_cr("\tjava: %d",nmethodJava);
duke@1 1501 tty->print_cr("\tnative: %d",nmethodNative);
duke@1 1502 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
duke@1 1503 tty->print_cr("adapters: %d",adapterCount);
duke@1 1504 tty->print_cr("buffer blobs: %d",bufferBlobCount);
duke@1 1505 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
duke@1 1506 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
duke@1 1507 tty->print_cr("\nnmethod size distribution (non-zombie java)");
duke@1 1508 tty->print_cr("-------------------------------------------------");
duke@1 1509
duke@1 1510 for(int i=0; i<bucketLimit; i++) {
duke@1 1511 if(buckets[i] != 0) {
duke@1 1512 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
duke@1 1513 tty->fill_to(40);
duke@1 1514 tty->print_cr("%d",buckets[i]);
duke@1 1515 }
duke@1 1516 }
duke@1 1517
coleenp@27880 1518 FREE_C_HEAP_ARRAY(int, buckets);
anoll@23214 1519 print_memory_overhead();
duke@1 1520 }
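// Bucketing example for the size histogram above (illustrative): with
// bucketSize = 512, an nmethod with size() == 1300 increments buckets[1300 / 512],
// i.e. buckets[2], which is printed as the "1024 - 1536 bytes" row.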
duke@1 1521
vladidan@15201 1522 #endif // !PRODUCT
vladidan@15201 1523
duke@1 1524 void CodeCache::print() {
vladidan@15201 1525 print_summary(tty);
vladidan@15201 1526
vladidan@15201 1527 #ifndef PRODUCT
vladidan@15201 1528 if (!Verbose) return;
vladidan@15201 1529
duke@1 1530 CodeBlob_sizes live;
duke@1 1531 CodeBlob_sizes dead;
duke@1 1532
dlong@45622 1533 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 1534 FOR_ALL_BLOBS(cb, *heap) {
thartmann@26796 1535 if (!cb->is_alive()) {
thartmann@26796 1536 dead.add(cb);
thartmann@26796 1537 } else {
thartmann@26796 1538 live.add(cb);
thartmann@26796 1539 }
duke@1 1540 }
duke@1 1541 }
duke@1 1542
duke@1 1543 tty->print_cr("CodeCache:");
anoll@22506 1544 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
duke@1 1545
duke@1 1546 if (!live.is_empty()) {
duke@1 1547 live.print("live");
duke@1 1548 }
duke@1 1549 if (!dead.is_empty()) {
duke@1 1550 dead.print("dead");
duke@1 1551 }
duke@1 1552
vladidan@15201 1553 if (WizardMode) {
duke@1 1554 // print the oop_map usage
duke@1 1555 int code_size = 0;
duke@1 1556 int number_of_blobs = 0;
duke@1 1557 int number_of_oop_maps = 0;
duke@1 1558 int map_size = 0;
dlong@45622 1559 FOR_ALL_ALLOCABLE_HEAPS(heap) {
thartmann@26796 1560 FOR_ALL_BLOBS(cb, *heap) {
thartmann@26796 1561 if (cb->is_alive()) {
thartmann@26796 1562 number_of_blobs++;
thartmann@26796 1563 code_size += cb->code_size();
rbackman@30590 1564 ImmutableOopMapSet* set = cb->oop_maps();
thartmann@26796 1565 if (set != NULL) {
rbackman@30590 1566 number_of_oop_maps += set->count();
rbackman@30628 1567 map_size += set->nr_of_bytes();
thartmann@26796 1568 }
duke@1 1569 }
duke@1 1570 }
duke@1 1571 }
duke@1 1572 tty->print_cr("OopMaps");
duke@1 1573 tty->print_cr(" #blobs = %d", number_of_blobs);
duke@1 1574 tty->print_cr(" code size = %d", code_size);
duke@1 1575 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
duke@1 1576 tty->print_cr(" map size = %d", map_size);
duke@1 1577 }
duke@1 1578
vladidan@15201 1579 #endif // !PRODUCT
duke@1 1580 }
duke@1 1581
vladidan@15201 1582 void CodeCache::print_summary(outputStream* st, bool detailed) {
thartmann@26796 1583 FOR_ALL_HEAPS(heap_iterator) {
thartmann@26796 1584 CodeHeap* heap = (*heap_iterator);
thartmann@26796 1585 size_t total = (heap->high_boundary() - heap->low_boundary());
bdelsart@31620 1586 if (_heaps->length() >= 1) {
thartmann@26918 1587 st->print("%s:", heap->name());
thartmann@26796 1588 } else {
thartmann@26919 1589 st->print("CodeCache:");
thartmann@26796 1590 }
thartmann@26796 1591 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
thartmann@26796 1592 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
thartmann@26796 1593 total/K, (total - heap->unallocated_capacity())/K,
thartmann@26796 1594 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
thartmann@26796 1595
thartmann@26796 1596 if (detailed) {
thartmann@26796 1597 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
thartmann@26796 1598 p2i(heap->low_boundary()),
thartmann@26796 1599 p2i(heap->high()),
thartmann@26796 1600 p2i(heap->high_boundary()));
thartmann@26796 1601 }
thartmann@26796 1602 }
never@7108 1603
vladidan@15201 1604 if (detailed) {
vladidan@15201 1605 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
thartmann@26796 1606 " adapters=" UINT32_FORMAT,
thartmann@34158 1607 blob_count(), nmethod_count(), adapter_count());
vladidan@15201 1608 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
vladidan@15201 1609 "enabled" : Arguments::mode() == Arguments::_int ?
vladidan@15201 1610 "disabled (interpreter mode)" :
vladidan@15201 1611 "disabled (not enough contiguous free space left)");
vladidan@15201 1612 }
never@7108 1613 }
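// Example of the non-detailed summary output (heap name and figures are made up;
// with -XX:+SegmentedCodeCache one such line is printed per code heap):
//
//   CodeHeap 'non-profiled nmethods': size=120032Kb used=8124Kb max_used=8124Kb free=111908Kb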
kvn@8672 1614
neliasso@26587 1615 void CodeCache::print_codelist(outputStream* st) {
neliasso@35825 1616 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
neliasso@26587 1617
neliasso@46637 1618 CompiledMethodIterator iter;
neliasso@46637 1619 while (iter.next_alive()) {
neliasso@46637 1620 CompiledMethod* cm = iter.method();
neliasso@26587 1621 ResourceMark rm;
neliasso@46637 1622 char* method_name = cm->method()->name_and_sig_as_C_string();
neliasso@46637 1623 st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
neliasso@46637 1624 cm->compile_id(), cm->comp_level(), cm->get_state(),
neliasso@46637 1625 method_name,
neliasso@46637 1626 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
neliasso@26587 1627 }
neliasso@26587 1628 }
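// A line emitted by print_codelist() has the shape compile_id, comp_level, state,
// method signature, then header/code begin and code end addresses, e.g. (all values
// made up for illustration):
//
//   1234 4 0 java.lang.String.hashCode()I [0x00007f30a1c0b010, 0x00007f30a1c0b1c0 - 0x00007f30a1c0b3f8]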
neliasso@26587 1629
neliasso@26587 1630 void CodeCache::print_layout(outputStream* st) {
neliasso@35825 1631 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
neliasso@26587 1632 ResourceMark rm;
neliasso@26587 1633 print_summary(st, true);
neliasso@26587 1634 }
neliasso@26587 1635
kvn@8672 1636 void CodeCache::log_state(outputStream* st) {
kvn@8672 1637 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
neliasso@17016 1638 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
thartmann@34158 1639 blob_count(), nmethod_count(), adapter_count(),
neliasso@17016 1640 unallocated_capacity());
kvn@8672 1641 }
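// log_state() emits attribute text for the compilation log, e.g. (illustrative
// values):
//
//   total_blobs='3654' nmethods='3014' adapters='612' free_code_cache='245467136'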