annotate src/hotspot/share/code/compiledMethod.cpp @ 53628:9cb53c505acd

8214056: Allow the GC to attach context information to CompiledMethod
Reviewed-by: shade, kvn, adinn
author eosterlund
date Thu, 22 Nov 2018 09:55:44 +0100
parents 8b26bd8b1832
children 7e268f863ff0
rev   line source
rbackman@38133 1 /*
kbarrett@49386 2 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
rbackman@38133 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
rbackman@38133 4 *
rbackman@38133 5 * This code is free software; you can redistribute it and/or modify it
rbackman@38133 6 * under the terms of the GNU General Public License version 2 only, as
rbackman@38133 7 * published by the Free Software Foundation.
rbackman@38133 8 *
rbackman@38133 9 * This code is distributed in the hope that it will be useful, but WITHOUT
rbackman@38133 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
rbackman@38133 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
rbackman@38133 12 * version 2 for more details (a copy is included in the LICENSE file that
rbackman@38133 13 * accompanied this code).
rbackman@38133 14 *
rbackman@38133 15 * You should have received a copy of the GNU General Public License version
rbackman@38133 16 * 2 along with this work; if not, write to the Free Software Foundation,
rbackman@38133 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
rbackman@38133 18 *
rbackman@38133 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
rbackman@38133 20 * or visit www.oracle.com if you need additional information or have any
rbackman@38133 21 * questions.
rbackman@38133 22 *
rbackman@38133 23 */
rbackman@38133 24
rbackman@38133 25 #include "precompiled.hpp"
rbackman@38133 26 #include "code/compiledIC.hpp"
redestad@46623 27 #include "code/compiledMethod.inline.hpp"
rbackman@38133 28 #include "code/scopeDesc.hpp"
rbackman@38133 29 #include "code/codeCache.hpp"
eosterlund@53314 30 #include "gc/shared/barrierSet.hpp"
eosterlund@53314 31 #include "gc/shared/gcBehaviours.hpp"
hseigel@49805 32 #include "interpreter/bytecode.inline.hpp"
coleenp@51020 33 #include "logging/log.hpp"
coleenp@51020 34 #include "logging/logTag.hpp"
rbackman@38133 35 #include "memory/resourceArea.hpp"
coleenp@49945 36 #include "oops/methodData.hpp"
hseigel@49805 37 #include "oops/method.inline.hpp"
coleenp@49945 38 #include "prims/methodHandles.hpp"
coleenp@49914 39 #include "runtime/handles.inline.hpp"
rbackman@38133 40 #include "runtime/mutexLocker.hpp"
rbackman@38133 41
eosterlund@53314 42 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
eosterlund@53314 43 int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
eosterlund@53314 44 bool caller_must_gc_arguments)
neliasso@42040 45 : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
eosterlund@53314 46 _mark_for_deoptimization_status(not_marked),
eosterlund@53628 47 _method(method),
eosterlund@53628 48 _gc_data(NULL)
eosterlund@53314 49 {
rbackman@38133 50 init_defaults();
rbackman@38133 51 }
rbackman@38133 52
eosterlund@53314 53 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
eosterlund@53314 54 int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
eosterlund@53314 55 OopMapSet* oop_maps, bool caller_must_gc_arguments)
eosterlund@53314 56 : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
eosterlund@53314 57 frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
eosterlund@53314 58 _mark_for_deoptimization_status(not_marked),
eosterlund@53628 59 _method(method),
eosterlund@53628 60 _gc_data(NULL)
eosterlund@53314 61 {
rbackman@38133 62 init_defaults();
rbackman@38133 63 }
rbackman@38133 64
rbackman@38133 65 void CompiledMethod::init_defaults() {
rbackman@38133 66 _has_unsafe_access = 0;
rbackman@38133 67 _has_method_handle_invokes = 0;
rbackman@38133 68 _lazy_critical_native = 0;
rbackman@38133 69 _has_wide_vectors = 0;
rbackman@38133 70 }
rbackman@38133 71
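What this revision (8214056) actually adds is the _gc_data field initialized to NULL in both constructors above: a single opaque pointer that lets a concrete GC attach its own per-nmethod context. Below is a minimal standalone sketch of the idea, assuming templated accessors along the lines of gc_data<T>() / set_gc_data<T>() in compiledMethod.hpp; the GC-side payload type is purely hypothetical and none of this is the HotSpot code itself.

// Standalone sketch (not HotSpot code): the compiled method carries one opaque
// pointer, and a concrete GC reinterprets it as its own per-nmethod context.
#include <cstddef>

struct FakeCompiledMethod {
  void* _gc_data = nullptr;   // set to NULL in the constructors above

  template <typename T>
  T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }

  template <typename T>
  void set_gc_data(T* data) { _gc_data = reinterpret_cast<void*>(data); }
};

// Hypothetical GC-side payload: whatever context a collector wants to cache
// per compiled method (entry barrier state, oop table snapshot, ...).
struct MyGCNMethodData {
  int disarm_value = 0;
};

int main() {
  FakeCompiledMethod cm;
  MyGCNMethodData data;
  cm.set_gc_data(&data);                          // GC attaches its context
  cm.gc_data<MyGCNMethodData>()->disarm_value++;  // and reads it back later
  return 0;
}
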
rbackman@38133 72 bool CompiledMethod::is_method_handle_return(address return_pc) {
rbackman@38133 73 if (!has_method_handle_invokes()) return false;
rbackman@38133 74 PcDesc* pd = pc_desc_at(return_pc);
rbackman@38133 75 if (pd == NULL)
rbackman@38133 76 return false;
rbackman@38133 77 return pd->is_method_handle_invoke();
rbackman@38133 78 }
rbackman@38133 79
rbackman@38133 80 // Returns a string version of the method state.
rbackman@38133 81 const char* CompiledMethod::state() const {
rbackman@38133 82 int state = get_state();
rbackman@38133 83 switch (state) {
iveresov@48204 84 case not_installed:
iveresov@48204 85 return "not installed";
rbackman@38133 86 case in_use:
rbackman@38133 87 return "in use";
rbackman@38133 88 case not_used:
rbackman@38133 89 return "not_used";
rbackman@38133 90 case not_entrant:
rbackman@38133 91 return "not_entrant";
rbackman@38133 92 case zombie:
rbackman@38133 93 return "zombie";
rbackman@38133 94 case unloaded:
rbackman@38133 95 return "unloaded";
rbackman@38133 96 default:
rbackman@38133 97 fatal("unexpected method state: %d", state);
rbackman@38133 98 return NULL;
rbackman@38133 99 }
rbackman@38133 100 }
rbackman@38133 101
rbackman@38133 102 //-----------------------------------------------------------------------------
rbackman@38133 103
eosterlund@53334 104 ExceptionCache* CompiledMethod::exception_cache_acquire() const {
eosterlund@53334 105 return OrderAccess::load_acquire(&_exception_cache);
eosterlund@53334 106 }
eosterlund@53334 107
rbackman@38133 108 void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
rbackman@38133 109 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
rbackman@38133 110 assert(new_entry != NULL,"Must be non null");
rbackman@38133 111 assert(new_entry->next() == NULL, "Must be null");
rbackman@38133 112
eosterlund@53334 113 for (;;) {
eosterlund@53334 114 ExceptionCache *ec = exception_cache();
eosterlund@53334 115 if (ec != NULL) {
eosterlund@53334 116 Klass* ex_klass = ec->exception_type();
eosterlund@53334 117 if (!ex_klass->is_loader_alive()) {
eosterlund@53334 118 // We must guarantee that entries are not inserted with new next pointer
eosterlund@53334 119 // edges to ExceptionCache entries with dead klasses, due to bad interactions
eosterlund@53334 120 // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
eosterlund@53334 121 // the head pointer forward to the first live ExceptionCache, so that the new
eosterlund@53334 122 // next pointers always point at live ExceptionCaches that are not removed due
eosterlund@53334 123 // to concurrent ExceptionCache cleanup.
eosterlund@53334 124 ExceptionCache* next = ec->next();
eosterlund@53334 125 if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
eosterlund@53334 126 CodeCache::release_exception_cache(ec);
eosterlund@53334 127 }
eosterlund@53334 128 continue;
eosterlund@53334 129 }
eosterlund@53334 130 ec = exception_cache();
eosterlund@53334 131 if (ec != NULL) {
eosterlund@53334 132 new_entry->set_next(ec);
eosterlund@53334 133 }
eosterlund@53334 134 }
eosterlund@53334 135 if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
eosterlund@53334 136 return;
eosterlund@53334 137 }
rbackman@38133 138 }
rbackman@38133 139 }
rbackman@38133 140
coleenp@50316 141 void CompiledMethod::clean_exception_cache() {
eosterlund@53334 142 // For each nmethod, only a single thread may call this cleanup function
eosterlund@53334 143 // at the same time, whether called in STW cleanup or concurrent cleanup.
eosterlund@53334 144 // Note that if the GC is processing exception cache cleaning in a concurrent phase,
eosterlund@53334 145 // then a single writer may contend with cleaning up the head pointer to the
eosterlund@53334 146 // first ExceptionCache node that has a Klass* that is alive. That is fine,
eosterlund@53334 147 // as long as there is no concurrent cleanup of next pointers from concurrent writers.
eosterlund@53334 148 // And the concurrent writers do not clean up next pointers, only the head.
eosterlund@53334 149 // Also note that concurrent readers will walk through Klass* pointers that are not
eosterlund@53334 150 // alive. That does not cause ABA problems, because Klass* is deleted after
eosterlund@53334 151 // a handshake with all threads, after all stale ExceptionCaches have been
eosterlund@53334 152 // unlinked. That is also when the CodeCache::exception_cache_purge_list()
eosterlund@53334 153 // is deleted, with all ExceptionCache entries that were cleaned concurrently.
eosterlund@53334 154 // That similarly implies that CAS operations on ExceptionCache entries do not
eosterlund@53334 155 // suffer from ABA problems as unlinking and deletion is separated by a global
eosterlund@53334 156 // handshake operation.
rbackman@38133 157 ExceptionCache* prev = NULL;
eosterlund@53334 158 ExceptionCache* curr = exception_cache_acquire();
rbackman@38133 159
rbackman@38133 160 while (curr != NULL) {
rbackman@38133 161 ExceptionCache* next = curr->next();
rbackman@38133 162
eosterlund@53334 163 if (!curr->exception_type()->is_loader_alive()) {
rbackman@38133 164 if (prev == NULL) {
eosterlund@53334 165 // Try to clean the head; this is contended by concurrent inserts that
eosterlund@53334 166 // both lazily clean the head and insert entries at the head. If
eosterlund@53334 167 // the CAS fails, the operation is restarted.
eosterlund@53334 168 if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
eosterlund@53334 169 prev = NULL;
eosterlund@53334 170 curr = exception_cache_acquire();
eosterlund@53334 171 continue;
eosterlund@53334 172 }
rbackman@38133 173 } else {
eosterlund@53334 174 // It is impossible during cleanup to connect the next pointer to
eosterlund@53334 175 // an ExceptionCache that has not been published before a safepoint
eosterlund@53334 176 // prior to the cleanup. Therefore, release is not required.
rbackman@38133 177 prev->set_next(next);
rbackman@38133 178 }
rbackman@38133 179 // prev stays the same.
eosterlund@53334 180
eosterlund@53334 181 CodeCache::release_exception_cache(curr);
rbackman@38133 182 } else {
rbackman@38133 183 prev = curr;
rbackman@38133 184 }
rbackman@38133 185
rbackman@38133 186 curr = next;
rbackman@38133 187 }
rbackman@38133 188 }
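Taken together, add_exception_cache_entry() and clean_exception_cache() implement a lock-free singly-linked list: inserters CAS new entries onto the head and roll the head past entries whose Klass is dead, a single cleaner CASes the head when unlinking the first dead entry and uses plain stores for next pointers further down, and readers (handler_for_exception_and_pc() below) traverse without locks. The following standalone model sketches that protocol with std::atomic; the boolean dead flag stands in for !is_loader_alive(), the unsynchronized purge_list stands in for CodeCache::release_exception_cache() plus the global handshake, and none of it is HotSpot API.

#include <atomic>
#include <vector>

// Standalone model (not HotSpot code) of the exception cache list protocol:
// many inserters, one cleaner, lock-free readers.
struct Entry {
  bool   dead = false;      // stands in for !exception_type()->is_loader_alive()
  Entry* next = nullptr;
};

std::atomic<Entry*> head{nullptr};
std::vector<Entry*> purge_list;  // deferred reclamation; HotSpot hands entries to
                                 // CodeCache::release_exception_cache() and frees
                                 // them after a handshake, and that hand-off is
                                 // thread-safe, unlike this plain vector

// Insert: never publish a next pointer that leads through a dead entry. If the
// head is dead, roll it forward first (mirrors add_exception_cache_entry()).
void insert(Entry* new_entry) {
  for (;;) {
    Entry* ec = head.load(std::memory_order_acquire);
    if (ec != nullptr && ec->dead) {
      Entry* next = ec->next;
      if (head.compare_exchange_strong(ec, next)) {
        purge_list.push_back(ec);  // unlinked; reclaimed later
      }
      continue;                    // re-read the head and retry
    }
    new_entry->next = ec;          // may be nullptr
    if (head.compare_exchange_strong(ec, new_entry)) {
      return;
    }
  }
}

// Cleanup: only one cleaner runs at a time. The head is contended with
// inserters, so it is unlinked with a CAS; next pointers further down are
// cleaned with plain stores (mirrors clean_exception_cache()).
void clean() {
  Entry* prev = nullptr;
  Entry* curr = head.load(std::memory_order_acquire);
  while (curr != nullptr) {
    Entry* next = curr->next;
    if (curr->dead) {
      if (prev == nullptr) {
        Entry* expected = curr;
        if (!head.compare_exchange_strong(expected, next)) {
          prev = nullptr;
          curr = head.load(std::memory_order_acquire);
          continue;                // lost a race with an inserter; restart
        }
      } else {
        // The entry was published before cleanup started, so no releasing
        // store is needed when bypassing it.
        prev->next = next;
      }
      purge_list.push_back(curr);
    } else {
      prev = curr;
    }
    curr = next;
  }
}

int main() {
  Entry a, b;                      // stack-allocated for the sketch; nothing is freed
  insert(&a);
  a.dead = true;                   // simulate the Klass becoming unreachable
  insert(&b);                      // rolls the dead head before publishing b
  clean();                         // unlinks whatever dead entries remain
  return (head.load() == &b && purge_list.size() == 1) ? 0 : 1;
}
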
rbackman@38133 189
rbackman@38133 190 // public method for accessing the exception cache
rbackman@38133 191 // These are the public access methods.
rbackman@38133 192 address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
rbackman@38133 193 // We never grab a lock to read the exception cache, so we may
rbackman@38133 194 // have false negatives. This is okay, as it can only happen during
rbackman@38133 195 // the first few exception lookups for a given nmethod.
eosterlund@53334 196 ExceptionCache* ec = exception_cache_acquire();
rbackman@38133 197 while (ec != NULL) {
rbackman@38133 198 address ret_val;
rbackman@38133 199 if ((ret_val = ec->match(exception,pc)) != NULL) {
rbackman@38133 200 return ret_val;
rbackman@38133 201 }
rbackman@38133 202 ec = ec->next();
rbackman@38133 203 }
rbackman@38133 204 return NULL;
rbackman@38133 205 }
rbackman@38133 206
rbackman@38133 207 void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
rbackman@38133 208 // There are potential race conditions during exception cache updates, so we
rbackman@38133 209 // must own the ExceptionCache_lock before doing ANY modifications. Because
rbackman@38133 210 // we don't lock during reads, it is possible to have several threads attempt
rbackman@38133 211 // to update the cache with the same data. We need to check for already inserted
rbackman@38133 212 // copies of the current data before adding it.
rbackman@38133 213
rbackman@38133 214 MutexLocker ml(ExceptionCache_lock);
rbackman@38133 215 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
rbackman@38133 216
rbackman@38133 217 if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
rbackman@38133 218 target_entry = new ExceptionCache(exception,pc,handler);
rbackman@38133 219 add_exception_cache_entry(target_entry);
rbackman@38133 220 }
rbackman@38133 221 }
rbackman@38133 222
rbackman@38133 223 // private method for handling exception cache
rbackman@38133 224 // These methods are private, and used to manipulate the exception cache
rbackman@38133 225 // directly.
rbackman@38133 226 ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
eosterlund@53334 227 ExceptionCache* ec = exception_cache_acquire();
rbackman@38133 228 while (ec != NULL) {
rbackman@38133 229 if (ec->match_exception_with_space(exception)) {
rbackman@38133 230 return ec;
rbackman@38133 231 }
rbackman@38133 232 ec = ec->next();
rbackman@38133 233 }
rbackman@38133 234 return NULL;
rbackman@38133 235 }
rbackman@38133 236
eosterlund@53334 237 //-------------end of code for ExceptionCache--------------
eosterlund@53334 238
rbackman@38133 239 bool CompiledMethod::is_at_poll_return(address pc) {
rbackman@38133 240 RelocIterator iter(this, pc, pc+1);
rbackman@38133 241 while (iter.next()) {
rbackman@38133 242 if (iter.type() == relocInfo::poll_return_type)
rbackman@38133 243 return true;
rbackman@38133 244 }
rbackman@38133 245 return false;
rbackman@38133 246 }
rbackman@38133 247
rbackman@38133 248
rbackman@38133 249 bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
rbackman@38133 250 RelocIterator iter(this, pc, pc+1);
rbackman@38133 251 while (iter.next()) {
rbackman@38133 252 relocInfo::relocType t = iter.type();
rbackman@38133 253 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
rbackman@38133 254 return true;
rbackman@38133 255 }
rbackman@38133 256 return false;
rbackman@38133 257 }
rbackman@38133 258
rbackman@38133 259 void CompiledMethod::verify_oop_relocations() {
rbackman@38133 260 // Ensure that the code matches the current oop values
rbackman@38133 261 RelocIterator iter(this, NULL, NULL);
rbackman@38133 262 while (iter.next()) {
rbackman@38133 263 if (iter.type() == relocInfo::oop_type) {
rbackman@38133 264 oop_Relocation* reloc = iter.oop_reloc();
rbackman@38133 265 if (!reloc->oop_is_immediate()) {
rbackman@38133 266 reloc->verify_oop_relocation();
rbackman@38133 267 }
rbackman@38133 268 }
rbackman@38133 269 }
rbackman@38133 270 }
rbackman@38133 271
rbackman@38133 272
rbackman@38133 273 ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
rbackman@38133 274 PcDesc* pd = pc_desc_at(pc);
rbackman@38133 275 guarantee(pd != NULL, "scope must be present");
rbackman@38133 276 return new ScopeDesc(this, pd->scope_decode_offset(),
rbackman@38133 277 pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
rbackman@38133 278 pd->return_oop());
rbackman@38133 279 }
rbackman@38133 280
fparain@46796 281 ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
fparain@46796 282 PcDesc* pd = pc_desc_near(pc);
fparain@46796 283 guarantee(pd != NULL, "scope must be present");
fparain@46796 284 return new ScopeDesc(this, pd->scope_decode_offset(),
fparain@46796 285 pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
fparain@46796 286 pd->return_oop());
fparain@46796 287 }
fparain@46796 288
coleenp@51020 289 address CompiledMethod::oops_reloc_begin() const {
rbackman@38133 290 // If the method is not entrant or zombie then a JMP is plastered over the
rbackman@38133 291 // first few bytes. If an oop in the old code was there, that oop
rbackman@38133 292 // should not get GC'd. Skip the first few bytes of oops on
rbackman@38133 293 // not-entrant methods.
eosterlund@53339 294 if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
eosterlund@53339 295 code_begin() + frame_complete_offset() >
eosterlund@53339 296 verified_entry_point() + NativeJump::instruction_size)
eosterlund@53339 297 {
eosterlund@53339 298 // If we have a frame_complete_offset after the native jump, then there
eosterlund@53339 299 // is no point trying to look for oops before that. This is a requirement
eosterlund@53339 300 // for being allowed to scan oops concurrently.
eosterlund@53339 301 return code_begin() + frame_complete_offset();
eosterlund@53339 302 }
eosterlund@53339 303
eosterlund@53339 304 // It is not safe to read oops concurrently using entry barriers if their
eosterlund@53339 305 // locations depend on whether the nmethod is entrant or not.
eosterlund@53339 306 assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");
eosterlund@53339 307
rbackman@38133 308 address low_boundary = verified_entry_point();
rbackman@38133 309 if (!is_in_use() && is_nmethod()) {
rbackman@38133 310 low_boundary += NativeJump::instruction_size;
rbackman@38133 311 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
rbackman@38133 312 // This means that the low_boundary is going to be a little too high.
rbackman@38133 313 // This shouldn't matter, since oops of non-entrant methods are never used.
rbackman@38133 314 // In fact, why are we bothering to look at oops in a non-entrant method??
rbackman@38133 315 }
coleenp@51020 316 return low_boundary;
rbackman@38133 317 }
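A sketch of how a caller might combine oops_reloc_begin() with the RelocIterator pattern used in verify_oop_relocations() above to visit immediate oops without ever interpreting the bytes the not-entrant jump may overwrite. Illustrative only: do_immediate_oops is a hypothetical helper name, not part of this changeset, and the closure wiring of any particular GC is not shown in this file.

// Sketch only: visit the immediate oops of a CompiledMethod, starting the
// relocation walk at oops_reloc_begin() so bytes that may be patched with the
// not-entrant jump are never treated as oop sites.
static void do_immediate_oops(CompiledMethod* cm, OopClosure* cl) {
  RelocIterator iter(cm, cm->oops_reloc_begin());
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      if (r->oop_is_immediate() && r->oop_value() != NULL) {
        cl->do_oop(r->oop_addr());  // address of the oop embedded in the code stream
      }
    }
  }
}
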
rbackman@38133 318
rbackman@38133 319 int CompiledMethod::verify_icholder_relocations() {
rbackman@38133 320 ResourceMark rm;
rbackman@38133 321 int count = 0;
rbackman@38133 322
rbackman@38133 323 RelocIterator iter(this);
rbackman@38133 324 while(iter.next()) {
rbackman@38133 325 if (iter.type() == relocInfo::virtual_call_type) {
kvn@42650 326 if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
rbackman@38133 327 CompiledIC *ic = CompiledIC_at(&iter);
rbackman@38133 328 if (TraceCompiledIC) {
rbackman@38133 329 tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
rbackman@38133 330 ic->print();
rbackman@38133 331 }
rbackman@38133 332 assert(ic->cached_icholder() != NULL, "must be non-NULL");
rbackman@38133 333 count++;
rbackman@38133 334 }
rbackman@38133 335 }
rbackman@38133 336 }
rbackman@38133 337
rbackman@38133 338 return count;
rbackman@38133 339 }
rbackman@38133 340
rbackman@38133 341 // Method that knows how to preserve outgoing arguments at a call. This method must be
rbackman@38133 342 // called with a frame corresponding to a Java invoke.
rbackman@38133 343 void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
rbackman@38133 344 if (method() != NULL && !method()->is_native()) {
rbackman@38133 345 address pc = fr.pc();
rbackman@38133 346 SimpleScopeDesc ssd(this, pc);
rbackman@38133 347 Bytecode_invoke call(ssd.method(), ssd.bci());
rbackman@38133 348 bool has_receiver = call.has_receiver();
rbackman@38133 349 bool has_appendix = call.has_appendix();
rbackman@38133 350 Symbol* signature = call.signature();
rbackman@38133 351
rbackman@38133 352 // The method attached by JIT-compilers should be used, if present.
rbackman@38133 353 // Bytecode can be inaccurate in such a case.
rbackman@38133 354 Method* callee = attached_method_before_pc(pc);
rbackman@38133 355 if (callee != NULL) {
rbackman@38133 356 has_receiver = !(callee->access_flags().is_static());
rbackman@38133 357 has_appendix = false;
rbackman@38133 358 signature = callee->signature();
rbackman@38133 359 }
rbackman@38133 360
rbackman@38133 361 fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
rbackman@38133 362 }
rbackman@38133 363 }
rbackman@38133 364
rbackman@38133 365 Method* CompiledMethod::attached_method(address call_instr) {
rbackman@38133 366 assert(code_contains(call_instr), "not part of the nmethod");
rbackman@38133 367 RelocIterator iter(this, call_instr, call_instr + 1);
rbackman@38133 368 while (iter.next()) {
rbackman@38133 369 if (iter.addr() == call_instr) {
rbackman@38133 370 switch(iter.type()) {
rbackman@38133 371 case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
rbackman@38133 372 case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
rbackman@38133 373 case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
jwilhelm@46630 374 default: break;
rbackman@38133 375 }
rbackman@38133 376 }
rbackman@38133 377 }
rbackman@38133 378 return NULL; // not found
rbackman@38133 379 }
rbackman@38133 380
rbackman@38133 381 Method* CompiledMethod::attached_method_before_pc(address pc) {
rbackman@38133 382 if (NativeCall::is_call_before(pc)) {
rbackman@38133 383 NativeCall* ncall = nativeCall_before(pc);
rbackman@38133 384 return attached_method(ncall->instruction_address());
rbackman@38133 385 }
rbackman@38133 386 return NULL; // not a call
rbackman@38133 387 }
rbackman@38133 388
rbackman@38133 389 void CompiledMethod::clear_inline_caches() {
rbackman@38133 390 assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
rbackman@38133 391 if (is_zombie()) {
rbackman@38133 392 return;
rbackman@38133 393 }
rbackman@38133 394
rbackman@38133 395 RelocIterator iter(this);
rbackman@38133 396 while (iter.next()) {
rbackman@38133 397 iter.reloc()->clear_inline_cache();
rbackman@38133 398 }
rbackman@38133 399 }
rbackman@38133 400
rbackman@38133 401 // Clear ICStubs of all compiled ICs
rbackman@38133 402 void CompiledMethod::clear_ic_stubs() {
eosterlund@53313 403 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
mbaesken@51656 404 ResourceMark rm;
rbackman@38133 405 RelocIterator iter(this);
rbackman@38133 406 while(iter.next()) {
rbackman@38133 407 if (iter.type() == relocInfo::virtual_call_type) {
rbackman@38133 408 CompiledIC* ic = CompiledIC_at(&iter);
rbackman@38133 409 ic->clear_ic_stub();
rbackman@38133 410 }
rbackman@38133 411 }
rbackman@38133 412 }
rbackman@38133 413
rbackman@38133 414 #ifdef ASSERT
coleenp@50316 415 // Check that the class_loader is alive for this bit of metadata.
coleenp@50316 416 static void check_class(Metadata* md) {
coleenp@50316 417 Klass* klass = NULL;
coleenp@50316 418 if (md->is_klass()) {
coleenp@50316 419 klass = ((Klass*)md);
coleenp@50316 420 } else if (md->is_method()) {
coleenp@50316 421 klass = ((Method*)md)->method_holder();
coleenp@50316 422 } else if (md->is_methodData()) {
coleenp@50316 423 klass = ((MethodData*)md)->method()->method_holder();
coleenp@50316 424 } else {
coleenp@50316 425 md->print();
coleenp@50316 426 ShouldNotReachHere();
coleenp@50316 427 }
coleenp@50316 428 assert(klass->is_loader_alive(), "must be alive");
coleenp@50316 429 }
rbackman@38133 430 #endif // ASSERT
rbackman@38133 431
kvn@42650 432
coleenp@50316 433 void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
rbackman@38133 434 if (ic->is_icholder_call()) {
coleenp@50494 435 // The only exception is compiledICHolder metadata which may
rbackman@38133 436 // yet be marked below. (We check this further below).
coleenp@50494 437 CompiledICHolder* cichk_metdata = ic->cached_icholder();
rbackman@38133 438
coleenp@50494 439 if (cichk_metdata->is_loader_alive()) {
rbackman@38133 440 return;
rbackman@38133 441 }
rbackman@38133 442 } else {
coleenp@50494 443 Metadata* ic_metdata = ic->cached_metadata();
coleenp@50494 444 if (ic_metdata != NULL) {
coleenp@50494 445 if (ic_metdata->is_klass()) {
coleenp@50494 446 if (((Klass*)ic_metdata)->is_loader_alive()) {
rbackman@38133 447 return;
rbackman@38133 448 }
coleenp@50494 449 } else if (ic_metdata->is_method()) {
coleenp@50494 450 Method* method = (Method*)ic_metdata;
coleenp@50494 451 assert(!method->is_old(), "old method should have been cleaned");
coleenp@50494 452 if (method->method_holder()->is_loader_alive()) {
rbackman@38133 453 return;
rbackman@38133 454 }
rbackman@38133 455 } else {
rbackman@38133 456 ShouldNotReachHere();
rbackman@38133 457 }
rbackman@38133 458 }
rbackman@38133 459 }
rbackman@38133 460
rbackman@38133 461 ic->set_to_clean();
rbackman@38133 462 }
rbackman@38133 463
coleenp@51020 464 // static_stub_Relocations may have dangling references to
coleenp@51020 465 // nmethods so trim them out here. Otherwise it looks like
coleenp@51020 466 // compiled code is maintaining a link to dead metadata.
coleenp@51020 467 void CompiledMethod::clean_ic_stubs() {
rbackman@38133 468 #ifdef ASSERT
coleenp@51020 469 address low_boundary = oops_reloc_begin();
coleenp@51020 470 RelocIterator iter(this, low_boundary);
coleenp@51020 471 while (iter.next()) {
rbackman@38133 472 address static_call_addr = NULL;
rbackman@38133 473 if (iter.type() == relocInfo::opt_virtual_call_type) {
rbackman@38133 474 CompiledIC* cic = CompiledIC_at(&iter);
rbackman@38133 475 if (!cic->is_call_to_interpreted()) {
rbackman@38133 476 static_call_addr = iter.addr();
rbackman@38133 477 }
rbackman@38133 478 } else if (iter.type() == relocInfo::static_call_type) {
rbackman@38133 479 CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
rbackman@38133 480 if (!csc->is_call_to_interpreted()) {
rbackman@38133 481 static_call_addr = iter.addr();
rbackman@38133 482 }
rbackman@38133 483 }
rbackman@38133 484 if (static_call_addr != NULL) {
rbackman@38133 485 RelocIterator sciter(this, low_boundary);
rbackman@38133 486 while (sciter.next()) {
rbackman@38133 487 if (sciter.type() == relocInfo::static_stub_type &&
rbackman@38133 488 sciter.static_stub_reloc()->static_call() == static_call_addr) {
rbackman@38133 489 sciter.static_stub_reloc()->clear_inline_cache();
rbackman@38133 490 }
rbackman@38133 491 }
rbackman@38133 492 }
rbackman@38133 493 }
rbackman@38133 494 #endif
rbackman@38133 495 }
rbackman@38133 496
coleenp@51020 497 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
rbackman@38133 498 template <class CompiledICorStaticCall>
eosterlund@53314 499 static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
eosterlund@53314 500 bool clean_all) {
rbackman@38133 501 // OK to look up references to zombies here
rbackman@38133 502 CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
rbackman@38133 503 CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
rbackman@38133 504 if (nm != NULL) {
rbackman@38133 505 // Clean inline caches pointing to both zombie and not_entrant methods
eosterlund@53314 506 if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
coleenp@51020 507 ic->set_to_clean(from->is_alive());
rbackman@38133 508 assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
rbackman@38133 509 }
rbackman@38133 510 }
rbackman@38133 511 }
rbackman@38133 512
eosterlund@53314 513 static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
eosterlund@53314 514 bool clean_all) {
eosterlund@53314 515 clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
rbackman@38133 516 }
rbackman@38133 517
eosterlund@53314 518 static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
eosterlund@53314 519 bool clean_all) {
eosterlund@53314 520 clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
coleenp@51020 521 }
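The two thin overloads above exist only to extract the right destination address (ic_destination() for an inline cache, destination() for a static call) before forwarding to the shared template that does the actual cleaning. A standalone model of that dispatch shape, with made-up types, is sketched below.

// Standalone model (made-up types, not HotSpot code) of the forwarding pattern
// above: one template does the real work on a destination address, and thin
// overloads adapt the two call-site representations to it.
#include <cstdio>

struct FakeInlineCache { void* dest = nullptr; void* ic_destination() const { return dest; } };
struct FakeStaticCall  { void* dest = nullptr; void* destination()    const { return dest; } };

template <class CallSiteT>
static void clean_if_target_is_dead(CallSiteT* site, void* target) {
  // ... look up the code blob at 'target' and reset 'site' if that blob is dead ...
  std::printf("inspecting call site %p -> target %p\n", (void*)site, target);
}

static void clean_if_target_is_dead(FakeInlineCache* ic) { clean_if_target_is_dead(ic, ic->ic_destination()); }
static void clean_if_target_is_dead(FakeStaticCall* sc)  { clean_if_target_is_dead(sc, sc->destination()); }

int main() {
  FakeInlineCache ic;
  FakeStaticCall  sc;
  clean_if_target_is_dead(&ic);  // both call-site kinds funnel into the same template
  clean_if_target_is_dead(&sc);
  return 0;
}
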
coleenp@51020 522
coleenp@51020 523 // Cleans caches in nmethods that point to either classes that are unloaded
coleenp@51020 524 // or nmethods that are unloaded.
coleenp@51020 525 //
coleenp@51020 526 // Can be called either in parallel by G1 currently or after all
coleenp@51020 527 // nmethods are unloaded.
eosterlund@53314 530 void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
eosterlund@53314 531 ResourceMark rm;
coleenp@51020 532
coleenp@51020 533 // Exception cache only needs to be called if unloading occurred
coleenp@51020 534 if (unloading_occurred) {
coleenp@51020 535 clean_exception_cache();
coleenp@51020 536 }
coleenp@51020 537
eosterlund@53314 538 cleanup_inline_caches_impl(unloading_occurred, false);
coleenp@51020 539
coleenp@51020 540 // All static stubs need to be cleaned.
coleenp@51020 541 clean_ic_stubs();
coleenp@51020 542
coleenp@51020 543 // Check that the metadata embedded in the nmethod is alive
coleenp@51020 544 DEBUG_ONLY(metadata_do(check_class));
eosterlund@53314 545 }
coleenp@51020 546
coleenp@51020 547 // Called to clean up after class unloading for live nmethods and from the sweeper
coleenp@51020 548 // for all methods.
eosterlund@53314 549 void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
eosterlund@53313 550 assert(CompiledICLocker::is_safe(this), "mt unsafe call");
mbaesken@51656 551 ResourceMark rm;
rbackman@38133 552
coleenp@51020 553 // Find all calls in an nmethod and clear the ones that point to non-entrant,
coleenp@51020 554 // zombie and unloaded nmethods.
coleenp@51020 555 RelocIterator iter(this, oops_reloc_begin());
rbackman@38133 556 while(iter.next()) {
rbackman@38133 557
rbackman@38133 558 switch (iter.type()) {
rbackman@38133 559
rbackman@38133 560 case relocInfo::virtual_call_type:
rbackman@38133 561 if (unloading_occurred) {
coleenp@51020 562 // If class unloading occurred we first clear ICs where the cached metadata
coleenp@51020 563 // is referring to an unloaded klass or method.
coleenp@50316 564 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
rbackman@38133 565 }
rbackman@38133 566
eosterlund@53314 567 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
rbackman@38133 568 break;
rbackman@38133 569
rbackman@38133 570 case relocInfo::opt_virtual_call_type:
eosterlund@53314 571 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
rbackman@38133 572 break;
rbackman@38133 573
rbackman@38133 574 case relocInfo::static_call_type:
eosterlund@53314 575 clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all);
rbackman@38133 576 break;
rbackman@38133 577
rbackman@38133 578 case relocInfo::oop_type:
rbackman@38133 579 break;
rbackman@38133 580
rbackman@38133 581 case relocInfo::metadata_type:
rbackman@38133 582 break; // nothing to do.
jwilhelm@46630 583
jwilhelm@46630 584 default:
jwilhelm@46630 585 break;
rbackman@38133 586 }
rbackman@38133 587 }
rbackman@38133 588 }
lucy@52334 589
lucy@52334 590 // Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun) was found
lucy@52334 591 // to not be inherently safe. There is a chance that fields are seen which are not properly
lucy@52334 592 // initialized. This happens despite the fact that nmethods_do() asserts the CodeCache_lock
lucy@52334 593 // to be held.
lucy@52334 594 // To bundle knowledge about necessary checks in one place, this function was introduced.
lucy@52334 595 // It is not claimed that these checks are sufficient, but they were found to be necessary.
lucy@52334 596 bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
lucy@52334 597 Method* method = (nm == NULL) ? NULL : nm->method(); // nm->method() may be uninitialized, i.e. != NULL, but invalid
lucy@52334 598 return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
lucy@52334 599 !nm->is_zombie() && !nm->is_not_installed() &&
lucy@52334 600 os::is_readable_pointer(method) &&
lucy@52334 601 os::is_readable_pointer(method->constants()) &&
lucy@52334 602 os::is_readable_pointer(method->signature());
lucy@52334 603 }
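
A sketch of the intended call pattern for nmethod_access_is_safe(): guard each nmethod handed out by CodeCache::nmethods_do() (mentioned in the comment above) before dereferencing its Method. The printing body and the caller shown in the trailing comment are illustrative assumptions, not code from this changeset.

// Illustrative only: skip nmethods whose fields may not be safely readable yet.
static void print_nmethod_if_accessible(nmethod* nm) {
  if (!CompiledMethod::nmethod_access_is_safe(nm)) {
    return;  // NULL, not installed, zombie, or fields not yet readable
  }
  ResourceMark rm;
  tty->print_cr("nmethod: %s", nm->method()->name_and_sig_as_C_string());
}

// Hypothetical caller, holding the CodeCache_lock as nmethods_do() expects:
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeCache::nmethods_do(print_nmethod_if_accessible);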