annotate src/share/vm/runtime/sweeper.cpp @ 4802:f2110083203d

8005849: JEP 167: Event-Based JVM Tracing Reviewed-by: acorn, coleenp, sla Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>
author sla
date Mon, 10 Jun 2013 11:30:51 +0200
parents 0cfa93c2fcc4
children ab274453d37f
rev   line source
duke@0 1 /*
sla@4802 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #include "precompiled.hpp"
stefank@1879 26 #include "code/codeCache.hpp"
coleenp@3602 27 #include "code/compiledIC.hpp"
coleenp@3602 28 #include "code/icBuffer.hpp"
stefank@1879 29 #include "code/nmethod.hpp"
stefank@1879 30 #include "compiler/compileBroker.hpp"
stefank@1879 31 #include "memory/resourceArea.hpp"
coleenp@3602 32 #include "oops/method.hpp"
stefank@1879 33 #include "runtime/atomic.hpp"
stefank@1879 34 #include "runtime/compilationPolicy.hpp"
stefank@1879 35 #include "runtime/mutexLocker.hpp"
stefank@1879 36 #include "runtime/os.hpp"
stefank@1879 37 #include "runtime/sweeper.hpp"
stefank@1879 38 #include "runtime/vm_operations.hpp"
sla@4802 39 #include "trace/tracing.hpp"
stefank@1879 40 #include "utilities/events.hpp"
stefank@1879 41 #include "utilities/xmlstream.hpp"
duke@0 42
never@2481 43 #ifdef ASSERT
never@2481 44
never@2481 45 #define SWEEP(nm) record_sweep(nm, __LINE__)
never@2481 46 // Sweeper logging code
never@2481 47 class SweeperRecord {
never@2481 48 public:
never@2481 49 int traversal;
never@2481 50 int invocation;
never@2481 51 int compile_id;
never@2481 52 long traversal_mark;
never@2481 53 int state;
never@2481 54 const char* kind;
never@2481 55 address vep;
never@2481 56 address uep;
never@2481 57 int line;
never@2481 58
never@2481 59 void print() {
never@2481 60 tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
never@2481 61 PTR_FORMAT " state = %d traversal_mark %d line = %d",
never@2481 62 traversal,
never@2481 63 invocation,
never@2481 64 compile_id,
never@2481 65 kind == NULL ? "" : kind,
never@2481 66 uep,
never@2481 67 vep,
never@2481 68 state,
never@2481 69 traversal_mark,
never@2481 70 line);
never@2481 71 }
never@2481 72 };
never@2481 73
never@2481 74 static int _sweep_index = 0;
never@2481 75 static SweeperRecord* _records = NULL;
never@2481 76
never@2481 77 void NMethodSweeper::report_events(int id, address entry) {
never@2481 78 if (_records != NULL) {
never@2481 79 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
never@2481 80 if (_records[i].uep == entry ||
never@2481 81 _records[i].vep == entry ||
never@2481 82 _records[i].compile_id == id) {
never@2481 83 _records[i].print();
never@2481 84 }
never@2481 85 }
never@2481 86 for (int i = 0; i < _sweep_index; i++) {
never@2481 87 if (_records[i].uep == entry ||
never@2481 88 _records[i].vep == entry ||
never@2481 89 _records[i].compile_id == id) {
never@2481 90 _records[i].print();
never@2481 91 }
never@2481 92 }
never@2481 93 }
never@2481 94 }
never@2481 95
never@2481 96 void NMethodSweeper::report_events() {
never@2481 97 if (_records != NULL) {
never@2481 98 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
never@2481 99 // skip empty records
never@2481 100 if (_records[i].vep == NULL) continue;
never@2481 101 _records[i].print();
never@2481 102 }
never@2481 103 for (int i = 0; i < _sweep_index; i++) {
never@2481 104 // skip empty records
never@2481 105 if (_records[i].vep == NULL) continue;
never@2481 106 _records[i].print();
never@2481 107 }
never@2481 108 }
never@2481 109 }
never@2481 110
never@2481 111 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
never@2481 112 if (_records != NULL) {
never@2481 113 _records[_sweep_index].traversal = _traversals;
never@2481 114 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
never@2481 115 _records[_sweep_index].invocation = _invocations;
never@2481 116 _records[_sweep_index].compile_id = nm->compile_id();
never@2481 117 _records[_sweep_index].kind = nm->compile_kind();
never@2481 118 _records[_sweep_index].state = nm->_state;
never@2481 119 _records[_sweep_index].vep = nm->verified_entry_point();
never@2481 120 _records[_sweep_index].uep = nm->entry_point();
never@2481 121 _records[_sweep_index].line = line;
never@2481 122
never@2481 123 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
never@2481 124 }
never@2481 125 }
never@2481 126 #else
never@2481 127 #define SWEEP(nm)
never@2481 128 #endif
never@2481 129
never@2481 130
duke@0 131 long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
never@1535 132 nmethod* NMethodSweeper::_current = NULL; // Current nmethod
never@1564 133 int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache
sla@4802 134 int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
sla@4802 135 int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
sla@4802 136 int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
never@1564 137
never@1564 138 volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
never@1564 139 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
duke@0 140
duke@0 141 jint NMethodSweeper::_locked_seen = 0;
duke@0 142 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
neliasso@4603 143 bool NMethodSweeper::_resweep = false;
neliasso@4603 144 jint NMethodSweeper::_flush_token = 0;
neliasso@4603 145 jlong NMethodSweeper::_last_full_flush_time = 0;
neliasso@4603 146 int NMethodSweeper::_highest_marked = 0;
neliasso@4603 147 int NMethodSweeper::_dead_compile_ids = 0;
neliasso@4603 148 long NMethodSweeper::_last_flush_traversal_id = 0;
duke@0 149
sla@4802 150 int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
sla@4802 151 int NMethodSweeper::_total_nof_methods_reclaimed = 0;
sla@4802 152 jlong NMethodSweeper::_total_time_sweeping = 0;
sla@4802 153 jlong NMethodSweeper::_total_time_this_sweep = 0;
sla@4802 154 jlong NMethodSweeper::_peak_sweep_time = 0;
sla@4802 155 jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
sla@4802 156 jlong NMethodSweeper::_total_disconnect_time = 0;
sla@4802 157 jlong NMethodSweeper::_peak_disconnect_time = 0;
sla@4802 158
jrose@989 159 class MarkActivationClosure: public CodeBlobClosure {
jrose@989 160 public:
jrose@989 161 virtual void do_code_blob(CodeBlob* cb) {
jrose@989 162 // If we see an activation belonging to a non_entrant nmethod, we mark it.
jrose@989 163 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
jrose@989 164 ((nmethod*)cb)->mark_as_seen_on_stack();
jrose@989 165 }
jrose@989 166 }
jrose@989 167 };
jrose@989 168 static MarkActivationClosure mark_activation_closure;
jrose@989 169
neliasso@4603 170 bool NMethodSweeper::sweep_in_progress() {
neliasso@4603 171 return (_current != NULL);
neliasso@4603 172 }
neliasso@4603 173
never@1458 174 void NMethodSweeper::scan_stacks() {
duke@0 175 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
duke@0 176 if (!MethodFlushing) return;
duke@0 177
duke@0 178 // No need to synchronize access, since this is always executed at a
neliasso@4603 179 // safepoint.
duke@0 180
duke@0 181 // Make sure the CompiledIC_lock is unlocked, since we might update some
duke@0 182 // inline caches. If it is locked, we just bail out and try again later.
duke@0 183 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
duke@0 184
duke@0 185 // Check for restart
duke@0 186 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
neliasso@4603 187 if (!sweep_in_progress() && _resweep) {
duke@0 188 _seen = 0;
duke@0 189 _invocations = NmethodSweepFraction;
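// Each full sweep is divided into NmethodSweepFraction partial passes ("invocations");
// possibly_sweep() performs one fraction at a time and decrements _invocations until
// the whole code cache has been visited.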
never@1458 190 _current = CodeCache::first_nmethod();
duke@0 191 _traversals += 1;
sla@4802 192 _total_time_this_sweep = 0;
sla@4802 193
duke@0 194 if (PrintMethodFlushing) {
duke@0 195 tty->print_cr("### Sweep: stack traversal %d", _traversals);
duke@0 196 }
jrose@989 197 Threads::nmethods_do(&mark_activation_closure);
duke@0 198
duke@0 199 // reset the flags since we started a scan from the beginning.
neliasso@4603 200 _resweep = false;
duke@0 201 _locked_seen = 0;
duke@0 202 _not_entrant_seen_on_stack = 0;
duke@0 203 }
duke@0 204
kvn@1202 205 if (UseCodeCacheFlushing) {
neliasso@4603 206 // only allow new flushes after the interval is complete.
neliasso@4603 207 jlong now = os::javaTimeMillis();
neliasso@4603 208 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
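// The *1000 above scales MinCodeCacheFlushingInterval (interpreted here as seconds)
// to the millisecond scale returned by os::javaTimeMillis().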
neliasso@4603 209 jlong curr_interval = now - _last_full_flush_time;
neliasso@4603 210 if (curr_interval > max_interval) {
neliasso@4603 211 _flush_token = 0;
kvn@1202 212 }
kvn@1202 213
neliasso@4603 214 if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
neliasso@4603 215 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
neliasso@4603 216 log_sweep("restart_compiler");
kvn@1202 217 }
kvn@1202 218 }
duke@0 219 }
duke@0 220
never@1458 221 void NMethodSweeper::possibly_sweep() {
never@1564 222 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
neliasso@4603 223 if (!MethodFlushing || !sweep_in_progress()) return;
never@1458 224
never@1458 225 if (_invocations > 0) {
never@1458 226 // Only one thread at a time will sweep
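// Atomic::cmpxchg(1, &_sweep_started, 0) returns the previous value of _sweep_started;
// a non-zero result means another thread has already claimed this sweep pass, so we back out.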
never@1458 227 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
never@1458 228 if (old != 0) {
never@1458 229 return;
never@1458 230 }
never@2481 231 #ifdef ASSERT
never@2481 232 if (LogSweeper && _records == NULL) {
never@2481 233 // Create the ring buffer for the logging code
zgu@3465 234 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
never@2481 235 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
never@2481 236 }
never@2481 237 #endif
never@1564 238 if (_invocations > 0) {
never@1564 239 sweep_code_cache();
never@1564 240 _invocations--;
never@1564 241 }
never@1564 242 _sweep_started = 0;
never@1458 243 }
never@1458 244 }
never@1458 245
never@1458 246 void NMethodSweeper::sweep_code_cache() {
sla@4802 247
sla@4802 248 jlong sweep_start_counter = os::elapsed_counter();
sla@4802 249
sla@4802 250 _flushed_count = 0;
sla@4802 251 _zombified_count = 0;
sla@4802 252 _marked_count = 0;
sla@4802 253
never@1458 254 if (PrintMethodFlushing && Verbose) {
never@1564 255 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
never@1458 256 }
never@1458 257
neliasso@4603 258 if (!CompileBroker::should_compile_new_jobs()) {
neliasso@4603 259 // If we have turned off compilations we might as well do full sweeps
neliasso@4603 260 // in order to reach the clean state faster. Otherwise the sleeping compiler
neliasso@4603 261 // threads will slow down sweeping. After a few iterations the cache
neliasso@4603 262 // will be clean and sweeping stops (_resweep will not be set)
neliasso@4603 263 _invocations = 1;
neliasso@4603 264 }
neliasso@4603 265
never@1564 266 // We want to visit all nmethods after NmethodSweepFraction
never@1564 267 // invocations so divide the remaining number of nmethods by the
never@1564 268 // remaining number of invocations. This is only an estimate since
never@1564 269 // the number of nmethods changes during the sweep so the final
never@1564 270 // stage must iterate until there are no more nmethods.
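// For example (hypothetical numbers): with 3000 nmethods, 1200 already seen and 6
// invocations remaining, this fraction visits (3000 - 1200) / 6 = 300 nmethods.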
never@1564 271 int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
never@1458 272
never@1458 273 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
never@1458 274 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1458 275
never@1458 276 {
never@1458 277 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1458 278
never@1564 279 // The last invocation iterates until there are no more nmethods
never@1564 280 for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
iveresov@3137 281 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
iveresov@3137 282 if (PrintMethodFlushing && Verbose) {
iveresov@3137 283 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
iveresov@3137 284 }
iveresov@3137 285 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1458 286
iveresov@3137 287 assert(Thread::current()->is_Java_thread(), "should be java thread");
iveresov@3137 288 JavaThread* thread = (JavaThread*)Thread::current();
iveresov@3137 289 ThreadBlockInVM tbivm(thread);
iveresov@3137 290 thread->java_suspend_self();
iveresov@3137 291 }
never@1564 292 // Since we will give up the CodeCache_lock, always skip ahead
never@1564 293 // to the next nmethod. Other blobs can be deleted by other
never@1564 294 // threads but nmethods are only reclaimed by the sweeper.
never@1535 295 nmethod* next = CodeCache::next_nmethod(_current);
never@1458 296
never@1458 297 // Now ready to process nmethod and give up CodeCache_lock
never@1458 298 {
never@1458 299 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1535 300 process_nmethod(_current);
never@1458 301 }
never@1458 302 _seen++;
never@1458 303 _current = next;
never@1458 304 }
never@1458 305 }
never@1458 306
never@1564 307 assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
never@1564 308
neliasso@4603 309 if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
never@1458 310 // we've completed a scan without making progress but there were
never@1458 311 // nmethods we were unable to process either because they were
never@1458 312 // locked or were still on stack. We don't have to aggressively
never@1458 313 // clean them up so just stop scanning. We could scan once more
never@1458 314 // but that complicates the control logic and it's unlikely to
never@1458 315 // matter much.
never@1458 316 if (PrintMethodFlushing) {
never@1458 317 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
never@1458 318 }
never@1458 319 }
never@1458 320
sla@4802 321 jlong sweep_end_counter = os::elapsed_counter();
sla@4802 322 jlong sweep_time = sweep_end_counter - sweep_start_counter;
sla@4802 323 _total_time_sweeping += sweep_time;
sla@4802 324 _total_time_this_sweep += sweep_time;
sla@4802 325 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
sla@4802 326 _total_nof_methods_reclaimed += _flushed_count;
sla@4802 327
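// The event is constructed UNTIMED, i.e. without automatic timestamps; the start and
// end times are set explicitly below from the os::elapsed_counter() samples taken above.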
sla@4802 328 EventSweepCodeCache event(UNTIMED);
sla@4802 329 if (event.should_commit()) {
sla@4802 330 event.set_starttime(sweep_start_counter);
sla@4802 331 event.set_endtime(sweep_end_counter);
sla@4802 332 event.set_sweepIndex(_traversals);
sla@4802 333 event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
sla@4802 334 event.set_sweptCount(todo);
sla@4802 335 event.set_flushedCount(_flushed_count);
sla@4802 336 event.set_markedCount(_marked_count);
sla@4802 337 event.set_zombifiedCount(_zombified_count);
sla@4802 338 event.commit();
sla@4802 339 }
sla@4802 340
never@1458 341 #ifdef ASSERT
never@1458 342 if(PrintMethodFlushing) {
sla@4802 343 tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
never@1458 344 }
never@1458 345 #endif
never@1564 346
never@1564 347 if (_invocations == 1) {
sla@4802 348 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
never@1564 349 log_sweep("finished");
never@1564 350 }
neliasso@4603 351
neliasso@4603 352 // Sweeper is the only case where memory is released,
neliasso@4603 353 // check here if it is time to restart the compiler.
neliasso@4603 354 if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
neliasso@4603 355 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
neliasso@4603 356 log_sweep("restart_compiler");
neliasso@4603 357 }
never@1458 358 }
never@1458 359
never@2481 360 class NMethodMarker: public StackObj {
never@2481 361 private:
never@2481 362 CompilerThread* _thread;
never@2481 363 public:
never@2481 364 NMethodMarker(nmethod* nm) {
never@2481 365 _thread = CompilerThread::current();
coleenp@3602 366 if (!nm->is_zombie() && !nm->is_unloaded()) {
coleenp@3602 367 // Only expose live nmethods for scanning
never@2481 368 _thread->set_scanned_nmethod(nm);
never@2481 369 }
coleenp@3602 370 }
never@2481 371 ~NMethodMarker() {
never@2481 372 _thread->set_scanned_nmethod(NULL);
never@2481 373 }
never@2481 374 };
never@2481 375
coleenp@3602 376 void NMethodSweeper::release_nmethod(nmethod *nm) {
coleenp@3602 377 // Clean up any CompiledICHolders
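// Walk the nmethod's relocations and reset each virtual call site so that any
// CompiledICHolder metadata it references can be released before the nmethod is flushed.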
coleenp@3602 378 {
coleenp@3602 379 ResourceMark rm;
coleenp@3602 380 MutexLocker ml_patch(CompiledIC_lock);
coleenp@3602 381 RelocIterator iter(nm);
coleenp@3602 382 while (iter.next()) {
coleenp@3602 383 if (iter.type() == relocInfo::virtual_call_type) {
coleenp@3602 384 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
coleenp@3602 385 }
coleenp@3602 386 }
coleenp@3602 387 }
coleenp@3602 388
coleenp@3602 389 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
coleenp@3602 390 nm->flush();
coleenp@3602 391 }
duke@0 392
duke@0 393 void NMethodSweeper::process_nmethod(nmethod *nm) {
never@1458 394 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1458 395
never@2481 396 // Make sure this nmethod doesn't get unloaded during the scan,
never@2481 397 // since the locks acquired below might safepoint.
never@2481 398 NMethodMarker nmm(nm);
never@2481 399
never@2481 400 SWEEP(nm);
never@2481 401
duke@0 402 // Skip methods that are currently referenced by the VM
duke@0 403 if (nm->is_locked_by_vm()) {
duke@0 404 // But still remember to clean up inline caches for alive nmethods
duke@0 405 if (nm->is_alive()) {
duke@0 406 // Clean up all inline caches that point to zombie/not-entrant methods
never@1458 407 MutexLocker cl(CompiledIC_lock);
duke@0 408 nm->cleanup_inline_caches();
never@2481 409 SWEEP(nm);
duke@0 410 } else {
duke@0 411 _locked_seen++;
never@2481 412 SWEEP(nm);
duke@0 413 }
duke@0 414 return;
duke@0 415 }
duke@0 416
duke@0 417 if (nm->is_zombie()) {
duke@0 418 // The first time we see an nmethod we mark it; the next time we
duke@0 419 // reclaim it. Once a zombie method has been seen twice, we know that
never@1564 420 // there are no inline caches that refer to it.
duke@0 421 if (nm->is_marked_for_reclamation()) {
duke@0 422 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
ysr@941 423 if (PrintMethodFlushing && Verbose) {
kvn@1202 424 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
ysr@941 425 }
coleenp@3602 426 release_nmethod(nm);
sla@4802 427 _flushed_count++;
duke@0 428 } else {
ysr@941 429 if (PrintMethodFlushing && Verbose) {
kvn@1202 430 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
ysr@941 431 }
duke@0 432 nm->mark_for_reclamation();
neliasso@4603 433 _resweep = true;
sla@4802 434 _marked_count++;
never@2481 435 SWEEP(nm);
duke@0 436 }
duke@0 437 } else if (nm->is_not_entrant()) {
duke@0 438 // If there are no current activations of this method on the
duke@0 439 // stack we can safely convert it to a zombie method
duke@0 440 if (nm->can_not_entrant_be_converted()) {
ysr@941 441 if (PrintMethodFlushing && Verbose) {
kvn@1202 442 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
ysr@941 443 }
duke@0 444 nm->make_zombie();
neliasso@4603 445 _resweep = true;
sla@4802 446 _zombified_count++;
never@2481 447 SWEEP(nm);
duke@0 448 } else {
duke@0 449 // Still alive, clean up its inline caches
never@1458 450 MutexLocker cl(CompiledIC_lock);
duke@0 451 nm->cleanup_inline_caches();
duke@0 452 // We couldn't transition this nmethod, so don't immediately
duke@0 453 // request a rescan. If this method stays on the stack for a
never@1458 454 // long time we don't want to keep rescanning the code cache.
duke@0 455 _not_entrant_seen_on_stack++;
never@2481 456 SWEEP(nm);
duke@0 457 }
duke@0 458 } else if (nm->is_unloaded()) {
duke@0 459 // Unloaded code, just make it a zombie
ysr@941 460 if (PrintMethodFlushing && Verbose)
kvn@1202 461 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
sla@4802 462
ysr@941 463 if (nm->is_osr_method()) {
coleenp@3602 464 SWEEP(nm);
duke@0 465 // No inline caches will ever point to osr methods, so we can just remove it
coleenp@3602 466 release_nmethod(nm);
sla@4802 467 _flushed_count++;
duke@0 468 } else {
duke@0 469 nm->make_zombie();
neliasso@4603 470 _resweep = true;
sla@4802 471 _zombified_count++;
never@2481 472 SWEEP(nm);
duke@0 473 }
duke@0 474 } else {
duke@0 475 assert(nm->is_alive(), "should be alive");
kvn@1202 476
kvn@1202 477 if (UseCodeCacheFlushing) {
neliasso@4603 478 if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
neliasso@4603 479 (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
kvn@1202 480 // This method has not been called since the forced cleanup happened
kvn@1202 481 nm->make_not_entrant();
kvn@1202 482 }
kvn@1202 483 }
kvn@1202 484
duke@0 485 // Clean up all inline caches that point to zombie/not-entrant methods
never@1458 486 MutexLocker cl(CompiledIC_lock);
duke@0 487 nm->cleanup_inline_caches();
never@2481 488 SWEEP(nm);
duke@0 489 }
duke@0 490 }
kvn@1202 491
kvn@1202 492 // Code cache unloading: when compilers notice the code cache is getting full,
kvn@1202 493 // they will call a vm op that comes here. This code attempts to speculatively
kvn@1202 494 // unload the oldest half of the nmethods (based on the compile job id) by
kvn@1202 495 // saving the old code in a list in the CodeCache. Then
never@1458 496 // execution resumes. If a method so marked is not called by the second sweeper
never@1458 497 // stack traversal after the current one, the nmethod will be marked not-entrant and
coleenp@3602 498 // reclaimed by normal sweeping. If the method is called, the Method*'s
coleenp@3602 499 // _code field is restored and the Method*/nmethod
kvn@1202 500 // go back to their normal state.
kvn@1202 501 void NMethodSweeper::handle_full_code_cache(bool is_full) {
neliasso@4603 502
neliasso@4603 503 if (is_full) {
neliasso@4603 504 // Since code cache is full, immediately stop new compiles
neliasso@4603 505 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
neliasso@4603 506 log_sweep("disable_compiler");
kvn@1202 507 }
kvn@1202 508 }
kvn@1202 509
neliasso@4603 510 // Make sure only one thread can flush
neliasso@4603 511 // The token is reset after MinCodeCacheFlushingInterval in scan_stacks(),
neliasso@4603 512 // no need to check the timeout here.
neliasso@4603 513 jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
neliasso@4603 514 if (old != 0) {
neliasso@4603 515 return;
kvn@1202 516 }
kvn@1202 517
kvn@1202 518 VM_HandleFullCodeCache op(is_full);
kvn@1202 519 VMThread::execute(&op);
kvn@1202 520
neliasso@4603 521 // resweep again as soon as possible
neliasso@4603 522 _resweep = true;
kvn@1202 523 }
kvn@1202 524
kvn@1202 525 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
kvn@1202 526 // If there was a race in detecting full code cache, only run
kvn@1202 527 // one vm op for it or keep the compiler shut off
kvn@1202 528
sla@4802 529 jlong disconnect_start_counter = os::elapsed_counter();
kvn@1202 530
neliasso@4603 531 // Traverse the code cache trying to dump the oldest nmethods
neliasso@4603 532 int curr_max_comp_id = CompileBroker::get_compilation_id();
neliasso@4603 533 int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
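// Hypothetical example: with curr_max_comp_id = 1000, _dead_compile_ids = 200 and
// CodeCacheFlushingFraction = 2, flush_target = (1000 - 200) / 2 + 200 = 600, so in-use
// nmethods with compile_id below 600 (roughly the older half) become disconnect candidates.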
kvn@1202 534
never@1564 535 log_sweep("start_cleaning");
kvn@1202 536
kvn@1202 537 nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
kvn@1202 538 jint disconnected = 0;
kvn@1202 539 jint made_not_entrant = 0;
neliasso@4603 540 jint nmethod_count = 0;
neliasso@4603 541
kvn@1202 542 while ((nm != NULL)){
neliasso@4603 543 int curr_comp_id = nm->compile_id();
kvn@1202 544
kvn@1202 545 // OSR methods cannot be flushed like this. Also, don't flush native methods
kvn@1202 546 // since they are part of the JDK in most cases
neliasso@4603 547 if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
kvn@1202 548
neliasso@4603 549 // only count methods that can be speculatively disconnected
neliasso@4603 550 nmethod_count++;
kvn@1202 551
neliasso@4603 552 if (nm->is_in_use() && (curr_comp_id < flush_target)) {
neliasso@4603 553 if ((nm->method()->code() == nm)) {
neliasso@4603 554 // This method has not been previously considered for
neliasso@4603 555 // unloading or it was restored already
neliasso@4603 556 CodeCache::speculatively_disconnect(nm);
neliasso@4603 557 disconnected++;
neliasso@4603 558 } else if (nm->is_speculatively_disconnected()) {
neliasso@4603 559 // This method was previously considered for preemptive unloading and has not been called since then
neliasso@4603 560 CompilationPolicy::policy()->delay_compilation(nm->method());
neliasso@4603 561 nm->make_not_entrant();
neliasso@4603 562 made_not_entrant++;
neliasso@4603 563 }
neliasso@4603 564
neliasso@4603 565 if (curr_comp_id > _highest_marked) {
neliasso@4603 566 _highest_marked = curr_comp_id;
neliasso@4603 567 }
kvn@1202 568 }
kvn@1202 569 }
kvn@1202 570 nm = CodeCache::alive_nmethod(CodeCache::next(nm));
kvn@1202 571 }
kvn@1202 572
neliasso@4603 573 // Remember how many compile_ids weren't seen since the last flush.
neliasso@4603 574 _dead_compile_ids = curr_max_comp_id - nmethod_count;
neliasso@4603 575
never@1564 576 log_sweep("stop_cleaning",
never@1564 577 "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
never@1564 578 disconnected, made_not_entrant);
kvn@1202 579
never@1458 580 // Shut off compiler. Sweeper will start over with a new stack scan and
never@1458 581 // traversal cycle and turn it back on if it clears enough space.
neliasso@4603 582 if (is_full) {
neliasso@4603 583 _last_full_flush_time = os::javaTimeMillis();
kvn@1202 584 }
kvn@1202 585
sla@4802 586 jlong disconnect_end_counter = os::elapsed_counter();
sla@4802 587 jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
sla@4802 588 _total_disconnect_time += disconnect_time;
sla@4802 589 _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
sla@4802 590
sla@4802 591 EventCleanCodeCache event(UNTIMED);
sla@4802 592 if (event.should_commit()) {
sla@4802 593 event.set_starttime(disconnect_start_counter);
sla@4802 594 event.set_endtime(disconnect_end_counter);
sla@4802 595 event.set_disconnectedCount(disconnected);
sla@4802 596 event.set_madeNonEntrantCount(made_not_entrant);
sla@4802 597 event.commit();
sla@4802 598 }
sla@4802 599 _number_of_flushes++;
sla@4802 600
kvn@1202 601 // After two more traversals the sweeper will get rid of unrestored nmethods
neliasso@4603 602 _last_flush_traversal_id = _traversals;
neliasso@4603 603 _resweep = true;
kvn@1202 604 #ifdef ASSERT
sla@4802 605
kvn@1202 606 if(PrintMethodFlushing && Verbose) {
sla@4802 607 tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
kvn@1202 608 }
kvn@1202 609 #endif
kvn@1202 610 }
never@1564 611
never@1564 612
never@1564 613 // Print out some state information about the current sweep and the
never@1564 614 // state of the code cache if it's requested.
never@1564 615 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
never@1564 616 if (PrintMethodFlushing) {
iveresov@2329 617 stringStream s;
iveresov@2329 618 // Dump code cache state into a buffer before locking the tty,
iveresov@2329 619 // because log_state() will use locks causing lock conflicts.
iveresov@2329 620 CodeCache::log_state(&s);
iveresov@2329 621
never@1564 622 ttyLocker ttyl;
never@1564 623 tty->print("### sweeper: %s ", msg);
never@1564 624 if (format != NULL) {
never@1564 625 va_list ap;
never@1564 626 va_start(ap, format);
never@1564 627 tty->vprint(format, ap);
never@1564 628 va_end(ap);
never@1564 629 }
iveresov@2329 630 tty->print_cr(s.as_string());
never@1564 631 }
never@1564 632
never@1564 633 if (LogCompilation && (xtty != NULL)) {
iveresov@2329 634 stringStream s;
iveresov@2329 635 // Dump code cache state into a buffer before locking the tty,
iveresov@2329 636 // because log_state() will use locks causing lock conflicts.
iveresov@2329 637 CodeCache::log_state(&s);
iveresov@2329 638
never@1564 639 ttyLocker ttyl;
never@1566 640 xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
never@1564 641 if (format != NULL) {
never@1564 642 va_list ap;
never@1564 643 va_start(ap, format);
never@1564 644 xtty->vprint(format, ap);
never@1564 645 va_end(ap);
never@1564 646 }
iveresov@2329 647 xtty->print(s.as_string());
never@1564 648 xtty->stamp();
never@1564 649 xtty->end_elem();
never@1564 650 }
never@1564 651 }