annotate src/hotspot/share/runtime/compilationPolicy.cpp @ 53643:7d3cde494494

8214206: Fix for JDK-8213419 is broken on 32-bit
Reviewed-by: mdoerr, shade
author roland
date Thu, 22 Nov 2018 17:25:47 +0100
parents 0451e0a2f1f5
children 9e041366c764
rev   line source
duke@1 1 /*
zgu@49268 2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
duke@1 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@1 4 *
duke@1 5 * This code is free software; you can redistribute it and/or modify it
duke@1 6 * under the terms of the GNU General Public License version 2 only, as
duke@1 7 * published by the Free Software Foundation.
duke@1 8 *
duke@1 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@1 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@1 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@1 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@1 13 * accompanied this code).
duke@1 14 *
duke@1 15 * You should have received a copy of the GNU General Public License version
duke@1 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@1 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@1 18 *
trims@5547 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@5547 20 * or visit www.oracle.com if you need additional information or have any
trims@5547 21 * questions.
duke@1 22 *
duke@1 23 */
duke@1 24
stefank@7397 25 #include "precompiled.hpp"
coleenp@52788 26 #include "classfile/classLoaderDataGraph.inline.hpp"
stefank@7397 27 #include "code/compiledIC.hpp"
stefank@7397 28 #include "code/nmethod.hpp"
stefank@7397 29 #include "code/scopeDesc.hpp"
stefank@7397 30 #include "interpreter/interpreter.hpp"
jprovino@37248 31 #include "memory/resourceArea.hpp"
coleenp@13728 32 #include "oops/methodData.hpp"
hseigel@49805 33 #include "oops/method.inline.hpp"
stefank@7397 34 #include "oops/oop.inline.hpp"
stefank@7397 35 #include "prims/nativeLookup.hpp"
stefank@7397 36 #include "runtime/compilationPolicy.hpp"
stefank@7397 37 #include "runtime/frame.hpp"
stefank@7397 38 #include "runtime/handles.inline.hpp"
stefank@7397 39 #include "runtime/rframe.hpp"
stefank@7397 40 #include "runtime/stubRoutines.hpp"
stefank@7397 41 #include "runtime/thread.hpp"
redestad@52081 42 #include "runtime/tieredThresholdPolicy.hpp"
stefank@7397 43 #include "runtime/timer.hpp"
stefank@7397 44 #include "runtime/vframe.hpp"
stefank@7397 45 #include "runtime/vm_operations.hpp"
stefank@7397 46 #include "utilities/events.hpp"
stefank@7397 47 #include "utilities/globalDefinitions.hpp"
duke@1 48
thartmann@53254 49 #ifdef COMPILER1
thartmann@53254 50 #include "c1/c1_Compiler.hpp"
thartmann@53254 51 #endif
thartmann@53254 52 #ifdef COMPILER2
thartmann@53254 53 #include "opto/c2compiler.hpp"
thartmann@53254 54 #endif
thartmann@53254 55
duke@1 56 CompilationPolicy* CompilationPolicy::_policy;
duke@1 57 elapsedTimer CompilationPolicy::_accumulated_time;
duke@1 58 bool CompilationPolicy::_in_vm_startup;
duke@1 59
duke@1 60 // Determine compilation policy based on command line argument
duke@1 61 void compilationPolicy_init() {
duke@1 62 CompilationPolicy::set_in_vm_startup(DelayCompilationDuringStartup);
duke@1 63
duke@1 64 switch(CompilationPolicyChoice) {
duke@1 65 case 0:
duke@1 66 CompilationPolicy::set_policy(new SimpleCompPolicy());
duke@1 67 break;
duke@1 68
duke@1 69 case 1:
duke@1 70 #ifdef COMPILER2
duke@1 71 CompilationPolicy::set_policy(new StackWalkCompPolicy());
duke@1 72 #else
duke@1 73 Unimplemented();
duke@1 74 #endif
duke@1 75 break;
iveresov@6453 76 case 2:
iveresov@6453 77 #ifdef TIERED
redestad@52081 78 CompilationPolicy::set_policy(new TieredThresholdPolicy());
iveresov@6453 79 #else
iveresov@6453 80 Unimplemented();
iveresov@6453 81 #endif
iveresov@6453 82 break;
duke@1 83 default:
redestad@50604 84 fatal("CompilationPolicyChoice must be in the range: [0-2]");
duke@1 85 }
iveresov@6453 86 CompilationPolicy::policy()->initialize();
duke@1 87 }
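// Editor's note (illustrative, not part of the original file): the switch above
// maps -XX:CompilationPolicyChoice to a policy object, subject to the build:
//
//   CompilationPolicyChoice=0  -> SimpleCompPolicy        (any build)
//   CompilationPolicyChoice=1  -> StackWalkCompPolicy     (COMPILER2 builds only)
//   CompilationPolicyChoice=2  -> TieredThresholdPolicy   (TIERED builds only)
//
// Any other value hits the fatal() above.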
duke@1 88
duke@1 89 void CompilationPolicy::completed_vm_startup() {
duke@1 90 if (TraceCompilationPolicy) {
duke@1 91 tty->print("CompilationPolicy: completed vm startup.\n");
duke@1 92 }
duke@1 93 _in_vm_startup = false;
duke@1 94 }
duke@1 95
duke@1 96 // Returns true if m must be compiled before executing it
duke@1 97 // This is intended to force compiles for methods (usually for
duke@1 98 // debugging) that would otherwise be interpreted for some reason.
coleenp@46727 99 bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
minqi@14477 100 // Don't allow Xcomp to cause compiles in replay mode
minqi@14477 101 if (ReplayCompiles) return false;
minqi@14477 102
duke@1 103 if (m->has_compiled_code()) return false; // already compiled
iveresov@6453 104 if (!can_be_compiled(m, comp_level)) return false;
duke@1 105
duke@1 106 return !UseInterpreter || // must compile all methods
kvn@4750 107 (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
duke@1 108 }
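// Editor's note (illustrative, not part of the original file): under -Xcomp,
// UseInterpreter is false, so must_be_compiled() answers true for every
// compilable method that has no compiled code yet (outside replay mode);
// with -XX:+AlwaysCompileLoopMethods it also answers true for methods that
// contain loops, as long as the compile broker still accepts new jobs.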
duke@1 109
coleenp@46727 110 void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
never@38139 111 if (must_be_compiled(selected_method)) {
never@38139 112 // This path is unusual, mostly used by the '-Xcomp' stress test mode.
never@38139 113
never@38139 114 // Note: with several active threads, the must_be_compiled may be true
never@38139 115 // while can_be_compiled is false; remove assert
never@38139 116 // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
never@38139 117 if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
never@38139 118 // don't force compilation, resolve was on behalf of compiler
never@38139 119 return;
never@38139 120 }
never@38139 121 if (selected_method->method_holder()->is_not_initialized()) {
never@38139 122 // 'is_not_initialized' means not only '!is_initialized', but also that
never@38139 123 // initialization has not been started yet ('!being_initialized')
never@38139 124 // Do not force compilation of methods in uninitialized classes.
never@38139 125 // Note that doing this would throw an assert later,
never@38139 126 // in CompileBroker::compile_method.
never@38139 127 // We sometimes use the link resolver to do reflective lookups
never@38139 128 // even before classes are initialized.
never@38139 129 return;
never@38139 130 }
never@38139 131 CompileBroker::compile_method(selected_method, InvocationEntryBci,
never@38139 132 CompilationPolicy::policy()->initial_compile_level(),
neliasso@38218 133 methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK);
never@38139 134 }
never@38139 135 }
never@38139 136
duke@1 137 // Returns true if m is allowed to be compiled
coleenp@46727 138 bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
iignatyev@17126 139 // allow any levels for WhiteBox
iignatyev@17126 140 assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");
iignatyev@17126 141
duke@1 142 if (m->is_abstract()) return false;
duke@1 143 if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
duke@1 144
never@4645 145 // Math intrinsics should never be compiled as this can lead to
never@4645 146 // monotonicity problems because the interpreter will prefer the
never@4645 147 // compiled code to the intrinsic version. This can't happen in
never@4645 148 // production because the invocation counter can't be incremented,
never@4645 149 // but we shouldn't expose the system to this problem in testing
never@4645 150 // modes.
never@4645 151 if (!AbstractInterpreter::can_be_compiled(m)) {
never@4645 152 return false;
never@4645 153 }
iveresov@6453 154 if (comp_level == CompLevel_all) {
iignatyev@17126 155 if (TieredCompilation) {
iignatyev@17126 156 // enough to be compilable at any level for tiered
iignatyev@17126 157 return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
iignatyev@17126 158 } else {
iignatyev@17126 159 // must be compilable at available level for non-tiered
iignatyev@17126 160 return !m->is_not_compilable(CompLevel_highest_tier);
iignatyev@17126 161 }
iignatyev@16689 162 } else if (is_compile(comp_level)) {
iveresov@6453 163 return !m->is_not_compilable(comp_level);
iveresov@6453 164 }
iignatyev@16689 165 return false;
iveresov@6453 166 }
never@4645 167
iignatyev@19332 168 // Returns true if m is allowed to be osr compiled
coleenp@46727 169 bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
iignatyev@19332 170 bool result = false;
iignatyev@19332 171 if (comp_level == CompLevel_all) {
iignatyev@19332 172 if (TieredCompilation) {
iignatyev@19332 173 // enough to be osr compilable at any level for tiered
iignatyev@19332 174 result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
iignatyev@19332 175 } else {
iignatyev@19332 176 // must be osr compilable at available level for non-tiered
iignatyev@19332 177 result = !m->is_not_osr_compilable(CompLevel_highest_tier);
iignatyev@19332 178 }
iignatyev@19332 179 } else if (is_compile(comp_level)) {
iignatyev@19332 180 result = !m->is_not_osr_compilable(comp_level);
iignatyev@19332 181 }
iignatyev@19332 182 return (result && can_be_compiled(m, comp_level));
iignatyev@19332 183 }
iignatyev@19332 184
iveresov@6453 185 bool CompilationPolicy::is_compilation_enabled() {
iveresov@6453 186 // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
iveresov@6453 187 return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
duke@1 188 }
duke@1 189
dnsimon@35547 190 CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
dnsimon@35547 191 #if INCLUDE_JVMCI
dnsimon@35547 192 if (UseJVMCICompiler && !BackgroundCompilation) {
dnsimon@35547 193 /*
dnsimon@35547 194 * In blocking compilation mode, the CompileBroker will make
dnsimon@35547 195 * compilations submitted by a JVMCI compiler thread non-blocking. These
dnsimon@35547 196 * compilations should be scheduled after all blocking compilations
dnsimon@35547 197 * to service non-compiler related compilations sooner and reduce the
dnsimon@35547 198 * chance of such compilations timing out.
dnsimon@35547 199 */
dnsimon@35547 200 for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) {
dnsimon@35547 201 if (task->is_blocking()) {
dnsimon@35547 202 return task;
dnsimon@35547 203 }
dnsimon@35547 204 }
dnsimon@35547 205 }
dnsimon@35547 206 #endif
dnsimon@35547 207 return compile_queue->first();
dnsimon@35547 208 }
dnsimon@35547 209
duke@1 210 #ifndef PRODUCT
duke@1 211 void CompilationPolicy::print_time() {
duke@1 212 tty->print_cr ("Accumulated compilationPolicy times:");
duke@1 213 tty->print_cr ("---------------------------");
duke@1 214 tty->print_cr (" Total: %3.3f sec.", _accumulated_time.seconds());
duke@1 215 }
duke@1 216
iveresov@6453 217 void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
duke@1 218 if (TraceOnStackReplacement) {
duke@1 219 if (osr_nm == NULL) tty->print_cr("compilation failed");
drchase@24424 220 else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
duke@1 221 }
duke@1 222 }
duke@1 223 #endif // !PRODUCT
duke@1 224
iveresov@6453 225 void NonTieredCompPolicy::initialize() {
iveresov@6453 226 // Setup the compiler thread numbers
iveresov@6453 227 if (CICompilerCountPerCPU) {
iveresov@6453 228 // Example: if CICompilerCountPerCPU is true, then we get
iveresov@6453 229 // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
iveresov@6453 230 // May help big-app startup time.
roland@53643 231 _compiler_count = MAX2(log2_int(os::active_processor_count())-1,1);
thartmann@53254 232 // Make sure there is enough space in the code cache to hold all the compiler buffers
thartmann@53254 233 size_t buffer_size = 1;
thartmann@53254 234 #ifdef COMPILER1
thartmann@53254 235 buffer_size = is_client_compilation_mode_vm() ? Compiler::code_buffer_size() : buffer_size;
thartmann@53254 236 #endif
thartmann@53254 237 #ifdef COMPILER2
thartmann@53254 238 buffer_size = is_server_compilation_mode_vm() ? C2Compiler::initial_code_buffer_size() : buffer_size;
thartmann@53254 239 #endif
thartmann@53254 240 int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
thartmann@53254 241 if (_compiler_count > max_count) {
thartmann@53254 242 // Lower the compiler count such that all buffers fit into the code cache
thartmann@53254 243 _compiler_count = MAX2(max_count, 1);
thartmann@53254 244 }
anoll@24013 245 FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
iveresov@6453 246 } else {
iveresov@6453 247 _compiler_count = CICompilerCount;
iveresov@6453 248 }
iveresov@6453 249 }
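// Editor's note (illustrative arithmetic, not part of the original file):
// on a 32-way machine the CICompilerCountPerCPU path above gives
// MAX2(log2_int(32) - 1, 1) = 4 compiler threads. The clamp then makes sure
// all compiler buffers fit in the code cache: with hypothetical values of
// ReservedCodeCacheSize = 48M and buffer_size = 6M, max_count is roughly 8,
// so the count stays at 4; with a much smaller code cache the count would be
// lowered, but never below 1.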
iveresov@6453 250
iveresov@6747 251 // Note: this policy is used ONLY if TieredCompilation is off.
iveresov@6747 252 // compiler_count() behaves the following way:
iveresov@6747 253 // - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return
jcm@43455 254 // zero for the c1 compilation levels in server compilation mode runs
jcm@43455 255 // and c2 compilation levels in client compilation mode runs.
jcm@43455 256 // - with COMPILER2 not defined it should return zero for c2 compilation levels.
jcm@43455 257 // - with COMPILER1 not defined it should return zero for c1 compilation levels.
iveresov@6747 258 // - if neither is defined - always return zero.
iveresov@6453 259 int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
iveresov@6747 260 assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
jcm@43455 261 if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||)
jcm@43455 262 is_client_compilation_mode_vm() && is_c1_compile(comp_level)) {
iveresov@6747 263 return _compiler_count;
iveresov@6747 264 }
iveresov@6453 265 return 0;
iveresov@6453 266 }
iveresov@6453 267
coleenp@33593 268 void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
duke@1 269 // Make sure the invocation and backedge counters don't overflow again right away
duke@1 270 // as would be the case for native methods.
duke@1 271
duke@1 272 // BUT also make sure the method doesn't look like it was never executed.
duke@1 273 // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
jiangli@17000 274 MethodCounters* mcs = m->method_counters();
jiangli@17000 275 assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
jiangli@17000 276 mcs->invocation_counter()->set_carry();
jiangli@17000 277 mcs->backedge_counter()->set_carry();
duke@1 278
duke@1 279 assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
duke@1 280 }
duke@1 281
coleenp@33593 282 void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
jwilhelm@22551 283 // Delay next back-branch event but pump up invocation counter to trigger
duke@1 284 // whole method compilation.
jiangli@17000 285 MethodCounters* mcs = m->method_counters();
jiangli@17000 286 assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
jiangli@17000 287 InvocationCounter* i = mcs->invocation_counter();
jiangli@17000 288 InvocationCounter* b = mcs->backedge_counter();
duke@1 289
duke@1 290 // Don't set invocation_counter's value too low, otherwise the method will
duke@1 291 // look immature (ic < ~5300), which prevents inlining based on
duke@1 292 // type profiling.
duke@1 293 i->set(i->state(), CompileThreshold);
duke@1 294 // Don't reset counter too low - it is used to check if OSR method is ready.
duke@1 295 b->set(b->state(), CompileThreshold / 2);
duke@1 296 }
duke@1 297
iveresov@6453 298 //
iveresov@6453 299 // CounterDecay
iveresov@6453 300 //
jwilhelm@22551 301 // Iterates through invocation counters and decrements them. This
iveresov@6453 302 // is done at each safepoint.
iveresov@6453 303 //
iveresov@6453 304 class CounterDecay : public AllStatic {
iveresov@6453 305 static jlong _last_timestamp;
coleenp@13728 306 static void do_method(Method* m) {
jiangli@17000 307 MethodCounters* mcs = m->method_counters();
jiangli@17000 308 if (mcs != NULL) {
jiangli@17000 309 mcs->invocation_counter()->decay();
jiangli@17000 310 }
iveresov@6453 311 }
iveresov@6453 312 public:
iveresov@6453 313 static void decay();
iveresov@6453 314 static bool is_decay_needed() {
iveresov@6453 315 return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
iveresov@6453 316 }
iveresov@6453 317 };
iveresov@6453 318
iveresov@6453 319 jlong CounterDecay::_last_timestamp = 0;
iveresov@6453 320
iveresov@6453 321 void CounterDecay::decay() {
iveresov@6453 322 _last_timestamp = os::javaTimeMillis();
iveresov@6453 323
iveresov@6453 324 // This operation is performed only at the end of a safepoint, so no GCs
iveresov@6453 325 // will be going on; all Java mutators are suspended at this point, and
iveresov@6453 326 // hence the SystemDictionary_lock is not needed either.
iveresov@6453 327 assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
zgu@49268 328 size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
zgu@49268 329 size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
iveresov@6453 330 CounterHalfLifeTime);
zgu@49268 331 for (size_t i = 0; i < classes_per_tick; i++) {
coleenp@46729 332 InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
coleenp@46729 333 if (k != NULL) {
coleenp@46729 334 k->methods_do(do_method);
iveresov@6453 335 }
iveresov@6453 336 }
iveresov@6453 337 }
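// Editor's note (illustrative arithmetic, not part of the original file):
// with the usual defaults of CounterDecayMinIntervalLength = 500 (ms) and
// CounterHalfLifeTime = 30 (s), assumed here, classes_per_tick is about
// nclasses * 0.5 / 30, i.e. each decay tick visits roughly 1/60 of the
// loaded instance classes and decays their methods' invocation counters.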
iveresov@6453 338
iveresov@6453 339 // Called at the end of the safepoint
iveresov@6453 340 void NonTieredCompPolicy::do_safepoint_work() {
iveresov@6453 341 if(UseCounterDecay && CounterDecay::is_decay_needed()) {
iveresov@6453 342 CounterDecay::decay();
iveresov@6453 343 }
iveresov@6453 344 }
iveresov@6453 345
iveresov@6453 346 void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
iveresov@6453 347 ScopeDesc* sd = trap_scope;
jiangli@17000 348 MethodCounters* mcs;
jiangli@17000 349 InvocationCounter* c;
iveresov@6453 350 for (; !sd->is_top(); sd = sd->sender()) {
jiangli@17000 351 mcs = sd->method()->method_counters();
jiangli@17000 352 if (mcs != NULL) {
jiangli@17000 353 // Reset ICs of inlined methods, since they can trigger compilations also.
jiangli@17000 354 mcs->invocation_counter()->reset();
jiangli@17000 355 }
iveresov@6453 356 }
jiangli@17000 357 mcs = sd->method()->method_counters();
jiangli@17000 358 if (mcs != NULL) {
jiangli@17000 359 c = mcs->invocation_counter();
jiangli@17000 360 if (is_osr) {
jiangli@17000 361 // It was an OSR method, so bump the count higher.
jiangli@17000 362 c->set(c->state(), CompileThreshold);
jiangli@17000 363 } else {
jiangli@17000 364 c->reset();
jiangli@17000 365 }
jiangli@17000 366 mcs->backedge_counter()->reset();
iveresov@6453 367 }
iveresov@6453 368 }
iveresov@6453 369
iveresov@6453 370 // This method can be called by any component of the runtime to notify the policy
jwilhelm@22551 371 // that it's recommended to delay the compilation of this method.
coleenp@13728 372 void NonTieredCompPolicy::delay_compilation(Method* method) {
jiangli@17000 373 MethodCounters* mcs = method->method_counters();
jiangli@17001 374 if (mcs != NULL) {
jiangli@17001 375 mcs->invocation_counter()->decay();
jiangli@17001 376 mcs->backedge_counter()->decay();
jiangli@17001 377 }
iveresov@6453 378 }
iveresov@6453 379
coleenp@13728 380 void NonTieredCompPolicy::disable_compilation(Method* method) {
jiangli@17000 381 MethodCounters* mcs = method->method_counters();
jiangli@17000 382 if (mcs != NULL) {
jiangli@17000 383 mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
jiangli@17000 384 mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
jiangli@17000 385 }
iveresov@6453 386 }
iveresov@6453 387
iveresov@6453 388 CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
dnsimon@35547 389 return select_task_helper(compile_queue);
iveresov@6453 390 }
iveresov@6453 391
coleenp@13728 392 bool NonTieredCompPolicy::is_mature(Method* method) {
coleenp@13728 393 MethodData* mdo = method->method_data();
iveresov@6453 394 assert(mdo != NULL, "Should be");
iveresov@6453 395 uint current = mdo->mileage_of(method);
iveresov@6453 396 uint initial = mdo->creation_mileage();
iveresov@6453 397 if (current < initial)
iveresov@6453 398 return true; // some sort of overflow
iveresov@6453 399 uint target;
iveresov@6453 400 if (ProfileMaturityPercentage <= 0)
iveresov@6453 401 target = (uint) -ProfileMaturityPercentage; // absolute value
iveresov@6453 402 else
iveresov@6453 403 target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
iveresov@6453 404 return (current >= initial + target);
iveresov@6453 405 }
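// Editor's note (illustrative, not part of the original file): with typical
// defaults of CompileThreshold = 10000 and ProfileMaturityPercentage = 20
// (assumed here), the maturity target is 20 * 10000 / 100 = 2000, so the
// profile is considered mature once the method has accumulated 2000 mileage
// beyond the MDO's creation mileage; a negative ProfileMaturityPercentage is
// instead interpreted as an absolute mileage target.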
iveresov@6453 406
coleenp@33593 407 nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
rbackman@38133 408 int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
iveresov@6453 409 assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
iveresov@6453 410 NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
iveresov@11572 411 if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
iveresov@11572 412 // If certain JVMTI events (e.g. frame pop event) are requested then the
iveresov@11572 413 // thread is forced to remain in interpreted code. This is
iveresov@11572 414 // implemented partly by a check in the run_compiled_code
iveresov@11572 415 // section of the interpreter whether we should skip running
iveresov@11572 416 // compiled code, and partly by skipping OSR compiles for
iveresov@11572 417 // interpreted-only threads.
iveresov@11572 418 if (bci != InvocationEntryBci) {
iveresov@11572 419 reset_counter_for_back_branch_event(method);
iveresov@11572 420 return NULL;
iveresov@6453 421 }
iveresov@6453 422 }
minqi@14477 423 if (CompileTheWorld || ReplayCompiles) {
minqi@14477 424 // Don't trigger other compiles in testing mode
minqi@14477 425 if (bci == InvocationEntryBci) {
minqi@14477 426 reset_counter_for_invocation_event(method);
minqi@14477 427 } else {
minqi@14477 428 reset_counter_for_back_branch_event(method);
minqi@14477 429 }
minqi@14477 430 return NULL;
minqi@14477 431 }
minqi@14477 432
iveresov@6453 433 if (bci == InvocationEntryBci) {
iveresov@6453 434 // When the code cache is full, compilation is switched off and UseCompiler
iveresov@6453 435 // is set to false.
iveresov@6453 436 if (!method->has_compiled_code() && UseCompiler) {
iveresov@11572 437 method_invocation_event(method, thread);
iveresov@6453 438 } else {
iveresov@6453 439 // Force counter overflow on method entry, even if no compilation
iveresov@6453 440 // happened. (The method_invocation_event call does this also.)
iveresov@6453 441 reset_counter_for_invocation_event(method);
iveresov@6453 442 }
iveresov@6453 443 // Compilation at an invocation overflow no longer retries the test for a
iveresov@6453 444 // compiled method. We always run the loser of the race as interpreted,
iveresov@6453 445 // so return NULL.
iveresov@6453 446 return NULL;
iveresov@6453 447 } else {
iveresov@6453 448 // counter overflow in a loop => try to do on-stack-replacement
iveresov@6453 449 nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
iveresov@6453 450 NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
iveresov@6453 451 // When the code cache is full, we should not compile any more...
iveresov@6453 452 if (osr_nm == NULL && UseCompiler) {
iveresov@11572 453 method_back_branch_event(method, bci, thread);
iveresov@6453 454 osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
iveresov@6453 455 }
iveresov@6453 456 if (osr_nm == NULL) {
iveresov@6453 457 reset_counter_for_back_branch_event(method);
iveresov@6453 458 return NULL;
iveresov@6453 459 }
iveresov@6453 460 return osr_nm;
iveresov@6453 461 }
iveresov@6453 462 return NULL;
iveresov@6453 463 }
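// Editor's note (not part of the original file): in short, a counter overflow
// at InvocationEntryBci is treated as a method-entry event and may request a
// normal compilation, while an overflow at any other bci is a back-branch
// event that looks up or requests an OSR nmethod for that bci; whenever NULL
// is returned the caller simply keeps running the method in the interpreter.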
iveresov@6453 464
iveresov@6453 465 #ifndef PRODUCT
coleenp@33593 466 void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
iveresov@6453 467 if (TraceInvocationCounterOverflow) {
jiangli@17000 468 MethodCounters* mcs = m->method_counters();
jiangli@17000 469 assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
jiangli@17000 470 InvocationCounter* ic = mcs->invocation_counter();
jiangli@17000 471 InvocationCounter* bc = mcs->backedge_counter();
iveresov@6453 472 ResourceMark rm;
goetz@33604 473 if (bci == InvocationEntryBci) {
goetz@33604 474 tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
goetz@33604 475 } else {
goetz@33604 476 tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
goetz@33604 477 }
iveresov@6453 478 m->print_value();
iveresov@6453 479 tty->cr();
iveresov@6453 480 ic->print();
iveresov@6453 481 bc->print();
iveresov@6453 482 if (ProfileInterpreter) {
iveresov@6453 483 if (bci != InvocationEntryBci) {
coleenp@13728 484 MethodData* mdo = m->method_data();
iveresov@6453 485 if (mdo != NULL) {
dlong@48689 486 ProfileData *pd = mdo->bci_to_data(branch_bci);
dlong@48689 487 if (pd == NULL) {
dlong@48689 488 tty->print_cr("back branch count = N/A (missing ProfileData)");
dlong@48689 489 } else {
dlong@48689 490 tty->print_cr("back branch count = %d", pd->as_JumpData()->taken());
dlong@48689 491 }
iveresov@6453 492 }
iveresov@6453 493 }
iveresov@6453 494 }
iveresov@6453 495 }
iveresov@6453 496 }
iveresov@6453 497
coleenp@33593 498 void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
iveresov@6453 499 if (TraceOnStackReplacement) {
iveresov@6453 500 ResourceMark rm;
iveresov@6453 501 tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
iveresov@6453 502 method->print_short_name(tty);
iveresov@6453 503 tty->print_cr(" at bci %d", bci);
iveresov@6453 504 }
iveresov@6453 505 }
iveresov@6453 506 #endif // !PRODUCT
iveresov@6453 507
duke@1 508 // SimpleCompPolicy - compile current method
duke@1 509
coleenp@33593 510 void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
twisti@13891 511 const int comp_level = CompLevel_highest_tier;
twisti@13891 512 const int hot_count = m->invocation_count();
duke@1 513 reset_counter_for_invocation_event(m);
duke@1 514
iignatyev@17126 515 if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
rbackman@38133 516 CompiledMethod* nm = m->code();
duke@1 517 if (nm == NULL ) {
neliasso@38218 518 CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread);
duke@1 519 }
duke@1 520 }
duke@1 521 }
duke@1 522
coleenp@33593 523 void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
twisti@13891 524 const int comp_level = CompLevel_highest_tier;
twisti@13891 525 const int hot_count = m->backedge_count();
duke@1 526
iignatyev@19332 527 if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
neliasso@38218 528 CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
twisti@13891 529 NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
duke@1 530 }
duke@1 531 }
duke@1 532 // StackWalkCompPolicy - walk up stack to find a suitable method to compile
duke@1 533
duke@1 534 #ifdef COMPILER2
duke@1 535 const char* StackWalkCompPolicy::_msg = NULL;
duke@1 536
duke@1 537
duke@1 538 // Consider m for compilation
coleenp@33593 539 void StackWalkCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
twisti@13891 540 const int comp_level = CompLevel_highest_tier;
twisti@13891 541 const int hot_count = m->invocation_count();
duke@1 542 reset_counter_for_invocation_event(m);
duke@1 543
iignatyev@17126 544 if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
iveresov@11572 545 ResourceMark rm(thread);
duke@1 546 frame fr = thread->last_frame();
duke@1 547 assert(fr.is_interpreted_frame(), "must be interpreted");
duke@1 548 assert(fr.interpreter_frame_method() == m(), "bad method");
duke@1 549
duke@1 550 if (TraceCompilationPolicy) {
duke@1 551 tty->print("method invocation trigger: ");
duke@1 552 m->print_short_name(tty);
drchase@24424 553 tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)m()), m->code_size());
duke@1 554 }
duke@1 555 RegisterMap reg_map(thread, false);
duke@1 556 javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
duke@1 557 // triggerVF is the frame that triggered its counter
iveresov@31521 558 RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m());
duke@1 559
duke@1 560 if (first->top_method()->code() != NULL) {
duke@1 561 // called obsolete method/nmethod -- no need to recompile
drchase@24424 562 if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, p2i(first->top_method()->code()));
duke@1 563 } else {
duke@1 564 if (TimeCompilationPolicy) accumulated_time()->start();
duke@1 565 GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
duke@1 566 stack->push(first);
duke@1 567 RFrame* top = findTopInlinableFrame(stack);
duke@1 568 if (TimeCompilationPolicy) accumulated_time()->stop();
duke@1 569 assert(top != NULL, "findTopInlinableFrame returned null");
duke@1 570 if (TraceCompilationPolicy) top->print();
twisti@13891 571 CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
neliasso@38218 572 m, hot_count, CompileTask::Reason_InvocationCount, thread);
duke@1 573 }
duke@1 574 }
duke@1 575 }
duke@1 576
coleenp@33593 577 void StackWalkCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
twisti@13891 578 const int comp_level = CompLevel_highest_tier;
twisti@13891 579 const int hot_count = m->backedge_count();
duke@1 580
iignatyev@19332 581 if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
neliasso@38218 582 CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
twisti@13891 583 NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
duke@1 584 }
duke@1 585 }
duke@1 586
duke@1 587 RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
duke@1 588 // go up the stack until finding a frame that (probably) won't be inlined
duke@1 589 // into its caller
duke@1 590 RFrame* current = stack->at(0); // current choice for stopping
duke@1 591 assert( current && !current->is_compiled(), "" );
duke@1 592 const char* msg = NULL;
duke@1 593
duke@1 594 while (1) {
duke@1 595
duke@1 596 // before going up the stack further, check if doing so would get us into
duke@1 597 // compiled code
duke@1 598 RFrame* next = senderOf(current, stack);
duke@1 599 if( !next ) // No next frame up the stack?
duke@1 600 break; // Then compile with current frame
duke@1 601
iveresov@31521 602 Method* m = current->top_method();
iveresov@31521 603 Method* next_m = next->top_method();
duke@1 604
duke@1 605 if (TraceCompilationPolicy && Verbose) {
duke@1 606 tty->print("[caller: ");
duke@1 607 next_m->print_short_name(tty);
duke@1 608 tty->print("] ");
duke@1 609 }
duke@1 610
duke@1 611 if( !Inline ) { // Inlining turned off
duke@1 612 msg = "Inlining turned off";
duke@1 613 break;
duke@1 614 }
duke@1 615 if (next_m->is_not_compilable()) { // Did we fail to compile this before?
duke@1 616 msg = "caller not compilable";
duke@1 617 break;
duke@1 618 }
duke@1 619 if (next->num() > MaxRecompilationSearchLength) {
duke@1 620 // don't go up too high when searching for recompilees
duke@1 621 msg = "don't go up any further: > MaxRecompilationSearchLength";
duke@1 622 break;
duke@1 623 }
duke@1 624 if (next->distance() > MaxInterpretedSearchLength) {
duke@1 625 // don't go up too high when searching for recompilees
duke@1 626 msg = "don't go up any further: next > MaxInterpretedSearchLength";
duke@1 627 break;
duke@1 628 }
duke@1 629 // The compiled frame above already decided not to inline;
duke@1 630 // do not recompile it.
duke@1 631 if (next->is_compiled()) {
duke@1 632 msg = "not going up into optimized code";
duke@1 633 break;
duke@1 634 }
duke@1 635
duke@1 636 // Interpreted frame above us was already compiled. Do not force
duke@1 637 // a recompile, although if the frame above us runs long enough an
duke@1 638 // OSR might still happen.
duke@1 639 if( current->is_interpreted() && next_m->has_compiled_code() ) {
duke@1 640 msg = "not going up -- already compiled caller";
duke@1 641 break;
duke@1 642 }
duke@1 643
duke@1 644 // Compute how frequent this call site is. We have current method 'm'.
duke@1 645 // We know next method 'next_m' is interpreted. Find the call site and
duke@1 646 // check the various invocation counts.
duke@1 647 int invcnt = 0; // Caller counts
duke@1 648 if (ProfileInterpreter) {
duke@1 649 invcnt = next_m->interpreter_invocation_count();
duke@1 650 }
duke@1 651 int cnt = 0; // Call site counts
duke@1 652 if (ProfileInterpreter && next_m->method_data() != NULL) {
duke@1 653 ResourceMark rm;
duke@1 654 int bci = next->top_vframe()->bci();
duke@1 655 ProfileData* data = next_m->method_data()->bci_to_data(bci);
duke@1 656 if (data != NULL && data->is_CounterData())
duke@1 657 cnt = data->as_CounterData()->count();
duke@1 658 }
duke@1 659
duke@1 660 // Caller counts / call-site counts; i.e. is this call site
duke@1 661 // a hot call site for method next_m?
duke@1 662 int freq = (invcnt) ? cnt/invcnt : cnt;
duke@1 663
duke@1 664 // Check size and frequency limits
duke@1 665 if ((msg = shouldInline(m, freq, cnt)) != NULL) {
duke@1 666 break;
duke@1 667 }
duke@1 668 // Check inlining negative tests
duke@1 669 if ((msg = shouldNotInline(m)) != NULL) {
duke@1 670 break;
duke@1 671 }
duke@1 672
duke@1 673
duke@1 674 // If the caller method is too big or otherwise not compilable, we do not
duke@1 675 // want to compile it just to inline a method.
iignatyev@17126 676 if (!can_be_compiled(next_m, CompLevel_any)) {
duke@1 677 msg = "caller cannot be compiled";
duke@1 678 break;
duke@1 679 }
duke@1 680
duke@1 681 if( next_m->name() == vmSymbols::class_initializer_name() ) {
duke@1 682 msg = "do not compile class initializer (OSR ok)";
duke@1 683 break;
duke@1 684 }
duke@1 685
duke@1 686 if (TraceCompilationPolicy && Verbose) {
duke@1 687 tty->print("\n\t check caller: ");
duke@1 688 next_m->print_short_name(tty);
iveresov@31521 689 tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)next_m), next_m->code_size());
duke@1 690 }
duke@1 691
duke@1 692 current = next;
duke@1 693 }
duke@1 694
duke@1 695 assert( !current || !current->is_compiled(), "" );
duke@1 696
duke@1 697 if (TraceCompilationPolicy && msg) tty->print("(%s)\n", msg);
duke@1 698
duke@1 699 return current;
duke@1 700 }
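// Editor's note (not part of the original file): the walk starts at the frame
// whose counter overflowed and keeps moving up to the caller while inlining
// the current method into that caller still looks plausible; the first caller
// that fails one of the checks above (inlining disabled, caller not
// compilable, search limits exceeded, already compiled, too big, class
// initializer, ...) stops the walk, and the method of the frame reached so
// far is the one submitted for compilation.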
duke@1 701
duke@1 702 RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
duke@1 703 RFrame* sender = rf->caller();
duke@1 704 if (sender && sender->num() == stack->length()) stack->push(sender);
duke@1 705 return sender;
duke@1 706 }
duke@1 707
duke@1 708
coleenp@33593 709 const char* StackWalkCompPolicy::shouldInline(const methodHandle& m, float freq, int cnt) {
duke@1 710 // Allows targeted inlining
duke@1 711 // positive filter: should the callee be inlined? Returns NULL (--> yes)
duke@1 712 // or a rejection msg.
duke@1 713 int max_size = MaxInlineSize;
duke@1 714 int cost = m->code_size();
duke@1 715
duke@1 716 // Check for too many throws (and not too huge)
duke@1 717 if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize ) {
duke@1 718 return NULL;
duke@1 719 }
duke@1 720
duke@1 721 // bump the max size if the call is frequent
duke@1 722 if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
duke@1 723 if (TraceFrequencyInlining) {
duke@1 724 tty->print("(Inlined frequent method)\n");
duke@1 725 m->print();
duke@1 726 }
duke@1 727 max_size = FreqInlineSize;
duke@1 728 }
duke@1 729 if (cost > max_size) {
duke@1 730 return (_msg = "too big");
duke@1 731 }
duke@1 732 return NULL;
duke@1 733 }
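// Editor's note (illustrative, not part of the original file): with typical
// x86 defaults of MaxInlineSize = 35 and FreqInlineSize = 325 (assumed here),
// a 60-bytecode callee would be rejected as "too big" at a cold call site,
// but accepted once the site is hot enough that freq >= InlineFrequencyRatio
// or cnt >= InlineFrequencyCount, because the size limit then grows to 325.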
duke@1 734
duke@1 735
coleenp@33593 736 const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
duke@1 737 // negative filter: should the callee NOT be inlined? Returns NULL (--> inline) or a rejection msg.
duke@1 738 if (m->is_abstract()) return (_msg = "abstract method");
duke@1 739 // note: we allow ik->is_abstract()
coleenp@14391 740 if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
duke@1 741 if (m->is_native()) return (_msg = "native method");
rbackman@38133 742 CompiledMethod* m_code = m->code();
twisti@6418 743 if (m_code != NULL && m_code->code_size() > InlineSmallCode)
duke@1 744 return (_msg = "already compiled into a big method");
duke@1 745
duke@1 746 // use frequency-based objections only for non-trivial methods
duke@1 747 if (m->code_size() <= MaxTrivialSize) return NULL;
duke@1 748 if (UseInterpreter) { // don't use counts with -Xcomp
duke@1 749 if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
duke@1 750 if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
duke@1 751 }
coleenp@13728 752 if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");
duke@1 753
duke@1 754 return NULL;
duke@1 755 }
duke@1 756
duke@1 757
duke@1 758
duke@1 759 #endif // COMPILER2