annotate hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp @ 36556:7f092a7ce938

8150839: Adjust the number of compiler threads for 32-bit platforms
Summary: Set the number of compiler threads to 3 on 32-bit platforms.
Reviewed-by: iveresov
author zmajo
date Fri, 04 Mar 2016 08:53:59 +0100
parents 0ee84aa8e705
children ee256e343585
rev   line source
iveresov@8667 1 /*
twisti@33160 2 * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
iveresov@9625 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
iveresov@9625 4 *
iveresov@9625 5 * This code is free software; you can redistribute it and/or modify it
iveresov@9625 6 * under the terms of the GNU General Public License version 2 only, as
iveresov@9625 7 * published by the Free Software Foundation.
iveresov@9625 8 *
iveresov@9625 9 * This code is distributed in the hope that it will be useful, but WITHOUT
iveresov@9625 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
iveresov@9625 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
iveresov@9625 12 * version 2 for more details (a copy is included in the LICENSE file that
iveresov@9625 13 * accompanied this code).
iveresov@9625 14 *
iveresov@9625 15 * You should have received a copy of the GNU General Public License version
iveresov@9625 16 * 2 along with this work; if not, write to the Free Software Foundation,
iveresov@9625 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
iveresov@9625 18 *
iveresov@9625 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
iveresov@9625 20 * or visit www.oracle.com if you need additional information or have any
iveresov@9625 21 * questions.
iveresov@9625 22 *
iveresov@9625 23 */
iveresov@8667 24
iveresov@8667 25 #include "precompiled.hpp"
goetz@25715 26 #include "code/codeCache.hpp"
twisti@33160 27 #include "compiler/compileTask.hpp"
iveresov@8667 28 #include "runtime/advancedThresholdPolicy.hpp"
iveresov@8667 29 #include "runtime/simpleThresholdPolicy.inline.hpp"
iveresov@8667 30
iveresov@8667 31 #ifdef TIERED
iveresov@8667 32 // Print an event.
iveresov@8667 33 void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
iveresov@8667 34 int bci, CompLevel level) {
twisti@13891 35 tty->print(" rate=");
iveresov@8667 36 if (mh->prev_time() == 0) tty->print("n/a");
iveresov@8667 37 else tty->print("%f", mh->rate());
iveresov@8667 38
twisti@13891 39 tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
twisti@13891 40 threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
iveresov@8667 41
iveresov@8667 42 }
iveresov@8667 43
iveresov@8667 44 void AdvancedThresholdPolicy::initialize() {
zmajo@36556 45 int count = CICompilerCount;
zmajo@36556 46 #ifdef _LP64
iveresov@8667 47 // Turn on ergonomic compiler count selection
iveresov@8667 48 if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
iveresov@8667 49 FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
iveresov@8667 50 }
iveresov@8667 51 if (CICompilerCountPerCPU) {
iveresov@8667 52 // Simple log n seems to grow too slowly for tiered; try something faster: log n * log log n
iveresov@8667 53 int log_cpu = log2_intptr(os::active_processor_count());
iveresov@8667 54 int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
iveresov@8667 55 count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
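// Illustrative arithmetic (added for clarity, not part of the change): with
// 8 active processors, log_cpu = 3 and loglog_cpu = log2(3) = 1, so
// count = MAX2(3 * 1, 1) * 3 / 2 = 4; with 64 processors, log_cpu = 6,
// loglog_cpu = 2, and count = 12 * 3 / 2 = 18 compiler threads.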
iveresov@8667 56 }
zmajo@36556 57 #else
zmajo@36556 58 // On 32-bit systems, the number of compiler threads is limited to 3.
zmajo@36556 59 // On these systems, the virtual address space available to the JVM
zmajo@36556 60 // is usually limited to 2-4 GB (the exact value depends on the platform).
zmajo@36556 61 // As the compilers (especially C2) can consume a large amount of
zmajo@36556 62 // memory, scaling the number of compiler threads with the number of
zmajo@36556 63 // available cores can result in the exhaustion of the address space
zmajo@36556 64 // available to the VM and thus cause the VM to crash.
zmajo@36556 65 if (FLAG_IS_DEFAULT(CICompilerCount)) {
zmajo@36556 66 count = 3;
zmajo@36556 67 }
zmajo@36556 68 #endif
iveresov@8667 69
iveresov@8667 70 set_c1_count(MAX2(count / 3, 1));
anoll@24013 71 set_c2_count(MAX2(count - c1_count(), 1));
anoll@24013 72 FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());
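// Roughly a 1:2 split in favor of C2 (illustrative arithmetic, not from the
// original source): count = 3 yields c1_count = 1 and c2_count = 2;
// count = 18 yields c1_count = 6 and c2_count = 12.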
iveresov@8667 73
iveresov@8667 74 // Some inlining tuning
iveresov@8667 75 #ifdef X86
iveresov@8667 76 if (FLAG_IS_DEFAULT(InlineSmallCode)) {
iveresov@8667 77 FLAG_SET_DEFAULT(InlineSmallCode, 2000);
iveresov@8667 78 }
iveresov@8667 79 #endif
iveresov@8667 80
enevill@31053 81 #if defined SPARC || defined AARCH64
iveresov@8667 82 if (FLAG_IS_DEFAULT(InlineSmallCode)) {
iveresov@8667 83 FLAG_SET_DEFAULT(InlineSmallCode, 2500);
iveresov@8667 84 }
iveresov@8667 85 #endif
iveresov@8667 86
anoll@17617 87 set_increase_threshold_at_ratio();
iveresov@8667 88 set_start_time(os::javaTimeMillis());
iveresov@8667 89 }
iveresov@8667 90
iveresov@8667 91 // update_rate() is called from select_task() while holding a compile queue lock.
coleenp@13728 92 void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
vlivanov@24443 93 // Skip update if counters are absent.
vlivanov@24443 94 // Can't allocate them since we are holding compile queue lock.
vlivanov@24443 95 if (m->method_counters() == NULL) return;
vlivanov@24443 96
iveresov@8667 97 if (is_old(m)) {
iveresov@8667 98 // We don't remove old methods from the queue,
iveresov@8667 99 // so we can just zero the rate.
vlivanov@24443 100 m->set_rate(0);
iveresov@8667 101 return;
iveresov@8667 102 }
iveresov@8667 103
iveresov@8667 104 // We don't update the rate if we've just come out of a safepoint.
iveresov@8667 105 // delta_s is the time since last safepoint in milliseconds.
iveresov@8667 106 jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
iveresov@8667 107 jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
iveresov@8667 108 // How many events were there since the last time?
iveresov@8667 109 int event_count = m->invocation_count() + m->backedge_count();
iveresov@8667 110 int delta_e = event_count - m->prev_event_count();
iveresov@8667 111
iveresov@8667 112 // We should be running for at least 1ms.
iveresov@8667 113 if (delta_s >= TieredRateUpdateMinTime) {
iveresov@8667 114 // And we must've taken the previous point at least 1ms before.
iveresov@8667 115 if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
vlivanov@24443 116 m->set_prev_time(t);
vlivanov@24443 117 m->set_prev_event_count(event_count);
vlivanov@24443 118 m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
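// Illustrative numbers: 500 new invocation/backedge events over a 250 ms
// interval give a rate of 2.0 events per millisecond.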
vlivanov@24443 119 } else {
iveresov@8667 120 if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
iveresov@8667 121 // If nothing happened for 25ms, zero the rate. Don't modify prev values.
vlivanov@24443 122 m->set_rate(0);
iveresov@8667 123 }
vlivanov@24443 124 }
iveresov@8667 125 }
iveresov@8667 126 }
iveresov@8667 127
iveresov@8667 128 // Check if this method has been stale for a given number of milliseconds.
iveresov@8667 129 // See select_task().
coleenp@13728 130 bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
iveresov@8667 131 jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
iveresov@8667 132 jlong delta_t = t - m->prev_time();
iveresov@8667 133 if (delta_t > timeout && delta_s > timeout) {
iveresov@8667 134 int event_count = m->invocation_count() + m->backedge_count();
iveresov@8667 135 int delta_e = event_count - m->prev_event_count();
iveresov@8667 136 // Return true if there were no events.
iveresov@8667 137 return delta_e == 0;
iveresov@8667 138 }
iveresov@8667 139 return false;
iveresov@8667 140 }
iveresov@8667 141
iveresov@8667 142 // We don't remove old methods from the compile queue even if they have
iveresov@8667 143 // very low activity. See select_task().
coleenp@13728 144 bool AdvancedThresholdPolicy::is_old(Method* method) {
iveresov@8667 145 return method->invocation_count() > 50000 || method->backedge_count() > 500000;
iveresov@8667 146 }
iveresov@8667 147
coleenp@13728 148 double AdvancedThresholdPolicy::weight(Method* method) {
aph@35155 149 return (double)(method->rate() + 1) *
aph@35155 150 (method->invocation_count() + 1) * (method->backedge_count() + 1);
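// Illustrative: a method with rate 2.0, 1000 invocations and 4000 backedges
// weighs (2 + 1) * 1001 * 4001, about 1.2e7; the +1 terms keep a zero count
// from zeroing out the whole product.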
iveresov@8667 151 }
iveresov@8667 152
iveresov@8667 153 // Apply heuristics and return true if x should be compiled before y
coleenp@13728 154 bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
iveresov@8667 155 if (x->highest_comp_level() > y->highest_comp_level()) {
iveresov@8667 156 // recompilation after deopt
iveresov@8667 157 return true;
iveresov@8667 158 } else
iveresov@8667 159 if (x->highest_comp_level() == y->highest_comp_level()) {
iveresov@8667 160 if (weight(x) > weight(y)) {
iveresov@8667 161 return true;
iveresov@8667 162 }
iveresov@8667 163 }
iveresov@8667 164 return false;
iveresov@8667 165 }
iveresov@8667 166
iveresov@8667 167 // Is method profiled enough?
coleenp@13728 168 bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
coleenp@13728 169 MethodData* mdo = method->method_data();
iveresov@8667 170 if (mdo != NULL) {
iveresov@8667 171 int i = mdo->invocation_count_delta();
iveresov@8667 172 int b = mdo->backedge_count_delta();
zmajo@28650 173 return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
iveresov@8667 174 }
iveresov@8667 175 return false;
iveresov@8667 176 }
iveresov@8667 177
iveresov@8667 178 // Called with the queue locked and with at least one element
iveresov@8667 179 CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
twisti@33160 180 #if INCLUDE_JVMCI
dnsimon@35547 181 CompileTask *max_blocking_task = NULL;
twisti@33160 182 #endif
iveresov@8667 183 CompileTask *max_task = NULL;
coleenp@13952 184 Method* max_method = NULL;
iveresov@8667 185 jlong t = os::javaTimeMillis();
iveresov@8667 186 // Iterate through the queue and find the method with the maximum rate.
iveresov@8667 187 for (CompileTask* task = compile_queue->first(); task != NULL;) {
iveresov@8667 188 CompileTask* next_task = task->next();
coleenp@13728 189 Method* method = task->method();
coleenp@13728 190 update_rate(t, method);
iveresov@8667 191 if (max_task == NULL) {
iveresov@8667 192 max_task = task;
iveresov@8667 193 max_method = method;
iveresov@8667 194 } else {
iveresov@8667 195 // If a method has been stale for some time, remove it from the queue.
coleenp@13728 196 if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
iveresov@8667 197 if (PrintTieredEvents) {
iveresov@10014 198 print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
iveresov@8667 199 }
twisti@33160 200 task->log_task_dequeued("stale");
vlivanov@24443 201 compile_queue->remove_and_mark_stale(task);
iveresov@8667 202 method->clear_queued_for_compilation();
iveresov@8667 203 task = next_task;
iveresov@8667 204 continue;
iveresov@8667 205 }
iveresov@8667 206
iveresov@8667 207 // Select a method with a higher rate
coleenp@13728 208 if (compare_methods(method, max_method)) {
iveresov@8667 209 max_task = task;
iveresov@8667 210 max_method = method;
iveresov@8667 211 }
iveresov@8667 212 }
dnsimon@35547 213 #if INCLUDE_JVMCI
dnsimon@35547 214 if (UseJVMCICompiler && task->is_blocking()) {
dnsimon@35547 215 if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
dnsimon@35547 216 max_blocking_task = task;
dnsimon@35547 217 }
dnsimon@35547 218 }
dnsimon@35547 219 #endif
iveresov@8667 220 task = next_task;
iveresov@8667 221 }
iveresov@8667 222
twisti@33160 223 #if INCLUDE_JVMCI
twisti@33160 224 if (UseJVMCICompiler) {
dnsimon@35547 225 if (max_blocking_task != NULL) {
dnsimon@35547 226 // In blocking compilation mode, the CompileBroker will make
dnsimon@35547 227 // compilations submitted by a JVMCI compiler thread non-blocking. These
dnsimon@35547 228 // compilations should be scheduled after all blocking compilations
dnsimon@35547 229 // to service non-compiler related compilations sooner and reduce the
dnsimon@35547 230 // chance of such compilations timing out.
dnsimon@35547 231 max_task = max_blocking_task;
twisti@33160 232 max_method = max_task->method();
twisti@33160 233 }
twisti@33160 234 }
twisti@33160 235 #endif
twisti@33160 236
iveresov@10250 237 if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
coleenp@13728 238 && is_method_profiled(max_method)) {
iveresov@8667 239 max_task->set_comp_level(CompLevel_limited_profile);
iveresov@8667 240 if (PrintTieredEvents) {
iveresov@10014 241 print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
iveresov@8667 242 }
iveresov@8667 243 }
iveresov@8667 244
iveresov@8667 245 return max_task;
iveresov@8667 246 }
iveresov@8667 247
iveresov@8667 248 double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
iveresov@8667 249 double queue_size = CompileBroker::queue_size(level);
iveresov@8667 250 int comp_count = compiler_count(level);
iveresov@8667 251 double k = queue_size / (feedback_k * comp_count) + 1;
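// Illustrative: with feedback_k = 5, two compiler threads, and 30 queued
// methods, k = 30 / (5 * 2) + 1 = 4, i.e. the compile thresholds are
// effectively quadrupled under that load.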
anoll@17617 252
anoll@17617 253 // Increase C1 compile threshold when the code cache is filled more
anoll@17617 254 // than specified by IncreaseFirstTierCompileThresholdAt percentage.
anoll@17617 255 // The main intention is to keep enough free space for C2 compiled code
anoll@17617 256 // to achieve peak performance if the code cache is under stress.
anoll@17617 257 if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
thartmann@26796 258 double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
anoll@17617 259 if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
anoll@17617 260 k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
anoll@17617 261 }
anoll@17617 262 }
iveresov@8667 263 return k;
iveresov@8667 264 }
iveresov@8667 265
iveresov@8667 266 // Call and loop predicates determine whether a transition to a higher
iveresov@8667 267 // compilation level should be performed (pointers to predicate functions
iveresov@8667 268 // are passed to common()).
iveresov@8667 269 // Tier?LoadFeedback is basically a coefficient that determines
iveresov@8667 270 // how many methods per compiler thread can be in the queue before
iveresov@8667 271 // the threshold values double.
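// (This follows from threshold_scale() above: once queue_size reaches
// feedback_k * comp_count, k = queue_size / (feedback_k * comp_count) + 1 = 2,
// i.e. the thresholds have doubled.)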
zmajo@28650 272 bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
iveresov@8667 273 switch(cur_level) {
iveresov@8667 274 case CompLevel_none:
iveresov@8667 275 case CompLevel_limited_profile: {
iveresov@8667 276 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
zmajo@28650 277 return loop_predicate_helper<CompLevel_none>(i, b, k, method);
iveresov@8667 278 }
iveresov@8667 279 case CompLevel_full_profile: {
iveresov@8667 280 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
zmajo@28650 281 return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
iveresov@8667 282 }
iveresov@8667 283 default:
iveresov@8667 284 return true;
iveresov@8667 285 }
iveresov@8667 286 }
iveresov@8667 287
zmajo@28650 288 bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
iveresov@8667 289 switch(cur_level) {
iveresov@8667 290 case CompLevel_none:
iveresov@8667 291 case CompLevel_limited_profile: {
iveresov@8667 292 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
zmajo@28650 293 return call_predicate_helper<CompLevel_none>(i, b, k, method);
iveresov@8667 294 }
iveresov@8667 295 case CompLevel_full_profile: {
iveresov@8667 296 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
zmajo@28650 297 return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
iveresov@8667 298 }
iveresov@8667 299 default:
iveresov@8667 300 return true;
iveresov@8667 301 }
iveresov@8667 302 }
iveresov@8667 303
iveresov@8667 304 // If a method is old enough and is still in the interpreter, we want to
iveresov@8667 305 // start profiling without waiting for the compiled method to arrive.
iveresov@8667 306 // We also take the load on the compilers into account.
coleenp@13728 307 bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
iveresov@8667 308 if (cur_level == CompLevel_none &&
iveresov@8667 309 CompileBroker::queue_size(CompLevel_full_optimization) <=
iveresov@8667 310 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
iveresov@8667 311 int i = method->invocation_count();
iveresov@8667 312 int b = method->backedge_count();
iveresov@8667 313 double k = Tier0ProfilingStartPercentage / 100.0;
zmajo@28650 314 return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
iveresov@8667 315 }
iveresov@8667 316 return false;
iveresov@8667 317 }
iveresov@8667 318
iveresov@10014 319 // Inlining control: if we're compiling a profiled method with C1 and the callee
iveresov@10014 320 // is known to have OSRed in a C2 version, don't inline it.
iveresov@10014 321 bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
iveresov@10014 322 CompLevel comp_level = (CompLevel)env->comp_level();
iveresov@10014 323 if (comp_level == CompLevel_full_profile ||
iveresov@10014 324 comp_level == CompLevel_limited_profile) {
iveresov@10014 325 return callee->highest_osr_comp_level() == CompLevel_full_optimization;
iveresov@10014 326 }
iveresov@10014 327 return false;
iveresov@10014 328 }
iveresov@10014 329
iveresov@8667 330 // Create MDO if necessary.
iveresov@11572 331 void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
ppunegov@29335 332 if (mh->is_native() ||
ppunegov@29335 333 mh->is_abstract() ||
ppunegov@29335 334 mh->is_accessor() ||
ppunegov@29335 335 mh->is_constant_getter()) {
ppunegov@29335 336 return;
ppunegov@29335 337 }
iveresov@8667 338 if (mh->method_data() == NULL) {
coleenp@13728 339 Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
iveresov@8667 340 }
iveresov@8667 341 }
iveresov@8667 342
iveresov@8667 343
iveresov@8667 344 /*
iveresov@8667 345 * Method states:
iveresov@8667 346 * 0 - interpreter (CompLevel_none)
iveresov@8667 347 * 1 - pure C1 (CompLevel_simple)
iveresov@8667 348 * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
iveresov@8667 349 * 3 - C1 with full profiling (CompLevel_full_profile)
iveresov@8667 350 * 4 - C2 (CompLevel_full_optimization)
iveresov@8667 351 *
iveresov@8667 352 * Common state transition patterns:
iveresov@8667 353 * a. 0 -> 3 -> 4.
iveresov@8667 354 * The most common path. But note that even in this straightforward case
iveresov@8667 355 * profiling can start at level 0 and finish at level 3.
iveresov@8667 356 *
iveresov@8667 357 * b. 0 -> 2 -> 3 -> 4.
jwilhelm@22551 358 * This case occurs when the load on C2 is deemed too high. So, instead of transitioning
iveresov@8667 359 * into state 3 directly and over-profiling while a method is in the C2 queue, we transition to
iveresov@8667 360 * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
iveresov@8667 361 *
iveresov@8667 362 * c. 0 -> (3->2) -> 4.
iveresov@8667 363 * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
iveresov@8667 364 * for the profiling to fully occur at level 0. We then change the compilation level
thartmann@27643 365 * of the method to 2 while the request is still in-queue, because that allows the method to run much faster
thartmann@27643 366 * without full profiling while C2 is compiling.
iveresov@8667 367 *
iveresov@8667 368 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
iveresov@8667 369 * After a method was once compiled with C1 it can be identified as trivial and be compiled to
iveresov@8667 370 * level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
iveresov@8667 371 *
iveresov@8667 372 * e. 0 -> 4.
iveresov@8667 373 * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
iveresov@8667 374 * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
iveresov@8667 375 * the compiled version already exists).
iveresov@8667 376 *
iveresov@8667 377 * Note that since state 0 can be reached from any other state via deoptimization, different loops
iveresov@8667 378 * are possible.
iveresov@8667 379 *
iveresov@8667 380 */
iveresov@8667 381
iveresov@8667 382 // Common transition function. Given a predicate determines if a method should transition to another level.
coleenp@13728 383 CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
iveresov@8667 384 CompLevel next_level = cur_level;
iveresov@8667 385 int i = method->invocation_count();
iveresov@8667 386 int b = method->backedge_count();
iveresov@8667 387
iveresov@10250 388 if (is_trivial(method)) {
iveresov@10250 389 next_level = CompLevel_simple;
iveresov@10250 390 } else {
iveresov@10250 391 switch(cur_level) {
iveresov@10250 392 case CompLevel_none:
iveresov@10250 393 // If we were at full profile level, would we switch to full opt?
iveresov@10250 394 if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
iveresov@10250 395 next_level = CompLevel_full_optimization;
zmajo@28650 396 } else if ((this->*p)(i, b, cur_level, method)) {
twisti@33160 397 #if INCLUDE_JVMCI
twisti@33160 398 if (UseJVMCICompiler) {
twisti@33160 399 // Since JVMCI takes a while to warm up, its queue inevitably backs up during
twisti@33160 400 // early VM execution.
twisti@33160 401 next_level = CompLevel_full_profile;
twisti@33160 402 break;
twisti@33160 403 }
twisti@33160 404 #endif
iveresov@10250 405 // C1-generated fully profiled code is about 30% slower than the limited profile
iveresov@10250 406 // code that has only invocation and backedge counters. The observation is that
iveresov@10250 407 // if the C2 queue is large enough we can spend too much time in the fully profiled code
iveresov@10250 408 // while waiting for C2 to pick the method from the queue. To alleviate this problem
iveresov@10250 409 // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
iveresov@10250 410 // we choose to compile a limited profiled version and then recompile with full profiling
iveresov@10250 411 // when the load on C2 goes down.
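// Illustrative (Tier3DelayOn = 5 is an assumed value): with two C2 compiler
// threads, a C2 queue of more than 10 methods triggers the limited-profile
// detour.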
iveresov@10250 412 if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
twisti@33160 413 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
iveresov@10250 414 next_level = CompLevel_limited_profile;
iveresov@8667 415 } else {
iveresov@10250 416 next_level = CompLevel_full_profile;
iveresov@8667 417 }
iveresov@8667 418 }
iveresov@10250 419 break;
iveresov@10250 420 case CompLevel_limited_profile:
iveresov@10250 421 if (is_method_profiled(method)) {
iveresov@10250 422 // Special case: we got here because this method was fully profiled in the interpreter.
iveresov@10250 423 next_level = CompLevel_full_optimization;
iveresov@10250 424 } else {
coleenp@13728 425 MethodData* mdo = method->method_data();
iveresov@10250 426 if (mdo != NULL) {
iveresov@10250 427 if (mdo->would_profile()) {
iveresov@10250 428 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
iveresov@10250 429 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
zmajo@28650 430 (this->*p)(i, b, cur_level, method))) {
iveresov@10250 431 next_level = CompLevel_full_profile;
iveresov@10250 432 }
iveresov@10250 433 } else {
iveresov@8667 434 next_level = CompLevel_full_optimization;
iveresov@8667 435 }
iveresov@8667 436 }
iveresov@8667 437 }
iveresov@10250 438 break;
iveresov@10250 439 case CompLevel_full_profile:
iveresov@10250 440 {
coleenp@13728 441 MethodData* mdo = method->method_data();
iveresov@10250 442 if (mdo != NULL) {
iveresov@10250 443 if (mdo->would_profile()) {
iveresov@10250 444 int mdo_i = mdo->invocation_count_delta();
iveresov@10250 445 int mdo_b = mdo->backedge_count_delta();
zmajo@28650 446 if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
iveresov@10250 447 next_level = CompLevel_full_optimization;
iveresov@10250 448 }
iveresov@10250 449 } else {
iveresov@10250 450 next_level = CompLevel_full_optimization;
iveresov@10250 451 }
iveresov@10250 452 }
iveresov@10250 453 }
iveresov@10250 454 break;
iveresov@8667 455 }
iveresov@8667 456 }
iveresov@10250 457 return MIN2(next_level, (CompLevel)TieredStopAtLevel);
iveresov@8667 458 }
iveresov@8667 459
iveresov@8667 460 // Determine if a method should be compiled with a normal entry point at a different level.
coleenp@13728 461 CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
iveresov@10013 462 CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
iveresov@10250 463 common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
iveresov@8667 464 CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);
iveresov@8667 465
iveresov@8667 466 // If the OSR method level is greater than the regular method level, the levels should be
iveresov@8667 467 // equalized by raising the regular method level in order to avoid OSRs during each
iveresov@8667 468 // invocation of the method.
iveresov@8667 469 if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
coleenp@13728 470 MethodData* mdo = method->method_data();
iveresov@8667 471 guarantee(mdo != NULL, "MDO should not be NULL");
iveresov@8667 472 if (mdo->invocation_count() >= 1) {
iveresov@8667 473 next_level = CompLevel_full_optimization;
iveresov@8667 474 }
iveresov@8667 475 } else {
iveresov@8667 476 next_level = MAX2(osr_level, next_level);
iveresov@8667 477 }
iveresov@8667 478 return next_level;
iveresov@8667 479 }
iveresov@8667 480
iveresov@8667 481 // Determine if we should do an OSR compilation of a given method.
coleenp@13728 482 CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
iveresov@10250 483 CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
iveresov@8667 484 if (cur_level == CompLevel_none) {
iveresov@8667 485 // If there is a live OSR method, that means we deopted to the interpreter
iveresov@8667 486 // for the transition.
iveresov@10013 487 CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
iveresov@8667 488 if (osr_level > CompLevel_none) {
iveresov@8667 489 return osr_level;
iveresov@8667 490 }
iveresov@8667 491 }
iveresov@10013 492 return next_level;
iveresov@8667 493 }
iveresov@8667 494
iveresov@8667 495 // Update the rate and submit compile
coleenp@33593 496 void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
iveresov@8667 497 int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
iveresov@8667 498 update_rate(os::javaTimeMillis(), mh());
iveresov@11572 499 CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
iveresov@8667 500 }
iveresov@8667 501
iveresov@8667 502 // Handle the invocation event.
coleenp@33593 503 void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
iveresov@11572 504 CompLevel level, nmethod* nm, JavaThread* thread) {
iveresov@8667 505 if (should_create_mdo(mh(), level)) {
iveresov@11572 506 create_mdo(mh, thread);
iveresov@8667 507 }
anoll@24321 508 if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
iveresov@8667 509 CompLevel next_level = call_event(mh(), level);
iveresov@8667 510 if (next_level != level) {
iveresov@11572 511 compile(mh, InvocationEntryBci, next_level, thread);
iveresov@8667 512 }
iveresov@8667 513 }
iveresov@8667 514 }
iveresov@8667 515
iveresov@8667 516 // Handle the back branch event. Notice that we can compile the method
iveresov@8667 517 // with a regular entry from here.
coleenp@33593 518 void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
iveresov@11572 519 int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
iveresov@8667 520 if (should_create_mdo(mh(), level)) {
iveresov@11572 521 create_mdo(mh, thread);
iveresov@8667 522 }
iveresov@10014 523 // Check if MDO should be created for the inlined method
iveresov@10014 524 if (should_create_mdo(imh(), level)) {
iveresov@11572 525 create_mdo(imh, thread);
iveresov@10014 526 }
iveresov@8667 527
iveresov@10014 528 if (is_compilation_enabled()) {
iveresov@10014 529 CompLevel next_osr_level = loop_event(imh(), level);
iveresov@10014 530 CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
iveresov@10014 531 // At the very least compile the OSR version
anoll@24321 532 if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
iveresov@11572 533 compile(imh, bci, next_osr_level, thread);
iveresov@8667 534 }
iveresov@8667 535
iveresov@10014 536 // Use the loop event as an opportunity to also check if there have been
iveresov@10014 537 // enough calls.
iveresov@10014 538 CompLevel cur_level, next_level;
iveresov@10014 539 if (mh() != imh()) { // If there is an enclosing method
iveresov@10014 540 guarantee(nm != NULL, "Should have nmethod here");
iveresov@10014 541 cur_level = comp_level(mh());
iveresov@10014 542 next_level = call_event(mh(), cur_level);
iveresov@10014 543
iveresov@10014 544 if (max_osr_level == CompLevel_full_optimization) {
iveresov@10014 545 // The inlinee OSRed to full opt, so we need to modify the enclosing method to avoid deopts
iveresov@10014 546 bool make_not_entrant = false;
iveresov@10014 547 if (nm->is_osr_method()) {
iveresov@10014 548 // This is an OSR method; just make it not entrant and recompile later if needed
iveresov@10014 549 make_not_entrant = true;
iveresov@10014 550 } else {
iveresov@10014 551 if (next_level != CompLevel_full_optimization) {
iveresov@10014 552 // next_level is not full opt, so we need to recompile the
iveresov@10014 553 // enclosing method without the inlinee
iveresov@10014 554 cur_level = CompLevel_none;
iveresov@10014 555 make_not_entrant = true;
iveresov@10014 556 }
iveresov@10014 557 }
iveresov@10014 558 if (make_not_entrant) {
iveresov@10014 559 if (PrintTieredEvents) {
iveresov@10014 560 int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
iveresov@10014 561 print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
iveresov@10014 562 }
iveresov@10014 563 nm->make_not_entrant();
iveresov@10014 564 }
iveresov@10014 565 }
anoll@24321 566 if (!CompileBroker::compilation_is_in_queue(mh)) {
iveresov@10014 567 // Fix up next_level if necessary to avoid deopts
iveresov@10014 568 if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
iveresov@10014 569 next_level = CompLevel_full_profile;
iveresov@10014 570 }
iveresov@10014 571 if (cur_level != next_level) {
iveresov@11572 572 compile(mh, InvocationEntryBci, next_level, thread);
iveresov@10014 573 }
iveresov@10014 574 }
iveresov@10014 575 } else {
iveresov@10014 576 cur_level = comp_level(imh());
iveresov@10014 577 next_level = call_event(imh(), cur_level);
anoll@24321 578 if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
iveresov@11572 579 compile(imh, InvocationEntryBci, next_level, thread);
iveresov@10014 580 }
iveresov@8667 581 }
iveresov@8667 582 }
iveresov@8667 583 }
iveresov@8667 584
iveresov@8667 585 #endif // TIERED