annotate src/share/vm/opto/compile.cpp @ 5936:17ec2d5c43e8

8032490: Remove -XX:+-UseOldInlining
Summary: Move the option to obsolete options list, purge the redundant compiler code.
Reviewed-by: kvn, jrose
author shade
date Fri, 24 Jan 2014 15:26:56 +0400
parents 5ec7dace41a6
children 45467c53f178
rev   line source
duke@0 1 /*
sla@4802 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #include "precompiled.hpp"
twisti@3883 26 #include "asm/macroAssembler.hpp"
twisti@3883 27 #include "asm/macroAssembler.inline.hpp"
kvn@5784 28 #include "ci/ciReplay.hpp"
stefank@1879 29 #include "classfile/systemDictionary.hpp"
stefank@1879 30 #include "code/exceptionHandlerTable.hpp"
stefank@1879 31 #include "code/nmethod.hpp"
stefank@1879 32 #include "compiler/compileLog.hpp"
twisti@3883 33 #include "compiler/disassembler.hpp"
stefank@1879 34 #include "compiler/oopMap.hpp"
stefank@1879 35 #include "opto/addnode.hpp"
stefank@1879 36 #include "opto/block.hpp"
stefank@1879 37 #include "opto/c2compiler.hpp"
stefank@1879 38 #include "opto/callGenerator.hpp"
stefank@1879 39 #include "opto/callnode.hpp"
stefank@1879 40 #include "opto/cfgnode.hpp"
stefank@1879 41 #include "opto/chaitin.hpp"
stefank@1879 42 #include "opto/compile.hpp"
stefank@1879 43 #include "opto/connode.hpp"
stefank@1879 44 #include "opto/divnode.hpp"
stefank@1879 45 #include "opto/escape.hpp"
stefank@1879 46 #include "opto/idealGraphPrinter.hpp"
stefank@1879 47 #include "opto/loopnode.hpp"
stefank@1879 48 #include "opto/machnode.hpp"
stefank@1879 49 #include "opto/macro.hpp"
stefank@1879 50 #include "opto/matcher.hpp"
rbackman@5492 51 #include "opto/mathexactnode.hpp"
stefank@1879 52 #include "opto/memnode.hpp"
stefank@1879 53 #include "opto/mulnode.hpp"
stefank@1879 54 #include "opto/node.hpp"
stefank@1879 55 #include "opto/opcodes.hpp"
stefank@1879 56 #include "opto/output.hpp"
stefank@1879 57 #include "opto/parse.hpp"
stefank@1879 58 #include "opto/phaseX.hpp"
stefank@1879 59 #include "opto/rootnode.hpp"
stefank@1879 60 #include "opto/runtime.hpp"
stefank@1879 61 #include "opto/stringopts.hpp"
stefank@1879 62 #include "opto/type.hpp"
stefank@1879 63 #include "opto/vectornode.hpp"
stefank@1879 64 #include "runtime/arguments.hpp"
stefank@1879 65 #include "runtime/signature.hpp"
stefank@1879 66 #include "runtime/stubRoutines.hpp"
stefank@1879 67 #include "runtime/timer.hpp"
sla@4802 68 #include "trace/tracing.hpp"
stefank@1879 69 #include "utilities/copy.hpp"
stefank@1879 70 #ifdef TARGET_ARCH_MODEL_x86_32
stefank@1879 71 # include "adfiles/ad_x86_32.hpp"
stefank@1879 72 #endif
stefank@1879 73 #ifdef TARGET_ARCH_MODEL_x86_64
stefank@1879 74 # include "adfiles/ad_x86_64.hpp"
stefank@1879 75 #endif
stefank@1879 76 #ifdef TARGET_ARCH_MODEL_sparc
stefank@1879 77 # include "adfiles/ad_sparc.hpp"
stefank@1879 78 #endif
stefank@1879 79 #ifdef TARGET_ARCH_MODEL_zero
stefank@1879 80 # include "adfiles/ad_zero.hpp"
stefank@1879 81 #endif
bobv@2073 82 #ifdef TARGET_ARCH_MODEL_arm
bobv@2073 83 # include "adfiles/ad_arm.hpp"
bobv@2073 84 #endif
bobv@2073 85 #ifdef TARGET_ARCH_MODEL_ppc
bobv@2073 86 # include "adfiles/ad_ppc.hpp"
bobv@2073 87 #endif
duke@0 88
twisti@1915 89
twisti@1915 90 // -------------------- Compile::mach_constant_base_node -----------------------
twisti@1915 91 // Constant table base node singleton.
twisti@1915 92 MachConstantBaseNode* Compile::mach_constant_base_node() {
twisti@1915 93 if (_mach_constant_base_node == NULL) {
twisti@1915 94 _mach_constant_base_node = new (C) MachConstantBaseNode();
twisti@1915 95 _mach_constant_base_node->add_req(C->root());
twisti@1915 96 }
twisti@1915 97 return _mach_constant_base_node;
twisti@1915 98 }
twisti@1915 99
twisti@1915 100
duke@0 101 /// Support for intrinsics.
duke@0 102
duke@0 103 // Return the index at which m must be inserted (or already exists).
duke@0 104 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
duke@0 105 int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
duke@0 106 #ifdef ASSERT
duke@0 107 for (int i = 1; i < _intrinsics->length(); i++) {
duke@0 108 CallGenerator* cg1 = _intrinsics->at(i-1);
duke@0 109 CallGenerator* cg2 = _intrinsics->at(i);
duke@0 110 assert(cg1->method() != cg2->method()
duke@0 111 ? cg1->method() < cg2->method()
duke@0 112 : cg1->is_virtual() < cg2->is_virtual(),
duke@0 113 "compiler intrinsics list must stay sorted");
duke@0 114 }
duke@0 115 #endif
duke@0 116 // Binary search sorted list, in decreasing intervals [lo, hi].
duke@0 117 int lo = 0, hi = _intrinsics->length()-1;
duke@0 118 while (lo <= hi) {
duke@0 119 int mid = (uint)(hi + lo) / 2;
duke@0 120 ciMethod* mid_m = _intrinsics->at(mid)->method();
duke@0 121 if (m < mid_m) {
duke@0 122 hi = mid-1;
duke@0 123 } else if (m > mid_m) {
duke@0 124 lo = mid+1;
duke@0 125 } else {
duke@0 126 // look at minor sort key
duke@0 127 bool mid_virt = _intrinsics->at(mid)->is_virtual();
duke@0 128 if (is_virtual < mid_virt) {
duke@0 129 hi = mid-1;
duke@0 130 } else if (is_virtual > mid_virt) {
duke@0 131 lo = mid+1;
duke@0 132 } else {
duke@0 133 return mid; // exact match
duke@0 134 }
duke@0 135 }
duke@0 136 }
duke@0 137 return lo; // inexact match
duke@0 138 }
duke@0 139
duke@0 140 void Compile::register_intrinsic(CallGenerator* cg) {
duke@0 141 if (_intrinsics == NULL) {
roland@3974 142 _intrinsics = new (comp_arena())GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL);
duke@0 143 }
duke@0 144 // This code is stolen from ciObjectFactory::insert.
duke@0 145 // Really, GrowableArray should have methods for
duke@0 146 // insert_at, remove_at, and binary_search.
duke@0 147 int len = _intrinsics->length();
duke@0 148 int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
duke@0 149 if (index == len) {
duke@0 150 _intrinsics->append(cg);
duke@0 151 } else {
duke@0 152 #ifdef ASSERT
duke@0 153 CallGenerator* oldcg = _intrinsics->at(index);
duke@0 154 assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
duke@0 155 #endif
duke@0 156 _intrinsics->append(_intrinsics->at(len-1));
duke@0 157 int pos;
duke@0 158 for (pos = len-2; pos >= index; pos--) {
duke@0 159 _intrinsics->at_put(pos+1,_intrinsics->at(pos));
duke@0 160 }
duke@0 161 _intrinsics->at_put(index, cg);
duke@0 162 }
duke@0 163 assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
duke@0 164 }
duke@0 165
duke@0 166 CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
duke@0 167 assert(m->is_loaded(), "don't try this on unloaded methods");
duke@0 168 if (_intrinsics != NULL) {
duke@0 169 int index = intrinsic_insertion_index(m, is_virtual);
duke@0 170 if (index < _intrinsics->length()
duke@0 171 && _intrinsics->at(index)->method() == m
duke@0 172 && _intrinsics->at(index)->is_virtual() == is_virtual) {
duke@0 173 return _intrinsics->at(index);
duke@0 174 }
duke@0 175 }
duke@0 176 // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
jrose@856 177 if (m->intrinsic_id() != vmIntrinsics::_none &&
jrose@856 178 m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
duke@0 179 CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
duke@0 180 if (cg != NULL) {
duke@0 181 // Save it for next time:
duke@0 182 register_intrinsic(cg);
duke@0 183 return cg;
duke@0 184 } else {
duke@0 185 gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
duke@0 186 }
duke@0 187 }
duke@0 188 return NULL;
duke@0 189 }
duke@0 190
duke@0 191 // Compile::register_library_intrinsics and make_vm_intrinsic are defined
duke@0 192 // in library_call.cpp.
duke@0 193
duke@0 194
duke@0 195 #ifndef PRODUCT
duke@0 196 // statistics gathering...
duke@0 197
duke@0 198 juint Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
duke@0 199 jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};
duke@0 200
duke@0 201 bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
duke@0 202 assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
duke@0 203 int oflags = _intrinsic_hist_flags[id];
duke@0 204 assert(flags != 0, "what happened?");
duke@0 205 if (is_virtual) {
duke@0 206 flags |= _intrinsic_virtual;
duke@0 207 }
duke@0 208 bool changed = (flags != oflags);
duke@0 209 if ((flags & _intrinsic_worked) != 0) {
duke@0 210 juint count = (_intrinsic_hist_count[id] += 1);
duke@0 211 if (count == 1) {
duke@0 212 changed = true; // first time
duke@0 213 }
duke@0 214 // increment the overall count also:
duke@0 215 _intrinsic_hist_count[vmIntrinsics::_none] += 1;
duke@0 216 }
duke@0 217 if (changed) {
duke@0 218 if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
duke@0 219 // Something changed about the intrinsic's virtuality.
duke@0 220 if ((flags & _intrinsic_virtual) != 0) {
duke@0 221 // This is the first use of this intrinsic as a virtual call.
duke@0 222 if (oflags != 0) {
duke@0 223 // We already saw it as a non-virtual, so note both cases.
duke@0 224 flags |= _intrinsic_both;
duke@0 225 }
duke@0 226 } else if ((oflags & _intrinsic_both) == 0) {
duke@0 227 // This is the first use of this intrinsic as a non-virtual call.
duke@0 228 flags |= _intrinsic_both;
duke@0 229 }
duke@0 230 }
duke@0 231 _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
duke@0 232 }
duke@0 233 // update the overall flags also:
duke@0 234 _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
duke@0 235 return changed;
duke@0 236 }
duke@0 237
duke@0 238 static char* format_flags(int flags, char* buf) {
duke@0 239 buf[0] = 0;
duke@0 240 if ((flags & Compile::_intrinsic_worked) != 0) strcat(buf, ",worked");
duke@0 241 if ((flags & Compile::_intrinsic_failed) != 0) strcat(buf, ",failed");
duke@0 242 if ((flags & Compile::_intrinsic_disabled) != 0) strcat(buf, ",disabled");
duke@0 243 if ((flags & Compile::_intrinsic_virtual) != 0) strcat(buf, ",virtual");
duke@0 244 if ((flags & Compile::_intrinsic_both) != 0) strcat(buf, ",nonvirtual");
duke@0 245 if (buf[0] == 0) strcat(buf, ",");
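// Every flag above was appended with a leading ',', so &buf[1] below skips the
// first comma; when no flags are set, the padded "," yields an empty string.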
duke@0 246 assert(buf[0] == ',', "must be");
duke@0 247 return &buf[1];
duke@0 248 }
duke@0 249
duke@0 250 void Compile::print_intrinsic_statistics() {
duke@0 251 char flagsbuf[100];
duke@0 252 ttyLocker ttyl;
duke@0 253 if (xtty != NULL) xtty->head("statistics type='intrinsic'");
duke@0 254 tty->print_cr("Compiler intrinsic usage:");
duke@0 255 juint total = _intrinsic_hist_count[vmIntrinsics::_none];
duke@0 256 if (total == 0) total = 1; // avoid div0 in case of no successes
duke@0 257 #define PRINT_STAT_LINE(name, c, f) \
duke@0 258 tty->print_cr(" %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
duke@0 259 for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
duke@0 260 vmIntrinsics::ID id = (vmIntrinsics::ID) index;
duke@0 261 int flags = _intrinsic_hist_flags[id];
duke@0 262 juint count = _intrinsic_hist_count[id];
duke@0 263 if ((flags | count) != 0) {
duke@0 264 PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
duke@0 265 }
duke@0 266 }
duke@0 267 PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
duke@0 268 if (xtty != NULL) xtty->tail("statistics");
duke@0 269 }
duke@0 270
duke@0 271 void Compile::print_statistics() {
duke@0 272 { ttyLocker ttyl;
duke@0 273 if (xtty != NULL) xtty->head("statistics type='opto'");
duke@0 274 Parse::print_statistics();
duke@0 275 PhaseCCP::print_statistics();
duke@0 276 PhaseRegAlloc::print_statistics();
duke@0 277 Scheduling::print_statistics();
duke@0 278 PhasePeephole::print_statistics();
duke@0 279 PhaseIdealLoop::print_statistics();
duke@0 280 if (xtty != NULL) xtty->tail("statistics");
duke@0 281 }
duke@0 282 if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
duke@0 283 // put this under its own <statistics> element.
duke@0 284 print_intrinsic_statistics();
duke@0 285 }
duke@0 286 }
duke@0 287 #endif //PRODUCT
duke@0 288
duke@0 289 // Support for bundling info
duke@0 290 Bundle* Compile::node_bundling(const Node *n) {
duke@0 291 assert(valid_bundle_info(n), "oob");
duke@0 292 return &_node_bundling_base[n->_idx];
duke@0 293 }
duke@0 294
duke@0 295 bool Compile::valid_bundle_info(const Node *n) {
duke@0 296 return (_node_bundling_limit > n->_idx);
duke@0 297 }
duke@0 298
duke@0 299
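// Replace all uses of n with nn, keeping the GVN hash table consistent and
// recording every rewired user for the next round of IGVN.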
never@1080 300 void Compile::gvn_replace_by(Node* n, Node* nn) {
never@1080 301 for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
never@1080 302 Node* use = n->last_out(i);
never@1080 303 bool is_in_table = initial_gvn()->hash_delete(use);
never@1080 304 uint uses_found = 0;
never@1080 305 for (uint j = 0; j < use->len(); j++) {
never@1080 306 if (use->in(j) == n) {
never@1080 307 if (j < use->req())
never@1080 308 use->set_req(j, nn);
never@1080 309 else
never@1080 310 use->set_prec(j, nn);
never@1080 311 uses_found++;
never@1080 312 }
never@1080 313 }
never@1080 314 if (is_in_table) {
never@1080 315 // reinsert into table
never@1080 316 initial_gvn()->hash_find_insert(use);
never@1080 317 }
never@1080 318 record_for_igvn(use);
never@1080 319 i -= uses_found; // we deleted 1 or more copies of this edge
never@1080 320 }
never@1080 321 }
never@1080 322
never@1080 323
bharadwaj@3880 324 static inline bool not_a_node(const Node* n) {
bharadwaj@3880 325 if (n == NULL) return true;
bharadwaj@3880 326 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
bharadwaj@3880 327 if (*(address*)n == badAddress) return true; // killed by Node::destruct
bharadwaj@3880 328 return false;
bharadwaj@3880 329 }
never@1080 330
duke@0 331 // Identify all nodes that are reachable from below (i.e., useful).
duke@0 332 // Use a breadth-first pass that records state in a Unique_Node_List;
duke@0 333 // a recursive traversal would be slower.
duke@0 334 void Compile::identify_useful_nodes(Unique_Node_List &useful) {
duke@0 335 int estimated_worklist_size = unique();
duke@0 336 useful.map( estimated_worklist_size, NULL ); // preallocate space
duke@0 337
duke@0 338 // Initialize worklist
duke@0 339 if (root() != NULL) { useful.push(root()); }
duke@0 340 // If 'top' is cached, declare it useful to preserve cached node
duke@0 341 if( cached_top_node() ) { useful.push(cached_top_node()); }
duke@0 342
duke@0 343 // Push all useful nodes onto the list, breadth-first
duke@0 344 for( uint next = 0; next < useful.size(); ++next ) {
duke@0 345 assert( next < unique(), "Unique useful nodes < total nodes");
duke@0 346 Node *n = useful.at(next);
duke@0 347 uint max = n->len();
duke@0 348 for( uint i = 0; i < max; ++i ) {
duke@0 349 Node *m = n->in(i);
bharadwaj@3880 350 if (not_a_node(m)) continue;
duke@0 351 useful.push(m);
duke@0 352 }
duke@0 353 }
duke@0 354 }
duke@0 355
bharadwaj@3880 356 // Update dead_node_list with any missing dead nodes, using the useful
bharadwaj@3880 357 // list. Consider all non-useful nodes to be useless, i.e., dead nodes.
bharadwaj@3880 358 void Compile::update_dead_node_list(Unique_Node_List &useful) {
bharadwaj@3880 359 uint max_idx = unique();
bharadwaj@3880 360 VectorSet& useful_node_set = useful.member_set();
bharadwaj@3880 361
bharadwaj@3880 362 for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
bharadwaj@3880 363 // If node with index node_idx is not in useful set,
bharadwaj@3880 364 // mark it as dead in dead node list.
bharadwaj@3880 365 if (! useful_node_set.test(node_idx) ) {
bharadwaj@3880 366 record_dead_node(node_idx);
bharadwaj@3880 367 }
bharadwaj@3880 368 }
bharadwaj@3880 369 }
bharadwaj@3880 370
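// Remove from the given late-inline list every CallGenerator whose call node
// is no longer in the useful set, compacting the surviving entries in place.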
roland@3974 371 void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
roland@3974 372 int shift = 0;
roland@3974 373 for (int i = 0; i < inlines->length(); i++) {
roland@3974 374 CallGenerator* cg = inlines->at(i);
roland@3974 375 CallNode* call = cg->call_node();
roland@3974 376 if (shift > 0) {
roland@3974 377 inlines->at_put(i-shift, cg);
roland@3974 378 }
roland@3974 379 if (!useful.member(call)) {
roland@3974 380 shift++;
roland@3974 381 }
roland@3974 382 }
roland@3974 383 inlines->trunc_to(inlines->length()-shift);
roland@3974 384 }
roland@3974 385
duke@0 386 // Disconnect all useless nodes by disconnecting those at the boundary.
duke@0 387 void Compile::remove_useless_nodes(Unique_Node_List &useful) {
duke@0 388 uint next = 0;
kvn@2825 389 while (next < useful.size()) {
duke@0 390 Node *n = useful.at(next++);
duke@0 391 // Use raw traversal of out edges since this code removes out edges
duke@0 392 int max = n->outcnt();
kvn@2825 393 for (int j = 0; j < max; ++j) {
duke@0 394 Node* child = n->raw_out(j);
kvn@2825 395 if (! useful.member(child)) {
kvn@2825 396 assert(!child->is_top() || child != top(),
kvn@2825 397 "If top is cached in Compile object it is in useful list");
duke@0 398 // Only need to remove this out-edge to the useless node
duke@0 399 n->raw_del_out(j);
duke@0 400 --j;
duke@0 401 --max;
duke@0 402 }
duke@0 403 }
duke@0 404 if (n->outcnt() == 1 && n->has_special_unique_user()) {
kvn@2825 405 record_for_igvn(n->unique_out());
kvn@2825 406 }
kvn@2825 407 }
kvn@2825 408 // Remove useless macro and predicate opaq nodes
kvn@2825 409 for (int i = C->macro_count()-1; i >= 0; i--) {
kvn@2825 410 Node* n = C->macro_node(i);
kvn@2825 411 if (!useful.member(n)) {
kvn@2825 412 remove_macro_node(n);
duke@0 413 }
duke@0 414 }
roland@4154 415 // Remove useless expensive nodes
roland@4154 416 for (int i = C->expensive_count()-1; i >= 0; i--) {
roland@4154 417 Node* n = C->expensive_node(i);
roland@4154 418 if (!useful.member(n)) {
roland@4154 419 remove_expensive_node(n);
roland@4154 420 }
roland@4154 421 }
roland@3974 422 // clean up the late inline lists
roland@3974 423 remove_useless_late_inlines(&_string_late_inlines, useful);
kvn@4675 424 remove_useless_late_inlines(&_boxing_late_inlines, useful);
roland@3974 425 remove_useless_late_inlines(&_late_inlines, useful);
duke@0 426 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
duke@0 427 }
duke@0 428
duke@0 429 //------------------------------frame_size_in_words-----------------------------
duke@0 430 // frame_slots in units of words
duke@0 431 int Compile::frame_size_in_words() const {
duke@0 432 // shift is 0 in LP32 and 1 in LP64
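// (e.g., on LP64, LogBytesPerWord == 3 and LogBytesPerInt == 2, so a frame of
// 20 32-bit slots is 20 >> 1 == 10 words; on 32-bit VMs the shift is 0)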
duke@0 433 const int shift = (LogBytesPerWord - LogBytesPerInt);
duke@0 434 int words = _frame_slots >> shift;
duke@0 435 assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
duke@0 436 return words;
duke@0 437 }
duke@0 438
duke@0 439 // ============================================================================
duke@0 440 //------------------------------CompileWrapper---------------------------------
duke@0 441 class CompileWrapper : public StackObj {
duke@0 442 Compile *const _compile;
duke@0 443 public:
duke@0 444 CompileWrapper(Compile* compile);
duke@0 445
duke@0 446 ~CompileWrapper();
duke@0 447 };
duke@0 448
duke@0 449 CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
duke@0 450 // the Compile* pointer is stored in the current ciEnv:
duke@0 451 ciEnv* env = compile->env();
duke@0 452 assert(env == ciEnv::current(), "must already be a ciEnv active");
duke@0 453 assert(env->compiler_data() == NULL, "compile already active?");
duke@0 454 env->set_compiler_data(compile);
duke@0 455 assert(compile == Compile::current(), "sanity");
duke@0 456
duke@0 457 compile->set_type_dict(NULL);
duke@0 458 compile->set_type_hwm(NULL);
duke@0 459 compile->set_type_last_size(0);
duke@0 460 compile->set_last_tf(NULL, NULL);
duke@0 461 compile->set_indexSet_arena(NULL);
duke@0 462 compile->set_indexSet_free_block_list(NULL);
duke@0 463 compile->init_type_arena();
duke@0 464 Type::Initialize(compile);
duke@0 465 _compile->set_scratch_buffer_blob(NULL);
duke@0 466 _compile->begin_method();
duke@0 467 }
duke@0 468 CompileWrapper::~CompileWrapper() {
duke@0 469 _compile->end_method();
duke@0 470 if (_compile->scratch_buffer_blob() != NULL)
duke@0 471 BufferBlob::free(_compile->scratch_buffer_blob());
duke@0 472 _compile->env()->set_compiler_data(NULL);
duke@0 473 }
duke@0 474
duke@0 475
duke@0 476 //----------------------------print_compile_messages---------------------------
duke@0 477 void Compile::print_compile_messages() {
duke@0 478 #ifndef PRODUCT
duke@0 479 // Check if recompiling
duke@0 480 if (_subsume_loads == false && PrintOpto) {
duke@0 481 // Recompiling without allowing machine instructions to subsume loads
duke@0 482 tty->print_cr("*********************************************************");
duke@0 483 tty->print_cr("** Bailout: Recompile without subsuming loads **");
duke@0 484 tty->print_cr("*********************************************************");
duke@0 485 }
kvn@38 486 if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
kvn@38 487 // Recompiling without escape analysis
kvn@38 488 tty->print_cr("*********************************************************");
kvn@38 489 tty->print_cr("** Bailout: Recompile without escape analysis **");
kvn@38 490 tty->print_cr("*********************************************************");
kvn@38 491 }
kvn@4675 492 if (_eliminate_boxing != EliminateAutoBox && PrintOpto) {
kvn@4675 493 // Recompiling without boxing elimination
kvn@4675 494 tty->print_cr("*********************************************************");
kvn@4675 495 tty->print_cr("** Bailout: Recompile without boxing elimination **");
kvn@4675 496 tty->print_cr("*********************************************************");
kvn@4675 497 }
duke@0 498 if (env()->break_at_compile()) {
twisti@605 499 // Open the debugger when compiling this method.
duke@0 500 tty->print("### Breaking when compiling: ");
duke@0 501 method()->print_short_name();
duke@0 502 tty->cr();
duke@0 503 BREAKPOINT;
duke@0 504 }
duke@0 505
duke@0 506 if( PrintOpto ) {
duke@0 507 if (is_osr_compilation()) {
duke@0 508 tty->print("[OSR]%3d", _compile_id);
duke@0 509 } else {
duke@0 510 tty->print("%3d", _compile_id);
duke@0 511 }
duke@0 512 }
duke@0 513 #endif
duke@0 514 }
duke@0 515
duke@0 516
kvn@1979 517 //-----------------------init_scratch_buffer_blob------------------------------
kvn@1979 518 // Construct a temporary BufferBlob and cache it for this compile.
twisti@1915 519 void Compile::init_scratch_buffer_blob(int const_size) {
kvn@1979 520 // If there is already a scratch buffer blob allocated and the
kvn@1979 521 // constant section is big enough, use it. Otherwise free the
kvn@1979 522 // current and allocate a new one.
kvn@1979 523 BufferBlob* blob = scratch_buffer_blob();
kvn@1979 524 if ((blob != NULL) && (const_size <= _scratch_const_size)) {
kvn@1979 525 // Use the current blob.
kvn@1979 526 } else {
kvn@1979 527 if (blob != NULL) {
kvn@1979 528 BufferBlob::free(blob);
kvn@1979 529 }
duke@0 530
kvn@1979 531 ResourceMark rm;
kvn@1979 532 _scratch_const_size = const_size;
kvn@1979 533 int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
kvn@1979 534 blob = BufferBlob::create("Compile::scratch_buffer", size);
kvn@1979 535 // Record the buffer blob for next time.
kvn@1979 536 set_scratch_buffer_blob(blob);
kvn@1979 537 // Have we run out of code space?
kvn@1979 538 if (scratch_buffer_blob() == NULL) {
kvn@1979 539 // Let CompileBroker disable further compilations.
kvn@1979 540 record_failure("Not enough space for scratch buffer in CodeCache");
kvn@1979 541 return;
kvn@1979 542 }
kvn@163 543 }
duke@0 544
duke@0 545 // Initialize the relocation buffers
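// (the scratch relocation area occupies the tail of the blob's content and is
// later split three ways by scratch_emit_size() for consts, insts, and stubs)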
twisti@1668 546 relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
duke@0 547 set_scratch_locs_memory(locs_buf);
duke@0 548 }
duke@0 549
duke@0 550
duke@0 551 //-----------------------scratch_emit_size-------------------------------------
duke@0 552 // Helper function that computes size by emitting code
duke@0 553 uint Compile::scratch_emit_size(const Node* n) {
twisti@1915 554 // Start scratch_emit_size section.
twisti@1915 555 set_in_scratch_emit_size(true);
twisti@1915 556
duke@0 557 // Emit into a trash buffer and count bytes emitted.
duke@0 558 // This is a pretty expensive way to compute a size,
duke@0 559 // but it works well enough if seldom used.
duke@0 560 // All common fixed-size instructions are given a size
duke@0 561 // method by the AD file.
duke@0 562 // Note that the scratch buffer blob and locs memory are
duke@0 563 // allocated at the beginning of the compile task, and
duke@0 564 // may be shared by several calls to scratch_emit_size.
duke@0 565 // The allocation of the scratch buffer blob is particularly
duke@0 566 // expensive, since it has to grab the code cache lock.
duke@0 567 BufferBlob* blob = this->scratch_buffer_blob();
duke@0 568 assert(blob != NULL, "Initialize BufferBlob at start");
duke@0 569 assert(blob->size() > MAX_inst_size, "sanity");
duke@0 570 relocInfo* locs_buf = scratch_locs_memory();
twisti@1668 571 address blob_begin = blob->content_begin();
duke@0 572 address blob_end = (address)locs_buf;
twisti@1668 573 assert(blob->content_contains(blob_end), "sanity");
duke@0 574 CodeBuffer buf(blob_begin, blob_end - blob_begin);
twisti@1915 575 buf.initialize_consts_size(_scratch_const_size);
duke@0 576 buf.initialize_stubs_size(MAX_stubs_size);
duke@0 577 assert(locs_buf != NULL, "sanity");
twisti@1915 578 int lsize = MAX_locs_size / 3;
twisti@1915 579 buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
twisti@1915 580 buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
twisti@1915 581 buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
twisti@1915 582
twisti@1915 583 // Do the emission.
kvn@2602 584
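// MachBranch nodes cannot be emitted without a bound label, so temporarily
// bind them to fakeL here and restore the original label/block number below.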
kvn@2602 585 Label fakeL; // Fake label for branch instructions.
kvn@2616 586 Label* saveL = NULL;
kvn@2616 587 uint save_bnum = 0;
kvn@2616 588 bool is_branch = n->is_MachBranch();
kvn@2602 589 if (is_branch) {
kvn@2602 590 MacroAssembler masm(&buf);
kvn@2602 591 masm.bind(fakeL);
kvn@2616 592 n->as_MachBranch()->save_label(&saveL, &save_bnum);
kvn@2616 593 n->as_MachBranch()->label_set(&fakeL, 0);
kvn@2602 594 }
duke@0 595 n->emit(buf, this->regalloc());
kvn@2616 596 if (is_branch) // Restore label.
kvn@2616 597 n->as_MachBranch()->label_set(saveL, save_bnum);
twisti@1915 598
twisti@1915 599 // End scratch_emit_size section.
twisti@1915 600 set_in_scratch_emit_size(false);
twisti@1915 601
twisti@1668 602 return buf.insts_size();
duke@0 603 }
duke@0 604
duke@0 605
duke@0 606 // ============================================================================
duke@0 607 //------------------------------Compile standard-------------------------------
duke@0 608 debug_only( int Compile::_debug_idx = 100000; )
duke@0 609
duke@0 610 // Compile a method. entry_bci is -1 for normal compilations; otherwise it indicates
duke@0 611 // the continuation bci for on-stack replacement.
duke@0 612
duke@0 613
kvn@4675 614 Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
kvn@4675 615 bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing )
duke@0 616 : Phase(Compiler),
duke@0 617 _env(ci_env),
duke@0 618 _log(ci_env->log()),
duke@0 619 _compile_id(ci_env->compile_id()),
duke@0 620 _save_argument_registers(false),
duke@0 621 _stub_name(NULL),
duke@0 622 _stub_function(NULL),
duke@0 623 _stub_entry_point(NULL),
duke@0 624 _method(target),
duke@0 625 _entry_bci(osr_bci),
duke@0 626 _initial_gvn(NULL),
duke@0 627 _for_igvn(NULL),
duke@0 628 _warm_calls(NULL),
duke@0 629 _subsume_loads(subsume_loads),
kvn@38 630 _do_escape_analysis(do_escape_analysis),
kvn@4675 631 _eliminate_boxing(eliminate_boxing),
duke@0 632 _failure_reason(NULL),
duke@0 633 _code_buffer("Compile::Fill_buffer"),
duke@0 634 _orig_pc_slot(0),
duke@0 635 _orig_pc_slot_offset_in_bytes(0),
twisti@1265 636 _has_method_handle_invokes(false),
twisti@1915 637 _mach_constant_base_node(NULL),
duke@0 638 _node_bundling_limit(0),
duke@0 639 _node_bundling_base(NULL),
kvn@859 640 _java_calls(0),
kvn@859 641 _inner_loops(0),
twisti@1915 642 _scratch_const_size(-1),
twisti@1915 643 _in_scratch_emit_size(false),
bharadwaj@3880 644 _dead_node_list(comp_arena()),
bharadwaj@3880 645 _dead_node_count(0),
duke@0 646 #ifndef PRODUCT
duke@0 647 _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
duke@0 648 _printer(IdealGraphPrinter::printer()),
duke@0 649 #endif
roland@3922 650 _congraph(NULL),
kvn@5784 651 _replay_inline_data(NULL),
roland@3974 652 _late_inlines(comp_arena(), 2, 0, NULL),
roland@3974 653 _string_late_inlines(comp_arena(), 2, 0, NULL),
kvn@4675 654 _boxing_late_inlines(comp_arena(), 2, 0, NULL),
roland@3974 655 _late_inlines_pos(0),
roland@3974 656 _number_of_mh_late_inlines(0),
roland@3974 657 _inlining_progress(false),
roland@3974 658 _inlining_incrementally(false),
roland@3922 659 _print_inlining_list(NULL),
roland@5546 660 _print_inlining_idx(0),
roland@5546 661 _preserve_jvm_state(0) {
duke@0 662 C = this;
duke@0 663
duke@0 664 CompileWrapper cw(this);
duke@0 665 #ifndef PRODUCT
duke@0 666 if (TimeCompiler2) {
duke@0 667 tty->print(" ");
duke@0 668 target->holder()->name()->print();
duke@0 669 tty->print(".");
duke@0 670 target->print_short_name();
duke@0 671 tty->print(" ");
duke@0 672 }
duke@0 673 TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
duke@0 674 TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
jrose@100 675 bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
jrose@100 676 if (!print_opto_assembly) {
jrose@100 677 bool print_assembly = (PrintAssembly || _method->should_print_assembly());
jrose@100 678 if (print_assembly && !Disassembler::can_decode()) {
jrose@100 679 tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
jrose@100 680 print_opto_assembly = true;
jrose@100 681 }
jrose@100 682 }
jrose@100 683 set_print_assembly(print_opto_assembly);
never@367 684 set_parsed_irreducible_loop(false);
kvn@5784 685
kvn@5784 686 if (method()->has_option("ReplayInline")) {
kvn@5784 687 _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
kvn@5784 688 }
duke@0 689 #endif
kvn@5328 690 set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
kvn@5328 691 set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
duke@0 692
duke@0 693 if (ProfileTraps) {
duke@0 694 // Make sure the method being compiled gets its own MDO,
duke@0 695 // so we can at least track the decompile_count().
iveresov@1914 696 method()->ensure_method_data();
duke@0 697 }
duke@0 698
duke@0 699 Init(::AliasLevel);
duke@0 700
duke@0 701
duke@0 702 print_compile_messages();
duke@0 703
shade@5936 704 _ilt = InlineTree::build_inline_tree_root();
duke@0 705
duke@0 706 // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
duke@0 707 assert(num_alias_types() >= AliasIdxRaw, "");
duke@0 708
duke@0 709 #define MINIMUM_NODE_HASH 1023
duke@0 710 // Node list that Iterative GVN will start with
duke@0 711 Unique_Node_List for_igvn(comp_arena());
duke@0 712 set_for_igvn(&for_igvn);
duke@0 713
duke@0 714 // GVN that will be run immediately on new nodes
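// Size the value-numbering hash table at roughly four entries per bytecode,
// but never below MINIMUM_NODE_HASH.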
duke@0 715 uint estimated_size = method()->code_size()*4+64;
duke@0 716 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
duke@0 717 PhaseGVN gvn(node_arena(), estimated_size);
duke@0 718 set_initial_gvn(&gvn);
duke@0 719
kvn@5328 720 if (print_inlining() || print_intrinsics()) {
roland@3922 721 _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
roland@3922 722 }
duke@0 723 { // Scope for timing the parser
duke@0 724 TracePhase t3("parse", &_t_parser, true);
duke@0 725
duke@0 726 // Put top into the hash table ASAP.
duke@0 727 initial_gvn()->transform_no_reclaim(top());
duke@0 728
duke@0 729 // Set up tf(), start(), and find a CallGenerator.
johnc@2346 730 CallGenerator* cg = NULL;
duke@0 731 if (is_osr_compilation()) {
duke@0 732 const TypeTuple *domain = StartOSRNode::osr_domain();
duke@0 733 const TypeTuple *range = TypeTuple::make_range(method()->signature());
duke@0 734 init_tf(TypeFunc::make(domain, range));
kvn@3680 735 StartNode* s = new (this) StartOSRNode(root(), domain);
duke@0 736 initial_gvn()->set_type_bottom(s);
duke@0 737 init_start(s);
duke@0 738 cg = CallGenerator::for_osr(method(), entry_bci());
duke@0 739 } else {
duke@0 740 // Normal case.
duke@0 741 init_tf(TypeFunc::make(method()));
kvn@3680 742 StartNode* s = new (this) StartNode(root(), tf()->domain());
duke@0 743 initial_gvn()->set_type_bottom(s);
duke@0 744 init_start(s);
johnc@2346 745 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
johnc@2346 746 // With java.lang.ref.Reference.get() we must go through the
johnc@2346 747 // intrinsic when G1 is enabled - even when get() is the root
johnc@2346 748 // method of the compile - so that, if necessary, the value in
johnc@2346 749 // the referent field of the reference object gets recorded by
johnc@2346 750 // the pre-barrier code.
johnc@2346 751 // Specifically, if G1 is enabled, the value in the referent
johnc@2346 752 // field is recorded by the G1 SATB pre-barrier. This will
johnc@2346 753 // result in the referent being marked live and the reference
johnc@2346 754 // object removed from the list of discovered references during
johnc@2346 755 // reference processing.
johnc@2346 756 cg = find_intrinsic(method(), false);
johnc@2346 757 }
johnc@2346 758 if (cg == NULL) {
johnc@2346 759 float past_uses = method()->interpreter_invocation_count();
johnc@2346 760 float expected_uses = past_uses;
johnc@2346 761 cg = CallGenerator::for_inline(method(), expected_uses);
johnc@2346 762 }
duke@0 763 }
duke@0 764 if (failing()) return;
duke@0 765 if (cg == NULL) {
duke@0 766 record_method_not_compilable_all_tiers("cannot parse method");
duke@0 767 return;
duke@0 768 }
duke@0 769 JVMState* jvms = build_start_state(start(), tf());
roland@5546 770 if ((jvms = cg->generate(jvms, NULL)) == NULL) {
duke@0 771 record_method_not_compilable("method parse failed");
duke@0 772 return;
duke@0 773 }
duke@0 774 GraphKit kit(jvms);
duke@0 775
duke@0 776 if (!kit.stopped()) {
duke@0 777 // Accept return values, and transfer control we know not where.
duke@0 778 // This is done by a special, unique ReturnNode bound to root.
duke@0 779 return_values(kit.jvms());
duke@0 780 }
duke@0 781
duke@0 782 if (kit.has_exceptions()) {
duke@0 783 // Any exceptions that escape from this call must be rethrown
duke@0 784 // to whatever caller is dynamically above us on the stack.
duke@0 785 // This is done by a special, unique RethrowNode bound to root.
duke@0 786 rethrow_exceptions(kit.transfer_exceptions_into_jvms());
duke@0 787 }
duke@0 788
roland@3974 789 assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");
roland@3974 790
roland@3974 791 if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
roland@3974 792 inline_string_calls(true);
never@1080 793 }
roland@3974 794
roland@3974 795 if (failing()) return;
never@1080 796
sla@4802 797 print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
never@367 798
duke@0 799 // Remove clutter produced by parsing.
duke@0 800 if (!failing()) {
duke@0 801 ResourceMark rm;
duke@0 802 PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
duke@0 803 }
duke@0 804 }
duke@0 805
duke@0 806 // Note: Large methods are capped off in do_one_bytecode().
duke@0 807 if (failing()) return;
duke@0 808
duke@0 809 // After parsing, node notes are no longer automagic.
duke@0 810 // They must be propagated by register_new_node_with_optimizer(),
duke@0 811 // clone(), or the like.
duke@0 812 set_default_node_notes(NULL);
duke@0 813
duke@0 814 for (;;) {
duke@0 815 int successes = Inline_Warm();
duke@0 816 if (failing()) return;
duke@0 817 if (successes == 0) break;
duke@0 818 }
duke@0 819
duke@0 820 // Drain the list.
duke@0 821 Finish_Warm();
duke@0 822 #ifndef PRODUCT
duke@0 823 if (_printer) {
duke@0 824 _printer->print_inlining(this);
duke@0 825 }
duke@0 826 #endif
duke@0 827
duke@0 828 if (failing()) return;
duke@0 829 NOT_PRODUCT( verify_graph_edges(); )
duke@0 830
duke@0 831 // Now optimize
duke@0 832 Optimize();
duke@0 833 if (failing()) return;
duke@0 834 NOT_PRODUCT( verify_graph_edges(); )
duke@0 835
duke@0 836 #ifndef PRODUCT
duke@0 837 if (PrintIdeal) {
duke@0 838 ttyLocker ttyl; // keep the following output all in one block
duke@0 839 // This output goes directly to the tty, not the compiler log.
duke@0 840 // To enable tools to match it up with the compilation activity,
duke@0 841 // be sure to tag this tty output with the compile ID.
duke@0 842 if (xtty != NULL) {
duke@0 843 xtty->head("ideal compile_id='%d'%s", compile_id(),
duke@0 844 is_osr_compilation() ? " compile_kind='osr'" :
duke@0 845 "");
duke@0 846 }
duke@0 847 root()->dump(9999);
duke@0 848 if (xtty != NULL) {
duke@0 849 xtty->tail("ideal");
duke@0 850 }
duke@0 851 }
duke@0 852 #endif
duke@0 853
iveresov@5635 854 NOT_PRODUCT( verify_barriers(); )
kvn@5784 855
kvn@5784 856 // Dump compilation data to replay it.
kvn@5784 857 if (method()->has_option("DumpReplay")) {
kvn@5784 858 env()->dump_replay_data(_compile_id);
kvn@5784 859 }
kvn@5784 860 if (method()->has_option("DumpInline") && (ilt() != NULL)) {
kvn@5784 861 env()->dump_inline_data(_compile_id);
kvn@5784 862 }
kvn@5784 863
duke@0 864 // Now that we know the size of all the monitors we can add a fixed slot
duke@0 865 // for the original deopt pc.
duke@0 866
duke@0 867 _orig_pc_slot = fixed_slots();
duke@0 868 int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
duke@0 869 set_fixed_slots(next_slot);
duke@0 870
duke@0 871 // Now generate code
duke@0 872 Code_Gen();
duke@0 873 if (failing()) return;
duke@0 874
duke@0 875 // Check if we want to skip execution of all compiled code.
duke@0 876 {
duke@0 877 #ifndef PRODUCT
duke@0 878 if (OptoNoExecute) {
duke@0 879 record_method_not_compilable("+OptoNoExecute"); // Flag as failed
duke@0 880 return;
duke@0 881 }
duke@0 882 TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
duke@0 883 #endif
duke@0 884
duke@0 885 if (is_osr_compilation()) {
duke@0 886 _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
duke@0 887 _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
duke@0 888 } else {
duke@0 889 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
duke@0 890 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
duke@0 891 }
duke@0 892
duke@0 893 env()->register_method(_method, _entry_bci,
duke@0 894 &_code_offsets,
duke@0 895 _orig_pc_slot_offset_in_bytes,
duke@0 896 code_buffer(),
duke@0 897 frame_size_in_words(), _oop_map_set,
duke@0 898 &_handler_table, &_inc_table,
duke@0 899 compiler,
duke@0 900 env()->comp_level(),
kvn@3668 901 has_unsafe_access(),
kvn@3668 902 SharedRuntime::is_wide_vector(max_vector_size())
duke@0 903 );
vlivanov@3719 904
vlivanov@3719 905 if (log() != NULL) // Print code cache state into compiler log
vlivanov@3719 906 log()->code_cache_state();
duke@0 907 }
duke@0 908 }
duke@0 909
duke@0 910 //------------------------------Compile----------------------------------------
duke@0 911 // Compile a runtime stub
duke@0 912 Compile::Compile( ciEnv* ci_env,
duke@0 913 TypeFunc_generator generator,
duke@0 914 address stub_function,
duke@0 915 const char *stub_name,
duke@0 916 int is_fancy_jump,
duke@0 917 bool pass_tls,
duke@0 918 bool save_arg_registers,
duke@0 919 bool return_pc )
duke@0 920 : Phase(Compiler),
duke@0 921 _env(ci_env),
duke@0 922 _log(ci_env->log()),
neliasso@4295 923 _compile_id(0),
duke@0 924 _save_argument_registers(save_arg_registers),
duke@0 925 _method(NULL),
duke@0 926 _stub_name(stub_name),
duke@0 927 _stub_function(stub_function),
duke@0 928 _stub_entry_point(NULL),
duke@0 929 _entry_bci(InvocationEntryBci),
duke@0 930 _initial_gvn(NULL),
duke@0 931 _for_igvn(NULL),
duke@0 932 _warm_calls(NULL),
duke@0 933 _orig_pc_slot(0),
duke@0 934 _orig_pc_slot_offset_in_bytes(0),
duke@0 935 _subsume_loads(true),
kvn@38 936 _do_escape_analysis(false),
kvn@4675 937 _eliminate_boxing(false),
duke@0 938 _failure_reason(NULL),
duke@0 939 _code_buffer("Compile::Fill_buffer"),
twisti@1265 940 _has_method_handle_invokes(false),
twisti@1915 941 _mach_constant_base_node(NULL),
duke@0 942 _node_bundling_limit(0),
duke@0 943 _node_bundling_base(NULL),
kvn@859 944 _java_calls(0),
kvn@859 945 _inner_loops(0),
duke@0 946 #ifndef PRODUCT
duke@0 947 _trace_opto_output(TraceOptoOutput),
duke@0 948 _printer(NULL),
duke@0 949 #endif
bharadwaj@3880 950 _dead_node_list(comp_arena()),
bharadwaj@3880 951 _dead_node_count(0),
roland@3922 952 _congraph(NULL),
kvn@5784 953 _replay_inline_data(NULL),
roland@3974 954 _number_of_mh_late_inlines(0),
roland@3974 955 _inlining_progress(false),
roland@3974 956 _inlining_incrementally(false),
roland@3922 957 _print_inlining_list(NULL),
roland@5546 958 _print_inlining_idx(0),
roland@5546 959 _preserve_jvm_state(0) {
duke@0 960 C = this;
duke@0 961
duke@0 962 #ifndef PRODUCT
duke@0 963 TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
duke@0 964 TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
duke@0 965 set_print_assembly(PrintFrameConverterAssembly);
never@367 966 set_parsed_irreducible_loop(false);
duke@0 967 #endif
duke@0 968 CompileWrapper cw(this);
duke@0 969 Init(/*AliasLevel=*/ 0);
duke@0 970 init_tf((*generator)());
duke@0 971
duke@0 972 {
duke@0 973 // The following is a dummy for the sake of GraphKit::gen_stub
duke@0 974 Unique_Node_List for_igvn(comp_arena());
duke@0 975 set_for_igvn(&for_igvn); // not used, but some GraphKit guys push on this
duke@0 976 PhaseGVN gvn(Thread::current()->resource_area(),255);
duke@0 977 set_initial_gvn(&gvn); // not significant, but GraphKit guys use it pervasively
duke@0 978 gvn.transform_no_reclaim(top());
duke@0 979
duke@0 980 GraphKit kit;
duke@0 981 kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
duke@0 982 }
duke@0 983
duke@0 984 NOT_PRODUCT( verify_graph_edges(); )
duke@0 985 Code_Gen();
duke@0 986 if (failing()) return;
duke@0 987
duke@0 988
duke@0 989 // Entry point will be accessed using compile->stub_entry_point();
duke@0 990 if (code_buffer() == NULL) {
duke@0 991 Matcher::soft_match_failure();
duke@0 992 } else {
duke@0 993 if (PrintAssembly && (WizardMode || Verbose))
duke@0 994 tty->print_cr("### Stub::%s", stub_name);
duke@0 995
duke@0 996 if (!failing()) {
duke@0 997 assert(_fixed_slots == 0, "no fixed slots used for runtime stubs");
duke@0 998
duke@0 999 // Make the NMethod
duke@0 1000 // For now we mark the frame as never safe for profile stackwalking
duke@0 1001 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
duke@0 1002 code_buffer(),
duke@0 1003 CodeOffsets::frame_never_safe,
duke@0 1004 // _code_offsets.value(CodeOffsets::Frame_Complete),
duke@0 1005 frame_size_in_words(),
duke@0 1006 _oop_map_set,
duke@0 1007 save_arg_registers);
duke@0 1008 assert(rs != NULL && rs->is_runtime_stub(), "sanity check");
duke@0 1009
duke@0 1010 _stub_entry_point = rs->entry_point();
duke@0 1011 }
duke@0 1012 }
duke@0 1013 }
duke@0 1014
duke@0 1015 //------------------------------Init-------------------------------------------
duke@0 1016 // Prepare for a single compilation
duke@0 1017 void Compile::Init(int aliaslevel) {
duke@0 1018 _unique = 0;
duke@0 1019 _regalloc = NULL;
duke@0 1020
duke@0 1021 _tf = NULL; // filled in later
duke@0 1022 _top = NULL; // cached later
duke@0 1023 _matcher = NULL; // filled in later
duke@0 1024 _cfg = NULL; // filled in later
duke@0 1025
duke@0 1026 set_24_bit_selection_and_mode(Use24BitFP, false);
duke@0 1027
duke@0 1028 _node_note_array = NULL;
duke@0 1029 _default_node_notes = NULL;
duke@0 1030
duke@0 1031 _immutable_memory = NULL; // filled in at first inquiry
duke@0 1032
duke@0 1033 // Globally visible Nodes
duke@0 1034 // First set TOP to NULL to give safe behavior during creation of RootNode
duke@0 1035 set_cached_top_node(NULL);
kvn@3680 1036 set_root(new (this) RootNode());
duke@0 1037 // Now that you have a Root to point to, create the real TOP
kvn@3680 1038 set_cached_top_node( new (this) ConNode(Type::TOP) );
duke@0 1039 set_recent_alloc(NULL, NULL);
duke@0 1040
duke@0 1041 // Create Debug Information Recorder to record scopes, oopmaps, etc.
coleenp@3602 1042 env()->set_oop_recorder(new OopRecorder(env()->arena()));
duke@0 1043 env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
duke@0 1044 env()->set_dependencies(new Dependencies(env()));
duke@0 1045
duke@0 1046 _fixed_slots = 0;
duke@0 1047 set_has_split_ifs(false);
duke@0 1048 set_has_loops(has_method() && method()->has_loops()); // first approximation
never@1080 1049 set_has_stringbuilder(false);
kvn@4675 1050 set_has_boxed_value(false);
duke@0 1051 _trap_can_recompile = false; // no traps emitted yet
duke@0 1052 _major_progress = true; // start out assuming good things will happen
duke@0 1053 set_has_unsafe_access(false);
kvn@3668 1054 set_max_vector_size(0);
duke@0 1055 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
duke@0 1056 set_decompile_count(0);
duke@0 1057
rasbold@418 1058 set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
iveresov@1703 1059 set_num_loop_opts(LoopOptsCount);
iveresov@1703 1060 set_do_inlining(Inline);
iveresov@1703 1061 set_max_inline_size(MaxInlineSize);
iveresov@1703 1062 set_freq_inline_size(FreqInlineSize);
iveresov@1703 1063 set_do_scheduling(OptoScheduling);
iveresov@1703 1064 set_do_count_invocations(false);
iveresov@1703 1065 set_do_method_data_update(false);
duke@0 1066
duke@0 1067 if (debug_info()->recording_non_safepoints()) {
duke@0 1068 set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
duke@0 1069 (comp_arena(), 8, 0, NULL));
duke@0 1070 set_default_node_notes(Node_Notes::make(this));
duke@0 1071 }
duke@0 1072
duke@0 1073 // // -- Initialize types before each compile --
duke@0 1074 // // Update cached type information
duke@0 1075 // if( _method && _method->constants() )
duke@0 1076 // Type::update_loaded_types(_method, _method->constants());
duke@0 1077
duke@0 1078 // Init alias_type map.
kvn@38 1079 if (!_do_escape_analysis && aliaslevel == 3)
duke@0 1080 aliaslevel = 2; // No unique types without escape analysis
duke@0 1081 _AliasLevel = aliaslevel;
duke@0 1082 const int grow_ats = 16;
duke@0 1083 _max_alias_types = grow_ats;
duke@0 1084 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
duke@0 1085 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
duke@0 1086 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
duke@0 1087 {
duke@0 1088 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
duke@0 1089 }
duke@0 1090 // Initialize the first few types.
duke@0 1091 _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
duke@0 1092 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
duke@0 1093 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
duke@0 1094 _num_alias_types = AliasIdxRaw+1;
duke@0 1095 // Zero out the alias type cache.
duke@0 1096 Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
duke@0 1097 // A NULL adr_type hits in the cache right away. Preload the right answer.
duke@0 1098 probe_alias_cache(NULL)->_index = AliasIdxTop;
duke@0 1099
duke@0 1100 _intrinsics = NULL;
kvn@1605 1101 _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
kvn@1605 1102 _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
roland@4154 1103 _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
duke@0 1104 register_library_intrinsics();
duke@0 1105 }
duke@0 1106
duke@0 1107 //---------------------------init_start----------------------------------------
duke@0 1108 // Install the StartNode on this compile object.
duke@0 1109 void Compile::init_start(StartNode* s) {
duke@0 1110 if (failing())
duke@0 1111 return; // already failing
duke@0 1112 assert(s == start(), "");
duke@0 1113 }
duke@0 1114
duke@0 1115 StartNode* Compile::start() const {
duke@0 1116 assert(!failing(), "");
duke@0 1117 for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
duke@0 1118 Node* start = root()->fast_out(i);
duke@0 1119 if( start->is_Start() )
duke@0 1120 return start->as_Start();
duke@0 1121 }
duke@0 1122 ShouldNotReachHere();
duke@0 1123 return NULL;
duke@0 1124 }
duke@0 1125
duke@0 1126 //-------------------------------immutable_memory-------------------------------------
duke@0 1127 // Access immutable memory
duke@0 1128 Node* Compile::immutable_memory() {
duke@0 1129 if (_immutable_memory != NULL) {
duke@0 1130 return _immutable_memory;
duke@0 1131 }
duke@0 1132 StartNode* s = start();
duke@0 1133 for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
duke@0 1134 Node *p = s->fast_out(i);
duke@0 1135 if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
duke@0 1136 _immutable_memory = p;
duke@0 1137 return _immutable_memory;
duke@0 1138 }
duke@0 1139 }
duke@0 1140 ShouldNotReachHere();
duke@0 1141 return NULL;
duke@0 1142 }
duke@0 1143
duke@0 1144 //----------------------set_cached_top_node------------------------------------
duke@0 1145 // Install the cached top node, and make sure Node::is_top works correctly.
duke@0 1146 void Compile::set_cached_top_node(Node* tn) {
duke@0 1147 if (tn != NULL) verify_top(tn);
duke@0 1148 Node* old_top = _top;
duke@0 1149 _top = tn;
duke@0 1150 // Calling Node::setup_is_top gives the nodes a chance to adjust
duke@0 1151 // their _out arrays.
duke@0 1152 if (_top != NULL) _top->setup_is_top();
duke@0 1153 if (old_top != NULL) old_top->setup_is_top();
duke@0 1154 assert(_top == NULL || top()->is_top(), "");
duke@0 1155 }
duke@0 1156
bharadwaj@3880 1157 #ifdef ASSERT
bharadwaj@3880 1158 uint Compile::count_live_nodes_by_graph_walk() {
bharadwaj@3880 1159 Unique_Node_List useful(comp_arena());
bharadwaj@3880 1160 // Get useful node list by walking the graph.
bharadwaj@3880 1161 identify_useful_nodes(useful);
bharadwaj@3880 1162 return useful.size();
bharadwaj@3880 1163 }
bharadwaj@3880 1164
bharadwaj@3880 1165 void Compile::print_missing_nodes() {
bharadwaj@3880 1166
bharadwaj@3880 1167 // Return if CompileLog is NULL and PrintIdealNodeCount is false.
bharadwaj@3880 1168 if ((_log == NULL) && (! PrintIdealNodeCount)) {
bharadwaj@3880 1169 return;
bharadwaj@3880 1170 }
bharadwaj@3880 1171
bharadwaj@3880 1172 // This is an expensive function. It is executed only when the user
bharadwaj@3880 1173 // specifies the VerifyIdealNodeCount option or otherwise knows about the
bharadwaj@3880 1174 // additional work that needs to be done to identify reachable nodes
bharadwaj@3880 1175 // by walking the flow graph and to find the missing ones using
bharadwaj@3880 1176 // _dead_node_list.
bharadwaj@3880 1177
bharadwaj@3880 1178 Unique_Node_List useful(comp_arena());
bharadwaj@3880 1179 // Get useful node list by walking the graph.
bharadwaj@3880 1180 identify_useful_nodes(useful);
bharadwaj@3880 1181
bharadwaj@3880 1182 uint l_nodes = C->live_nodes();
bharadwaj@3880 1183 uint l_nodes_by_walk = useful.size();
bharadwaj@3880 1184
bharadwaj@3880 1185 if (l_nodes != l_nodes_by_walk) {
bharadwaj@3880 1186 if (_log != NULL) {
bharadwaj@3880 1187 _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
bharadwaj@3880 1188 _log->stamp();
bharadwaj@3880 1189 _log->end_head();
bharadwaj@3880 1190 }
bharadwaj@3880 1191 VectorSet& useful_member_set = useful.member_set();
bharadwaj@3880 1192 int last_idx = l_nodes_by_walk;
bharadwaj@3880 1193 for (int i = 0; i < last_idx; i++) {
bharadwaj@3880 1194 if (useful_member_set.test(i)) {
bharadwaj@3880 1195 if (_dead_node_list.test(i)) {
bharadwaj@3880 1196 if (_log != NULL) {
bharadwaj@3880 1197 _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
bharadwaj@3880 1198 }
bharadwaj@3880 1199 if (PrintIdealNodeCount) {
bharadwaj@3880 1200 // Print the log message to tty
bharadwaj@3880 1201 tty->print_cr("mismatched_node idx='%d' type='both live and dead'", i);
bharadwaj@3880 1202 useful.at(i)->dump();
bharadwaj@3880 1203 }
bharadwaj@3880 1204 }
bharadwaj@3880 1205 }
bharadwaj@3880 1206 else if (! _dead_node_list.test(i)) {
bharadwaj@3880 1207 if (_log != NULL) {
bharadwaj@3880 1208 _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
bharadwaj@3880 1209 }
bharadwaj@3880 1210 if (PrintIdealNodeCount) {
bharadwaj@3880 1211 // Print the log message to tty
bharadwaj@3880 1212 tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
bharadwaj@3880 1213 }
bharadwaj@3880 1214 }
bharadwaj@3880 1215 }
bharadwaj@3880 1216 if (_log != NULL) {
bharadwaj@3880 1217 _log->tail("mismatched_nodes");
bharadwaj@3880 1218 }
bharadwaj@3880 1219 }
bharadwaj@3880 1220 }
bharadwaj@3880 1221 #endif
bharadwaj@3880 1222
duke@0 1223 #ifndef PRODUCT
duke@0 1224 void Compile::verify_top(Node* tn) const {
duke@0 1225 if (tn != NULL) {
duke@0 1226 assert(tn->is_Con(), "top node must be a constant");
duke@0 1227 assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
duke@0 1228 assert(tn->in(0) != NULL, "must have live top node");
duke@0 1229 }
duke@0 1230 }
duke@0 1231 #endif
duke@0 1232
duke@0 1233
duke@0 1234 ///-------------------Managing Per-Node Debug & Profile Info-------------------
duke@0 1235
duke@0 1236 void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
duke@0 1237 guarantee(arr != NULL, "");
duke@0 1238 int num_blocks = arr->length();
duke@0 1239 if (grow_by < num_blocks) grow_by = num_blocks;
duke@0 1240 int num_notes = grow_by * _node_notes_block_size;
duke@0 1241 Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
duke@0 1242 Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
duke@0 1243 while (num_notes > 0) {
duke@0 1244 arr->append(notes);
duke@0 1245 notes += _node_notes_block_size;
duke@0 1246 num_notes -= _node_notes_block_size;
duke@0 1247 }
duke@0 1248 assert(num_notes == 0, "exact multiple, please");
duke@0 1249 }
duke@0 1250
duke@0 1251 bool Compile::copy_node_notes_to(Node* dest, Node* source) {
duke@0 1252 if (source == NULL || dest == NULL) return false;
duke@0 1253
duke@0 1254 if (dest->is_Con())
duke@0 1255 return false; // Do not push debug info onto constants.
duke@0 1256
duke@0 1257 #ifdef ASSERT
duke@0 1258 // Leave a breadcrumb trail pointing to the original node:
duke@0 1259 if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
duke@0 1260 dest->set_debug_orig(source);
duke@0 1261 }
duke@0 1262 #endif
duke@0 1263
duke@0 1264 if (node_note_array() == NULL)
duke@0 1265 return false; // Not collecting any notes now.
duke@0 1266
duke@0 1267 // This is a copy onto a pre-existing node, which may already have notes.
duke@0 1268 // If both nodes have notes, do not overwrite any pre-existing notes.
duke@0 1269 Node_Notes* source_notes = node_notes_at(source->_idx);
duke@0 1270 if (source_notes == NULL || source_notes->is_clear()) return false;
duke@0 1271 Node_Notes* dest_notes = node_notes_at(dest->_idx);
duke@0 1272 if (dest_notes == NULL || dest_notes->is_clear()) {
duke@0 1273 return set_node_notes_at(dest->_idx, source_notes);
duke@0 1274 }
duke@0 1275
duke@0 1276 Node_Notes merged_notes = (*source_notes);
duke@0 1277 // The order of operations here ensures that dest notes will win...
duke@0 1278 merged_notes.update_from(dest_notes);
duke@0 1279 return set_node_notes_at(dest->_idx, &merged_notes);
duke@0 1280 }
duke@0 1281
duke@0 1282
duke@0 1283 //--------------------------allow_range_check_smearing-------------------------
duke@0 1284 // Gating condition for coalescing similar range checks.
duke@0 1285 // Sometimes we try 'speculatively' replacing a series of range checks by a
duke@0 1286 // single covering check that is at least as strong as any of them.
duke@0 1287 // If the optimization succeeds, the simplified (strengthened) range check
duke@0 1288 // will always succeed. If it fails, we will deopt, and then give up
duke@0 1289 // on the optimization.
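// For example (illustrative sketch): checks (i < a.length), (i+1 < a.length)
// and (i+2 < a.length) may be covered by the single strongest check
// (i+2 < a.length), which implies the other two for non-negative i.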
duke@0 1290 bool Compile::allow_range_check_smearing() const {
duke@0 1291 // If this method has already thrown a range-check,
duke@0 1292 // assume it was because we already tried range smearing
duke@0 1293 // and it failed.
duke@0 1294 uint already_trapped = trap_count(Deoptimization::Reason_range_check);
duke@0 1295 return !already_trapped;
duke@0 1296 }
duke@0 1297
duke@0 1298
duke@0 1299 //------------------------------flatten_alias_type-----------------------------
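// Collapse an address type to the canonical representative of its alias
// class, so that all types which must share one alias index flatten to the
// same TypePtr.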
duke@0 1300 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
duke@0 1301 int offset = tj->offset();
duke@0 1302 TypePtr::PTR ptr = tj->ptr();
duke@0 1303
kvn@247 1304 // Known instance (scalarizable allocation) alias only with itself.
kvn@247 1305 bool is_known_inst = tj->isa_oopptr() != NULL &&
kvn@247 1306 tj->is_oopptr()->is_known_instance();
kvn@247 1307
duke@0 1308 // Process weird unsafe references.
duke@0 1309 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
duke@0 1310 assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
kvn@247 1311 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
duke@0 1312 tj = TypeOopPtr::BOTTOM;
duke@0 1313 ptr = tj->ptr();
duke@0 1314 offset = tj->offset();
duke@0 1315 }
duke@0 1316
duke@0 1317 // Array pointers need some flattening
duke@0 1318 const TypeAryPtr *ta = tj->isa_aryptr();
vlivanov@5223 1319 if (ta && ta->is_stable()) {
vlivanov@5223 1320 // Erase stability property for alias analysis.
vlivanov@5223 1321 tj = ta = ta->cast_to_stable(false);
vlivanov@5223 1322 }
kvn@247 1323 if( ta && is_known_inst ) {
kvn@247 1324 if ( offset != Type::OffsetBot &&
kvn@247 1325 offset > arrayOopDesc::length_offset_in_bytes() ) {
kvn@247 1326 offset = Type::OffsetBot; // Flatten constant access into array body only
kvn@247 1327 tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
kvn@247 1328 }
kvn@247 1329 } else if( ta && _AliasLevel >= 2 ) {
duke@0 1330 // For arrays indexed by constant indices, we flatten the alias
duke@0 1331 // space to include all of the array body. Only the header, klass
duke@0 1332 // and array length can be accessed un-aliased.
duke@0 1333 if( offset != Type::OffsetBot ) {
coleenp@3602 1334 if( ta->const_oop() ) { // MethodData* or Method*
duke@0 1335 offset = Type::OffsetBot; // Flatten constant access into array body
kvn@247 1336 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
duke@0 1337 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
duke@0 1338 // range is OK as-is.
duke@0 1339 tj = ta = TypeAryPtr::RANGE;
duke@0 1340 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
duke@0 1341 tj = TypeInstPtr::KLASS; // all klass loads look alike
duke@0 1342 ta = TypeAryPtr::RANGE; // generic ignored junk
duke@0 1343 ptr = TypePtr::BotPTR;
duke@0 1344 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
duke@0 1345 tj = TypeInstPtr::MARK;
duke@0 1346 ta = TypeAryPtr::RANGE; // generic ignored junk
duke@0 1347 ptr = TypePtr::BotPTR;
duke@0 1348 } else { // Random constant offset into array body
duke@0 1349 offset = Type::OffsetBot; // Flatten constant access into array body
kvn@247 1350 tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
duke@0 1351 }
duke@0 1352 }
duke@0 1353 // Arrays of fixed size alias with arrays of unknown size.
duke@0 1354 if (ta->size() != TypeInt::POS) {
duke@0 1355 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
kvn@247 1356 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
duke@0 1357 }
duke@0 1358 // Arrays of known objects become arrays of unknown objects.
coleenp@113 1359 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
coleenp@113 1360 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
kvn@247 1361 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
coleenp@113 1362 }
duke@0 1363 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
duke@0 1364 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
kvn@247 1365 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
duke@0 1366 }
duke@0 1367 // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
duke@0 1368 // cannot be distinguished by bytecode alone.
duke@0 1369 if (ta->elem() == TypeInt::BOOL) {
duke@0 1370 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
duke@0 1371 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
kvn@247 1372 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
duke@0 1373 }
duke@0 1374 // During the 2nd round of IterGVN, NotNull castings are removed.
duke@0 1375 // Make sure the Bottom and NotNull variants alias the same.
duke@0 1376 // Also, make sure exact and non-exact variants alias the same.
roland@5556 1377 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
kvn@2551 1378 tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
duke@0 1379 }
duke@0 1380 }
duke@0 1381
duke@0 1382 // Oop pointers need some flattening
duke@0 1383 const TypeInstPtr *to = tj->isa_instptr();
duke@0 1384 if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
never@2223 1385 ciInstanceKlass *k = to->klass()->as_instance_klass();
duke@0 1386 if( ptr == TypePtr::Constant ) {
never@2223 1387 if (to->klass() != ciEnv::current()->Class_klass() ||
never@2223 1388 offset < k->size_helper() * wordSize) {
never@2223 1389 // No constant oop pointers (such as Strings); they alias with
never@2223 1390 // unknown strings.
never@2223 1391 assert(!is_known_inst, "not scalarizable allocation");
never@2223 1392 tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
never@2223 1393 }
kvn@247 1394 } else if( is_known_inst ) {
kvn@163 1395 tj = to; // Keep NotNull and klass_is_exact for instance type
duke@0 1396 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
duke@0 1397 // During the 2nd round of IterGVN, NotNull castings are removed.
duke@0 1398 // Make sure the Bottom and NotNull variants alias the same.
duke@0 1399 // Also, make sure exact and non-exact variants alias the same.
kvn@247 1400 tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
duke@0 1401 }
roland@5556 1402 if (to->speculative() != NULL) {
roland@5556 1403 tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
roland@5556 1404 }
duke@0 1405 // Canonicalize the holder of this field
coleenp@113 1406 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
duke@0 1407 // First handle header references such as a LoadKlassNode, even if the
duke@0 1408 // object's klass is unloaded at compile time (4965979).
kvn@247 1409 if (!is_known_inst) { // Do it only for non-instance types
kvn@247 1410 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
kvn@247 1411 }
duke@0 1412 } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
never@2223 1413 // Static fields are in the space above the normal instance
never@2223 1414 // fields in the java.lang.Class instance.
never@2223 1415 if (to->klass() != ciEnv::current()->Class_klass()) {
never@2223 1416 to = NULL;
never@2223 1417 tj = TypeOopPtr::BOTTOM;
never@2223 1418 offset = tj->offset();
never@2223 1419 }
duke@0 1420 } else {
duke@0 1421 ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
duke@0 1422 if (!k->equals(canonical_holder) || tj->offset() != offset) {
kvn@247 1423 if( is_known_inst ) {
kvn@247 1424 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
kvn@247 1425 } else {
kvn@247 1426 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
kvn@247 1427 }
duke@0 1428 }
duke@0 1429 }
duke@0 1430 }
duke@0 1431
duke@0 1432 // Klass pointers to object array klasses need some flattening
duke@0 1433 const TypeKlassPtr *tk = tj->isa_klassptr();
duke@0 1434 if( tk ) {
duke@0 1435 // If we are referencing a field within a Klass, we need
duke@0 1436 // to assume the worst case of an Object. Both exact and
never@2954 1437 // inexact types must flatten to the same alias class so
never@2954 1438 // use NotNull as the PTR.
duke@0 1439 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
duke@0 1440
never@2954 1441 tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
duke@0 1442 TypeKlassPtr::OBJECT->klass(),
duke@0 1443 offset);
duke@0 1444 }
duke@0 1445
duke@0 1446 ciKlass* klass = tk->klass();
duke@0 1447 if( klass->is_obj_array_klass() ) {
duke@0 1448 ciKlass* k = TypeAryPtr::OOPS->klass();
duke@0 1449 if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs
duke@0 1450 k = TypeInstPtr::BOTTOM->klass();
duke@0 1451 tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
duke@0 1452 }
duke@0 1453
duke@0 1454 // Check for precise loads from the primary supertype array and force them
duke@0 1455 // to the supertype cache alias index. Check for generic array loads from
duke@0 1456 // the primary supertype array and also force them to the supertype cache
duke@0 1457 // alias index. Since the same load can reach both, we need to merge
duke@0 1458 // these 2 disparate memories into the same alias class. Since the
duke@0 1459 // primary supertype array is read-only, there's no chance of confusion
duke@0 1460 // where we bypass an array load and an array store.
stefank@2956 1461 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
never@2954 1462 if (offset == Type::OffsetBot ||
never@2954 1463 (offset >= primary_supers_offset &&
never@2954 1464 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
stefank@2956 1465 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
stefank@2956 1466 offset = in_bytes(Klass::secondary_super_cache_offset());
duke@0 1467 tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
duke@0 1468 }
duke@0 1469 }
duke@0 1470
duke@0 1471 // Flatten all Raw pointers together.
duke@0 1472 if (tj->base() == Type::RawPtr)
duke@0 1473 tj = TypeRawPtr::BOTTOM;
duke@0 1474
duke@0 1475 if (tj->base() == Type::AnyPtr)
duke@0 1476 tj = TypePtr::BOTTOM; // An error, which the caller must check for.
duke@0 1477
duke@0 1478 // Flatten all to bottom for now
duke@0 1479 switch( _AliasLevel ) {
duke@0 1480 case 0:
duke@0 1481 tj = TypePtr::BOTTOM;
duke@0 1482 break;
duke@0 1483 case 1: // Flatten to: oop, static, field or array
duke@0 1484 switch (tj->base()) {
duke@0 1485 //case Type::AryPtr: tj = TypeAryPtr::RANGE; break;
duke@0 1486 case Type::RawPtr: tj = TypeRawPtr::BOTTOM; break;
duke@0 1487 case Type::AryPtr: // do not distinguish arrays at all
duke@0 1488 case Type::InstPtr: tj = TypeInstPtr::BOTTOM; break;
duke@0 1489 case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
duke@0 1490 case Type::AnyPtr: tj = TypePtr::BOTTOM; break; // caller checks it
duke@0 1491 default: ShouldNotReachHere();
duke@0 1492 }
duke@0 1493 break;
twisti@605 1494 case 2: // No collapsing at level 2; keep all splits
twisti@605 1495 case 3: // No collapsing at level 3; keep all splits
duke@0 1496 break;
duke@0 1497 default:
duke@0 1498 Unimplemented();
duke@0 1499 }
duke@0 1500
duke@0 1501 offset = tj->offset();
duke@0 1502 assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
duke@0 1503
duke@0 1504 assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
duke@0 1505 (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
duke@0 1506 (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
duke@0 1507 (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
duke@0 1508 (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
duke@0 1509 (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
duke@0 1510 (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr) ,
duke@0 1511 "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
duke@0 1512 assert( tj->ptr() != TypePtr::TopPTR &&
duke@0 1513 tj->ptr() != TypePtr::AnyNull &&
duke@0 1514 tj->ptr() != TypePtr::Null, "No imprecise addresses" );
duke@0 1515 // assert( tj->ptr() != TypePtr::Constant ||
duke@0 1516 // tj->base() == Type::RawPtr ||
duke@0 1517 // tj->base() == Type::KlassPtr, "No constant oop addresses" );
duke@0 1518
duke@0 1519 return tj;
duke@0 1520 }
duke@0 1521
duke@0 1522 void Compile::AliasType::Init(int i, const TypePtr* at) {
duke@0 1523 _index = i;
duke@0 1524 _adr_type = at;
duke@0 1525 _field = NULL;
vlivanov@5223 1526 _element = NULL;
duke@0 1527 _is_rewritable = true; // default
duke@0 1528 const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
kvn@223 1529 if (atoop != NULL && atoop->is_known_instance()) {
kvn@223 1530 const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
duke@0 1531 _general_index = Compile::current()->get_alias_index(gt);
duke@0 1532 } else {
duke@0 1533 _general_index = 0;
duke@0 1534 }
duke@0 1535 }
duke@0 1536
duke@0 1537 //---------------------------------print_on------------------------------------
duke@0 1538 #ifndef PRODUCT
duke@0 1539 void Compile::AliasType::print_on(outputStream* st) {
duke@0 1540 if (index() < 10)
duke@0 1541 st->print("@ <%d> ", index());
duke@0 1542 else st->print("@ <%d>", index());
duke@0 1543 st->print(is_rewritable() ? " " : " RO");
duke@0 1544 int offset = adr_type()->offset();
duke@0 1545 if (offset == Type::OffsetBot)
duke@0 1546 st->print(" +any");
duke@0 1547 else st->print(" +%-3d", offset);
duke@0 1548 st->print(" in ");
duke@0 1549 adr_type()->dump_on(st);
duke@0 1550 const TypeOopPtr* tjp = adr_type()->isa_oopptr();
duke@0 1551 if (field() != NULL && tjp) {
duke@0 1552 if (tjp->klass() != field()->holder() ||
duke@0 1553 tjp->offset() != field()->offset_in_bytes()) {
duke@0 1554 st->print(" != ");
duke@0 1555 field()->print();
duke@0 1556 st->print(" ***");
duke@0 1557 }
duke@0 1558 }
duke@0 1559 }
duke@0 1560
duke@0 1561 void print_alias_types() {
duke@0 1562 Compile* C = Compile::current();
duke@0 1563 tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
duke@0 1564 for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
duke@0 1565 C->alias_type(idx)->print_on(tty);
duke@0 1566 tty->cr();
duke@0 1567 }
duke@0 1568 }
duke@0 1569 #endif
duke@0 1570
duke@0 1571
duke@0 1572 //----------------------------probe_alias_cache--------------------------------
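// Hash the address-type pointer into the fixed-size, direct-mapped alias
// cache; on a miss the caller simply overwrites the colliding entry.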
duke@0 1573 Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
duke@0 1574 intptr_t key = (intptr_t) adr_type;
duke@0 1575 key ^= key >> logAliasCacheSize;
duke@0 1576 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
duke@0 1577 }
duke@0 1578
duke@0 1579
duke@0 1580 //-----------------------------grow_alias_types--------------------------------
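// Double the capacity of the alias type table, appending a freshly zeroed
// block of AliasType records for the new entries.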
duke@0 1581 void Compile::grow_alias_types() {
duke@0 1582 const int old_ats = _max_alias_types; // how many before?
duke@0 1583 const int new_ats = old_ats; // how many more?
duke@0 1584 const int grow_ats = old_ats+new_ats; // how many now?
duke@0 1585 _max_alias_types = grow_ats;
duke@0 1586 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
duke@0 1587 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
duke@0 1588 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
duke@0 1589 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
duke@0 1590 }
duke@0 1591
duke@0 1592
duke@0 1593 //--------------------------------find_alias_type------------------------------
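// Map an address type to its AliasType entry, consulting the cache first and
// creating a new entry for the flattened type unless no_create is set.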
never@2223 1594 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
duke@0 1595 if (_AliasLevel == 0)
duke@0 1596 return alias_type(AliasIdxBot);
duke@0 1597
duke@0 1598 AliasCacheEntry* ace = probe_alias_cache(adr_type);
duke@0 1599 if (ace->_adr_type == adr_type) {
duke@0 1600 return alias_type(ace->_index);
duke@0 1601 }
duke@0 1602
duke@0 1603 // Handle special cases.
duke@0 1604 if (adr_type == NULL) return alias_type(AliasIdxTop);
duke@0 1605 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
duke@0 1606
duke@0 1607 // Do it the slow way.
duke@0 1608 const TypePtr* flat = flatten_alias_type(adr_type);
duke@0 1609
duke@0 1610 #ifdef ASSERT
duke@0 1611 assert(flat == flatten_alias_type(flat), "idempotent");
duke@0 1612 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr");
duke@0 1613 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
duke@0 1614 const TypeOopPtr* foop = flat->is_oopptr();
kvn@247 1615 // Scalarizable allocations have exact klass always.
kvn@247 1616 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
kvn@247 1617 const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
duke@0 1618 assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
duke@0 1619 }
duke@0 1620 assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
duke@0 1621 #endif
duke@0 1622
duke@0 1623 int idx = AliasIdxTop;
duke@0 1624 for (int i = 0; i < num_alias_types(); i++) {
duke@0 1625 if (alias_type(i)->adr_type() == flat) {
duke@0 1626 idx = i;
duke@0 1627 break;
duke@0 1628 }
duke@0 1629 }
duke@0 1630
duke@0 1631 if (idx == AliasIdxTop) {
duke@0 1632 if (no_create) return NULL;
duke@0 1633 // Grow the array if necessary.
duke@0 1634 if (_num_alias_types == _max_alias_types) grow_alias_types();
duke@0 1635 // Add a new alias type.
duke@0 1636 idx = _num_alias_types++;
duke@0 1637 _alias_types[idx]->Init(idx, flat);
duke@0 1638 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
duke@0 1639 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
duke@0 1640 if (flat->isa_instptr()) {
duke@0 1641 if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
duke@0 1642 && flat->is_instptr()->klass() == env()->Class_klass())
duke@0 1643 alias_type(idx)->set_rewritable(false);
duke@0 1644 }
vlivanov@5223 1645 if (flat->isa_aryptr()) {
vlivanov@5223 1646 #ifdef ASSERT
vlivanov@5223 1647 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
vlivanov@5223 1648 // (T_BYTE has the weakest alignment and size restrictions...)
vlivanov@5223 1649 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
vlivanov@5223 1650 #endif
vlivanov@5223 1651 if (flat->offset() == TypePtr::OffsetBot) {
vlivanov@5223 1652 alias_type(idx)->set_element(flat->is_aryptr()->elem());
vlivanov@5223 1653 }
vlivanov@5223 1654 }
duke@0 1655 if (flat->isa_klassptr()) {
stefank@2956 1656 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
duke@0 1657 alias_type(idx)->set_rewritable(false);
stefank@2956 1658 if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
duke@0 1659 alias_type(idx)->set_rewritable(false);
stefank@2956 1660 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
duke@0 1661 alias_type(idx)->set_rewritable(false);
stefank@2956 1662 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
duke@0 1663 alias_type(idx)->set_rewritable(false);
duke@0 1664 }
duke@0 1665 // %%% (We would like to finalize JavaThread::threadObj_offset(),
duke@0 1666 // but the base pointer type is not distinctive enough to identify
duke@0 1667 // references into JavaThread.)
duke@0 1668
never@2223 1669 // Check for final fields.
duke@0 1670 const TypeInstPtr* tinst = flat->isa_instptr();
coleenp@113 1671 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
never@2223 1672 ciField* field;
never@2223 1673 if (tinst->const_oop() != NULL &&
never@2223 1674 tinst->klass() == ciEnv::current()->Class_klass() &&
never@2223 1675 tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
never@2223 1676 // static field
never@2223 1677 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
never@2223 1678 field = k->get_field_by_offset(tinst->offset(), true);
never@2223 1679 } else {
never@2223 1680 ciInstanceKlass *k = tinst->klass()->as_instance_klass();
never@2223 1681 field = k->get_field_by_offset(tinst->offset(), false);
never@2223 1682 }
never@2223 1683 assert(field == NULL ||
never@2223 1684 original_field == NULL ||
never@2223 1685 (field->holder() == original_field->holder() &&
never@2223 1686 field->offset() == original_field->offset() &&
never@2223 1687 field->is_static() == original_field->is_static()), "wrong field?");
duke@0 1688 // Set field() and is_rewritable() attributes.
duke@0 1689 if (field != NULL) alias_type(idx)->set_field(field);
duke@0 1690 }
duke@0 1691 }
duke@0 1692
duke@0 1693 // Fill the cache for next time.
duke@0 1694 ace->_adr_type = adr_type;
duke@0 1695 ace->_index = idx;
duke@0 1696 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
duke@0 1697
duke@0 1698 // Might as well try to fill the cache for the flattened version, too.
duke@0 1699 AliasCacheEntry* face = probe_alias_cache(flat);
duke@0 1700 if (face->_adr_type == NULL) {
duke@0 1701 face->_adr_type = flat;
duke@0 1702 face->_index = idx;
duke@0 1703 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
duke@0 1704 }
duke@0 1705
duke@0 1706 return alias_type(idx);
duke@0 1707 }
duke@0 1708
duke@0 1709
duke@0 1710 Compile::AliasType* Compile::alias_type(ciField* field) {
duke@0 1711 const TypeOopPtr* t;
duke@0 1712 if (field->is_static())
never@2223 1713 t = TypeInstPtr::make(field->holder()->java_mirror());
duke@0 1714 else
duke@0 1715 t = TypeOopPtr::make_from_klass_raw(field->holder());
never@2223 1716 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
vlivanov@5223 1717 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
duke@0 1718 return atp;
duke@0 1719 }
duke@0 1720
duke@0 1721
duke@0 1722 //------------------------------have_alias_type--------------------------------
duke@0 1723 bool Compile::have_alias_type(const TypePtr* adr_type) {
duke@0 1724 AliasCacheEntry* ace = probe_alias_cache(adr_type);
duke@0 1725 if (ace->_adr_type == adr_type) {
duke@0 1726 return true;
duke@0 1727 }
duke@0 1728
duke@0 1729 // Handle special cases.
duke@0 1730 if (adr_type == NULL) return true;
duke@0 1731 if (adr_type == TypePtr::BOTTOM) return true;
duke@0 1732
never@2223 1733 return find_alias_type(adr_type, true, NULL) != NULL;
duke@0 1734 }
duke@0 1735
duke@0 1736 //-----------------------------must_alias--------------------------------------
duke@0 1737 // True if all values of the given address type are in the given alias category.
duke@0 1738 bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
duke@0 1739 if (alias_idx == AliasIdxBot) return true; // the universal category
duke@0 1740 if (adr_type == NULL) return true; // NULL serves as TypePtr::TOP
duke@0 1741 if (alias_idx == AliasIdxTop) return false; // the empty category
duke@0 1742 if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
duke@0 1743
duke@0 1744 // the only remaining possible overlap is identity
duke@0 1745 int adr_idx = get_alias_index(adr_type);
duke@0 1746 assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
duke@0 1747 assert(adr_idx == alias_idx ||
duke@0 1748 (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
duke@0 1749 && adr_type != TypeOopPtr::BOTTOM),
duke@0 1750 "should not be testing for overlap with an unsafe pointer");
duke@0 1751 return adr_idx == alias_idx;
duke@0 1752 }
duke@0 1753
duke@0 1754 //------------------------------can_alias--------------------------------------
duke@0 1755 // True if any values of the given address type are in the given alias category.
duke@0 1756 bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
duke@0 1757 if (alias_idx == AliasIdxTop) return false; // the empty category
duke@0 1758 if (adr_type == NULL) return false; // NULL serves as TypePtr::TOP
duke@0 1759 if (alias_idx == AliasIdxBot) return true; // the universal category
duke@0 1760 if (adr_type->base() == Type::AnyPtr) return true; // TypePtr::BOTTOM or its twins
duke@0 1761
duke@0 1762 // the only remaining possible overlap is identity
duke@0 1763 int adr_idx = get_alias_index(adr_type);
duke@0 1764 assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
duke@0 1765 return adr_idx == alias_idx;
duke@0 1766 }
duke@0 1767
duke@0 1768
duke@0 1769
duke@0 1770 //---------------------------pop_warm_call-------------------------------------
duke@0 1771 WarmCallInfo* Compile::pop_warm_call() {
duke@0 1772 WarmCallInfo* wci = _warm_calls;
duke@0 1773 if (wci != NULL) _warm_calls = wci->remove_from(wci);
duke@0 1774 return wci;
duke@0 1775 }
duke@0 1776
duke@0 1777 //----------------------------Inline_Warm--------------------------------------
duke@0 1778 int Compile::Inline_Warm() {
duke@0 1779 // If there is room, try to inline some more warm call sites.
duke@0 1780 // %%% Do a graph index compaction pass when we think we're out of space?
duke@0 1781 if (!InlineWarmCalls) return 0;
duke@0 1782
duke@0 1783 int calls_made_hot = 0;
duke@0 1784 int room_to_grow = NodeCountInliningCutoff - unique();
duke@0 1785 int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
duke@0 1786 int amount_grown = 0;
duke@0 1787 WarmCallInfo* call;
duke@0 1788 while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
duke@0 1789 int est_size = (int)call->size();
duke@0 1790 if (est_size > (room_to_grow - amount_grown)) {
duke@0 1791 // This one won't fit anyway. Get rid of it.
duke@0 1792 call->make_cold();
duke@0 1793 continue;
duke@0 1794 }
duke@0 1795 call->make_hot();
duke@0 1796 calls_made_hot++;
duke@0 1797 amount_grown += est_size;
duke@0 1798 amount_to_grow -= est_size;
duke@0 1799 }
duke@0 1800
duke@0 1801 if (calls_made_hot > 0) set_major_progress();
duke@0 1802 return calls_made_hot;
duke@0 1803 }
duke@0 1804
duke@0 1805
duke@0 1806 //----------------------------Finish_Warm--------------------------------------
duke@0 1807 void Compile::Finish_Warm() {
duke@0 1808 if (!InlineWarmCalls) return;
duke@0 1809 if (failing()) return;
duke@0 1810 if (warm_calls() == NULL) return;
duke@0 1811
duke@0 1812 // Clean up loose ends, if we are out of space for inlining.
duke@0 1813 WarmCallInfo* call;
duke@0 1814 while ((call = pop_warm_call()) != NULL) {
duke@0 1815 call->make_cold();
duke@0 1816 }
duke@0 1817 }
duke@0 1818
cfang@1172 1819 //---------------------cleanup_loop_predicates-----------------------
cfang@1172 1820 // Remove the opaque nodes that protect the predicates so that all unused
cfang@1172 1821 // checks and uncommon_traps will be eliminated from the ideal graph
cfang@1172 1822 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
cfang@1172 1823 if (predicate_count()==0) return;
cfang@1172 1824 for (int i = predicate_count(); i > 0; i--) {
cfang@1172 1825 Node * n = predicate_opaque1_node(i-1);
cfang@1172 1826 assert(n->Opcode() == Op_Opaque1, "must be");
cfang@1172 1827 igvn.replace_node(n, n->in(1));
cfang@1172 1828 }
cfang@1172 1829 assert(predicate_count()==0, "should be clean!");
cfang@1172 1830 }
duke@0 1831
roland@3974 1832 // StringOpts and late inlining of string methods
roland@3974 1833 void Compile::inline_string_calls(bool parse_time) {
roland@3974 1834 {
roland@3974 1835 // remove useless nodes to make the usage analysis simpler
roland@3974 1836 ResourceMark rm;
roland@3974 1837 PhaseRemoveUseless pru(initial_gvn(), for_igvn());
roland@3974 1838 }
roland@3974 1839
roland@3974 1840 {
roland@3974 1841 ResourceMark rm;
sla@4802 1842 print_method(PHASE_BEFORE_STRINGOPTS, 3);
roland@3974 1843 PhaseStringOpts pso(initial_gvn(), for_igvn());
sla@4802 1844 print_method(PHASE_AFTER_STRINGOPTS, 3);
roland@3974 1845 }
roland@3974 1846
roland@3974 1847 // now inline anything that we skipped the first time around
roland@3974 1848 if (!parse_time) {
roland@3974 1849 _late_inlines_pos = _late_inlines.length();
roland@3974 1850 }
roland@3974 1851
roland@3974 1852 while (_string_late_inlines.length() > 0) {
roland@3974 1853 CallGenerator* cg = _string_late_inlines.pop();
roland@3974 1854 cg->do_late_inline();
roland@3974 1855 if (failing()) return;
roland@3974 1856 }
roland@3974 1857 _string_late_inlines.trunc_to(0);
roland@3974 1858 }
roland@3974 1859
kvn@4675 1860 // Late inlining of boxing methods
kvn@4675 1861 void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
kvn@4675 1862 if (_boxing_late_inlines.length() > 0) {
kvn@4675 1863 assert(has_boxed_value(), "inconsistent");
kvn@4675 1864
kvn@4675 1865 PhaseGVN* gvn = initial_gvn();
kvn@4675 1866 set_inlining_incrementally(true);
kvn@4675 1867
kvn@4675 1868 assert( igvn._worklist.size() == 0, "should be done with igvn" );
kvn@4675 1869 for_igvn()->clear();
kvn@4675 1870 gvn->replace_with(&igvn);
kvn@4675 1871
kvn@4675 1872 while (_boxing_late_inlines.length() > 0) {
kvn@4675 1873 CallGenerator* cg = _boxing_late_inlines.pop();
kvn@4675 1874 cg->do_late_inline();
kvn@4675 1875 if (failing()) return;
kvn@4675 1876 }
kvn@4675 1877 _boxing_late_inlines.trunc_to(0);
kvn@4675 1878
kvn@4675 1879 {
kvn@4675 1880 ResourceMark rm;
kvn@4675 1881 PhaseRemoveUseless pru(gvn, for_igvn());
kvn@4675 1882 }
kvn@4675 1883
kvn@4675 1884 igvn = PhaseIterGVN(gvn);
kvn@4675 1885 igvn.optimize();
kvn@4675 1886
kvn@4675 1887 set_inlining_progress(false);
kvn@4675 1888 set_inlining_incrementally(false);
kvn@4675 1889 }
kvn@4675 1890 }
kvn@4675 1891
roland@3974 1892 void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
roland@3974 1893 assert(IncrementalInline, "incremental inlining should be on");
roland@3974 1894 PhaseGVN* gvn = initial_gvn();
roland@3974 1895
roland@3974 1896 set_inlining_progress(false);
roland@3974 1897 for_igvn()->clear();
roland@3974 1898 gvn->replace_with(&igvn);
roland@3974 1899
roland@3974 1900 int i = 0;
roland@3974 1901
roland@3974 1902 for (; i <_late_inlines.length() && !inlining_progress(); i++) {
roland@3974 1903 CallGenerator* cg = _late_inlines.at(i);
roland@3974 1904 _late_inlines_pos = i+1;
roland@3974 1905 cg->do_late_inline();
roland@3974 1906 if (failing()) return;
roland@3974 1907 }
roland@3974 1908 int j = 0;
roland@3974 1909 for (; i < _late_inlines.length(); i++, j++) {
roland@3974 1910 _late_inlines.at_put(j, _late_inlines.at(i));
roland@3974 1911 }
roland@3974 1912 _late_inlines.trunc_to(j);
roland@3974 1913
roland@3974 1914 {
roland@3974 1915 ResourceMark rm;
kvn@4675 1916 PhaseRemoveUseless pru(gvn, for_igvn());
roland@3974 1917 }
roland@3974 1918
roland@3974 1919 igvn = PhaseIterGVN(gvn);
roland@3974 1920 }
roland@3974 1921
roland@3974 1922 // Perform incremental inlining until bound on number of live nodes is reached
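// Each round runs inline_incrementally_one() followed by IGVN; when the live
// node count exceeds LiveNodeCountInliningCutoff, a PhaseIdealLoop pass may be
// run to shrink the graph, and inlining stops if the count stays above the cutoff.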
roland@3974 1923 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
roland@3974 1924 PhaseGVN* gvn = initial_gvn();
roland@3974 1925
roland@3974 1926 set_inlining_incrementally(true);
roland@3974 1927 set_inlining_progress(true);
roland@3974 1928 uint low_live_nodes = 0;
roland@3974 1929
roland@3974 1930 while(inlining_progress() && _late_inlines.length() > 0) {
roland@3974 1931
roland@3974 1932 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
roland@3974 1933 if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
roland@3974 1934 // PhaseIdealLoop is expensive, so we only try it once we are over the
roland@3974 1935 // live node cutoff, and we only try it again if the previous pass got
roland@3974 1936 // the number of nodes down significantly.
roland@3974 1937 PhaseIdealLoop ideal_loop( igvn, false, true );
roland@3974 1938 if (failing()) return;
roland@3974 1939 low_live_nodes = live_nodes();
roland@3974 1940 _major_progress = true;
roland@3974 1941 }
roland@3974 1942
roland@3974 1943 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
roland@3974 1944 break;
roland@3974 1945 }
roland@3974 1946 }
roland@3974 1947
roland@3974 1948 inline_incrementally_one(igvn);
roland@3974 1949
roland@3974 1950 if (failing()) return;
roland@3974 1951
roland@3974 1952 igvn.optimize();
roland@3974 1953
roland@3974 1954 if (failing()) return;
roland@3974 1955 }
roland@3974 1956
roland@3974 1957 assert( igvn._worklist.size() == 0, "should be done with igvn" );
roland@3974 1958
roland@3974 1959 if (_string_late_inlines.length() > 0) {
roland@3974 1960 assert(has_stringbuilder(), "inconsistent");
roland@3974 1961 for_igvn()->clear();
roland@3974 1962 initial_gvn()->replace_with(&igvn);
roland@3974 1963
roland@3974 1964 inline_string_calls(false);
roland@3974 1965
roland@3974 1966 if (failing()) return;
roland@3974 1967
roland@3974 1968 {
roland@3974 1969 ResourceMark rm;
roland@3974 1970 PhaseRemoveUseless pru(initial_gvn(), for_igvn());
roland@3974 1971 }
roland@3974 1972
roland@3974 1973 igvn = PhaseIterGVN(gvn);
roland@3974 1974
roland@3974 1975 igvn.optimize();
roland@3974 1976 }
roland@3974 1977
roland@3974 1978 set_inlining_incrementally(false);
roland@3974 1979 }
roland@3974 1980
roland@3974 1981
duke@0 1982 //------------------------------Optimize---------------------------------------
duke@0 1983 // Given a graph, optimize it.
duke@0 1984 void Compile::Optimize() {
duke@0 1985 TracePhase t1("optimizer", &_t_optimizer, true);
duke@0 1986
duke@0 1987 #ifndef PRODUCT
duke@0 1988 if (env()->break_at_compile()) {
duke@0 1989 BREAKPOINT;
duke@0 1990 }
duke@0 1991
duke@0 1992 #endif
duke@0 1993
duke@0 1994 ResourceMark rm;
duke@0 1995 int loop_opts_cnt;
duke@0 1996
duke@0 1997 NOT_PRODUCT( verify_graph_edges(); )
duke@0 1998
sla@4802 1999 print_method(PHASE_AFTER_PARSING);
duke@0 2000
duke@0 2001 {
duke@0 2002 // Iterative Global Value Numbering, including ideal transforms
duke@0 2003 // Initialize IterGVN with types and values from parse-time GVN
duke@0 2004 PhaseIterGVN igvn(initial_gvn());
duke@0 2005 {
duke@0 2006 NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
duke@0 2007 igvn.optimize();
duke@0 2008 }
duke@0 2009
sla@4802 2010 print_method(PHASE_ITER_GVN1, 2);
duke@0 2011
duke@0 2012 if (failing()) return;
duke@0 2013
kvn@4675 2014 {
kvn@4675 2015 NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
kvn@4675 2016 inline_incrementally(igvn);
kvn@4675 2017 }
roland@3974 2018
sla@4802 2019 print_method(PHASE_INCREMENTAL_INLINE, 2);
roland@3974 2020
roland@3974 2021 if (failing()) return;
roland@3974 2022
kvn@4675 2023 if (eliminate_boxing()) {
kvn@4675 2024 NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
kvn@4675 2025 // Inline valueOf() methods now.
kvn@4675 2026 inline_boxing_calls(igvn);
kvn@4675 2027
sla@4802 2028 print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
kvn@4675 2029
kvn@4675 2030 if (failing()) return;
kvn@4675 2031 }
kvn@4675 2032
roland@5556 2033 // Remove the speculative part of types and clean up the graph from
roland@5556 2034 // the extra CastPP nodes whose only purpose is to carry them. Do
roland@5556 2035 // that early so that optimizations are not disrupted by the extra
roland@5556 2036 // CastPP nodes.
roland@5556 2037 remove_speculative_types(igvn);
roland@5556 2038
roland@4154 2039 // No more new expensive nodes will be added to the list from here
roland@4154 2040 // so keep only the actual candidates for optimizations.
roland@4154 2041 cleanup_expensive_nodes(igvn);
roland@4154 2042
kvn@1554 2043 // Perform escape analysis
kvn@1554 2044 if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
kvn@2825 2045 if (has_loops()) {
kvn@2825 2046 // Cleanup graph (remove dead nodes).
kvn@2825 2047 TracePhase t2("idealLoop", &_t_idealLoop, true);
kvn@2825 2048 PhaseIdealLoop ideal_loop( igvn, false, true );
sla@4802 2049 if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
kvn@2825 2050 if (failing()) return;
kvn@2825 2051 }
kvn@1554 2052 ConnectionGraph::do_analysis(this, &igvn);
kvn@1554 2053
kvn@1554 2054 if (failing()) return;
kvn@1554 2055
kvn@2876 2056 // Optimize out fields loads from scalar replaceable allocations.
kvn@1554 2057 igvn.optimize();
sla@4802 2058 print_method(PHASE_ITER_GVN_AFTER_EA, 2);
kvn@1554 2059
kvn@1554 2060 if (failing()) return;
kvn@1554 2061
kvn@2876 2062 if (congraph() != NULL && macro_count() > 0) {
kvn@3216 2063 NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
kvn@2876 2064 PhaseMacroExpand mexp(igvn);
kvn@2876 2065 mexp.eliminate_macro_nodes();
kvn@2876 2066 igvn.set_delay_transform(false);
kvn@2876 2067
kvn@2876 2068 igvn.optimize();
sla@4802 2069 print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
kvn@2876 2070
kvn@2876 2071 if (failing()) return;
kvn@2876 2072 }
kvn@1554 2073 }
kvn@1554 2074
duke@0 2075 // Loop transforms on the ideal graph. Range Check Elimination,
duke@0 2076 // peeling, unrolling, etc.
duke@0 2077
duke@0 2078 // Set loop opts counter
duke@0 2079 loop_opts_cnt = num_loop_opts();
duke@0 2080 if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
duke@0 2081 {
duke@0 2082 TracePhase t2("idealLoop", &_t_idealLoop, true);
kvn@2292 2083 PhaseIdealLoop ideal_loop( igvn, true );
duke@0 2084 loop_opts_cnt--;
sla@4802 2085 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
duke@0 2086 if (failing()) return;
duke@0 2087 }
duke@0 2088 // Loop opts pass if partial peeling occurred in previous pass
duke@0 2089 if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
duke@0 2090 TracePhase t3("idealLoop", &_t_idealLoop, true);
kvn@2292 2091 PhaseIdealLoop ideal_loop( igvn, false );
duke@0 2092 loop_opts_cnt--;
sla@4802 2093 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
duke@0 2094 if (failing()) return;
duke@0 2095 }
duke@0 2096 // Loop opts pass for loop-unrolling before CCP
duke@0 2097 if(major_progress() && (loop_opts_cnt > 0)) {
duke@0 2098 TracePhase t4("idealLoop", &_t_idealLoop, true);
kvn@2292 2099 PhaseIdealLoop ideal_loop( igvn, false );
duke@0 2100 loop_opts_cnt--;
sla@4802 2101 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
duke@0 2102 }
never@921 2103 if (!failing()) {
never@921 2104 // Verify that last round of loop opts produced a valid graph
never@921 2105 NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
never@921 2106 PhaseIdealLoop::verify(igvn);
never@921 2107 }
duke@0 2108 }
duke@0 2109 if (failing()) return;
duke@0 2110
duke@0 2111 // Conditional Constant Propagation;
duke@0 2112 PhaseCCP ccp( &igvn );
duke@0 2113 assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
duke@0 2114 {
duke@0 2115 TracePhase t2("ccp", &_t_ccp, true);
duke@0 2116 ccp.do_transform();
duke@0 2117 }
sla@4802 2118 print_method(PHASE_CPP1, 2);
duke@0 2119
duke@0 2120 assert( true, "Break here to ccp.dump_old2new_map()");
duke@0 2121
duke@0 2122 // Iterative Global Value Numbering, including ideal transforms
duke@0 2123 {
duke@0 2124 NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
duke@0 2125 igvn = ccp;
duke@0 2126 igvn.optimize();
duke@0 2127 }
duke@0 2128
sla@4802 2129 print_method(PHASE_ITER_GVN2, 2);
duke@0 2130
duke@0 2131 if (failing()) return;
duke@0 2132
duke@0 2133 // Loop transforms on the ideal graph. Range Check Elimination,
duke@0 2134 // peeling, unrolling, etc.
duke@0 2135 if(loop_opts_cnt > 0) {
duke@0 2136 debug_only( int cnt = 0; );
duke@0 2137 while(major_progress() && (loop_opts_cnt > 0)) {
duke@0 2138 TracePhase t2("idealLoop", &_t_idealLoop, true);
duke@0 2139 assert( cnt++ < 40, "infinite cycle in loop optimization" );
kvn@2292 2140 PhaseIdealLoop ideal_loop( igvn, true);
duke@0 2141 loop_opts_cnt--;
sla@4802 2142 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
duke@0 2143 if (failing()) return;
duke@0 2144 }
duke@0 2145 }
never@921 2146
never@921 2147 {
never@921 2148 // Verify that all previous optimizations produced a valid graph
never@921 2149 // at least to this point, even if no loop optimizations were done.
never@921 2150 NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
never@921 2151 PhaseIdealLoop::verify(igvn);
never@921 2152 }
never@921 2153
duke@0 2154 {
duke@0 2155 NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
duke@0 2156 PhaseMacroExpand mex(igvn);
duke@0 2157 if (mex.expand_macro_nodes()) {
duke@0 2158 assert(failing(), "must bail out w/ explicit message");
duke@0 2159 return;
duke@0 2160 }
duke@0 2161 }
duke@0 2162
duke@0 2163 } // (End scope of igvn; run destructor if necessary for asserts.)
duke@0 2164
kvn@4013 2165 dump_inlining();
duke@0 2166 // A method with only infinite loops has no edges entering loops from root
duke@0 2167 {
duke@0 2168 NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
duke@0 2169 if (final_graph_reshaping()) {
duke@0 2170 assert(failing(), "must bail out w/ explicit message");
duke@0 2171 return;
duke@0 2172 }
duke@0 2173 }
duke@0 2174
sla@4802 2175 print_method(PHASE_OPTIMIZE_FINISHED, 2);
duke@0 2176 }
duke@0 2177
duke@0 2178
duke@0 2179 //------------------------------Code_Gen---------------------------------------
duke@0 2180 // Given a graph, generate code for it
duke@0 2181 void Compile::Code_Gen() {
adlertz@5104 2182 if (failing()) {
adlertz@5104 2183 return;
adlertz@5104 2184 }
duke@0 2185
duke@0 2186 // Perform instruction selection. You might think we could reclaim Matcher
duke@0 2187 // memory PDQ, but actually the Matcher is used in generating spill code.
duke@0 2188 // Internals of the Matcher (including some VectorSets) must remain live
duke@0 2189 // for a while; thus I cannot reclaim Matcher memory lest a VectorSet usage
duke@0 2190 // set a bit in reclaimed memory.
duke@0 2191
duke@0 2192 // In debug mode one can dump m._nodes.dump() for the mapping of ideal to machine
duke@0 2193 // nodes. Mapping is only valid at the root of each matched subtree.
duke@0 2194 NOT_PRODUCT( verify_graph_edges(); )
duke@0 2195
adlertz@5104 2196 Matcher matcher;
adlertz@5104 2197 _matcher = &matcher;
duke@0 2198 {
duke@0 2199 TracePhase t2("matcher", &_t_matcher, true);
adlertz@5104 2200 matcher.match();
duke@0 2201 }
duke@0 2202 // In debug mode one can dump m._nodes.dump() for the mapping of ideal to machine
duke@0 2203 // nodes. Mapping is only valid at the root of each matched subtree.
duke@0 2204 NOT_PRODUCT( verify_graph_edges(); )
duke@0 2205
duke@0 2206 // If you have too many nodes, or if matching has failed, bail out
duke@0 2207 check_node_count(0, "out of nodes matching instructions");
adlertz@5104 2208 if (failing()) {
adlertz@5104 2209 return;
adlertz@5104 2210 }
duke@0 2211
duke@0 2212 // Build a proper-looking CFG
adlertz@5104 2213 PhaseCFG cfg(node_arena(), root(), matcher);
duke@0 2214 _cfg = &cfg;
duke@0 2215 {
duke@0 2216 NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
adlertz@5104 2217 bool success = cfg.do_global_code_motion();
adlertz@5104 2218 if (!success) {
adlertz@5104 2219 return;
adlertz@5104 2220 }
adlertz@5104 2221
adlertz@5104 2222 print_method(PHASE_GLOBAL_CODE_MOTION, 2);
duke@0 2223 NOT_PRODUCT( verify_graph_edges(); )
duke@0 2224 debug_only( cfg.verify(); )
duke@0 2225 }
adlertz@5104 2226
adlertz@5104 2227 PhaseChaitin regalloc(unique(), cfg, matcher);
duke@0 2228 _regalloc = &regalloc;
duke@0 2229 {
duke@0 2230 TracePhase t2("regalloc", &_t_registerAllocation, true);
duke@0 2231 // Perform register allocation. After Chaitin, use-def chains are
duke@0 2232 // no longer accurate (at spill code) and so must be ignored.
duke@0 2233 // Node->LRG->reg mappings are still accurate.
duke@0 2234 _regalloc->Register_Allocate();
duke@0 2235
duke@0 2236 // Bail out if the allocator builds too many nodes
neliasso@4514 2237 if (failing()) {
neliasso@4514 2238 return;
neliasso@4514 2239 }
duke@0 2240 }
duke@0 2241
duke@0 2242 // Prior to register allocation we kept empty basic blocks in case the
duke@0 2243 // allocator needed a place to spill. After register allocation we
duke@0 2244 // are not adding any new instructions. If any basic block is empty, we
duke@0 2245 // can now safely remove it.
duke@0 2246 {
rasbold@418 2247 NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
adlertz@5104 2248 cfg.remove_empty_blocks();
rasbold@418 2249 if (do_freq_based_layout()) {
rasbold@418 2250 PhaseBlockLayout layout(cfg);
rasbold@418 2251 } else {
rasbold@418 2252 cfg.set_loop_alignment();
rasbold@418 2253 }
rasbold@418 2254 cfg.fixup_flow();
duke@0 2255 }
duke@0 2256
duke@0 2257 // Apply peephole optimizations
duke@0 2258 if( OptoPeephole ) {
duke@0 2259 NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
duke@0 2260 PhasePeephole peep( _regalloc, cfg);
duke@0 2261 peep.do_transform();
duke@0 2262 }
duke@0 2263
duke@0 2264 // Convert Nodes to instruction bits in a buffer
duke@0 2265 {
duke@0 2266 // %%%% workspace merge brought two timers together for one job
duke@0 2267 TracePhase t2a("output", &_t_output, true);
duke@0 2268 NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
duke@0 2269 Output();
duke@0 2270 }
duke@0 2271
sla@4802 2272 print_method(PHASE_FINAL_CODE);
duke@0 2273
duke@0 2274 // He's dead, Jim.
duke@0 2275 _cfg = (PhaseCFG*)0xdeadbeef;
duke@0 2276 _regalloc = (PhaseChaitin*)0xdeadbeef;
duke@0 2277 }
duke@0 2278
duke@0 2279
duke@0 2280 //------------------------------dump_asm---------------------------------------
duke@0 2281 // Dump formatted assembly
duke@0 2282 #ifndef PRODUCT
duke@0 2283 void Compile::dump_asm(int *pcs, uint pc_limit) {
duke@0 2284 bool cut_short = false;
duke@0 2285 tty->print_cr("#");
duke@0 2286 tty->print("# "); _tf->dump(); tty->cr();
duke@0 2287 tty->print_cr("#");
duke@0 2288
duke@0 2289 // For all blocks
duke@0 2290 int pc = 0x0; // Program counter
duke@0 2291 char starts_bundle = ' ';
duke@0 2292 _regalloc->dump_frame();
duke@0 2293
duke@0 2294 Node *n = NULL;
adlertz@5104 2295 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
adlertz@5104 2296 if (VMThread::should_terminate()) {
adlertz@5104 2297 cut_short = true;
adlertz@5104 2298 break;
adlertz@5104 2299 }
adlertz@5104 2300 Block* block = _cfg->get_block(i);
adlertz@5104 2301 if (block->is_connector() && !Verbose) {
adlertz@5104 2302 continue;
adlertz@5104 2303 }
adlertz@5200 2304 n = block->head();
adlertz@5104 2305 if (pcs && n->_idx < pc_limit) {
duke@0 2306 tty->print("%3.3x ", pcs[n->_idx]);
adlertz@5104 2307 } else {
duke@0 2308 tty->print(" ");
adlertz@5104 2309 }
adlertz@5104 2310 block->dump_head(_cfg);
adlertz@5104 2311 if (block->is_connector()) {
duke@0 2312 tty->print_cr(" # Empty connector block");
adlertz@5104 2313 } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
duke@0 2314 tty->print_cr(" # Block is sole successor of call");
duke@0 2315 }
duke@0 2316
duke@0 2317 // For all instructions
duke@0 2318 Node *delay = NULL;
adlertz@5200 2319 for (uint j = 0; j < block->number_of_nodes(); j++) {
adlertz@5104 2320 if (VMThread::should_terminate()) {
adlertz@5104 2321 cut_short = true;
adlertz@5104 2322 break;
adlertz@5104 2323 }
adlertz@5200 2324 n = block->get_node(j);
duke@0 2325 if (valid_bundle_info(n)) {
adlertz@5104 2326 Bundle* bundle = node_bundling(n);
duke@0 2327 if (bundle->used_in_unconditional_delay()) {
duke@0 2328 delay = n;
duke@0 2329 continue;
duke@0 2330 }
adlertz@5104 2331 if (bundle->starts_bundle()) {
duke@0 2332 starts_bundle = '+';
adlertz@5104 2333 }
duke@0 2334 }
duke@0 2335
adlertz@5104 2336 if (WizardMode) {
adlertz@5104 2337 n->dump();
adlertz@5104 2338 }
coleenp@113 2339
duke@0 2340 if( !n->is_Region() && // Don't print in the Assembly
duke@0 2341 !n->is_Phi() && // a few noisily useless nodes
duke@0 2342 !n->is_Proj() &&
duke@0 2343 !n->is_MachTemp() &&
kvn@1100 2344 !n->is_SafePointScalarObject() &&
duke@0 2345 !n->is_Catch() && // Would be nice to print exception table targets
duke@0 2346 !n->is_MergeMem() && // Not very interesting
duke@0 2347 !n->is_top() && // Debug info table constants
duke@0 2348 !(n->is_Con() && !n->is_Mach())// Debug info table constants
duke@0 2349 ) {
duke@0 2350 if (pcs && n->_idx < pc_limit)
duke@0 2351 tty->print("%3.3x", pcs[n->_idx]);
duke@0 2352 else
duke@0 2353 tty->print(" ");
duke@0 2354 tty->print(" %c ", starts_bundle);
duke@0 2355 starts_bundle = ' ';
duke@0 2356 tty->print("\t");
duke@0 2357 n->format(_regalloc, tty);
duke@0 2358 tty->cr();
duke@0 2359 }
duke@0 2360
duke@0 2361 // If we have an instruction with a delay slot, and have seen a delay,
duke@0 2362 // then back up and print it
duke@0 2363 if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
duke@0 2364 assert(delay != NULL, "no unconditional delay instruction");
coleenp@113 2365 if (WizardMode) delay->dump();
coleenp@113 2366
duke@0 2367 if (node_bundling(delay)->starts_bundle())
duke@0 2368 starts_bundle = '+';
duke@0 2369 if (pcs && n->_idx < pc_limit)
duke@0 2370 tty->print("%3.3x", pcs[n->_idx]);
duke@0 2371 else
duke@0 2372 tty->print(" ");
duke@0 2373 tty->print(" %c ", starts_bundle);
duke@0 2374 starts_bundle = ' ';
duke@0 2375 tty->print("\t");
duke@0 2376 delay->format(_regalloc, tty);
duke@0 2377 tty->print_cr("");
duke@0 2378 delay = NULL;
duke@0 2379 }
duke@0 2380
duke@0 2381 // Dump the exception table as well
duke@0 2382 if( n->is_Catch() && (Verbose || WizardMode) ) {
duke@0 2383 // Print the exception table for this offset
duke@0 2384 _handler_table.print_subtable_for(pc);
duke@0 2385 }
duke@0 2386 }
duke@0 2387
duke@0 2388 if (pcs && n->_idx < pc_limit)
duke@0 2389 tty->print_cr("%3.3x", pcs[n->_idx]);
duke@0 2390 else
duke@0 2391 tty->print_cr("");
duke@0 2392
duke@0 2393 assert(cut_short || delay == NULL, "no unconditional delay branch");
duke@0 2394
duke@0 2395 } // End of per-block dump
duke@0 2396 tty->print_cr("");
duke@0 2397
duke@0 2398 if (cut_short) tty->print_cr("*** disassembly is cut short ***");
duke@0 2399 }
duke@0 2400 #endif
duke@0 2401
duke@0 2402 //------------------------------Final_Reshape_Counts---------------------------
duke@0 2403 // This class defines counters to help identify when a method
duke@0 2404 // may/must be executed using hardware with only 24-bit precision.
duke@0 2405 struct Final_Reshape_Counts : public StackObj {
duke@0 2406 int _call_count; // count non-inlined 'common' calls
duke@0 2407 int _float_count; // count float ops requiring 24-bit precision
duke@0 2408 int _double_count; // count double ops requiring more precision
duke@0 2409 int _java_call_count; // count non-inlined 'java' calls
kvn@859 2410 int _inner_loop_count; // count loops which need alignment
duke@0 2411 VectorSet _visited; // Visitation flags
duke@0 2412 Node_List _tests; // Set of IfNodes & PCTableNodes
duke@0 2413
duke@0 2414 Final_Reshape_Counts() :
kvn@859 2415 _call_count(0), _float_count(0), _double_count(0),
kvn@859 2416 _java_call_count(0), _inner_loop_count(0),
duke@0 2417 _visited( Thread::current()->resource_area() ) { }
duke@0 2418
duke@0 2419 void inc_call_count () { _call_count ++; }
duke@0 2420 void inc_float_count () { _float_count ++; }
duke@0 2421 void inc_double_count() { _double_count++; }
duke@0 2422 void inc_java_call_count() { _java_call_count++; }
kvn@859 2423 void inc_inner_loop_count() { _inner_loop_count++; }
duke@0 2424
duke@0 2425 int get_call_count () const { return _call_count ; }
duke@0 2426 int get_float_count () const { return _float_count ; }
duke@0 2427 int get_double_count() const { return _double_count; }
duke@0 2428 int get_java_call_count() const { return _java_call_count; }
kvn@859 2429 int get_inner_loop_count() const { return _inner_loop_count; }
duke@0 2430 };
duke@0 2431
mikael@4454 2432 #ifdef ASSERT
duke@0 2433 static bool oop_offset_is_sane(const TypeInstPtr* tp) {
duke@0 2434 ciInstanceKlass *k = tp->klass()->as_instance_klass();
duke@0 2435 // Make sure the offset goes inside the instance layout.
coleenp@113 2436 return k->contains_field_offset(tp->offset());
duke@0 2437 // Note that OffsetBot and OffsetTop are very negative.
duke@0 2438 }
mikael@4454 2439 #endif
duke@0 2440
never@2345 2441 // Eliminate trivially redundant StoreCMs and accumulate their
never@2345 2442 // precedence edges.
bharadwaj@3880 2443 void Compile::eliminate_redundant_card_marks(Node* n) {
never@2345 2444 assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
never@2345 2445 if (n->in(MemNode::Address)->outcnt() > 1) {
never@2345 2446 // There are multiple users of the same address so it might be
never@2345 2447 // possible to eliminate some of the StoreCMs
never@2345 2448 Node* mem = n->in(MemNode::Memory);
never@2345 2449 Node* adr = n->in(MemNode::Address);
never@2345 2450 Node* val = n->in(MemNode::ValueIn);
never@2345 2451 Node* prev = n;
never@2345 2452 bool done = false;
never@2345 2453 // Walk the chain of StoreCMs eliminating ones that match. As
never@2345 2454 // long as it's a chain of single users then the optimization is
never@2345 2455 // safe. Eliminating partially redundant StoreCMs would require
never@2345 2456 // cloning copies down the other paths.
never@2345 2457 while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
never@2345 2458 if (adr == mem->in(MemNode::Address) &&
never@2345 2459 val == mem->in(MemNode::ValueIn)) {
never@2345 2460 // redundant StoreCM
never@2345 2461 if (mem->req() > MemNode::OopStore) {
never@2345 2462 // Hasn't been processed by this code yet.
never@2345 2463 n->add_prec(mem->in(MemNode::OopStore));
never@2345 2464 } else {
never@2345 2465 // Already converted to precedence edge
never@2345 2466 for (uint i = mem->req(); i < mem->len(); i++) {
never@2345 2467 // Accumulate any precedence edges
never@2345 2468 if (mem->in(i) != NULL) {
never@2345 2469 n->add_prec(mem->in(i));
never@2345 2470 }
never@2345 2471 }
never@2345 2472 // Everything above this point has been processed.
never@2345 2473 done = true;
never@2345 2474 }
never@2345 2475 // Eliminate the previous StoreCM
never@2345 2476 prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
never@2345 2477 assert(mem->outcnt() == 0, "should be dead");
bharadwaj@3880 2478 mem->disconnect_inputs(NULL, this);
never@2345 2479 } else {
never@2345 2480 prev = mem;
never@2345 2481 }
never@2345 2482 mem = prev->in(MemNode::Memory);
never@2345 2483 }
never@2345 2484 }
never@2345 2485 }
never@2345 2486
duke@0 2487 //------------------------------final_graph_reshaping_impl----------------------
duke@0 2488 // Implement items 1-5 from final_graph_reshaping below.
bharadwaj@3880 2489 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
duke@0 2490
kvn@168 2491 if ( n->outcnt() == 0 ) return; // dead node
duke@0 2492 uint nop = n->Opcode();
duke@0 2493
duke@0 2494 // Check for 2-input instruction with "last use" on right input.
duke@0 2495 // Swap to left input. Implements item (2).
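// (Editorial note, an assumption about the rationale: with the dying value on
// the left, a two-address machine encoding can overwrite that register in
// place instead of forcing a copy.)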
duke@0 2496 if( n->req() == 3 && // two-input instruction
duke@0 2497 n->in(1)->outcnt() > 1 && // left use is NOT a last use
duke@0 2498 (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
duke@0 2499 n->in(2)->outcnt() == 1 &&// right use IS a last use
duke@0 2500 !n->in(2)->is_Con() ) { // right use is not a constant
duke@0 2501 // Check for commutative opcode
duke@0 2502 switch( nop ) {
duke@0 2503 case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL:
duke@0 2504 case Op_MaxI: case Op_MinI:
duke@0 2505 case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL:
duke@0 2506 case Op_AndL: case Op_XorL: case Op_OrL:
duke@0 2507 case Op_AndI: case Op_XorI: case Op_OrI: {
duke@0 2508 // Move "last use" input to left by swapping inputs
duke@0 2509 n->swap_edges(1, 2);
duke@0 2510 break;
duke@0 2511 }
duke@0 2512 default:
duke@0 2513 break;
duke@0 2514 }
duke@0 2515 }
duke@0 2516
kvn@1529 2517 #ifdef ASSERT
kvn@1529 2518 if( n->is_Mem() ) {
bharadwaj@3880 2519 int alias_idx = get_alias_index(n->as_Mem()->adr_type());
kvn@1529 2520 assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
kvn@1529 2521 // oop will be recorded in oop map if load crosses safepoint
kvn@1529 2522 n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
kvn@1529 2523 LoadNode::is_immutable_value(n->in(MemNode::Address))),
kvn@1529 2524 "raw memory operations should have control edge");
kvn@1529 2525 }
kvn@1529 2526 #endif
duke@0 2527 // Count FPU ops and common calls, implements item (3)
duke@0 2528 switch( nop ) {
duke@0 2529 // Count all float operations that may use FPU
duke@0 2530 case Op_AddF:
duke@0 2531 case Op_SubF:
duke@0 2532 case Op_MulF:
duke@0 2533 case Op_DivF:
duke@0 2534 case Op_NegF:
duke@0 2535 case Op_ModF:
duke@0 2536 case Op_ConvI2F:
duke@0 2537 case Op_ConF:
duke@0 2538 case Op_CmpF:
duke@0 2539 case Op_CmpF3:
duke@0 2540 // case Op_ConvL2F: // longs are split into 32-bit halves
kvn@859 2541 frc.inc_float_count();
duke@0 2542 break;
duke@0 2543
duke@0 2544 case Op_ConvF2D:
duke@0 2545 case Op_ConvD2F:
kvn@859 2546 frc.inc_float_count();
kvn@859 2547 frc.inc_double_count();
duke@0 2548 break;
duke@0 2549
duke@0 2550 // Count all double operations that may use FPU
duke@0 2551 case Op_AddD:
duke@0 2552 case Op_SubD:
duke@0 2553 case Op_MulD:
duke@0 2554 case Op_DivD:
duke@0 2555 case Op_NegD:
duke@0 2556 case Op_ModD:
duke@0 2557 case Op_ConvI2D:
duke@0 2558 case Op_ConvD2I:
duke@0 2559 // case Op_ConvL2D: // handled by leaf call
duke@0 2560 // case Op_ConvD2L: // handled by leaf call
duke@0 2561 case Op_ConD:
duke@0 2562 case Op_CmpD:
duke@0 2563 case Op_CmpD3:
kvn@859 2564 frc.inc_double_count();
duke@0 2565 break;
duke@0 2566 case Op_Opaque1: // Remove Opaque Nodes before matching
duke@0 2567 case Op_Opaque2: // Remove Opaque Nodes before matching
bharadwaj@3880 2568 n->subsume_by(n->in(1), this);
duke@0 2569 break;
duke@0 2570 case Op_CallStaticJava:
duke@0 2571 case Op_CallJava:
duke@0 2572 case Op_CallDynamicJava:
kvn@859 2573     frc.inc_java_call_count(); // Count java call site; falls through to the call-site count below
duke@0 2574 case Op_CallRuntime:
duke@0 2575 case Op_CallLeaf:
duke@0 2576 case Op_CallLeafNoFP: {
duke@0 2577 assert( n->is_Call(), "" );
duke@0 2578 CallNode *call = n->as_Call();
duke@0 2579 // Count call sites where the FP mode bit would have to be flipped.
duke@0 2580 // Do not count uncommon runtime calls:
duke@0 2581 // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
duke@0 2582 // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
duke@0 2583 if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
kvn@859 2584 frc.inc_call_count(); // Count the call site
duke@0 2585 } else { // See if uncommon argument is shared
duke@0 2586 Node *n = call->in(TypeFunc::Parms);
duke@0 2587 int nop = n->Opcode();
duke@0 2588 // Clone shared simple arguments to uncommon calls, item (1).
duke@0 2589 if( n->outcnt() > 1 &&
duke@0 2590 !n->is_Proj() &&
duke@0 2591 nop != Op_CreateEx &&
duke@0 2592 nop != Op_CheckCastPP &&
kvn@331 2593 nop != Op_DecodeN &&
roland@3724 2594 nop != Op_DecodeNKlass &&
duke@0 2595 !n->is_Mem() ) {
duke@0 2596 Node *x = n->clone();
duke@0 2597 call->set_req( TypeFunc::Parms, x );
duke@0 2598 }
duke@0 2599 }
duke@0 2600 break;
duke@0 2601 }
duke@0 2602
duke@0 2603 case Op_StoreD:
duke@0 2604 case Op_LoadD:
duke@0 2605 case Op_LoadD_unaligned:
kvn@859 2606 frc.inc_double_count();
duke@0 2607 goto handle_mem;
duke@0 2608 case Op_StoreF:
duke@0 2609 case Op_LoadF:
kvn@859 2610 frc.inc_float_count();
duke@0 2611 goto handle_mem;
duke@0 2612
never@2345 2613 case Op_StoreCM:
never@2345 2614 {
never@2345 2615 // Convert OopStore dependence into precedence edge
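      // (A precedence edge preserves the scheduling order of the card mark
      // after the oop store without presenting the oop store as a value
      // input to the matcher.)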
never@2345 2616 Node* prec = n->in(MemNode::OopStore);
never@2345 2617 n->del_req(MemNode::OopStore);
never@2345 2618 n->add_prec(prec);
never@2345 2619 eliminate_redundant_card_marks(n);
never@2345 2620 }
never@2345 2621
never@2345 2622 // fall through
never@2345 2623
duke@0 2624 case Op_StoreB:
duke@0 2625 case Op_StoreC:
duke@0 2626 case Op_StorePConditional:
duke@0 2627 case Op_StoreI:
duke@0 2628 case Op_StoreL:
kvn@420 2629 case Op_StoreIConditional:
duke@0 2630 case Op_StoreLConditional:
duke@0 2631 case Op_CompareAndSwapI:
duke@0 2632 case Op_CompareAndSwapL:
duke@0 2633 case Op_CompareAndSwapP:
coleenp@113 2634 case Op_CompareAndSwapN:
roland@3671 2635 case Op_GetAndAddI:
roland@3671 2636 case Op_GetAndAddL:
roland@3671 2637 case Op_GetAndSetI:
roland@3671 2638 case Op_GetAndSetL:
roland@3671 2639 case Op_GetAndSetP:
roland@3671 2640 case Op_GetAndSetN:
duke@0 2641 case Op_StoreP:
coleenp@113 2642 case Op_StoreN:
roland@3724 2643 case Op_StoreNKlass:
duke@0 2644 case Op_LoadB:
twisti@624 2645 case Op_LoadUB:
twisti@558 2646 case Op_LoadUS:
duke@0 2647 case Op_LoadI:
duke@0 2648 case Op_LoadKlass:
kvn@164 2649 case Op_LoadNKlass:
duke@0 2650 case Op_LoadL:
duke@0 2651 case Op_LoadL_unaligned:
duke@0 2652 case Op_LoadPLocked:
duke@0 2653 case Op_LoadP:
coleenp@113 2654 case Op_LoadN:
duke@0 2655 case Op_LoadRange:
duke@0 2656 case Op_LoadS: {
duke@0 2657 handle_mem:
duke@0 2658 #ifdef ASSERT
duke@0 2659 if( VerifyOptoOopOffsets ) {
duke@0 2660 assert( n->is_Mem(), "" );
duke@0 2661 MemNode *mem = (MemNode*)n;
duke@0 2662 // Check to see if address types have grounded out somehow.
duke@0 2663 const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
duke@0 2664 assert( !tp || oop_offset_is_sane(tp), "" );
duke@0 2665 }
duke@0 2666 #endif
duke@0 2667 break;
duke@0 2668 }
duke@0 2669
duke@0 2670 case Op_AddP: { // Assert sane base pointers
kvn@182 2671 Node *addp = n->in(AddPNode::Address);
duke@0 2672 assert( !addp->is_AddP() ||
duke@0 2673 addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
duke@0 2674 addp->in(AddPNode::Base) == n->in(AddPNode::Base),
duke@0 2675 "Base pointers must match" );
kvn@182 2676 #ifdef _LP64
ehelin@5259 2677 if ((UseCompressedOops || UseCompressedClassPointers) &&
kvn@182 2678 addp->Opcode() == Op_ConP &&
kvn@182 2679 addp == n->in(AddPNode::Base) &&
kvn@182 2680 n->in(AddPNode::Offset)->is_Con()) {
kvn@182 2681 // Use addressing with narrow klass to load with offset on x86.
kvn@182 2682         // On sparc, loading a 32-bit constant and decoding it takes fewer
kvn@182 2683         // instructions (4) than loading a 64-bit constant (7).
kvn@182 2684 // Do this transformation here since IGVN will convert ConN back to ConP.
kvn@182 2685 const Type* t = addp->bottom_type();
roland@3724 2686 if (t->isa_oopptr() || t->isa_klassptr()) {
kvn@182 2687 Node* nn = NULL;
kvn@182 2688
roland@3724 2689 int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;
roland@3724 2690
kvn@182 2691 // Look for existing ConN node of the same exact type.
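          // (Constant nodes are pinned to the root node, so any existing
          // ConN/ConNKlass of this type shows up among root's outputs.)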
bharadwaj@3880 2692 Node* r = root();
kvn@182 2693 uint cnt = r->outcnt();
kvn@182 2694 for (uint i = 0; i < cnt; i++) {
kvn@182 2695 Node* m = r->raw_out(i);
roland@3724 2696           if (m != NULL && m->Opcode() == op &&
kvn@221 2697 m->bottom_type()->make_ptr() == t) {
kvn@182 2698 nn = m;
kvn@182 2699 break;
kvn@182 2700 }
kvn@182 2701 }
kvn@182 2702 if (nn != NULL) {
kvn@182 2703 // Decode a narrow oop to match address
kvn@182 2704 // [R12 + narrow_oop_reg<<3 + offset]
roland@3724 2705 if (t->isa_oopptr()) {
bharadwaj@3880 2706 nn = new (this) DecodeNNode(nn, t);
roland@3724 2707 } else {
bharadwaj@3880 2708 nn = new (this) DecodeNKlassNode(nn, t);
roland@3724 2709 }
kvn@182 2710 n->set_req(AddPNode::Base, nn);
kvn@182 2711 n->set_req(AddPNode::Address, nn);
kvn@182 2712 if (addp->outcnt() == 0) {
bharadwaj@3880 2713 addp->disconnect_inputs(NULL, this);
kvn@182 2714 }
kvn@182 2715 }
kvn@182 2716 }
kvn@182 2717 }
kvn@182 2718 #endif
duke@0 2719 break;
duke@0 2720 }
duke@0 2721
kvn@164 2722 #ifdef _LP64
kvn@368 2723 case Op_CastPP:
kvn@1495 2724 if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
kvn@368 2725 Node* in1 = n->in(1);
kvn@368 2726 const Type* t = n->bottom_type();
kvn@368 2727 Node* new_in1 = in1->clone();
kvn@368 2728 new_in1->as_DecodeN()->set_type(t);
kvn@368 2729
kvn@1495 2730 if (!Matcher::narrow_oop_use_complex_address()) {
kvn@368 2731 //
kvn@368 2732 // x86, ARM and friends can handle 2 adds in addressing mode
kvn@368 2733 // and Matcher can fold a DecodeN node into address by using
kvn@368 2734 // a narrow oop directly and do implicit NULL check in address:
kvn@368 2735 //
kvn@368 2736 // [R12 + narrow_oop_reg<<3 + offset]
kvn@368 2737 // NullCheck narrow_oop_reg
kvn@368 2738 //
kvn@368 2739 // On other platforms (Sparc) we have to keep new DecodeN node and
kvn@368 2740 // use it to do implicit NULL check in address:
kvn@368 2741 //
kvn@368 2742 // decode_not_null narrow_oop_reg, base_reg
kvn@368 2743 // [base_reg + offset]
kvn@368 2744 // NullCheck base_reg
kvn@368 2745 //
twisti@605 2746         // Pin the new DecodeN node to the non-null path on these platforms (Sparc)
kvn@368 2747         // to record which NULL check the new DecodeN node corresponds to,
kvn@368 2748         // so it can be used as the value in implicit_null_check().
kvn@368 2749 //
kvn@368 2750 new_in1->set_req(0, n->in(0));
kvn@368 2751 }
kvn@368 2752
bharadwaj@3880 2753 n->subsume_by(new_in1, this);
kvn@368 2754 if (in1->outcnt() == 0) {
bharadwaj@3880 2755 in1->disconnect_inputs(NULL, this);
kvn@368 2756 }
kvn@368 2757 }
kvn@368 2758 break;
kvn@368 2759
kvn@164 2760 case Op_CmpP:
kvn@168 2761 // Do this transformation here to preserve CmpPNode::sub() and
kvn@168 2762 // other TypePtr related Ideal optimizations (for example, ptr nullness).
roland@3724 2763 if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
kvn@331 2764 Node* in1 = n->in(1);
kvn@331 2765 Node* in2 = n->in(2);
roland@3724 2766 if (!in1->is_DecodeNarrowPtr()) {
kvn@331 2767 in2 = in1;
kvn@331 2768 in1 = n->in(2);
kvn@331 2769 }
roland@3724 2770 assert(in1->is_DecodeNarrowPtr(), "sanity");
kvn@331 2771
kvn@331 2772 Node* new_in2 = NULL;
roland@3724 2773 if (in2->is_DecodeNarrowPtr()) {
roland@3724 2774 assert(in2->Opcode() == in1->Opcode(), "must be same node type");
kvn@331 2775 new_in2 = in2->in(1);
kvn@331 2776 } else if (in2->Opcode() == Op_ConP) {
kvn@331 2777 const Type* t = in2->bottom_type();
kvn@1495 2778 if (t == TypePtr::NULL_PTR) {
roland@3724 2779 assert(in1->is_DecodeN(), "compare klass to null?");
kvn@1495 2780 // Don't convert CmpP null check into CmpN if compressed
kvn@1495 2781 // oops implicit null check is not generated.
kvn@1495 2782           // This allows a normal oop implicit null check to be generated.
kvn@1495 2783 if (Matcher::gen_narrow_oop_implicit_null_checks())
bharadwaj@3880 2784 new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
kvn@368 2785 //
kvn@368 2786 // This transformation together with CastPP transformation above
kvn@368 2787           // will generate code for implicit NULL checks for compressed oops.
kvn@368 2788 //
kvn@368 2789 // The original code after Optimize()
kvn@368 2790 //
kvn@368 2791 // LoadN memory, narrow_oop_reg
kvn@368 2792 // decode narrow_oop_reg, base_reg
kvn@368 2793 // CmpP base_reg, NULL
kvn@368 2794 // CastPP base_reg // NotNull
kvn@368 2795 // Load [base_reg + offset], val_reg
kvn@368 2796 //
kvn@368 2797 // after these transformations will be
kvn@368 2798 //
kvn@368 2799 // LoadN memory, narrow_oop_reg
kvn@368 2800 // CmpN narrow_oop_reg, NULL
kvn@368 2801 // decode_not_null narrow_oop_reg, base_reg
kvn@368 2802 // Load [base_reg + offset], val_reg
kvn@368 2803 //
kvn@368 2804 // and the uncommon path (== NULL) will use narrow_oop_reg directly
kvn@368 2805 // since narrow oops can be used in debug info now (see the code in
kvn@368 2806 // final_graph_reshaping_walk()).
kvn@368 2807 //
kvn@368 2808 // At the end the code will be matched to
kvn@368 2809 // on x86:
kvn@368 2810 //
kvn@368 2811 // Load_narrow_oop memory, narrow_oop_reg
kvn@368 2812 // Load [R12 + narrow_oop_reg<<3 + offset], val_reg
kvn@368 2813 // NullCheck narrow_oop_reg
kvn@368 2814 //
kvn@368 2815 // and on sparc:
kvn@368 2816 //
kvn@368 2817 // Load_narrow_oop memory, narrow_oop_reg
kvn@368 2818 // decode_not_null narrow_oop_reg, base_reg
kvn@368 2819 // Load [base_reg + offset], val_reg
kvn@368 2820 // NullCheck base_reg
kvn@368 2821 //
kvn@164 2822 } else if (t->isa_oopptr()) {
bharadwaj@3880 2823 new_in2 = ConNode::make(this, t->make_narrowoop());
roland@3724 2824 } else if (t->isa_klassptr()) {
bharadwaj@3880 2825 new_in2 = ConNode::make(this, t->make_narrowklass());
kvn@164 2826 }
kvn@164 2827 }
kvn@331 2828 if (new_in2 != NULL) {
bharadwaj@3880 2829 Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2);
bharadwaj@3880 2830 n->subsume_by(cmpN, this);
kvn@331 2831 if (in1->outcnt() == 0) {
bharadwaj@3880 2832 in1->disconnect_inputs(NULL, this);
kvn@331 2833 }
kvn@331 2834 if (in2->outcnt() == 0) {
bharadwaj@3880 2835 in2->disconnect_inputs(NULL, this);
kvn@331 2836 }
kvn@164 2837 }
kvn@164 2838 }
kvn@293 2839 break;
kvn@368 2840
kvn@368 2841 case Op_DecodeN:
roland@3724 2842 case Op_DecodeNKlass:
roland@3724 2843 assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
kvn@1495 2844     // DecodeN could be pinned when it can't be folded into
kvn@492 2845 // an address expression, see the code for Op_CastPP above.
roland@3724 2846 assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
kvn@368 2847 break;
kvn@368 2848
roland@3724 2849 case Op_EncodeP:
roland@3724 2850 case Op_EncodePKlass: {
kvn@368 2851 Node* in1 = n->in(1);
roland@3724 2852 if (in1->is_DecodeNarrowPtr()) {
bharadwaj@3880 2853 n->subsume_by(in1->in(1), this);
kvn@368 2854 } else if (in1->Opcode() == Op_ConP) {
kvn@368 2855 const Type* t = in1->bottom_type();
kvn@368 2856 if (t == TypePtr::NULL_PTR) {
roland@3724 2857 assert(t->isa_oopptr(), "null klass?");
bharadwaj@3880 2858 n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
kvn@368 2859 } else if (t->isa_oopptr()) {
bharadwaj@3880 2860 n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
roland@3724 2861 } else if (t->isa_klassptr()) {
bharadwaj@3880 2862 n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
kvn@368 2863 }
kvn@368 2864 }
kvn@368 2865 if (in1->outcnt() == 0) {
bharadwaj@3880 2866 in1->disconnect_inputs(NULL, this);
kvn@368 2867 }
kvn@368 2868 break;
kvn@368 2869 }
kvn@368 2870
never@1080 2871 case Op_Proj: {
never@1080 2872 if (OptimizeStringConcat) {
never@1080 2873 ProjNode* p = n->as_Proj();
never@1080 2874 if (p->_is_io_use) {
never@1080 2875 // Separate projections were used for the exception path which
never@1080 2876 // are normally removed by a late inline. If it wasn't inlined
never@1080 2877 // then they will hang around and should just be replaced with
never@1080 2878 // the original one.
never@1080 2879 Node* proj = NULL;
never@1080 2880 // Replace with just one
never@1080 2881 for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
never@1080 2882 Node *use = i.get();
never@1080 2883 if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
never@1080 2884 proj = use;
never@1080 2885 break;
never@1080 2886 }
never@1080 2887 }
kvn@2961 2888 assert(proj != NULL, "must be found");
bharadwaj@3880 2889 p->subsume_by(proj, this);
never@1080 2890 }
never@1080 2891 }
never@1080 2892 break;
never@1080 2893 }
never@1080 2894
kvn@368 2895 case Op_Phi:
roland@3724 2896 if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
kvn@368 2897       // The EncodeP optimization may create a Phi with the same edges
kvn@368 2898       // for all paths, which is not handled well by the register allocator.
kvn@368 2899 Node* unique_in = n->in(1);
kvn@368 2900 assert(unique_in != NULL, "");
kvn@368 2901 uint cnt = n->req();
kvn@368 2902 for (uint i = 2; i < cnt; i++) {
kvn@368 2903 Node* m = n->in(i);
kvn@368 2904 assert(m != NULL, "");
kvn@368 2905 if (unique_in != m)
kvn@368 2906 unique_in = NULL;
kvn@368 2907 }
kvn@368 2908 if (unique_in != NULL) {
bharadwaj@3880 2909 n->subsume_by(unique_in, this);
kvn@368 2910 }
kvn@368 2911 }
kvn@368 2912 break;
kvn@368 2913
kvn@164 2914 #endif
kvn@164 2915
duke@0 2916 case Op_ModI:
duke@0 2917 if (UseDivMod) {
duke@0 2918 // Check if a%b and a/b both exist
duke@0 2919 Node* d = n->find_similar(Op_DivI);
duke@0 2920 if (d) {
duke@0 2921 // Replace them with a fused divmod if supported
duke@0 2922 if (Matcher::has_match_rule(Op_DivModI)) {
bharadwaj@3880 2923 DivModINode* divmod = DivModINode::make(this, n);
bharadwaj@3880 2924 d->subsume_by(divmod->div_proj(), this);
bharadwaj@3880 2925 n->subsume_by(divmod->mod_proj(), this);
duke@0 2926 } else {
duke@0 2927 // replace a%b with a-((a/b)*b)
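          // Worked example: 17 % 5 == 17 - (17/5)*5 == 17 - 15 == 2, reusing
          // the already existing DivI node d instead of emitting a ModI.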
bharadwaj@3880 2928 Node* mult = new (this) MulINode(d, d->in(2));
bharadwaj@3880 2929 Node* sub = new (this) SubINode(d->in(1), mult);
bharadwaj@3880 2930 n->subsume_by(sub, this);
duke@0 2931 }
duke@0 2932 }
duke@0 2933 }
duke@0 2934 break;
duke@0 2935
duke@0 2936 case Op_ModL:
duke@0 2937 if (UseDivMod) {
duke@0 2938 // Check if a%b and a/b both exist
duke@0 2939 Node* d = n->find_similar(Op_DivL);
duke@0 2940 if (d) {
duke@0 2941 // Replace them with a fused divmod if supported
duke@0 2942 if (Matcher::has_match_rule(Op_DivModL)) {
bharadwaj@3880 2943 DivModLNode* divmod = DivModLNode::make(this, n);
bharadwaj@3880 2944 d->subsume_by(divmod->div_proj(), this);
bharadwaj@3880 2945 n->subsume_by(divmod->mod_proj(), this);
duke@0 2946 } else {
duke@0 2947 // replace a%b with a-((a/b)*b)
bharadwaj@3880 2948 Node* mult = new (this) MulLNode(d, d->in(2));
bharadwaj@3880 2949 Node* sub = new (this) SubLNode(d->in(1), mult);
bharadwaj@3880 2950 n->subsume_by(sub, this);
duke@0 2951 }
duke@0 2952 }
duke@0 2953 }
duke@0 2954 break;
duke@0 2955
kvn@3447 2956 case Op_LoadVector:
kvn@3447 2957 case Op_StoreVector:
duke@0 2958 break;
duke@0 2959
duke@0 2960 case Op_PackB:
duke@0 2961 case Op_PackS:
duke@0 2962 case Op_PackI:
duke@0 2963 case Op_PackF:
duke@0 2964 case Op_PackL:
duke@0 2965 case Op_PackD:
duke@0 2966 if (n->req()-1 > 2) {
duke@0 2967 // Replace many operand PackNodes with a binary tree for matching
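      // For example, a four-operand Pack(a, b, c, d) becomes (roughly) two
      // two-operand packs whose results are packed together, so the matcher
      // only ever sees pack nodes with at most two operands.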
duke@0 2968 PackNode* p = (PackNode*) n;
bharadwaj@3880 2969 Node* btp = p->binary_tree_pack(this, 1, n->req());
bharadwaj@3880 2970 n->subsume_by(btp, this);
duke@0 2971 }
duke@0 2972 break;
kvn@859 2973 case Op_Loop:
kvn@859 2974 case Op_CountedLoop:
kvn@859 2975 if (n->as_Loop()->is_inner_loop()) {
kvn@859 2976 frc.inc_inner_loop_count();
kvn@859 2977 }
kvn@859 2978 break;
roland@2248 2979 case Op_LShiftI:
roland@2248 2980 case Op_RShiftI:
roland@2248 2981 case Op_URShiftI:
roland@2248 2982 case Op_LShiftL:
roland@2248 2983 case Op_RShiftL:
roland@2248 2984 case Op_URShiftL:
roland@2248 2985 if (Matcher::need_masked_shift_count) {
roland@2248 2986 // The cpu's shift instructions don't restrict the count to the
roland@2248 2987 // lower 5/6 bits. We need to do the masking ourselves.
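      // For example, for a 32-bit shift a constant count of 37 is rewritten to
      // 37 & 31 == 5, and a variable count whose type is not provably in range
      // gets an explicit AndI with 31 (63 for long shifts).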
roland@2248 2988 Node* in2 = n->in(2);
roland@2248 2989 juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
roland@2248 2990 const TypeInt* t = in2->find_int_type();
roland@2248 2991 if (t != NULL && t->is_con()) {
roland@2248 2992 juint shift = t->get_con();
roland@2248 2993 if (shift > mask) { // Unsigned cmp
bharadwaj@3880 2994 n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
roland@2248 2995 }
roland@2248 2996 } else {
roland@2248 2997 if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
bharadwaj@3880 2998 Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
roland@2248 2999 n->set_req(2, shift);
roland@2248 3000 }
roland@2248 3001 }
roland@2248 3002 if (in2->outcnt() == 0) { // Remove dead node
bharadwaj@3880 3003 in2->disconnect_inputs(NULL, this);
roland@2248 3004 }
roland@2248 3005 }
roland@2248 3006 break;
roland@4259 3007 case Op_MemBarStoreStore:
kvn@4675 3008 case Op_MemBarRelease:
roland@4259 3009 // Break the link with AllocateNode: it is no longer useful and
roland@4259 3010 // confuses register allocation.
roland@4259 3011 if (n->req() > MemBarNode::Precedent) {
roland@4259 3012 n->set_req(MemBarNode::Precedent, top());
roland@4259 3013 }
roland@4259 3014 break;
rbackman@5492 3015 // Must set a control edge on all nodes that produce a FlagsProj
rbackman@5492 3016 // so they can't escape the block that consumes the flags.
rbackman@5492 3017   // Must also set the non-throwing branch as the control
rbackman@5492 3018   // for all nodes that depend on the result, unless the node
rbackman@5492 3019   // already has a control that isn't the control of the
rbackman@5492 3020   // flag producer.
rbackman@5492 3021 case Op_FlagsProj:
rbackman@5492 3022 {
rbackman@5492 3023 MathExactNode* math = (MathExactNode*) n->in(0);
rbackman@5492 3024 Node* ctrl = math->control_node();
rbackman@5492 3025 Node* non_throwing = math->non_throwing_branch();
rbackman@5492 3026 math->set_req(0, ctrl);
rbackman@5492 3027
rbackman@5492 3028 Node* result = math->result_node();
rbackman@5492 3029 if (result != NULL) {
rbackman@5492 3030 for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
rbackman@5492 3031 Node* out = result->fast_out(j);
rbackman@5553 3032 // Phi nodes shouldn't be moved. They would only match below if they
rbackman@5553 3033 // had the same control as the MathExactNode. The only time that
rbackman@5553 3034         // would happen is if the Phi is also an input to the MathExact.
rbackman@5633 3035 //
rbackman@5633 3036 // Cmp nodes shouldn't have control set at all.
rbackman@5633 3037 if (out->is_Phi() ||
rbackman@5633 3038 out->is_Cmp()) {
rbackman@5633 3039 continue;
rbackman@5633 3040 }
rbackman@5633 3041
rbackman@5633 3042 if (out->in(0) == NULL) {
rbackman@5633 3043 out->set_req(0, non_throwing);
rbackman@5633 3044 } else if (out->in(0) == ctrl) {
rbackman@5633 3045 out->set_req(0, non_throwing);
rbackman@5492 3046 }
rbackman@5492 3047 }
rbackman@5492 3048 }
rbackman@5492 3049 }
rbackman@5492 3050 break;
duke@0 3051 default:
duke@0 3052 assert( !n->is_Call(), "" );
duke@0 3053 assert( !n->is_Mem(), "" );
duke@0 3054 break;
duke@0 3055 }
never@127 3056
never@127 3057 // Collect CFG split points
never@127 3058 if (n->is_MultiBranch())
kvn@859 3059 frc._tests.push(n);
duke@0 3060 }
duke@0 3061
duke@0 3062 //------------------------------final_graph_reshaping_walk---------------------
duke@0 3063 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
duke@0 3064 // requires that the walk visits a node's inputs before visiting the node.
bharadwaj@3880 3065 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
kvn@331 3066 ResourceArea *area = Thread::current()->resource_area();
kvn@331 3067 Unique_Node_List sfpt(area);
kvn@331 3068
kvn@859 3069 frc._visited.set(root->_idx); // first, mark node as visited
duke@0 3070 uint cnt = root->req();
duke@0 3071 Node *n = root;
duke@0 3072 uint i = 0;
duke@0 3073 while (true) {
duke@0 3074 if (i < cnt) {
duke@0 3075 // Place all non-visited non-null inputs onto stack
duke@0 3076 Node* m = n->in(i);
duke@0 3077 ++i;
kvn@859 3078 if (m != NULL && !frc._visited.test_set(m->_idx)) {
kvn@331 3079 if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
kvn@331 3080 sfpt.push(m);
duke@0 3081 cnt = m->req();
duke@0 3082 nstack.push(n, i); // put on stack parent and next input's index
duke@0 3083 n = m;
duke@0 3084 i = 0;
duke@0 3085 }
duke@0 3086 } else {
duke@0 3087 // Now do post-visit work
kvn@859 3088 final_graph_reshaping_impl( n, frc );
duke@0 3089 if (nstack.is_empty())
duke@0 3090 break; // finished
duke@0 3091 n = nstack.node(); // Get node from stack
duke@0 3092 cnt = n->req();
duke@0 3093 i = nstack.index();
duke@0 3094 nstack.pop(); // Shift to the next node on stack
duke@0 3095 }
duke@0 3096 }
kvn@331 3097
kvn@1495 3098 // Skip next transformation if compressed oops are not used.
roland@3724 3099 if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
ehelin@5259 3100 (!UseCompressedOops && !UseCompressedClassPointers))
kvn@1495 3101 return;
kvn@1495 3102
roland@3724 3103 // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
kvn@331 3104   // It could be done for uncommon traps or any safepoints/calls
roland@3724 3105 // if the DecodeN/DecodeNKlass node is referenced only in a debug info.
kvn@331 3106 while (sfpt.size() > 0) {
kvn@331 3107 n = sfpt.pop();
kvn@331 3108 JVMState *jvms = n->as_SafePoint()->jvms();
kvn@331 3109 assert(jvms != NULL, "sanity");
kvn@331 3110 int start = jvms->debug_start();
kvn@331 3111 int end = n->req();
kvn@331 3112 bool is_uncommon = (n->is_CallStaticJava() &&
kvn@331 3113 n->as_CallStaticJava()->uncommon_trap_request() != 0);
kvn@331 3114 for (int j = start; j < end; j++) {
kvn@331 3115 Node* in = n->in(j);
roland@3724 3116 if (in->is_DecodeNarrowPtr()) {
kvn@331 3117 bool safe_to_skip = true;
kvn@331 3118 if (!is_uncommon ) {
kvn@331 3119 // Is it safe to skip?
kvn@331 3120 for (uint i = 0; i < in->outcnt(); i++) {
kvn@331 3121 Node* u = in->raw_out(i);
kvn@331 3122 if (!u->is_SafePoint() ||
kvn@331 3123               (u->is_Call() && u->as_Call()->has_non_debug_use(n))) {
kvn@331 3124 safe_to_skip = false;
kvn@331 3125 }
kvn@331 3126 }
kvn@331 3127 }
kvn@331 3128 if (safe_to_skip) {
kvn@331 3129 n->set_req(j, in->in(1));
kvn@331 3130 }
kvn@331 3131 if (in->outcnt() == 0) {
bharadwaj@3880 3132 in->disconnect_inputs(NULL, this);
kvn@331 3133 }
kvn@331 3134 }
kvn@331 3135 }
kvn@331 3136 }
duke@0 3137 }
duke@0 3138
duke@0 3139 //------------------------------final_graph_reshaping--------------------------
duke@0 3140 // Final Graph Reshaping.
duke@0 3141 //
duke@0 3142 // (1) Clone simple inputs to uncommon calls, so they can be scheduled late
duke@0 3143 // and not commoned up and forced early. Must come after regular
duke@0 3144 // optimizations to avoid GVN undoing the cloning. Clone constant
duke@0 3145 // inputs to Loop Phis; these will be split by the allocator anyways.
duke@0 3146 // Remove Opaque nodes.
duke@0 3147 // (2) Move last-uses by commutative operations to the left input to encourage
duke@0 3148 // Intel update-in-place two-address operations and better register usage
duke@0 3149 // on RISCs. Must come after regular optimizations to avoid GVN Ideal
duke@0 3150 // calls canonicalizing them back.
duke@0 3151 // (3) Count the number of double-precision FP ops, single-precision FP ops
duke@0 3152 // and call sites. On Intel, we can get correct rounding either by
duke@0 3153 // forcing singles to memory (requires extra stores and loads after each
duke@0 3154 // FP bytecode) or we can set a rounding mode bit (requires setting and
duke@0 3155 // clearing the mode bit around call sites). The mode bit is only used
duke@0 3156 // if the relative frequency of single FP ops to calls is low enough.
duke@0 3157 // This is a key transform for SPEC mpeg_audio.
duke@0 3158 // (4) Detect infinite loops; blobs of code reachable from above but not
duke@0 3159 // below. Several of the Code_Gen algorithms fail on such code shapes,
duke@0 3160 // so we simply bail out. Happens a lot in ZKM.jar, but also happens
duke@0 3161 // from time to time in other codes (such as -Xcomp finalizer loops, etc).
duke@0 3162 // Detection is by looking for IfNodes where only 1 projection is
duke@0 3163 // reachable from below or CatchNodes missing some targets.
duke@0 3164 // (5) Assert for insane oop offsets in debug mode.
duke@0 3165
duke@0 3166 bool Compile::final_graph_reshaping() {
duke@0 3167 // an infinite loop may have been eliminated by the optimizer,
duke@0 3168 // in which case the graph will be empty.
duke@0 3169 if (root()->req() == 1) {
duke@0 3170 record_method_not_compilable("trivial infinite loop");
duke@0 3171 return true;
duke@0 3172 }
duke@0 3173
roland@4154 3174 // Expensive nodes have their control input set to prevent the GVN
roland@4154 3175 // from freely commoning them. There's no GVN beyond this point so
roland@4154 3176 // no need to keep the control input. We want the expensive nodes to
roland@4154 3177 // be freely moved to the least frequent code path by gcm.
roland@4154 3178 assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
roland@4154 3179 for (int i = 0; i < expensive_count(); i++) {
roland@4154 3180 _expensive_nodes->at(i)->set_req(0, NULL);
roland@4154 3181 }
roland@4154 3182
kvn@859 3183 Final_Reshape_Counts frc;
duke@0 3184
duke@0 3185 // Visit everybody reachable!
duke@0 3186 // Allocate stack of size C->unique()/2 to avoid frequent realloc
duke@0 3187 Node_Stack nstack(unique() >> 1);
kvn@859 3188 final_graph_reshaping_walk(nstack, root(), frc);
duke@0 3189
duke@0 3190 // Check for unreachable (from below) code (i.e., infinite loops).
kvn@859 3191 for( uint i = 0; i < frc._tests.size(); i++ ) {
kvn@859 3192 MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
never@127 3193 // Get number of CFG targets.
duke@0 3194 // Note that PCTables include exception targets after calls.
never@127 3195 uint required_outcnt = n->required_outcnt();
never@127 3196 if (n->outcnt() != required_outcnt) {
duke@0 3197 // Check for a few special cases. Rethrow Nodes never take the
duke@0 3198 // 'fall-thru' path, so expected kids is 1 less.
duke@0 3199 if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
duke@0 3200 if (n->in(0)->in(0)->is_Call()) {
duke@0 3201 CallNode *call = n->in(0)->in(0)->as_Call();
duke@0 3202 if (call->entry_point() == OptoRuntime::rethrow_stub()) {
never@127 3203 required_outcnt--; // Rethrow always has 1 less kid
duke@0 3204 } else if (call->req() > TypeFunc::Parms &&
duke@0 3205 call->is_CallDynamicJava()) {
duke@0 3206 // Check for null receiver. In such case, the optimizer has
duke@0 3207 // detected that the virtual call will always result in a null
duke@0 3208 // pointer exception. The fall-through projection of this CatchNode
duke@0 3209 // will not be populated.
duke@0 3210 Node *arg0 = call->in(TypeFunc::Parms);
duke@0 3211 if (arg0->is_Type() &&
duke@0 3212 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
never@127 3213 required_outcnt--;
duke@0 3214 }
duke@0 3215 } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
duke@0 3216 call->req() > TypeFunc::Parms+1 &&
duke@0 3217 call->is_CallStaticJava()) {
duke@0 3218 // Check for negative array length. In such case, the optimizer has
duke@0 3219 // detected that the allocation attempt will always result in an
duke@0 3220             // exception. There is no fall-through projection of this CatchNode.
duke@0 3221 Node *arg1 = call->in(TypeFunc::Parms+1);
duke@0 3222 if (arg1->is_Type() &&
duke@0 3223 arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
never@127 3224 required_outcnt--;
duke@0 3225 }
duke@0 3226 }
duke@0 3227 }
duke@0 3228 }
never@127 3229 // Recheck with a better notion of 'required_outcnt'
never@127 3230 if (n->outcnt() != required_outcnt) {
duke@0 3231 record_method_not_compilable("malformed control flow");
duke@0 3232 return true; // Not all targets reachable!
duke@0 3233 }
duke@0 3234 }
duke@0 3235 // Check that I actually visited all kids. Unreached kids
duke@0 3236 // must be infinite loops.
duke@0 3237 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
kvn@859 3238 if (!frc._visited.test(n->fast_out(j)->_idx)) {
duke@0 3239 record_method_not_compilable("infinite loop");
duke@0 3240 return true; // Found unvisited kid; must be unreach
duke@0 3241 }
duke@0 3242 }
duke@0 3243
duke@0 3244   // If the original bytecodes contained a mixture of floats and doubles,
duke@0 3245   // check if the optimizer has made it homogeneous, item (3).
never@929 3246 if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
kvn@859 3247 frc.get_float_count() > 32 &&
kvn@859 3248 frc.get_double_count() == 0 &&
kvn@859 3249 (10 * frc.get_call_count() < frc.get_float_count()) ) {
duke@0 3250 set_24_bit_selection_and_mode( false, true );
duke@0 3251 }
duke@0 3252
kvn@859 3253 set_java_calls(frc.get_java_call_count());
kvn@859 3254 set_inner_loops(frc.get_inner_loop_count());
duke@0 3255
duke@0 3256 // No infinite loops, no reason to bail out.
duke@0 3257 return false;
duke@0 3258 }
duke@0 3259
duke@0 3260 //-----------------------------too_many_traps----------------------------------
duke@0 3261 // Report if there are too many traps at the current method and bci.
duke@0 3262 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
duke@0 3263 bool Compile::too_many_traps(ciMethod* method,
duke@0 3264 int bci,
duke@0 3265 Deoptimization::DeoptReason reason) {
duke@0 3266 ciMethodData* md = method->method_data();
duke@0 3267 if (md->is_empty()) {
duke@0 3268 // Assume the trap has not occurred, or that it occurred only
duke@0 3269 // because of a transient condition during start-up in the interpreter.
duke@0 3270 return false;
duke@0 3271 }
duke@0 3272 if (md->has_trap_at(bci, reason) != 0) {
duke@0 3273 // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
duke@0 3274 // Also, if there are multiple reasons, or if there is no per-BCI record,
duke@0 3275 // assume the worst.
duke@0 3276 if (log())
duke@0 3277 log()->elem("observe trap='%s' count='%d'",
duke@0 3278 Deoptimization::trap_reason_name(reason),
duke@0 3279 md->trap_count(reason));
duke@0 3280 return true;
duke@0 3281 } else {
duke@0 3282 // Ignore method/bci and see if there have been too many globally.
duke@0 3283 return too_many_traps(reason, md);
duke@0 3284 }
duke@0 3285 }
duke@0 3286
duke@0 3287 // Less-accurate variant which does not require a method and bci.
duke@0 3288 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
duke@0 3289 ciMethodData* logmd) {
duke@0 3290 if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
duke@0 3291 // Too many traps globally.
duke@0 3292 // Note that we use cumulative trap_count, not just md->trap_count.
duke@0 3293 if (log()) {
duke@0 3294 int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
duke@0 3295 log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
duke@0 3296 Deoptimization::trap_reason_name(reason),
duke@0 3297 mcount, trap_count(reason));
duke@0 3298 }
duke@0 3299 return true;
duke@0 3300 } else {
duke@0 3301 // The coast is clear.
duke@0 3302 return false;
duke@0 3303 }
duke@0 3304 }
duke@0 3305
duke@0 3306 //--------------------------too_many_recompiles--------------------------------
duke@0 3307 // Report if there are too many recompiles at the current method and bci.
duke@0 3308 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
duke@0 3309 // Is not eager to return true, since this will cause the compiler to use
duke@0 3310 // Action_none for a trap point, to avoid too many recompilations.
duke@0 3311 bool Compile::too_many_recompiles(ciMethod* method,
duke@0 3312 int bci,
duke@0 3313 Deoptimization::DeoptReason reason) {
duke@0 3314 ciMethodData* md = method->method_data();
duke@0 3315 if (md->is_empty()) {
duke@0 3316 // Assume the trap has not occurred, or that it occurred only
duke@0 3317 // because of a transient condition during start-up in the interpreter.
duke@0 3318 return false;
duke@0 3319 }
duke@0 3320 // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
duke@0 3321 uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
duke@0 3322 uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero
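  // (For example, if PerBytecodeRecompilationCutoff were 200 and
  //  PerMethodRecompilationCutoff were 400, bc_cutoff would be 25 and
  //  m_cutoff would be 201.)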
duke@0 3323 Deoptimization::DeoptReason per_bc_reason
duke@0 3324 = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
duke@0 3325 if ((per_bc_reason == Deoptimization::Reason_none
duke@0 3326 || md->has_trap_at(bci, reason) != 0)
duke@0 3327 // The trap frequency measure we care about is the recompile count:
duke@0 3328 && md->trap_recompiled_at(bci)
duke@0 3329 && md->overflow_recompile_count() >= bc_cutoff) {
duke@0 3330 // Do not emit a trap here if it has already caused recompilations.
duke@0 3331 // Also, if there are multiple reasons, or if there is no per-BCI record,
duke@0 3332 // assume the worst.
duke@0 3333 if (log())
duke@0 3334 log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
duke@0 3335 Deoptimization::trap_reason_name(reason),
duke@0 3336 md->trap_count(reason),
duke@0 3337 md->overflow_recompile_count());
duke@0 3338 return true;
duke@0 3339 } else if (trap_count(reason) != 0
duke@0 3340 && decompile_count() >= m_cutoff) {
duke@0 3341 // Too many recompiles globally, and we have seen this sort of trap.
duke@0 3342 // Use cumulative decompile_count, not just md->decompile_count.
duke@0 3343 if (log())
duke@0 3344 log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
duke@0 3345 Deoptimization::trap_reason_name(reason),
duke@0 3346 md->trap_count(reason), trap_count(reason),
duke@0 3347 md->decompile_count(), decompile_count());
duke@0 3348 return true;
duke@0 3349 } else {
duke@0 3350 // The coast is clear.
duke@0 3351 return false;
duke@0 3352 }
duke@0 3353 }
duke@0 3354
duke@0 3355
duke@0 3356 #ifndef PRODUCT
duke@0 3357 //------------------------------verify_graph_edges---------------------------
duke@0 3358 // Walk the Graph and verify that there is a one-to-one correspondence
duke@0 3359 // between Use-Def edges and Def-Use edges in the graph.
duke@0 3360 void Compile::verify_graph_edges(bool no_dead_code) {
duke@0 3361 if (VerifyGraphEdges) {
duke@0 3362 ResourceArea *area = Thread::current()->resource_area();
duke@0 3363 Unique_Node_List visited(area);
duke@0 3364 // Call recursive graph walk to check edges
duke@0 3365 _root->verify_edges(visited);
duke@0 3366 if (no_dead_code) {
duke@0 3367 // Now make sure that no visited node is used by an unvisited node.
duke@0 3368       int dead_nodes = 0;  // incremented below, so use an int rather than a bool
duke@0 3369 Unique_Node_List checked(area);
duke@0 3370 while (visited.size() > 0) {
duke@0 3371 Node* n = visited.pop();
duke@0 3372 checked.push(n);
duke@0 3373 for (uint i = 0; i < n->outcnt(); i++) {
duke@0 3374 Node* use = n->raw_out(i);
duke@0 3375 if (checked.member(use)) continue; // already checked
duke@0 3376 if (visited.member(use)) continue; // already in the graph
duke@0 3377 if (use->is_Con()) continue; // a dead ConNode is OK
duke@0 3378 // At this point, we have found a dead node which is DU-reachable.
duke@0 3379 if (dead_nodes++ == 0)
duke@0 3380 tty->print_cr("*** Dead nodes reachable via DU edges:");
duke@0 3381 use->dump(2);
duke@0 3382 tty->print_cr("---");
duke@0 3383 checked.push(use); // No repeats; pretend it is now checked.
duke@0 3384 }
duke@0 3385 }
duke@0 3386 assert(dead_nodes == 0, "using nodes must be reachable from root");
duke@0 3387 }
duke@0 3388 }
duke@0 3389 }
iveresov@5635 3390
iveresov@5635 3391 // Verify GC barriers consistency
iveresov@5635 3392 // Currently supported:
iveresov@5635 3393 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
iveresov@5635 3394 void Compile::verify_barriers() {
iveresov@5635 3395 if (UseG1GC) {
iveresov@5635 3396 // Verify G1 pre-barriers
iveresov@5635 3397 const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
iveresov@5635 3398
iveresov@5635 3399 ResourceArea *area = Thread::current()->resource_area();
iveresov@5635 3400 Unique_Node_List visited(area);
iveresov@5635 3401 Node_List worklist(area);
iveresov@5635 3402 // We're going to walk control flow backwards starting from the Root
iveresov@5635 3403 worklist.push(_root);
iveresov@5635 3404 while (worklist.size() > 0) {
iveresov@5635 3405 Node* x = worklist.pop();
iveresov@5635 3406 if (x == NULL || x == top()) continue;
iveresov@5635 3407 if (visited.member(x)) {
iveresov@5635 3408 continue;
iveresov@5635 3409 } else {
iveresov@5635 3410 visited.push(x);
iveresov@5635 3411 }
iveresov@5635 3412
iveresov@5635 3413 if (x->is_Region()) {
iveresov@5635 3414 for (uint i = 1; i < x->req(); i++) {
iveresov@5635 3415 worklist.push(x->in(i));
iveresov@5635 3416 }
iveresov@5635 3417 } else {
iveresov@5635 3418 worklist.push(x->in(0));
iveresov@5635 3419 // We are looking for the pattern:
iveresov@5635 3420 // /->ThreadLocal
iveresov@5635 3421 // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
iveresov@5635 3422 // \->ConI(0)
iveresov@5635 3423 // We want to verify that the If and the LoadB have the same control
iveresov@5635 3424 // See GraphKit::g1_write_barrier_pre()
iveresov@5635 3425 if (x->is_If()) {
iveresov@5635 3426 IfNode *iff = x->as_If();
iveresov@5635 3427 if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
iveresov@5635 3428 CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
iveresov@5635 3429 if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
iveresov@5635 3430 && cmp->in(1)->is_Load()) {
iveresov@5635 3431 LoadNode* load = cmp->in(1)->as_Load();
iveresov@5635 3432 if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
iveresov@5635 3433 && load->in(2)->in(3)->is_Con()
iveresov@5635 3434 && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
iveresov@5635 3435
iveresov@5635 3436 Node* if_ctrl = iff->in(0);
iveresov@5635 3437 Node* load_ctrl = load->in(0);
iveresov@5635 3438
iveresov@5635 3439 if (if_ctrl != load_ctrl) {
iveresov@5635 3440 // Skip possible CProj->NeverBranch in infinite loops
iveresov@5635 3441 if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
iveresov@5635 3442 && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
iveresov@5635 3443 if_ctrl = if_ctrl->in(0)->in(0);
iveresov@5635 3444 }
iveresov@5635 3445 }
iveresov@5635 3446 assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
iveresov@5635 3447 }
iveresov@5635 3448 }
iveresov@5635 3449 }
iveresov@5635 3450 }
iveresov@5635 3451 }
iveresov@5635 3452 }
iveresov@5635 3453 }
iveresov@5635 3454 }
iveresov@5635 3455
duke@0 3456 #endif
duke@0 3457
duke@0 3458 // The Compile object keeps track of failure reasons separately from the ciEnv.
duke@0 3459 // This is required because there is not quite a 1-1 relation between the
duke@0 3460 // ciEnv and its compilation task and the Compile object. Note that one
duke@0 3461 // ciEnv might use two Compile objects, if C2Compiler::compile_method decides
duke@0 3462 // to backtrack and retry without subsuming loads. Other than this backtracking
duke@0 3463 // behavior, the Compile's failure reason is quietly copied up to the ciEnv
duke@0 3464 // by the logic in C2Compiler.
duke@0 3465 void Compile::record_failure(const char* reason) {
duke@0 3466 if (log() != NULL) {
duke@0 3467 log()->elem("failure reason='%s' phase='compile'", reason);
duke@0 3468 }
duke@0 3469 if (_failure_reason == NULL) {
duke@0 3470 // Record the first failure reason.
duke@0 3471 _failure_reason = reason;
duke@0 3472 }
sla@4802 3473
sla@4802 3474 EventCompilerFailure event;
sla@4802 3475 if (event.should_commit()) {
sla@4802 3476 event.set_compileID(Compile::compile_id());
sla@4802 3477 event.set_failure(reason);
sla@4802 3478 event.commit();
sla@4802 3479 }
sla@4802 3480
never@222 3481 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
sla@4802 3482 C->print_method(PHASE_FAILURE);
never@222 3483 }
duke@0 3484 _root = NULL; // flush the graph, too
duke@0 3485 }
duke@0 3486
duke@0 3487 Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
bharadwaj@3880 3488 : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false),
bharadwaj@3880 3489 _phase_name(name), _dolog(dolog)
duke@0 3490 {
duke@0 3491 if (dolog) {
duke@0 3492 C = Compile::current();
duke@0 3493 _log = C->log();
duke@0 3494 } else {
duke@0 3495 C = NULL;
duke@0 3496 _log = NULL;
duke@0 3497 }
duke@0 3498 if (_log != NULL) {
bharadwaj@3880 3499 _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
duke@0 3500 _log->stamp();
duke@0 3501 _log->end_head();
duke@0 3502 }
duke@0 3503 }
duke@0 3504
duke@0 3505 Compile::TracePhase::~TracePhase() {
bharadwaj@3880 3506
bharadwaj@3880 3507 C = Compile::current();
bharadwaj@3880 3508 if (_dolog) {
bharadwaj@3880 3509 _log = C->log();
bharadwaj@3880 3510 } else {
bharadwaj@3880 3511 _log = NULL;
bharadwaj@3880 3512 }
bharadwaj@3880 3513
bharadwaj@3880 3514 #ifdef ASSERT
bharadwaj@3880 3515 if (PrintIdealNodeCount) {
bharadwaj@3880 3516 tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
bharadwaj@3880 3517 _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
bharadwaj@3880 3518 }
bharadwaj@3880 3519
bharadwaj@3880 3520 if (VerifyIdealNodeCount) {
bharadwaj@3880 3521 Compile::current()->print_missing_nodes();
bharadwaj@3880 3522 }
bharadwaj@3880 3523 #endif
bharadwaj@3880 3524
duke@0 3525 if (_log != NULL) {
bharadwaj@3880 3526 _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
duke@0 3527 }
duke@0 3528 }
twisti@1915 3529
twisti@1915 3530 //=============================================================================
twisti@1915 3531 // Two Constant's are equal when the type and the value are equal.
twisti@1915 3532 bool Compile::Constant::operator==(const Constant& other) {
twisti@1915 3533 if (type() != other.type() ) return false;
twisti@1915 3534 if (can_be_reused() != other.can_be_reused()) return false;
twisti@1915 3535 // For floating point values we compare the bit pattern.
twisti@1915 3536 switch (type()) {
coleenp@3602 3537 case T_FLOAT: return (_v._value.i == other._v._value.i);
twisti@1915 3538 case T_LONG:
coleenp@3602 3539 case T_DOUBLE: return (_v._value.j == other._v._value.j);
twisti@1915 3540 case T_OBJECT:
coleenp@3602 3541 case T_ADDRESS: return (_v._value.l == other._v._value.l);
coleenp@3602 3542 case T_VOID: return (_v._value.l == other._v._value.l); // jump-table entries
kvn@3764 3543 case T_METADATA: return (_v._metadata == other._v._metadata);
twisti@1915 3544 default: ShouldNotReachHere();
twisti@1915 3545 }
twisti@1915 3546 return false;
twisti@1915 3547 }
twisti@1915 3548
twisti@1915 3549 static int type_to_size_in_bytes(BasicType t) {
twisti@1915 3550 switch (t) {
twisti@1915 3551 case T_LONG: return sizeof(jlong );
twisti@1915 3552 case T_FLOAT: return sizeof(jfloat );
twisti@1915 3553 case T_DOUBLE: return sizeof(jdouble);
coleenp@3602 3554 case T_METADATA: return sizeof(Metadata*);
twisti@1915 3555 // We use T_VOID as marker for jump-table entries (labels) which
twisti@2875 3556 // need an internal word relocation.
twisti@1915 3557 case T_VOID:
twisti@1915 3558 case T_ADDRESS:
twisti@1915 3559 case T_OBJECT: return sizeof(jobject);
twisti@1915 3560 }
twisti@1915 3561
twisti@1915 3562 ShouldNotReachHere();
twisti@1915 3563 return -1;
twisti@1915 3564 }
twisti@1915 3565
twisti@2875 3566 int Compile::ConstantTable::qsort_comparator(Constant* a, Constant* b) {
twisti@2875 3567 // sort descending
twisti@2875 3568 if (a->freq() > b->freq()) return -1;
twisti@2875 3569 if (a->freq() < b->freq()) return 1;
twisti@2875 3570 return 0;
twisti@2875 3571 }
twisti@2875 3572