annotate hotspot/src/share/vm/opto/graphKit.hpp @ 9176:42d9d1010f38

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
author johnc
date Thu, 07 Apr 2011 09:53:20 -0700
parents 16fc1c68714b
children 6db9c9dffe1f
rev   line source
duke@1 1 /*
johnc@9176 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
duke@1 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@1 4 *
duke@1 5 * This code is free software; you can redistribute it and/or modify it
duke@1 6 * under the terms of the GNU General Public License version 2 only, as
duke@1 7 * published by the Free Software Foundation.
duke@1 8 *
duke@1 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@1 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@1 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@1 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@1 13 * accompanied this code).
duke@1 14 *
duke@1 15 * You should have received a copy of the GNU General Public License version
duke@1 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@1 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@1 18 *
trims@5547 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@5547 20 * or visit www.oracle.com if you need additional information or have any
trims@5547 21 * questions.
duke@1 22 *
duke@1 23 */
duke@1 24
stefank@7397 25 #ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
stefank@7397 26 #define SHARE_VM_OPTO_GRAPHKIT_HPP
stefank@7397 27
stefank@7397 28 #include "ci/ciEnv.hpp"
stefank@7397 29 #include "ci/ciMethodData.hpp"
stefank@7397 30 #include "opto/addnode.hpp"
stefank@7397 31 #include "opto/callnode.hpp"
stefank@7397 32 #include "opto/cfgnode.hpp"
stefank@7397 33 #include "opto/compile.hpp"
stefank@7397 34 #include "opto/divnode.hpp"
stefank@7397 35 #include "opto/mulnode.hpp"
stefank@7397 36 #include "opto/phaseX.hpp"
stefank@7397 37 #include "opto/subnode.hpp"
stefank@7397 38 #include "opto/type.hpp"
stefank@7397 39 #include "runtime/deoptimization.hpp"
stefank@7397 40
duke@1 41 class FastLockNode;
duke@1 42 class FastUnlockNode;
ysr@1374 43 class IdealKit;
duke@1 44 class Parse;
duke@1 45 class RootNode;
duke@1 46
duke@1 47 //-----------------------------------------------------------------------------
duke@1 48 //----------------------------GraphKit-----------------------------------------
duke@1 49 // Toolkit for building the common sorts of subgraphs.
duke@1 50 // Does not know about bytecode parsing or type-flow results.
duke@1 51 // It is able to create graphs implementing the semantics of most
duke@1 52 // or all bytecodes, so that it can expand intrinsics and calls.
duke@1 53 // It may depend on JVMState structure, but it must not depend
duke@1 54 // on specific bytecode streams.
duke@1 55 class GraphKit : public Phase {
duke@1 56 friend class PreserveJVMState;
duke@1 57
duke@1 58 protected:
duke@1 59 ciEnv* _env; // Compilation environment
duke@1 60 PhaseGVN &_gvn; // Some optimizations while parsing
duke@1 61 SafePointNode* _map; // Parser map from JVM to Nodes
duke@1 62 SafePointNode* _exceptions;// Parser map(s) for exception state(s)
duke@1 63 int _sp; // JVM Expression Stack Pointer
duke@1 64 int _bci; // JVM Bytecode Pointer
duke@1 65 ciMethod* _method; // JVM Current Method
duke@1 66
duke@1 67 private:
duke@1 68 SafePointNode* map_not_null() const {
duke@1 69 assert(_map != NULL, "must call stopped() to test for reset compiler map");
duke@1 70 return _map;
duke@1 71 }
duke@1 72
duke@1 73 public:
duke@1 74 GraphKit(); // empty constructor
duke@1 75 GraphKit(JVMState* jvms); // the JVM state on which to operate
duke@1 76
duke@1 77 #ifdef ASSERT
duke@1 78 ~GraphKit() {
duke@1 79 assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
duke@1 80 }
duke@1 81 #endif
duke@1 82
duke@1 83 virtual Parse* is_Parse() const { return NULL; }
duke@1 84
duke@1 85 ciEnv* env() const { return _env; }
duke@1 86 PhaseGVN& gvn() const { return _gvn; }
duke@1 87
duke@1 88 void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile
duke@1 89
duke@1 90 // Handy well-known nodes:
duke@1 91 Node* null() const { return zerocon(T_OBJECT); }
duke@1 92 Node* top() const { return C->top(); }
duke@1 93 RootNode* root() const { return C->root(); }
duke@1 94
duke@1 95 // Create or find a constant node
duke@1 96 Node* intcon(jint con) const { return _gvn.intcon(con); }
duke@1 97 Node* longcon(jlong con) const { return _gvn.longcon(con); }
duke@1 98 Node* makecon(const Type *t) const { return _gvn.makecon(t); }
duke@1 99 Node* zerocon(BasicType bt) const { return _gvn.zerocon(bt); }
duke@1 100 // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
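  // Usage sketch (editorial, not in the original header): assuming a kit
  // positioned at a valid JVM state, constants are typically made as:
  //   Node* one = intcon(1);                        // jint constant
  //   Node* big = longcon(0x100000000LL);           // jlong constant
  //   Node* nil = zerocon(T_OBJECT);                // same node as null()
  //   Node* pi  = makecon(TypeD::make(3.14159265358979));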
duke@1 101
never@2027 102 // Helper for byte_map_base
never@2027 103 Node* byte_map_base_node() {
never@2027 104 // Get base of card map
never@2027 105 CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
never@2027 106 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
never@2027 107 if (ct->byte_map_base != NULL) {
never@2027 108 return makecon(TypeRawPtr::make((address)ct->byte_map_base));
never@2027 109 } else {
never@2027 110 return null();
never@2027 111 }
never@2027 112 }
never@2027 113
duke@1 114 jint find_int_con(Node* n, jint value_if_unknown) {
duke@1 115 return _gvn.find_int_con(n, value_if_unknown);
duke@1 116 }
duke@1 117 jlong find_long_con(Node* n, jlong value_if_unknown) {
duke@1 118 return _gvn.find_long_con(n, value_if_unknown);
duke@1 119 }
duke@1 120 // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
duke@1 121
duke@1 122 // JVM State accessors:
duke@1 123 // Parser mapping from JVM indices into Nodes.
duke@1 124 // Low slots are accessed via the StartNode enum.
duke@1 125 // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
duke@1 126 // Then come JVM stack slots.
duke@1 127 // Finally come the monitors, if any.
duke@1 128 // See layout accessors in class JVMState.
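  // Editorial sketch of the slot layout described above (boundaries are
  // those named in the comment; see class JVMState for the real accessors):
  //   [0 .. StartNode::Parms)                  fixed low slots
  //   [Parms .. Parms + max_locals())          bytecode locals
  //   [Parms + max_locals() .. + sp())         JVM expression stack
  //   remaining slots                          monitors, if any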
duke@1 129
duke@1 130 SafePointNode* map() const { return _map; }
duke@1 131 bool has_exceptions() const { return _exceptions != NULL; }
duke@1 132 JVMState* jvms() const { return map_not_null()->_jvms; }
duke@1 133 int sp() const { return _sp; }
duke@1 134 int bci() const { return _bci; }
duke@1 135 Bytecodes::Code java_bc() const;
duke@1 136 ciMethod* method() const { return _method; }
duke@1 137
duke@1 138 void set_jvms(JVMState* jvms) { set_map(jvms->map());
duke@1 139 assert(jvms == this->jvms(), "sanity");
duke@1 140 _sp = jvms->sp();
duke@1 141 _bci = jvms->bci();
duke@1 142 _method = jvms->has_method() ? jvms->method() : NULL; }
duke@1 143 void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
duke@1 144 void set_sp(int i) { assert(i >= 0, "must be non-negative"); _sp = i; }
duke@1 145 void clean_stack(int from_sp); // clear garbage beyond from_sp to top
duke@1 146
duke@1 147 void inc_sp(int i) { set_sp(sp() + i); }
duke@1 148 void set_bci(int bci) { _bci = bci; }
duke@1 149
duke@1 150 // Make sure jvms has current bci & sp.
duke@1 151 JVMState* sync_jvms() const;
duke@1 152 #ifdef ASSERT
duke@1 153 // Make sure JVMS has an updated copy of bci and sp.
duke@1 154 // Also sanity-check method, depth, and monitor depth.
duke@1 155 bool jvms_in_sync() const;
duke@1 156
duke@1 157 // Make sure the map looks OK.
duke@1 158 void verify_map() const;
duke@1 159
duke@1 160 // Make sure a proposed exception state looks OK.
duke@1 161 static void verify_exception_state(SafePointNode* ex_map);
duke@1 162 #endif
duke@1 163
duke@1 164 // Clone the existing map state. (Implements PreserveJVMState.)
duke@1 165 SafePointNode* clone_map();
duke@1 166
duke@1 167 // Set the map to a clone of the given one.
duke@1 168 void set_map_clone(SafePointNode* m);
duke@1 169
duke@1 170 // Tell if the compilation is failing.
duke@1 171 bool failing() const { return C->failing(); }
duke@1 172
duke@1 173 // Set _map to NULL, signalling a stop to further bytecode execution.
duke@1 174 // Preserve the map intact for future use, and return it back to the caller.
duke@1 175 SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }
duke@1 176
duke@1 177 // Stop, but first smash the map's inputs to NULL, to mark it dead.
duke@1 178 void stop_and_kill_map();
duke@1 179
duke@1 180 // Tell if _map is NULL, or control is top.
duke@1 181 bool stopped();
duke@1 182
duke@1 183 // Tell if this method or any caller method has exception handlers.
duke@1 184 bool has_ex_handler();
duke@1 185
duke@1 186 // Save an exception without blowing stack contents or other JVM state.
duke@1 187 // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
duke@1 188 static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);
duke@1 189
duke@1 190 // Recover a saved exception from its map.
duke@1 191 static Node* saved_ex_oop(SafePointNode* ex_map);
duke@1 192
duke@1 193 // Recover a saved exception from its map, and remove it from the map.
duke@1 194 static Node* clear_saved_ex_oop(SafePointNode* ex_map);
duke@1 195
duke@1 196 #ifdef ASSERT
duke@1 197 // Tell whether an exception oop is currently saved in the given map.
duke@1 198 static bool has_saved_ex_oop(SafePointNode* ex_map);
duke@1 199 #endif
duke@1 200
duke@1 201 // Push an exception in the canonical position for handlers (stack(0)).
duke@1 202 void push_ex_oop(Node* ex_oop) {
duke@1 203 ensure_stack(1); // ensure room to push the exception
duke@1 204 set_stack(0, ex_oop);
duke@1 205 set_sp(1);
duke@1 206 clean_stack(1);
duke@1 207 }
duke@1 208
duke@1 209 // Detach and return an exception state.
duke@1 210 SafePointNode* pop_exception_state() {
duke@1 211 SafePointNode* ex_map = _exceptions;
duke@1 212 if (ex_map != NULL) {
duke@1 213 _exceptions = ex_map->next_exception();
duke@1 214 ex_map->set_next_exception(NULL);
duke@1 215 debug_only(verify_exception_state(ex_map));
duke@1 216 }
duke@1 217 return ex_map;
duke@1 218 }
duke@1 219
duke@1 220 // Add an exception, using the given JVM state, without commoning.
duke@1 221 void push_exception_state(SafePointNode* ex_map) {
duke@1 222 debug_only(verify_exception_state(ex_map));
duke@1 223 ex_map->set_next_exception(_exceptions);
duke@1 224 _exceptions = ex_map;
duke@1 225 }
duke@1 226
duke@1 227 // Turn the current JVM state into an exception state, appending the ex_oop.
duke@1 228 SafePointNode* make_exception_state(Node* ex_oop);
duke@1 229
duke@1 230 // Add an exception, using the given JVM state.
duke@1 231 // Combine all exceptions with a common exception type into a single state.
duke@1 232 // (This is done via combine_exception_states.)
duke@1 233 void add_exception_state(SafePointNode* ex_map);
duke@1 234
duke@1 235 // Combine all exceptions of any sort whatever into a single master state.
duke@1 236 SafePointNode* combine_and_pop_all_exception_states() {
duke@1 237 if (_exceptions == NULL) return NULL;
duke@1 238 SafePointNode* phi_map = pop_exception_state();
duke@1 239 SafePointNode* ex_map;
duke@1 240 while ((ex_map = pop_exception_state()) != NULL) {
duke@1 241 combine_exception_states(ex_map, phi_map);
duke@1 242 }
duke@1 243 return phi_map;
duke@1 244 }
duke@1 245
duke@1 246 // Combine the two exception states, building phis as necessary.
duke@1 247 // The second argument is updated to include contributions from the first.
duke@1 248 void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);
duke@1 249
duke@1 250 // Reset the map to the given state. If there are any half-finished phis
duke@1 251 // in it (created by combine_exception_states), transform them now.
duke@1 252 // Returns the exception oop. (Caller must call push_ex_oop if required.)
duke@1 253 Node* use_exception_state(SafePointNode* ex_map);
duke@1 254
duke@1 255 // Collect exceptions from a given JVM state into my exception list.
duke@1 256 void add_exception_states_from(JVMState* jvms);
duke@1 257
duke@1 258 // Collect all raised exceptions into the current JVM state.
duke@1 259 // Clear the current exception list and map, and return the combined state.
duke@1 260 JVMState* transfer_exceptions_into_jvms();
duke@1 261
duke@1 262 // Helper to throw a built-in exception.
duke@1 263 // Range checks take the offending index.
duke@1 264 // Cast and array store checks take the offending class.
duke@1 265 // Others do not take the optional argument.
duke@1 266 // The JVMS must allow the bytecode to be re-executed
duke@1 267 // via an uncommon trap.
duke@1 268 void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);
duke@1 269
dcubed@4761 270 // Helper to check the JavaThread::_should_post_on_exceptions flag
dcubed@4761 271 // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
dcubed@4761 272 void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
dcubed@4761 273 bool must_throw);
dcubed@4761 274
duke@1 275 // Helper functions for adding debug information
duke@1 276 void kill_dead_locals();
duke@1 277 #ifdef ASSERT
duke@1 278 bool dead_locals_are_killed();
duke@1 279 #endif
duke@1 280 // The call may deoptimize. Supply required JVM state as debug info.
duke@1 281 // If must_throw is true, the call is guaranteed not to return normally.
duke@1 282 void add_safepoint_edges(SafePointNode* call,
duke@1 283 bool must_throw = false);
duke@1 284
duke@1 285 // How many stack inputs does the current BC consume?
duke@1 286 // And, how does the stack change after the bytecode?
duke@1 287 // Returns false if unknown.
duke@1 288 bool compute_stack_effects(int& inputs, int& depth);
duke@1 289
duke@1 290 // Add a fixed offset to a pointer
duke@1 291 Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
duke@1 292 return basic_plus_adr(base, ptr, MakeConX(offset));
duke@1 293 }
duke@1 294 Node* basic_plus_adr(Node* base, intptr_t offset) {
duke@1 295 return basic_plus_adr(base, base, MakeConX(offset));
duke@1 296 }
duke@1 297 // Add a variable offset to a pointer
duke@1 298 Node* basic_plus_adr(Node* base, Node* offset) {
duke@1 299 return basic_plus_adr(base, base, offset);
duke@1 300 }
duke@1 301 Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
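  // Usage sketch (editorial): the address of a field at a fixed byte
  // offset in obj (the offset value is illustrative):
  //   Node* f_adr = basic_plus_adr(obj, 12);
  //   // equivalent to basic_plus_adr(obj, obj, MakeConX(12))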
duke@1 302
never@4450 303
never@4450 304 // Some convenient shortcuts for common nodes
never@4450 305 Node* IfTrue(IfNode* iff) { return _gvn.transform(new (C,1) IfTrueNode(iff)); }
never@4450 306 Node* IfFalse(IfNode* iff) { return _gvn.transform(new (C,1) IfFalseNode(iff)); }
never@4450 307
never@4450 308 Node* AddI(Node* l, Node* r) { return _gvn.transform(new (C,3) AddINode(l, r)); }
never@4450 309 Node* SubI(Node* l, Node* r) { return _gvn.transform(new (C,3) SubINode(l, r)); }
never@4450 310 Node* MulI(Node* l, Node* r) { return _gvn.transform(new (C,3) MulINode(l, r)); }
never@4450 311 Node* DivI(Node* ctl, Node* l, Node* r) { return _gvn.transform(new (C,3) DivINode(ctl, l, r)); }
never@4450 312
never@4450 313 Node* AndI(Node* l, Node* r) { return _gvn.transform(new (C,3) AndINode(l, r)); }
never@4450 314 Node* OrI(Node* l, Node* r) { return _gvn.transform(new (C,3) OrINode(l, r)); }
never@4450 315 Node* XorI(Node* l, Node* r) { return _gvn.transform(new (C,3) XorINode(l, r)); }
never@4450 316
never@4450 317 Node* MaxI(Node* l, Node* r) { return _gvn.transform(new (C,3) MaxINode(l, r)); }
never@4450 318 Node* MinI(Node* l, Node* r) { return _gvn.transform(new (C,3) MinINode(l, r)); }
never@4450 319
never@4450 320 Node* LShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) LShiftINode(l, r)); }
never@4450 321 Node* RShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) RShiftINode(l, r)); }
never@4450 322 Node* URShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) URShiftINode(l, r)); }
never@4450 323
never@4450 324 Node* CmpI(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpINode(l, r)); }
never@4450 325 Node* CmpL(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpLNode(l, r)); }
never@4450 326 Node* CmpP(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpPNode(l, r)); }
never@4450 327 Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C,2) BoolNode(cmp, relop)); }
never@4450 328
never@4450 329 Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(new (C,4) AddPNode(b, a, o)); }
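  // Usage sketch (editorial): a two-way branch built from the shortcuts
  // above; PROB_FAIR and COUNT_UNKNOWN are the usual IfNode probability
  // and count constants:
  //   Node*   cmp = CmpI(value, intcon(0));
  //   Node*   bol = Bool(cmp, BoolTest::ne);
  //   IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* is_nonzero = IfTrue(iff);   // control when value != 0
  //   Node* is_zero    = IfFalse(iff);  // control when value == 0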
never@4450 330
duke@1 331 // Convert between int and long, and size_t.
duke@1 332 // (See macros ConvI2X, etc., in type.hpp.)
duke@1 333 Node* ConvI2L(Node* offset);
duke@1 334 Node* ConvL2I(Node* offset);
duke@1 335 // Find out the klass of an object.
duke@1 336 Node* load_object_klass(Node* object);
duke@1 337 // Find out the length of an array.
duke@1 338 Node* load_array_length(Node* array);
duke@1 339 // Helper function to do a NULL pointer check or ZERO check based on type.
duke@1 340 Node* null_check_common(Node* value, BasicType type,
duke@1 341 bool assert_null, Node* *null_control);
duke@1 342 // Throw an exception if a given value is null.
duke@1 343 // Return the value cast to not-null.
duke@1 344 // Be clever about equivalent dominating null checks.
duke@1 345 Node* do_null_check(Node* value, BasicType type) {
duke@1 346 return null_check_common(value, type, false, NULL);
duke@1 347 }
duke@1 348 // Throw an uncommon trap if a given value is __not__ null.
duke@1 349 // Return the value cast to null, and be clever about dominating checks.
duke@1 350 Node* do_null_assert(Node* value, BasicType type) {
duke@1 351 return null_check_common(value, type, true, NULL);
duke@1 352 }
duke@1 353 // Null check oop. Return null-path control into (*null_control).
duke@1 354 // Return a cast-not-null node which depends on the not-null control.
duke@1 355 // If never_see_null, use an uncommon trap (*null_control sees a top).
duke@1 356 // The cast is not valid along the null path; keep a copy of the original.
duke@1 357 Node* null_check_oop(Node* value, Node* *null_control,
duke@1 358 bool never_see_null = false);
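  // Usage sketch (editorial): callers usually test stopped() after a null
  // check, since the not-null path may be statically dead:
  //   obj = do_null_check(obj, T_OBJECT);
  //   if (stopped())  return;   // control is top; obj was provably null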
duke@1 359
jrose@6416 360 // Check the null_seen bit.
jrose@6416 361 bool seems_never_null(Node* obj, ciProfileData* data);
jrose@6416 362
jrose@6416 363 // Use the type profile to narrow an object type.
jrose@6416 364 Node* maybe_cast_profiled_receiver(Node* not_null_obj,
jrose@6416 365 ciProfileData* data,
jrose@6416 366 ciKlass* require_klass);
jrose@6416 367
duke@1 368 // Cast obj to not-null on this path
duke@1 369 Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
duke@1 370 // Replace all occurrences of one node by another.
duke@1 371 void replace_in_map(Node* old, Node* neww);
duke@1 372
duke@1 373 void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms,_sp++,n); }
duke@1 374 Node* pop() { map_not_null(); return _map->stack(_map->_jvms,--_sp); }
duke@1 375 Node* peek(int off=0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }
duke@1 376
duke@1 377 void push_pair(Node* ldval) {
duke@1 378 push(ldval);
duke@1 379 push(top()); // the halfword is merely a placeholder
duke@1 380 }
duke@1 381 void push_pair_local(int i) {
duke@1 382 // longs are stored in locals in "push" order
duke@1 383 push( local(i+0) ); // the real value
duke@1 384 assert(local(i+1) == top(), "");
duke@1 385 push(top()); // halfword placeholder
duke@1 386 }
duke@1 387 Node* pop_pair() {
duke@1 388 // the second half is pushed last & popped first; it contains exactly nothing
duke@1 389 Node* halfword = pop();
duke@1 390 assert(halfword == top(), "");
duke@1 391 // the long bits are pushed first & popped last:
duke@1 392 return pop();
duke@1 393 }
duke@1 394 void set_pair_local(int i, Node* lval) {
duke@1 395 // longs are stored in locals as a value/half pair (like doubles)
duke@1 396 set_local(i+0, lval);
duke@1 397 set_local(i+1, top());
duke@1 398 }
duke@1 399
duke@1 400 // Push the node, which may be zero, one, or two words.
duke@1 401 void push_node(BasicType n_type, Node* n) {
duke@1 402 int n_size = type2size[n_type];
duke@1 403 if (n_size == 1) push( n ); // T_INT, ...
duke@1 404 else if (n_size == 2) push_pair( n ); // T_DOUBLE, T_LONG
duke@1 405 else { assert(n_size == 0, "must be T_VOID"); }
duke@1 406 }
duke@1 407
duke@1 408 Node* pop_node(BasicType n_type) {
duke@1 409 int n_size = type2size[n_type];
duke@1 410 if (n_size == 1) return pop();
duke@1 411 else if (n_size == 2) return pop_pair();
duke@1 412 else return NULL;
duke@1 413 }
duke@1 414
duke@1 415 Node* control() const { return map_not_null()->control(); }
duke@1 416 Node* i_o() const { return map_not_null()->i_o(); }
duke@1 417 Node* returnadr() const { return map_not_null()->returnadr(); }
duke@1 418 Node* frameptr() const { return map_not_null()->frameptr(); }
duke@1 419 Node* local(uint idx) const { map_not_null(); return _map->local( _map->_jvms, idx); }
duke@1 420 Node* stack(uint idx) const { map_not_null(); return _map->stack( _map->_jvms, idx); }
duke@1 421 Node* argument(uint idx) const { map_not_null(); return _map->argument( _map->_jvms, idx); }
duke@1 422 Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
duke@1 423 Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }
duke@1 424
duke@1 425 void set_control (Node* c) { map_not_null()->set_control(c); }
duke@1 426 void set_i_o (Node* c) { map_not_null()->set_i_o(c); }
duke@1 427 void set_local(uint idx, Node* c) { map_not_null(); _map->set_local( _map->_jvms, idx, c); }
duke@1 428 void set_stack(uint idx, Node* c) { map_not_null(); _map->set_stack( _map->_jvms, idx, c); }
duke@1 429 void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
duke@1 430 void ensure_stack(uint stk_size) { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }
duke@1 431
duke@1 432 // Access unaliased memory
duke@1 433 Node* memory(uint alias_idx);
duke@1 434 Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
duke@1 435 Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }
duke@1 436
duke@1 437 // Access immutable memory
duke@1 438 Node* immutable_memory() { return C->immutable_memory(); }
duke@1 439
duke@1 440 // Set unaliased memory
duke@1 441 void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
duke@1 442 void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
duke@1 443 void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }
duke@1 444
duke@1 445 // Get the entire memory state (probably a MergeMemNode), and reset it
duke@1 446 // (The resetting prevents somebody from using the dangling Node pointer.)
duke@1 447 Node* reset_memory();
duke@1 448
duke@1 449 // Get the entire memory state, asserted to be a MergeMemNode.
duke@1 450 MergeMemNode* merged_memory() {
duke@1 451 Node* mem = map_not_null()->memory();
duke@1 452 assert(mem->is_MergeMem(), "parse memory is always pre-split");
duke@1 453 return mem->as_MergeMem();
duke@1 454 }
duke@1 455
duke@1 456 // Set the entire memory state; produce a new MergeMemNode.
duke@1 457 void set_all_memory(Node* newmem);
duke@1 458
duke@1 459 // Create a memory projection from the call, then set_all_memory.
never@4450 460 void set_all_memory_call(Node* call, bool separate_io_proj = false);
duke@1 461
duke@1 462 // Create a LoadNode, reading from the parser's memory state.
duke@1 463 // (Note: require_atomic_access is useful only with T_LONG.)
duke@1 464 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
duke@1 465 bool require_atomic_access = false) {
duke@1 466 // This version computes alias_index from bottom_type
duke@1 467 return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
duke@1 468 require_atomic_access);
duke@1 469 }
duke@1 470 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {
duke@1 471 // This version computes alias_index from an address type
duke@1 472 assert(adr_type != NULL, "use other make_load factory");
duke@1 473 return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
duke@1 474 require_atomic_access);
duke@1 475 }
duke@1 476 // This is the base version which is given an alias index.
duke@1 477 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, bool require_atomic_access = false);
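  // Usage sketch (editorial): loading a jint field at a known byte offset;
  // the alias index is derived from the address node:
  //   Node* f_adr = basic_plus_adr(obj, offset);
  //   Node* f_val = make_load(control(), f_adr, TypeInt::INT, T_INT);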
duke@1 478
duke@1 479 // Create & transform a StoreNode and store the effect into the
duke@1 480 // parser's memory state.
duke@1 481 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
duke@1 482 const TypePtr* adr_type,
duke@1 483 bool require_atomic_access = false) {
duke@1 484 // This version computes alias_index from an address type
duke@1 485 assert(adr_type != NULL, "use other store_to_memory factory");
duke@1 486 return store_to_memory(ctl, adr, val, bt,
duke@1 487 C->get_alias_index(adr_type),
duke@1 488 require_atomic_access);
duke@1 489 }
duke@1 490 // This is the base version which is given an alias index
duke@1 491 // Return the new StoreXNode
duke@1 492 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
duke@1 493 int adr_idx,
duke@1 494 bool require_atomic_access = false);
duke@1 495
duke@1 496
duke@1 497 // All-in-one pre-barrier, store, and post-barrier.
duke@1 498 // Insert a write-barrier'd store. This is to let generational GC
duke@1 499 // work; we have to flag all oop-stores before the next GC point.
duke@1 500 //
duke@1 501 // It comes in 3 flavors of store to an object, array, or unknown.
duke@1 502 // We use precise card marks for arrays to avoid scanning the entire
duke@1 503 // array. We use imprecise for objects. We use precise for unknown
duke@1 504 // since we don't know if we have an array or an object or even
duke@1 505 // where the object starts.
duke@1 506 //
duke@1 507 // If val==NULL, it is taken to be a completely unknown value. QQQ
duke@1 508
kvn@3268 509 Node* store_oop(Node* ctl,
kvn@3268 510 Node* obj, // containing obj
kvn@3268 511 Node* adr, // actual address to store val at
kvn@3268 512 const TypePtr* adr_type,
kvn@3268 513 Node* val,
kvn@3268 514 const TypeOopPtr* val_type,
kvn@3268 515 BasicType bt,
kvn@3268 516 bool use_precise);
kvn@3268 517
duke@1 518 Node* store_oop_to_object(Node* ctl,
duke@1 519 Node* obj, // containing obj
duke@1 520 Node* adr, // actual address to store val at
duke@1 521 const TypePtr* adr_type,
duke@1 522 Node* val,
never@3178 523 const TypeOopPtr* val_type,
kvn@3268 524 BasicType bt) {
kvn@3268 525 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
kvn@3268 526 }
duke@1 527
duke@1 528 Node* store_oop_to_array(Node* ctl,
duke@1 529 Node* obj, // containing obj
duke@1 530 Node* adr, // actual address to store val at
duke@1 531 const TypePtr* adr_type,
duke@1 532 Node* val,
never@3178 533 const TypeOopPtr* val_type,
kvn@3268 534 BasicType bt) {
kvn@3268 535 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
kvn@3268 536 }
duke@1 537
duke@1 538 // Could be an array or object we don't know at compile time (unsafe ref.)
duke@1 539 Node* store_oop_to_unknown(Node* ctl,
duke@1 540 Node* obj, // containing obj
duke@1 541 Node* adr, // actual address to store val at
duke@1 542 const TypePtr* adr_type,
duke@1 543 Node* val,
duke@1 544 BasicType bt);
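  // Usage sketch (editorial): a barrier'd oop store into an object field;
  // val_type would normally come from the field's declared type:
  //   store_oop_to_object(control(), obj, f_adr, f_adr_type,
  //                       val, val_type, T_OBJECT);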
duke@1 545
duke@1 546 // For the few cases where the barriers need special help
johnc@9176 547 void pre_barrier(bool do_load, Node* ctl,
johnc@9176 548 Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
johnc@9176 549 Node* pre_val,
johnc@9176 550 BasicType bt);
duke@1 551
duke@1 552 void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
duke@1 553 Node* val, BasicType bt, bool use_precise);
duke@1 554
duke@1 555 // Return addressing for an array element.
duke@1 556 Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
duke@1 557 // Optional constraint on the array size:
duke@1 558 const TypeInt* sizetype = NULL);
duke@1 559
duke@1 560 // Return a load of array element at idx.
duke@1 561 Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
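  // Usage sketch (editorial): addressing and loading element idx of an
  // int array:
  //   Node* el_adr = array_element_address(ary, idx, T_INT);
  //   Node* el_val = make_load(control(), el_adr, TypeInt::INT, T_INT);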
duke@1 562
duke@1 563 //---------------- Dtrace support --------------------
duke@1 564 void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
duke@1 565 void make_dtrace_method_entry(ciMethod* method) {
duke@1 566 make_dtrace_method_entry_exit(method, true);
duke@1 567 }
duke@1 568 void make_dtrace_method_exit(ciMethod* method) {
duke@1 569 make_dtrace_method_entry_exit(method, false);
duke@1 570 }
duke@1 571
duke@1 572 //--------------- stub generation -------------------
duke@1 573 public:
duke@1 574 void gen_stub(address C_function,
duke@1 575 const char *name,
duke@1 576 int is_fancy_jump,
duke@1 577 bool pass_tls,
duke@1 578 bool return_pc);
duke@1 579
duke@1 580 //---------- help for generating calls --------------
duke@1 581
duke@1 582 // Do a null check on the receiver, which is in argument(0).
duke@1 583 Node* null_check_receiver(ciMethod* callee) {
duke@1 584 assert(!callee->is_static(), "must be a virtual method");
duke@1 585 int nargs = 1 + callee->signature()->size();
duke@1 586 // Null check on self without removing any arguments. The argument
duke@1 587 // null check technically happens in the wrong place, which can lead to
duke@1 588 // invalid stack traces when the primitive is inlined into a method
duke@1 589 // which handles NullPointerExceptions.
duke@1 590 Node* receiver = argument(0);
duke@1 591 _sp += nargs;
duke@1 592 receiver = do_null_check(receiver, T_OBJECT);
duke@1 593 _sp -= nargs;
duke@1 594 return receiver;
duke@1 595 }
duke@1 596
duke@1 597 // Fill in argument edges for the call from argument(0), argument(1), ...
duke@1 598 // (The next step is to call set_edges_for_java_call.)
duke@1 599 void set_arguments_for_java_call(CallJavaNode* call);
duke@1 600
duke@1 601 // Fill in non-argument edges for the call.
duke@1 602 // Transform the call, and update the basics: control, i_o, memory.
duke@1 603 // (The next step is usually to call set_results_for_java_call.)
duke@1 604 void set_edges_for_java_call(CallJavaNode* call,
never@4450 605 bool must_throw = false, bool separate_io_proj = false);
duke@1 606
duke@1 607 // Finish up a java call that was started by set_edges_for_java_call.
duke@1 608 // Call add_exception on any throw arising from the call.
duke@1 609 // Return the call result (transformed).
never@4450 610 Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
duke@1 611
duke@1 612 // Similar to set_edges_for_java_call, but simplified for runtime calls.
duke@1 613 void set_predefined_output_for_runtime_call(Node* call) {
duke@1 614 set_predefined_output_for_runtime_call(call, NULL, NULL);
duke@1 615 }
duke@1 616 void set_predefined_output_for_runtime_call(Node* call,
duke@1 617 Node* keep_mem,
duke@1 618 const TypePtr* hook_mem);
duke@1 619 Node* set_predefined_input_for_runtime_call(SafePointNode* call);
duke@1 620
never@4450 621 // Replace the call with the current state of the kit. Requires
never@4450 622 // that the call was generated with separate io_projs so that
never@4450 623 // exceptional control flow can be handled properly.
never@4450 624 void replace_call(CallNode* call, Node* result);
never@4450 625
duke@1 626 // helper functions for statistics
duke@1 627 void increment_counter(address counter_addr); // increment a debug counter
duke@1 628 void increment_counter(Node* counter_addr); // increment a debug counter
duke@1 629
duke@1 630 // Bail out to the interpreter right now
duke@1 631 // The optional klass is the one causing the trap.
duke@1 632 // The optional reason is debug information written to the compile log.
duke@1 633 // Optional must_throw is the same as with add_safepoint_edges.
duke@1 634 void uncommon_trap(int trap_request,
duke@1 635 ciKlass* klass = NULL, const char* reason_string = NULL,
duke@1 636 bool must_throw = false, bool keep_exact_action = false);
duke@1 637
duke@1 638 // Shorthand, to avoid saying "Deoptimization::" so many times.
duke@1 639 void uncommon_trap(Deoptimization::DeoptReason reason,
duke@1 640 Deoptimization::DeoptAction action,
duke@1 641 ciKlass* klass = NULL, const char* reason_string = NULL,
duke@1 642 bool must_throw = false, bool keep_exact_action = false) {
duke@1 643 uncommon_trap(Deoptimization::make_trap_request(reason, action),
duke@1 644 klass, reason_string, must_throw, keep_exact_action);
duke@1 645 }
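  // Usage sketch (editorial): deoptimize and reinterpret when a path
  // believed unreachable is actually taken:
  //   uncommon_trap(Deoptimization::Reason_unreached,
  //                 Deoptimization::Action_reinterpret);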
duke@1 646
duke@1 647 // Report if there were too many traps at the current method and bci.
duke@1 648 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
duke@1 649 // If there is no MDO at all, report no trap unless told to assume it.
duke@1 650 bool too_many_traps(Deoptimization::DeoptReason reason) {
duke@1 651 return C->too_many_traps(method(), bci(), reason);
duke@1 652 }
duke@1 653
duke@1 654 // Report if there were too many recompiles at the current method and bci.
duke@1 655 bool too_many_recompiles(Deoptimization::DeoptReason reason) {
duke@1 656 return C->too_many_recompiles(method(), bci(), reason);
duke@1 657 }
duke@1 658
duke@1 659 // Returns the object (if any) which was created the moment before.
duke@1 660 Node* just_allocated_object(Node* current_control);
duke@1 661
duke@1 662 static bool use_ReduceInitialCardMarks() {
duke@1 663 return (ReduceInitialCardMarks
duke@1 664 && Universe::heap()->can_elide_tlab_store_barriers());
duke@1 665 }
duke@1 666
kvn@3268 667 void sync_kit(IdealKit& ideal);
kvn@3268 668
kvn@3268 669 // vanilla/CMS post barrier
cfang@3904 670 void write_barrier_post(Node *store, Node* obj,
cfang@3904 671 Node* adr, uint adr_idx, Node* val, bool use_precise);
kvn@3268 672
ysr@1374 673 // G1 pre/post barriers
johnc@9176 674 void g1_write_barrier_pre(bool do_load,
johnc@9176 675 Node* obj,
ysr@1374 676 Node* adr,
ysr@1374 677 uint alias_idx,
ysr@1374 678 Node* val,
never@3178 679 const TypeOopPtr* val_type,
johnc@9176 680 Node* pre_val,
ysr@1374 681 BasicType bt);
ysr@1374 682
ysr@1374 683 void g1_write_barrier_post(Node* store,
ysr@1374 684 Node* obj,
ysr@1374 685 Node* adr,
ysr@1374 686 uint alias_idx,
ysr@1374 687 Node* val,
ysr@1374 688 BasicType bt,
ysr@1374 689 bool use_precise);
ysr@1374 690 // Helper function for g1
ysr@1374 691 private:
cfang@3904 692 void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
cfang@3904 693 Node* index, Node* index_adr,
ysr@1374 694 Node* buffer, const TypeFunc* tf);
ysr@1374 695
ysr@1374 696 public:
duke@1 697 // Helper function to round double arguments before a call
duke@1 698 void round_double_arguments(ciMethod* dest_method);
duke@1 699 void round_double_result(ciMethod* dest_method);
duke@1 700
duke@1 701 // rounding for strict float precision conformance
duke@1 702 Node* precision_rounding(Node* n);
duke@1 703
duke@1 704 // rounding for strict double precision conformance
duke@1 705 Node* dprecision_rounding(Node* n);
duke@1 706
duke@1 707 // rounding for non-strict double stores
duke@1 708 Node* dstore_rounding(Node* n);
duke@1 709
duke@1 710 // Helper functions for fast/slow path codes
duke@1 711 Node* opt_iff(Node* region, Node* iff);
duke@1 712 Node* make_runtime_call(int flags,
duke@1 713 const TypeFunc* call_type, address call_addr,
duke@1 714 const char* call_name,
duke@1 715 const TypePtr* adr_type, // NULL if no memory effects
duke@1 716 Node* parm0 = NULL, Node* parm1 = NULL,
duke@1 717 Node* parm2 = NULL, Node* parm3 = NULL,
duke@1 718 Node* parm4 = NULL, Node* parm5 = NULL,
duke@1 719 Node* parm6 = NULL, Node* parm7 = NULL);
duke@1 720 enum { // flag values for make_runtime_call
duke@1 721 RC_NO_FP = 1, // CallLeafNoFPNode
duke@1 722 RC_NO_IO = 2, // do not hook IO edges
duke@1 723 RC_NO_LEAF = 4, // CallStaticJavaNode
duke@1 724 RC_MUST_THROW = 8, // flag passed to add_safepoint_edges
duke@1 725 RC_NARROW_MEM = 16, // input memory is same as output
duke@1 726 RC_UNCOMMON = 32, // freq. expected to be like uncommon trap
duke@1 727 RC_LEAF = 0 // null value: no flags set
duke@1 728 };
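  // Usage sketch (editorial): a leaf call with no IO edges; tf, entry and
  // parm0 are hypothetical names for a suitable TypeFunc, stub entry
  // point, and argument:
  //   Node* call = make_runtime_call(RC_LEAF | RC_NO_IO,
  //                                  tf, entry, "my_leaf_stub",
  //                                  TypePtr::BOTTOM, parm0);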
duke@1 729
duke@1 730 // merge in all memory slices from new_mem, along the given path
duke@1 731 void merge_memory(Node* new_mem, Node* region, int new_path);
duke@1 732 void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);
duke@1 733
duke@1 734 // Helper functions to build synchronizations
duke@1 735 int next_monitor();
duke@1 736 Node* insert_mem_bar(int opcode, Node* precedent = NULL);
duke@1 737 Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
duke@1 738 // Optional 'precedent' is appended as an extra edge, to force ordering.
duke@1 739 FastLockNode* shared_lock(Node* obj);
duke@1 740 void shared_unlock(Node* box, Node* obj);
duke@1 741
duke@1 742 // helper functions for the fast path/slow path idioms
duke@1 743 Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, klassOop ex_klass, Node* slow_result);
duke@1 744
duke@1 745 // Generate an instance-of idiom. Used by both the instance-of bytecode
duke@1 746 // and the reflective instance-of call.
duke@1 747 Node* gen_instanceof( Node *subobj, Node* superkls );
duke@1 748
duke@1 749 // Generate a check-cast idiom. Used by both the check-cast bytecode
duke@1 750 // and the array-store bytecode
duke@1 751 Node* gen_checkcast( Node *subobj, Node* superkls,
duke@1 752 Node* *failure_control = NULL );
duke@1 753
duke@1 754 // Generate a subtyping check. Takes as input the subtype and supertype.
duke@1 755 // Returns 2 values: sets the default control() to the true path and
duke@1 756 // returns the false path. Only reads from constant memory taken from the
duke@1 757 // default memory; does not write anything. It also doesn't take in an
duke@1 758 // Object; if you wish to check an Object you need to load the Object's
duke@1 759 // class prior to coming here.
duke@1 760 Node* gen_subtype_check(Node* subklass, Node* superklass);
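  // Usage sketch (editorial), restating the contract above: on return,
  // control() follows the subtype path and the result is the failing path:
  //   Node* not_subtype_ctrl = gen_subtype_check(subklass, superklass);
  //   if (not_subtype_ctrl != top()) {
  //     // attach failure handling here, e.g. an uncommon trap
  //   }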
duke@1 761
duke@1 762 // Static parse-time type checking logic for gen_subtype_check:
duke@1 763 enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
duke@1 764 int static_subtype_check(ciKlass* superk, ciKlass* subk);
duke@1 765
duke@1 766 // Exact type check used for predicted calls and casts.
duke@1 767 // Rewrites (*casted_receiver) to be casted to the stronger type.
duke@1 768 // (Caller is responsible for doing replace_in_map.)
duke@1 769 Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
duke@1 770 Node* *casted_receiver);
duke@1 771
duke@1 772 // implementation of object creation
duke@1 773 Node* set_output_for_allocation(AllocateNode* alloc,
duke@1 774 const TypeOopPtr* oop_type,
duke@1 775 bool raw_mem_only);
duke@1 776 Node* get_layout_helper(Node* klass_node, jint& constant_value);
duke@1 777 Node* new_instance(Node* klass_node,
duke@1 778 Node* slow_test = NULL,
duke@1 779 bool raw_mem_only = false,
duke@1 780 Node* *return_size_val = NULL);
cfang@2574 781 Node* new_array(Node* klass_node, Node* count_val, int nargs,
duke@1 782 bool raw_mem_only = false, Node* *return_size_val = NULL);
duke@1 783
duke@1 784 // Handy for making control flow
duke@1 785 IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
duke@1 786 IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);// New IfNode's
duke@1 787 _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
duke@1 788 // Place 'if' on worklist if it will be in graph
duke@1 789 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
duke@1 790 return iff;
duke@1 791 }
duke@1 792
duke@1 793 IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
duke@1 794 IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);// New IfNode's
duke@1 795 _gvn.transform(iff); // Value may be known at parse-time
duke@1 796 // Place 'if' on worklist if it will be in graph
duke@1 797 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
duke@1 798 return iff;
duke@1 799 }
kvn@8732 800
kvn@8732 801 // Insert a loop predicate into the graph
kvn@8732 802 void add_predicate(int nargs = 0);
kvn@8732 803 void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
duke@1 804 };
duke@1 805
duke@1 806 // Helper class to support building of control flow branches. Upon
duke@1 807 // creation the map and sp at bci are cloned, and they are restored
duke@1 808 // upon destruction. Typical use:
duke@1 809 //
duke@1 810 // { PreserveJVMState pjvms(this);
duke@1 811 // // code of new branch
duke@1 812 // }
duke@1 813 // // here the JVM state at bci is established
duke@1 814
duke@1 815 class PreserveJVMState: public StackObj {
duke@1 816 protected:
duke@1 817 GraphKit* _kit;
duke@1 818 #ifdef ASSERT
duke@1 819 int _block; // PO of current block, if a Parse
duke@1 820 int _bci;
duke@1 821 #endif
duke@1 822 SafePointNode* _map;
duke@1 823 uint _sp;
duke@1 824
duke@1 825 public:
duke@1 826 PreserveJVMState(GraphKit* kit, bool clone_map = true);
duke@1 827 ~PreserveJVMState();
duke@1 828 };
duke@1 829
duke@1 830 // Helper class to build cutouts of the form if (p) ; else {x...}.
duke@1 831 // The code {x...} must not fall through.
duke@1 832 // The kit's main flow of control is set to the "then" continuation of if(p).
duke@1 833 class BuildCutout: public PreserveJVMState {
duke@1 834 public:
duke@1 835 BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
duke@1 836 ~BuildCutout();
duke@1 837 };
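// Usage sketch (editorial): inside a GraphKit method, guard a fast path;
// the cutout body must not fall through, so it typically ends in a throw
// or an uncommon trap:
//   { BuildCutout unless(this, bol, PROB_MAX);
//     uncommon_trap(Deoptimization::Reason_intrinsic,
//                   Deoptimization::Action_make_not_entrant);
//   }
//   // here control() continues along the path where bol is true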
cfang@3600 838
cfang@3600 839 // Helper class to preserve the original _reexecute bit and _sp,
cfang@3600 840 // restoring them on destruction
cfang@3600 841 class PreserveReexecuteState: public StackObj {
cfang@3600 842 protected:
cfang@3600 843 GraphKit* _kit;
cfang@3600 844 uint _sp;
cfang@3600 845 JVMState::ReexecuteState _reexecute;
cfang@3600 846
cfang@3600 847 public:
cfang@3600 848 PreserveReexecuteState(GraphKit* kit);
cfang@3600 849 ~PreserveReexecuteState();
cfang@3600 850 };
stefank@7397 851
stefank@7397 852 #endif // SHARE_VM_OPTO_GRAPHKIT_HPP