changeset 1739:f02a8bbe6ed4

6986046: C1 valuestack cleanup Summary: fixes an historical oddity in C1 with inlining where all of the expression stacks are kept in the topmost ValueStack instead of being in their respective ValueStacks. Reviewed-by: never Contributed-by: Christian Wimmer <cwimmer@uci.edu>
author roland
date Tue, 29 Dec 2009 19:08:54 +0100
parents c40600e85311
children 861f533d12b0
files src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp src/cpu/x86/vm/c1_CodeStubs_x86.cpp src/cpu/x86/vm/c1_LIRGenerator_x86.cpp src/share/vm/c1/c1_CFGPrinter.cpp src/share/vm/c1/c1_Canonicalizer.cpp src/share/vm/c1/c1_Compilation.hpp src/share/vm/c1/c1_GraphBuilder.cpp src/share/vm/c1/c1_GraphBuilder.hpp src/share/vm/c1/c1_IR.cpp src/share/vm/c1/c1_IR.hpp src/share/vm/c1/c1_Instruction.cpp src/share/vm/c1/c1_Instruction.hpp src/share/vm/c1/c1_InstructionPrinter.cpp src/share/vm/c1/c1_LIR.cpp src/share/vm/c1/c1_LIRAssembler.cpp src/share/vm/c1/c1_LIRGenerator.cpp src/share/vm/c1/c1_LinearScan.cpp src/share/vm/c1/c1_LinearScan.hpp src/share/vm/c1/c1_Optimizer.cpp src/share/vm/c1/c1_ValueStack.cpp src/share/vm/c1/c1_ValueStack.hpp src/share/vm/c1/c1_globals.hpp src/share/vm/includeDB_compiler1
diffstat 24 files changed, 745 insertions(+), 1073 deletions(-) [+]
line wrap: on
line diff
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -32,6 +32,7 @@
   : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
   , _index(index)
 {
+  assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
 
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -311,7 +311,7 @@
 
 
 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
-  assert(x->is_root(),"");
+  assert(x->is_pinned(),"");
   bool needs_range_check = true;
   bool use_length = x->length() != NULL;
   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
@@ -386,7 +386,7 @@
 
 
 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
-  assert(x->is_root(),"");
+  assert(x->is_pinned(),"");
   LIRItem obj(x->obj(), this);
   obj.load_item();
 
@@ -398,7 +398,7 @@
 
   CodeEmitInfo* info_for_exception = NULL;
   if (x->needs_null_check()) {
-    info_for_exception = state_for(x, x->lock_stack_before());
+    info_for_exception = state_for(x);
   }
 
   // this CodeEmitInfo must not have the xhandlers because here the
@@ -409,7 +409,7 @@
 
 
 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
-  assert(x->is_root(),"");
+  assert(x->is_pinned(),"");
   LIRItem obj(x->obj(), this);
   obj.dont_load_item();
 
@@ -871,10 +871,11 @@
   // This instruction can be deoptimized in the slow path : use
   // O0 as result register.
   const LIR_Opr reg = result_register_for(x->type());
-
+#ifndef PRODUCT
   if (PrintNotLoaded && !x->klass()->is_loaded()) {
-    tty->print_cr("   ###class not loaded at new bci %d", x->bci());
+    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
   }
+#endif
   CodeEmitInfo* info = state_for(x, x->state());
   LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   LIR_Opr tmp2 = FrameMap::G3_oop_opr;
@@ -1018,7 +1019,7 @@
   obj.load_item();
   LIR_Opr out_reg = rlock_result(x);
   CodeStub* stub;
-  CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());
+  CodeEmitInfo* info_for_exception = state_for(x);
 
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == NULL, "can't patch this");
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -83,7 +83,8 @@
   : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
   , _index(index)
 {
-  _info = info == NULL ? NULL : new CodeEmitInfo(info);
+  assert(info != NULL, "must have info");
+  _info = new CodeEmitInfo(info);
 }
 
 
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -107,7 +107,7 @@
     return false;
   }
   Constant* c = v->as_Constant();
-  if (c && c->state() == NULL) {
+  if (c && c->state_before() == NULL) {
     // constants of any type can be stored directly, except for
     // unloaded object constants.
     return true;
@@ -250,7 +250,7 @@
 
 
 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
-  assert(x->is_root(),"");
+  assert(x->is_pinned(),"");
   bool needs_range_check = true;
   bool use_length = x->length() != NULL;
   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
@@ -325,7 +325,7 @@
 
 
 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
-  assert(x->is_root(),"");
+  assert(x->is_pinned(),"");
   LIRItem obj(x->obj(), this);
   obj.load_item();
 
@@ -341,7 +341,7 @@
 
   CodeEmitInfo* info_for_exception = NULL;
   if (x->needs_null_check()) {
-    info_for_exception = state_for(x, x->lock_stack_before());
+    info_for_exception = state_for(x);
   }
   // this CodeEmitInfo must not have the xhandlers because here the
   // object is already locked (xhandlers expect object to be unlocked)
@@ -352,7 +352,7 @@
 
 
 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
-  assert(x->is_root(),"");
+  assert(x->is_pinned(),"");
 
   LIRItem obj(x->obj(), this);
   obj.dont_load_item();
@@ -984,9 +984,11 @@
 
 
 void LIRGenerator::do_NewInstance(NewInstance* x) {
+#ifndef PRODUCT
   if (PrintNotLoaded && !x->klass()->is_loaded()) {
-    tty->print_cr("   ###class not loaded at new bci %d", x->bci());
+    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
   }
+#endif
   CodeEmitInfo* info = state_for(x, x->state());
   LIR_Opr reg = result_register_for(x->type());
   LIR_Opr klass_reg = new_register(objectType);
@@ -1127,7 +1129,7 @@
   obj.load_item();
 
   // info for exceptions
-  CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());
+  CodeEmitInfo* info_for_exception = state_for(x);
 
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
--- a/src/share/vm/c1/c1_CFGPrinter.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_CFGPrinter.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -174,31 +174,6 @@
   int index;
   Value value;
 
-  if (state->stack_size() > 0) {
-    print_begin("stack");
-    print("size %d", state->stack_size());
-
-    for_each_stack_value(state, index, value) {
-      ip.print_phi(index, value, block);
-      print_operand(value);
-      output()->cr();
-    }
-
-    print_end("stack");
-  }
-
-  if (state->locks_size() > 0) {
-    print_begin("locks");
-    print("size %d", state->locks_size());
-
-    for_each_lock_value(state, index, value) {
-      ip.print_phi(index, value, block);
-      print_operand(value);
-      output()->cr();
-    }
-    print_end("locks");
-  }
-
   for_each_state(state) {
     print_begin("locals");
     print("size %d", state->locals_size());
@@ -210,6 +185,33 @@
       output()->cr();
     }
     print_end("locals");
+
+    if (state->stack_size() > 0) {
+      print_begin("stack");
+      print("size %d", state->stack_size());
+      print("method \"%s\"", method_name(state->scope()->method()));
+
+      for_each_stack_value(state, index, value) {
+        ip.print_phi(index, value, block);
+        print_operand(value);
+        output()->cr();
+      }
+
+      print_end("stack");
+    }
+
+    if (state->locks_size() > 0) {
+      print_begin("locks");
+      print("size %d", state->locks_size());
+      print("method \"%s\"", method_name(state->scope()->method()));
+
+      for_each_lock_value(state, index, value) {
+        ip.print_phi(index, value, block);
+        print_operand(value);
+        output()->cr();
+      }
+      print_end("locks");
+    }
   }
 
   print_end("states");
@@ -230,7 +232,8 @@
   if (instr->is_pinned()) {
     output()->put('.');
   }
-  output()->print("%d %d ", instr->bci(), instr->use_count());
+
+  output()->print("%d %d ", instr->printable_bci(), instr->use_count());
 
   print_operand(instr);
 
@@ -271,7 +274,7 @@
   print("name \"B%d\"", block->block_id());
 
   print("from_bci %d", block->bci());
-  print("to_bci %d", (block->end() == NULL ? -1 : block->end()->bci()));
+  print("to_bci %d", (block->end() == NULL ? -1 : block->end()->printable_bci()));
 
   output()->indent();
   output()->print("predecessors ");
--- a/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -205,7 +205,7 @@
     // limit this optimization to current block
     if (value != NULL && in_current_block(conv)) {
       set_canonical(new StoreField(x->obj(), x->offset(), x->field(), value, x->is_static(),
-                                       x->lock_stack(), x->state_before(), x->is_loaded(), x->is_initialized()));
+                                       x->state_before(), x->is_loaded(), x->is_initialized()));
       return;
     }
   }
@@ -256,7 +256,7 @@
     // limit this optimization to current block
     if (value != NULL && in_current_block(conv)) {
       set_canonical(new StoreIndexed(x->array(), x->index(), x->length(),
-                                     x->elt_type(), value, x->lock_stack()));
+                                     x->elt_type(), value, x->state_before()));
       return;
     }
   }
@@ -667,7 +667,7 @@
             }
           }
           set_canonical(canon);
-          set_bci(cmp->bci());
+          set_bci(cmp->state_before()->bci());
         }
       }
     } else if (l->as_InstanceOf() != NULL) {
@@ -685,7 +685,7 @@
         set_canonical(new Goto(is_inst_sux, x->state_before(), x->is_safepoint()));
       } else {
         // successors differ => simplify to: IfInstanceOf
-        set_canonical(new IfInstanceOf(inst->klass(), inst->obj(), true, inst->bci(), is_inst_sux, no_inst_sux));
+        set_canonical(new IfInstanceOf(inst->klass(), inst->obj(), true, inst->state_before()->bci(), is_inst_sux, no_inst_sux));
       }
     }
   } else if (rt == objectNull && (l->as_NewInstance() || l->as_NewArray())) {
--- a/src/share/vm/c1/c1_Compilation.hpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_Compilation.hpp	Tue Dec 29 19:08:54 2009 +0100
@@ -22,7 +22,6 @@
  *
  */
 
-class BlockBegin;
 class CompilationResourceObj;
 class XHandlers;
 class ExceptionInfo;
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -659,7 +659,6 @@
   , _jsr_xhandlers(NULL)
   , _caller_stack_size(-1)
   , _continuation(NULL)
-  , _continuation_state(NULL)
   , _num_returns(0)
   , _cleanup_block(NULL)
   , _cleanup_return_prev(NULL)
@@ -795,14 +794,6 @@
   if (i >= -1) worklist->at_put(i + 1, top);
 }
 
-int GraphBuilder::ScopeData::caller_stack_size() const {
-  ValueStack* state = scope()->caller_state();
-  if (state == NULL) {
-    return 0;
-  }
-  return state->stack_size();
-}
-
 
 BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
   if (is_work_list_empty()) {
@@ -880,7 +871,7 @@
         ciObject* obj = con.as_object();
         if (!obj->is_loaded()
             || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
-          patch_state = state()->copy();
+          patch_state = copy_state_before();
           t = new ObjectConstant(obj);
         } else {
           assert(!obj->is_klass(), "must be java_mirror of klass");
@@ -902,7 +893,8 @@
 
 
 void GraphBuilder::load_local(ValueType* type, int index) {
-  Value x = state()->load_local(index);
+  Value x = state()->local_at(index);
+  assert(x != NULL && !x->type()->is_illegal(), "access of illegal local variable");
   push(type, x);
 }
 
@@ -942,19 +934,21 @@
 
 
 void GraphBuilder::load_indexed(BasicType type) {
+  ValueStack* state_before = copy_state_for_exception();
   Value index = ipop();
   Value array = apop();
   Value length = NULL;
   if (CSEArrayLength ||
       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
-    length = append(new ArrayLength(array, lock_stack()));
+    length = append(new ArrayLength(array, state_before));
   }
-  push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, lock_stack())));
+  push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
 }
 
 
 void GraphBuilder::store_indexed(BasicType type) {
+  ValueStack* state_before = copy_state_for_exception();
   Value value = pop(as_ValueType(type));
   Value index = ipop();
   Value array = apop();
@@ -962,9 +956,9 @@
   if (CSEArrayLength ||
       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
-    length = append(new ArrayLength(array, lock_stack()));
+    length = append(new ArrayLength(array, state_before));
   }
-  StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack());
+  StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before);
   append(result);
   _memory->store_value(value);
 
@@ -1063,12 +1057,12 @@
 }
 
 
-void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* stack) {
+void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
   Value y = pop(type);
   Value x = pop(type);
   // NOTE: strictfp can be queried from current method since we don't
   // inline methods with differing strictfp bits
-  Value res = new ArithmeticOp(code, x, y, method()->is_strict(), stack);
+  Value res = new ArithmeticOp(code, x, y, method()->is_strict(), state_before);
   // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
   res = append(res);
   if (method()->is_strict()) {
@@ -1132,7 +1126,7 @@
 
 
 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
-  ValueStack* state_before = state()->copy();
+  ValueStack* state_before = copy_state_before();
   Value y = pop(type);
   Value x = pop(type);
   ipush(append(new CompareOp(code, x, y, state_before)));
@@ -1217,7 +1211,7 @@
 
 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
   Value y = append(new Constant(intZero));
-  ValueStack* state_before = state()->copy();
+  ValueStack* state_before = copy_state_before();
   Value x = ipop();
   if_node(x, cond, y, state_before);
 }
@@ -1225,14 +1219,14 @@
 
 void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
   Value y = append(new Constant(objectNull));
-  ValueStack* state_before = state()->copy();
+  ValueStack* state_before = copy_state_before();
   Value x = apop();
   if_node(x, cond, y, state_before);
 }
 
 
 void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
-  ValueStack* state_before = state()->copy();
+  ValueStack* state_before = copy_state_before();
   Value y = pop(type);
   Value x = pop(type);
   if_node(x, cond, y, state_before);
@@ -1282,7 +1276,7 @@
     BlockBegin* tsux = block_at(bci() + switch_->dest_offset_at(0));
     BlockBegin* fsux = block_at(bci() + switch_->default_offset());
     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
-    ValueStack* state_before = is_bb ? state() : NULL;
+    ValueStack* state_before = is_bb ? copy_state_before() : NULL;
     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
   } else {
     // collect successors
@@ -1295,7 +1289,7 @@
     }
     // add default successor
     sux->at_put(i, block_at(bci() + switch_->default_offset()));
-    ValueStack* state_before = has_bb ? state() : NULL;
+    ValueStack* state_before = has_bb ? copy_state_before() : NULL;
     append(new TableSwitch(ipop(), sux, switch_->low_key(), state_before, has_bb));
   }
 }
@@ -1314,7 +1308,7 @@
     BlockBegin* tsux = block_at(bci() + pair->offset());
     BlockBegin* fsux = block_at(bci() + switch_->default_offset());
     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
-    ValueStack* state_before = is_bb ? state() : NULL;
+    ValueStack* state_before = is_bb ? copy_state_before() : NULL;
     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
   } else {
     // collect successors & keys
@@ -1330,7 +1324,7 @@
     }
     // add default successor
     sux->at_put(i, block_at(bci() + switch_->default_offset()));
-    ValueStack* state_before = has_bb ? state() : NULL;
+    ValueStack* state_before = has_bb ? copy_state_before() : NULL;
     append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
   }
 }
@@ -1340,7 +1334,7 @@
   // the registration on return.
 
   // Gather some type information about the receiver
-  Value receiver = state()->load_local(0);
+  Value receiver = state()->local_at(0);
   assert(receiver != NULL, "must have a receiver");
   ciType* declared_type = receiver->declared_type();
   ciType* exact_type = receiver->exact_type();
@@ -1373,10 +1367,11 @@
 
   if (needs_check) {
     // Perform the registration of finalizable objects.
+    ValueStack* state_before = copy_state_for_exception();
     load_local(objectType, 0);
     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
                                state()->pop_arguments(1),
-                               true, lock_stack(), true));
+                               true, state_before, true));
   }
 }
 
@@ -1395,12 +1390,14 @@
     // If the inlined method is synchronized, the monitor must be
     // released before we jump to the continuation block.
     if (method()->is_synchronized()) {
-      int i = state()->caller_state()->locks_size();
-      assert(state()->locks_size() == i + 1, "receiver must be locked here");
-      monitorexit(state()->lock_at(i), SynchronizationEntryBCI);
+      assert(state()->locks_size() == 1, "receiver must be locked here");
+      monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
     }
 
-    state()->truncate_stack(caller_stack_size());
+    // State at end of inlined method is the state of the caller
+    // without the method parameters on stack, including the
+    // return value, if any, of the inlined method on operand stack.
+    set_state(state()->caller_state()->copy_for_parsing());
     if (x != NULL) {
       state()->push(x->type(), x);
     }
@@ -1412,14 +1409,6 @@
       set_inline_cleanup_info(_block, _last, state());
     }
 
-    // State at end of inlined method is the state of the caller
-    // without the method parameters on stack, including the
-    // return value, if any, of the inlined method on operand stack.
-    set_state(scope_data()->continuation_state()->copy());
-    if (x) {
-      state()->push(x->type(), x);
-    }
-
     // The current bci() is in the wrong scope, so use the bci() of
     // the continuation point.
     append_with_bci(goto_callee, scope_data()->continuation()->bci());
@@ -1455,11 +1444,11 @@
                          field->will_link(method()->holder(), code);
   const bool is_initialized = is_loaded && holder->is_initialized();
 
-  ValueStack* state_copy = NULL;
+  ValueStack* state_before = NULL;
   if (!is_initialized || PatchALot) {
     // save state before instruction for debug info when
     // deoptimization happens during patching
-    state_copy = state()->copy();
+    state_before = copy_state_before();
   }
 
   Value obj = NULL;
@@ -1468,9 +1457,9 @@
     // fully initialized and resolved in this constant pool.  The will_link test
     // above essentially checks if this class is resolved in this constant pool
     // so, the is_initialized flag should be suffiect.
-    if (state_copy != NULL) {
+    if (state_before != NULL) {
       // build a patching constant
-      obj = new Constant(new ClassConstant(holder), state_copy);
+      obj = new Constant(new ClassConstant(holder), state_before);
     } else {
       obj = new Constant(new ClassConstant(holder));
     }
@@ -1499,25 +1488,32 @@
       }
       if (constant != NULL) {
         push(type, append(constant));
-        state_copy = NULL; // Not a potential deoptimization point (see set_state_before logic below)
       } else {
+        if (state_before == NULL) {
+          state_before = copy_state_for_exception();
+        }
         push(type, append(new LoadField(append(obj), offset, field, true,
-                                        lock_stack(), state_copy, is_loaded, is_initialized)));
+                                        state_before, is_loaded, is_initialized)));
       }
       break;
     }
     case Bytecodes::_putstatic:
       { Value val = pop(type);
-        append(new StoreField(append(obj), offset, field, val, true, lock_stack(), state_copy, is_loaded, is_initialized));
+        if (state_before == NULL) {
+          state_before = copy_state_for_exception();
+        }
+        append(new StoreField(append(obj), offset, field, val, true, state_before, is_loaded, is_initialized));
       }
       break;
     case Bytecodes::_getfield :
       {
-        LoadField* load = new LoadField(apop(), offset, field, false, lock_stack(), state_copy, is_loaded, true);
+        if (state_before == NULL) {
+          state_before = copy_state_for_exception();
+        }
+        LoadField* load = new LoadField(apop(), offset, field, false, state_before, is_loaded, true);
         Value replacement = is_loaded ? _memory->load(load) : load;
         if (replacement != load) {
-          assert(replacement->bci() != -99 || replacement->as_Phi() || replacement->as_Local(),
-                 "should already by linked");
+          assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked");
           push(type, replacement);
         } else {
           push(type, append(load));
@@ -1527,7 +1523,10 @@
 
     case Bytecodes::_putfield :
       { Value val = pop(type);
-        StoreField* store = new StoreField(apop(), offset, field, val, false, lock_stack(), state_copy, is_loaded, true);
+        if (state_before == NULL) {
+          state_before = copy_state_for_exception();
+        }
+        StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, is_loaded, true);
         if (is_loaded) store = _memory->store(store);
         if (store != NULL) {
           append(store);
@@ -1647,7 +1646,7 @@
           actual_recv = target->holder();
 
           // insert a check it's really the expected class.
-          CheckCast* c = new CheckCast(klass, receiver, NULL);
+          CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
           c->set_incompatible_class_change_check();
           c->set_direct_compare(klass->is_final());
           append_split(c);
@@ -1732,7 +1731,7 @@
 
   // We require the debug info to be the "state before" because
   // invokedynamics may deoptimize.
-  ValueStack* state_before = is_invokedynamic ? state()->copy() : NULL;
+  ValueStack* state_before = is_invokedynamic ? copy_state_before() : copy_state_exhandling();
 
   Values* args = state()->pop_arguments(target->arg_size_no_receiver());
   Value recv = has_receiver ? apop() : NULL;
@@ -1795,24 +1794,26 @@
 
 
 void GraphBuilder::new_instance(int klass_index) {
+  ValueStack* state_before = copy_state_exhandling();
   bool will_link;
   ciKlass* klass = stream()->get_klass(will_link);
   assert(klass->is_instance_klass(), "must be an instance klass");
-  NewInstance* new_instance = new NewInstance(klass->as_instance_klass());
+  NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before);
   _memory->new_instance(new_instance);
   apush(append_split(new_instance));
 }
 
 
 void GraphBuilder::new_type_array() {
-  apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index())));
+  ValueStack* state_before = copy_state_exhandling();
+  apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before)));
 }
 
 
 void GraphBuilder::new_object_array() {
   bool will_link;
   ciKlass* klass = stream()->get_klass(will_link);
-  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
+  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
   apush(append_split(n));
 }
@@ -1838,7 +1839,7 @@
 void GraphBuilder::check_cast(int klass_index) {
   bool will_link;
   ciKlass* klass = stream()->get_klass(will_link);
-  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
+  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
   CheckCast* c = new CheckCast(klass, apop(), state_before);
   apush(append_split(c));
   c->set_direct_compare(direct_compare(klass));
@@ -1859,7 +1860,7 @@
 void GraphBuilder::instance_of(int klass_index) {
   bool will_link;
   ciKlass* klass = stream()->get_klass(will_link);
-  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
+  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
   ipush(append_split(i));
   i->set_direct_compare(direct_compare(klass));
@@ -1879,25 +1880,13 @@
 
 void GraphBuilder::monitorenter(Value x, int bci) {
   // save state before locking in case of deoptimization after a NullPointerException
-  ValueStack* lock_stack_before = lock_stack();
-  append_with_bci(new MonitorEnter(x, state()->lock(scope(), x), lock_stack_before), bci);
+  ValueStack* state_before = copy_state_for_exception_with_bci(bci);
+  append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
   kill_all();
 }
 
 
 void GraphBuilder::monitorexit(Value x, int bci) {
-  // Note: the comment below is only relevant for the case where we do
-  // not deoptimize due to asynchronous exceptions (!(DeoptC1 &&
-  // DeoptOnAsyncException), which is not used anymore)
-
-  // Note: Potentially, the monitor state in an exception handler
-  //       can be wrong due to wrong 'initialization' of the handler
-  //       via a wrong asynchronous exception path. This can happen,
-  //       if the exception handler range for asynchronous exceptions
-  //       is too long (see also java bug 4327029, and comment in
-  //       GraphBuilder::handle_exception()). This may cause 'under-
-  //       flow' of the monitor stack => bailout instead.
-  if (state()->locks_size() < 1) BAILOUT("monitor stack underflow");
   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
   kill_all();
 }
@@ -1906,7 +1895,7 @@
 void GraphBuilder::new_multi_array(int dimensions) {
   bool will_link;
   ciKlass* klass = stream()->get_klass(will_link);
-  ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
+  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
 
   Values* dims = new Values(dimensions, NULL);
   // fill in all dimensions
@@ -1921,8 +1910,10 @@
 void GraphBuilder::throw_op(int bci) {
   // We require that the debug info for a Throw be the "state before"
   // the Throw (i.e., exception oop is still on TOS)
-  ValueStack* state_before = state()->copy();
+  ValueStack* state_before = copy_state_before_with_bci(bci);
   Throw* t = new Throw(apop(), state_before);
+  // operand stack not needed after a throw
+  state()->truncate_stack(0);
   append_with_bci(t, bci);
 }
 
@@ -1947,60 +1938,62 @@
 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
   Canonicalizer canon(compilation(), instr, bci);
   Instruction* i1 = canon.canonical();
-  if (i1->bci() != -99) {
+  if (i1->is_linked() || !i1->can_be_linked()) {
     // Canonicalizer returned an instruction which was already
     // appended so simply return it.
     return i1;
-  } else if (UseLocalValueNumbering) {
+  }
+
+  if (UseLocalValueNumbering) {
     // Lookup the instruction in the ValueMap and add it to the map if
     // it's not found.
     Instruction* i2 = vmap()->find_insert(i1);
     if (i2 != i1) {
       // found an entry in the value map, so just return it.
-      assert(i2->bci() != -1, "should already be linked");
+      assert(i2->is_linked(), "should already be linked");
       return i2;
     }
     ValueNumberingEffects vne(vmap());
     i1->visit(&vne);
   }
 
-  if (i1->as_Phi() == NULL && i1->as_Local() == NULL) {
-    // i1 was not eliminated => append it
-    assert(i1->next() == NULL, "shouldn't already be linked");
-    _last = _last->set_next(i1, canon.bci());
-    if (++_instruction_count >= InstructionCountCutoff
-        && !bailed_out()) {
-      // set the bailout state but complete normal processing.  We
-      // might do a little more work before noticing the bailout so we
-      // want processing to continue normally until it's noticed.
-      bailout("Method and/or inlining is too large");
+  // i1 was not eliminated => append it
+  assert(i1->next() == NULL, "shouldn't already be linked");
+  _last = _last->set_next(i1, canon.bci());
+
+  if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) {
+    // set the bailout state but complete normal processing.  We
+    // might do a little more work before noticing the bailout so we
+    // want processing to continue normally until it's noticed.
+    bailout("Method and/or inlining is too large");
+  }
+
+#ifndef PRODUCT
+  if (PrintIRDuringConstruction) {
+    InstructionPrinter ip;
+    ip.print_line(i1);
+    if (Verbose) {
+      state()->print();
     }
-
-#ifndef PRODUCT
-    if (PrintIRDuringConstruction) {
-      InstructionPrinter ip;
-      ip.print_line(i1);
-      if (Verbose) {
-        state()->print();
+  }
+#endif
+
+  // save state after modification of operand stack for StateSplit instructions
+  StateSplit* s = i1->as_StateSplit();
+  if (s != NULL) {
+    if (EliminateFieldAccess) {
+      Intrinsic* intrinsic = s->as_Intrinsic();
+      if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
+        _memory->kill();
       }
     }
-#endif
-    assert(_last == i1, "adjust code below");
-    StateSplit* s = i1->as_StateSplit();
-    if (s != NULL && i1->as_BlockEnd() == NULL) {
-      if (EliminateFieldAccess) {
-        Intrinsic* intrinsic = s->as_Intrinsic();
-        if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
-          _memory->kill();
-        }
-      }
-      s->set_state(state()->copy());
-    }
-    // set up exception handlers for this instruction if necessary
-    if (i1->can_trap()) {
-      assert(exception_state() != NULL || !has_handler(), "must have setup exception state");
-      i1->set_exception_handlers(handle_exception(bci));
-    }
+    s->set_state(state()->copy(ValueStack::StateAfter, canon.bci()));
+  }
+
+  // set up exception handlers for this instruction if necessary
+  if (i1->can_trap()) {
+    i1->set_exception_handlers(handle_exception(i1));
+    assert(i1->exception_state() != NULL || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
   }
   return i1;
 }
@@ -2032,26 +2025,30 @@
       }
     }
   }
-  append(new NullCheck(value, lock_stack()));
+  append(new NullCheck(value, copy_state_for_exception()));
 }
 
 
 
-XHandlers* GraphBuilder::handle_exception(int cur_bci) {
-  // fast path if it is guaranteed that no exception handlers are present
-  if (!has_handler()) {
-    // TODO: check if return NULL is possible (avoids empty lists)
+XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
+  if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) {
+    assert(instruction->exception_state() == NULL
+           || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
+           || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->jvmti_can_access_local_variables()),
+           "exception_state should be of exception kind");
     return new XHandlers();
   }
 
   XHandlers*  exception_handlers = new XHandlers();
   ScopeData*  cur_scope_data = scope_data();
-  ValueStack* s = exception_state();
+  ValueStack* cur_state = instruction->state_before();
+  ValueStack* prev_state = NULL;
   int scope_count = 0;
 
-  assert(s != NULL, "exception state must be set");
+  assert(cur_state != NULL, "state_before must be set");
   do {
-    assert(cur_scope_data->scope() == s->scope(), "scopes do not match");
+    int cur_bci = cur_state->bci();
+    assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
 
     // join with all potential exception handlers
@@ -2075,10 +2072,15 @@
 
         // previously this was a BAILOUT, but this is not necessary
         // now because asynchronous exceptions are not handled this way.
-        assert(entry->state() == NULL || s->locks_size() == entry->state()->locks_size(), "locks do not match");
+        assert(entry->state() == NULL || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");
 
         // xhandler start with an empty expression stack
-        s->truncate_stack(cur_scope_data->caller_stack_size());
+        if (cur_state->stack_size() != 0) {
+          cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
+        }
+        if (instruction->exception_state() == NULL) {
+          instruction->set_exception_state(cur_state);
+        }
 
         // Note: Usually this join must work. However, very
         // complicated jsr-ret structures where we don't ret from
@@ -2087,12 +2089,12 @@
         // The only test case we've seen so far which exhibits this
         // problem is caught by the infinite recursion test in
         // GraphBuilder::jsr() if the join doesn't work.
-        if (!entry->try_merge(s)) {
+        if (!entry->try_merge(cur_state)) {
           BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
         }
 
         // add current state for correct handling of phi functions at begin of xhandler
-        int phi_operand = entry->add_exception_state(s);
+        int phi_operand = entry->add_exception_state(cur_state);
 
         // add entry to the list of xhandlers of this block
         _block->add_exception_handler(entry);
@@ -2119,26 +2121,39 @@
       }
     }
 
+    if (exception_handlers->length() == 0) {
+      // This scope and all callees do not handle exceptions, so the local
+      // variables of this scope are not needed. However, the scope itself is
+      // required for a correct exception stack trace -> clear out the locals.
+      if (_compilation->env()->jvmti_can_access_local_variables()) {
+        cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
+      } else {
+        cur_state = cur_state->copy(ValueStack::EmptyExceptionState, cur_state->bci());
+      }
+      if (prev_state != NULL) {
+        prev_state->set_caller_state(cur_state);
+      }
+      if (instruction->exception_state() == NULL) {
+        instruction->set_exception_state(cur_state);
+      }
+    }
+
     // Set up iteration for next time.
     // If parsing a jsr, do not grab exception handlers from the
     // parent scopes for this method (already got them, and they
     // needed to be cloned)
-    if (cur_scope_data->parsing_jsr()) {
-      IRScope* tmp_scope = cur_scope_data->scope();
-      while (cur_scope_data->parent() != NULL &&
-             cur_scope_data->parent()->scope() == tmp_scope) {
-        cur_scope_data = cur_scope_data->parent();
-      }
+
+    while (cur_scope_data->parsing_jsr()) {
+      cur_scope_data = cur_scope_data->parent();
     }
-    if (cur_scope_data != NULL) {
-      if (cur_scope_data->parent() != NULL) {
-        // must use pop_scope instead of caller_state to preserve all monitors
-        s = s->pop_scope();
-      }
-      cur_bci = cur_scope_data->scope()->caller_bci();
-      cur_scope_data = cur_scope_data->parent();
-      scope_count++;
-    }
+
+    assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
+    assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");
+
+    prev_state = cur_state;
+    cur_state = cur_state->caller_state();
+    cur_scope_data = cur_scope_data->parent();
+    scope_count++;
   } while (cur_scope_data != NULL);
 
   return exception_handlers;
@@ -2243,14 +2258,10 @@
   );
 
   ValueStack* state = b->state()->caller_state();
-  int index;
-  Value value;
-  for_each_state(state) {
-    for_each_local_value(state, index, value) {
-      Phi* phi = value->as_Phi();
-      assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
-    }
-  }
+  for_each_state_value(state, value,
+    Phi* phi = value->as_Phi();
+    assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
+  );
 #endif
 }
 
@@ -2265,7 +2276,7 @@
   // setup iteration
   kill_all();
   _block = beg;
-  _state = beg->state()->copy();
+  _state = beg->state()->copy_for_parsing();
   _last  = beg;
   iterate_bytecodes_for_block(beg->bci());
 }
@@ -2301,14 +2312,7 @@
   while (!bailed_out() && last()->as_BlockEnd() == NULL &&
          (code = stream()->next()) != ciBytecodeStream::EOBC() &&
          (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
-
-    if (has_handler() && can_trap(method(), code)) {
-      // copy the state because it is modified before handle_exception is called
-      set_exception_state(state()->copy());
-    } else {
-      // handle_exception is not called for this bytecode
-      set_exception_state(NULL);
-    }
+    assert(state()->kind() == ValueStack::Parsing, "invalid state kind");
 
     // Check for active jsr during OSR compilation
     if (compilation()->is_osr_compile()
@@ -2433,12 +2437,12 @@
       case Bytecodes::_lmul           : arithmetic_op(longType  , code); break;
       case Bytecodes::_fmul           : arithmetic_op(floatType , code); break;
       case Bytecodes::_dmul           : arithmetic_op(doubleType, code); break;
-      case Bytecodes::_idiv           : arithmetic_op(intType   , code, lock_stack()); break;
-      case Bytecodes::_ldiv           : arithmetic_op(longType  , code, lock_stack()); break;
+      case Bytecodes::_idiv           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
+      case Bytecodes::_ldiv           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
       case Bytecodes::_fdiv           : arithmetic_op(floatType , code); break;
       case Bytecodes::_ddiv           : arithmetic_op(doubleType, code); break;
-      case Bytecodes::_irem           : arithmetic_op(intType   , code, lock_stack()); break;
-      case Bytecodes::_lrem           : arithmetic_op(longType  , code, lock_stack()); break;
+      case Bytecodes::_irem           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
+      case Bytecodes::_lrem           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
       case Bytecodes::_frem           : arithmetic_op(floatType , code); break;
       case Bytecodes::_drem           : arithmetic_op(doubleType, code); break;
       case Bytecodes::_ineg           : negate_op(intType   ); break;
@@ -2515,11 +2519,10 @@
       case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
       case Bytecodes::_newarray       : new_type_array(); break;
       case Bytecodes::_anewarray      : new_object_array(); break;
-      case Bytecodes::_arraylength    : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
+      case Bytecodes::_arraylength    : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; }
       case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
       case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
       case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
-      // Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
       case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
       case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
       case Bytecodes::_wide           : ShouldNotReachHere(); break;
@@ -2546,28 +2549,22 @@
   if (end == NULL) {
     // all blocks must end with a BlockEnd instruction => add a Goto
     end = new Goto(block_at(s.cur_bci()), false);
-    _last = _last->set_next(end, prev_bci);
+    append(end);
   }
   assert(end == last()->as_BlockEnd(), "inconsistency");
 
-  // if the method terminates, we don't need the stack anymore
-  if (end->as_Return() != NULL) {
-    state()->clear_stack();
-  } else if (end->as_Throw() != NULL) {
-    // May have exception handler in caller scopes
-    state()->truncate_stack(scope()->lock_stack_size());
-  }
+  assert(end->state() != NULL, "state must already be present");
+  assert(end->as_Return() == NULL || end->as_Throw() == NULL || end->state()->stack_size() == 0, "stack not needed for return and throw");
 
   // connect to begin & set state
   // NOTE that inlining may have changed the block we are parsing
   block()->set_end(end);
-  end->set_state(state());
   // propagate state
   for (int i = end->number_of_sux() - 1; i >= 0; i--) {
     BlockBegin* sux = end->sux_at(i);
     assert(sux->is_predecessor(block()), "predecessor missing");
     // be careful, bailout if bytecodes are strange
-    if (!sux->try_merge(state())) BAILOUT_("block join failed", NULL);
+    if (!sux->try_merge(end->state())) BAILOUT_("block join failed", NULL);
     scope_data()->add_to_work_list(end->sux_at(i));
   }
 
@@ -2605,7 +2602,6 @@
 
 
 bool GraphBuilder::_can_trap      [Bytecodes::number_of_java_codes];
-bool GraphBuilder::_is_async[Bytecodes::number_of_java_codes];
 
 void GraphBuilder::initialize() {
   // the following bytecodes are assumed to potentially
@@ -2657,67 +2653,14 @@
     , Bytecodes::_multianewarray
     };
 
-  // the following bytecodes are assumed to potentially
-  // throw asynchronous exceptions in compiled code due
-  // to safepoints (note: these entries could be merged
-  // with the can_trap_list - however, we need to know
-  // which ones are asynchronous for now - see also the
-  // comment in GraphBuilder::handle_exception)
-  Bytecodes::Code is_async_list[] =
-    { Bytecodes::_ifeq
-    , Bytecodes::_ifne
-    , Bytecodes::_iflt
-    , Bytecodes::_ifge
-    , Bytecodes::_ifgt
-    , Bytecodes::_ifle
-    , Bytecodes::_if_icmpeq
-    , Bytecodes::_if_icmpne
-    , Bytecodes::_if_icmplt
-    , Bytecodes::_if_icmpge
-    , Bytecodes::_if_icmpgt
-    , Bytecodes::_if_icmple
-    , Bytecodes::_if_acmpeq
-    , Bytecodes::_if_acmpne
-    , Bytecodes::_goto
-    , Bytecodes::_jsr
-    , Bytecodes::_ret
-    , Bytecodes::_tableswitch
-    , Bytecodes::_lookupswitch
-    , Bytecodes::_ireturn
-    , Bytecodes::_lreturn
-    , Bytecodes::_freturn
-    , Bytecodes::_dreturn
-    , Bytecodes::_areturn
-    , Bytecodes::_return
-    , Bytecodes::_ifnull
-    , Bytecodes::_ifnonnull
-    , Bytecodes::_goto_w
-    , Bytecodes::_jsr_w
-    };
-
   // inititialize trap tables
   for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
     _can_trap[i] = false;
-    _is_async[i] = false;
   }
   // set standard trap info
   for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
     _can_trap[can_trap_list[j]] = true;
   }
-
-  // We now deoptimize if an asynchronous exception is thrown. This
-  // considerably cleans up corner case issues related to javac's
-  // incorrect exception handler ranges for async exceptions and
-  // allows us to precisely analyze the types of exceptions from
-  // certain bytecodes.
-  if (!(DeoptC1 && DeoptOnAsyncException)) {
-    // set asynchronous trap info
-    for (uint k = 0; k < ARRAY_SIZE(is_async_list); k++) {
-      assert(!_can_trap[is_async_list[k]], "can_trap_list and is_async_list should be disjoint");
-      _can_trap[is_async_list[k]] = true;
-      _is_async[is_async_list[k]] = true;
-    }
-  }
 }
 
 
@@ -2733,7 +2676,7 @@
   h->set_end(g);
   h->set(f);
   // setup header block end state
-  ValueStack* s = state->copy(); // can use copy since stack is empty (=> no phis)
+  ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
   assert(s->stack_is_empty(), "must have empty stack at entry point");
   g->set_state(s);
   return h;
@@ -2768,8 +2711,8 @@
   start->set_next(base, 0);
   start->set_end(base);
   // create & setup state for start block
-  start->set_state(state->copy());
-  base->set_state(state->copy());
+  start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
+  base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
 
   if (base->std_entry()->state() == NULL) {
     // setup states for header blocks
@@ -2803,6 +2746,7 @@
   kill_all();
   _block = _osr_entry;
   _state = _osr_entry->state()->copy();
+  assert(_state->bci() == osr_bci, "mismatch");
   _last  = _osr_entry;
   Value e = append(new OsrEntry());
   e->set_needs_null_check(false);
@@ -2852,7 +2796,6 @@
   assert(state->caller_state() == NULL, "should be top scope");
   state->clear_locals();
   Goto* g = new Goto(target, false);
-  g->set_state(_state->copy());
   append(g);
   _osr_entry->set_end(g);
   target->merge(_osr_entry->end()->state());
@@ -2862,7 +2805,7 @@
 
 
 ValueStack* GraphBuilder::state_at_entry() {
-  ValueStack* state = new ValueStack(scope(), method()->max_locals(), method()->max_stack());
+  ValueStack* state = new ValueStack(scope(), NULL);
 
   // Set up locals for receiver
   int idx = 0;
@@ -2886,7 +2829,7 @@
 
   // lock synchronized method
   if (method()->is_synchronized()) {
-    state->lock(scope(), NULL);
+    state->lock(NULL);
   }
 
   return state;
@@ -2895,7 +2838,6 @@
 
 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
   : _scope_data(NULL)
-  , _exception_state(NULL)
   , _instruction_count(0)
   , _osr_entry(NULL)
   , _memory(new MemoryBuffer())
@@ -2919,7 +2861,6 @@
 
   // complete graph
   _vmap        = new ValueMap();
-  scope->compute_lock_stack_size();
   switch (scope->method()->intrinsic_id()) {
   case vmIntrinsics::_dabs          : // fall through
   case vmIntrinsics::_dsqrt         : // fall through
@@ -2945,7 +2886,7 @@
 
       // setup the initial block state
       _block = start_block;
-      _state = start_block->state()->copy();
+      _state = start_block->state()->copy_for_parsing();
       _last  = start_block;
       load_local(doubleType, 0);
 
@@ -2957,7 +2898,6 @@
       // connect the begin and end blocks and we're all done.
       BlockEnd* end = last()->as_BlockEnd();
       block()->set_end(end);
-      end->set_state(state());
       break;
     }
   default:
@@ -2988,13 +2928,38 @@
 }
 
 
-ValueStack* GraphBuilder::lock_stack() {
-  // return a new ValueStack representing just the current lock stack
-  // (for debug info at safepoints in exception throwing or handling)
-  ValueStack* new_stack = state()->copy_locks();
-  return new_stack;
+ValueStack* GraphBuilder::copy_state_before() {
+  return copy_state_before_with_bci(bci());
 }
 
+ValueStack* GraphBuilder::copy_state_exhandling() {
+  return copy_state_exhandling_with_bci(bci());
+}
+
+ValueStack* GraphBuilder::copy_state_for_exception() {
+  return copy_state_for_exception_with_bci(bci());
+}
+
+ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) {
+  return state()->copy(ValueStack::StateBefore, bci);
+}
+
+ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) {
+  if (!has_handler()) return NULL;
+  return state()->copy(ValueStack::StateBefore, bci);
+}
+
+ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) {
+  ValueStack* s = copy_state_exhandling_with_bci(bci);
+  if (s == NULL) {
+    if (_compilation->env()->jvmti_can_access_local_variables()) {
+      s = state()->copy(ValueStack::ExceptionState, bci);
+    } else {
+      s = state()->copy(ValueStack::EmptyExceptionState, bci);
+    }
+  }
+  return s;
+}
 
 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
   int recur_level = 0;
@@ -3177,9 +3142,9 @@
   // create intrinsic node
   const bool has_receiver = !callee->is_static();
   ValueType* result_type = as_ValueType(callee->return_type());
+  ValueStack* state_before = copy_state_for_exception();
 
   Values* args = state()->pop_arguments(callee->arg_size());
-  ValueStack* locks = lock_stack();
 
   if (is_profiling()) {
     // Don't profile in the special case where the root method
@@ -3198,7 +3163,7 @@
     }
   }
 
-  Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, lock_stack(),
+  Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, state_before,
                                     preserves_state, cantrap);
   // append instruction & push result
   Value value = append_split(result);
@@ -3236,10 +3201,9 @@
   assert(jsr_start_block != NULL, "jsr start block must exist");
   assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
   Goto* goto_sub = new Goto(jsr_start_block, false);
-  goto_sub->set_state(state());
   // Must copy state to avoid wrong sharing when parsing bytecodes
   assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
-  jsr_start_block->set_state(state()->copy());
+  jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci));
   append(goto_sub);
   _block->set_end(goto_sub);
   _last = _block = jsr_start_block;
@@ -3290,7 +3254,6 @@
 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
   assert(lock != NULL && sync_handler != NULL, "lock or handler missing");
 
-  set_exception_state(state()->copy());
   monitorenter(lock, SynchronizationEntryBCI);
   assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
   _last->set_needs_null_check(false);
@@ -3332,7 +3295,7 @@
   int bci = SynchronizationEntryBCI;
   if (lock) {
     assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
-    if (lock->bci() == -99) {
+    if (!lock->is_linked()) {
       lock = append_with_bci(lock, -1);
     }
 
@@ -3342,21 +3305,17 @@
     // exit the context of the synchronized method
     if (!default_handler) {
       pop_scope();
-      _state = _state->copy();
-      bci = _state->scope()->caller_bci();
-      _state = _state->pop_scope()->copy();
+      bci = _state->caller_state()->bci();
+      _state = _state->caller_state()->copy_for_parsing();
     }
   }
 
   // perform the throw as if at the the call site
   apush(exception);
-
-  set_exception_state(state()->copy());
   throw_op(bci);
 
   BlockEnd* end = last()->as_BlockEnd();
   block()->set_end(end);
-  end->set_state(state());
 
   _block = orig_block;
   _state = orig_state;
@@ -3487,7 +3446,7 @@
   // Pass parameters into callee state: add assignments
   // note: this will also ensure that all arguments are computed before being passed
   ValueStack* callee_state = state();
-  ValueStack* caller_state = scope()->caller_state();
+  ValueStack* caller_state = state()->caller_state();
   { int i = args_base;
     while (i < caller_state->stack_size()) {
       const int par_no = i - args_base;
@@ -3502,16 +3461,7 @@
   // Note that we preserve locals state in case we can use it later
   // (see use of pop_scope() below)
   caller_state->truncate_stack(args_base);
-  callee_state->truncate_stack(args_base);
-
-  // Setup state that is used at returns form the inlined method.
-  // This is essentially the state of the continuation block,
-  // but without the return value on stack, if any, this will
-  // be pushed at the return instruction (see method_return).
-  scope_data()->set_continuation_state(caller_state->copy());
-
-  // Compute lock stack size for callee scope now that args have been passed
-  scope()->compute_lock_stack_size();
+  assert(callee_state->stack_size() == 0, "callee stack must be empty");
 
   Value lock;
   BlockBegin* sync_handler;
@@ -3520,11 +3470,8 @@
   if (callee->is_synchronized()) {
     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
                                : state()->local_at(0);
-    sync_handler = new BlockBegin(-1);
+    sync_handler = new BlockBegin(SynchronizationEntryBCI);
     inline_sync_entry(lock, sync_handler);
-
-    // recompute the lock stack size
-    scope()->compute_lock_stack_size();
   }
 
 
@@ -3532,7 +3479,6 @@
   if (callee_start_block != NULL) {
     assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
     Goto* goto_callee = new Goto(callee_start_block, false);
-    goto_callee->set_state(state());
     // The state for this goto is in the scope of the callee, so use
     // the entry bci for the callee instead of the call site bci.
     append_with_bci(goto_callee, 0);
@@ -3579,7 +3525,7 @@
       && block() == orig_block
       && block() == inline_cleanup_block()) {
     _last = inline_cleanup_return_prev();
-    _state = inline_cleanup_state()->pop_scope();
+    _state = inline_cleanup_state();
   } else if (continuation_preds == cont->number_of_preds()) {
     // Inlining caused that the instructions after the invoke in the
     // caller are not reachable any more. So skip filling this block
@@ -3645,8 +3591,7 @@
     blb.bci2block()->at_put(0, NULL);
   }
 
-  callee_scope->set_caller_state(state());
-  set_state(state()->push_scope(callee_scope));
+  set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci())));
 
   ScopeData* data = new ScopeData(scope_data());
   data->set_scope(callee_scope);
@@ -3670,10 +3615,6 @@
   data->set_scope(scope());
   data->setup_jsr_xhandlers();
   data->set_continuation(continuation());
-  if (continuation() != NULL) {
-    assert(continuation_state() != NULL, "");
-    data->set_continuation_state(continuation_state()->copy());
-  }
   data->set_jsr_continuation(jsr_continuation);
   _scope_data = data;
 }
@@ -3768,6 +3709,7 @@
 
 
 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
+  ValueStack* state_before = copy_state_for_exception();
   ValueType* result_type = as_ValueType(callee->return_type());
   assert(result_type->is_int(), "int result");
   Values* args = state()->pop_arguments(callee->arg_size());
@@ -3796,7 +3738,7 @@
   // know which ones so mark the state as no preserved.  This will
   // cause CSE to invalidate memory across it.
   bool preserves_state = false;
-  Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, lock_stack(), preserves_state);
+  Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
   append_split(result);
   push(result_type, result);
   compilation()->set_has_unsafe_access(true);
--- a/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Tue Dec 29 19:08:54 2009 +0100
@@ -58,9 +58,6 @@
     // BlockEnds.
     BlockBegin*  _continuation;
 
-    // Without return value of inlined method on stack
-    ValueStack*  _continuation_state;
-
     // Was this ScopeData created only for the parsing and inlining of
     // a jsr?
     bool         _parsing_jsr;
@@ -125,14 +122,10 @@
     void set_stream(ciBytecodeStream* stream)      { _stream = stream;          }
 
     intx max_inline_size() const                   { return _max_inline_size;   }
-    int  caller_stack_size() const;
 
     BlockBegin* continuation() const               { return _continuation;      }
     void set_continuation(BlockBegin* cont)        { _continuation = cont;      }
 
-    ValueStack* continuation_state() const         { return _continuation_state; }
-    void set_continuation_state(ValueStack* s)     { _continuation_state = s; }
-
     // Indicates whether this ScopeData was pushed only for the
     // parsing and inlining of a jsr
     bool parsing_jsr() const                       { return _parsing_jsr;       }
@@ -163,7 +156,6 @@
 
   // for all GraphBuilders
   static bool       _can_trap[Bytecodes::number_of_java_codes];
-  static bool       _is_async[Bytecodes::number_of_java_codes];
 
   // for each instance of GraphBuilder
   ScopeData*        _scope_data;                 // Per-scope data; used for inlining
@@ -179,7 +171,6 @@
   // for each call to connect_to_end; can also be set by inliner
   BlockBegin*       _block;                      // the current block
   ValueStack*       _state;                      // the current execution state
-  ValueStack*       _exception_state;            // state that will be used by handle_exception
   Instruction*      _last;                       // the last instruction added
   bool              _skip_block;                 // skip processing of the rest of this block
 
@@ -194,8 +185,6 @@
   ValueStack*       state() const                { return _state; }
   void              set_state(ValueStack* state) { _state = state; }
   IRScope*          scope() const                { return scope_data()->scope(); }
-  ValueStack*       exception_state() const      { return _exception_state; }
-  void              set_exception_state(ValueStack* s) { _exception_state = s; }
   ciMethod*         method() const               { return scope()->method(); }
   ciBytecodeStream* stream() const               { return scope_data()->stream(); }
   Instruction*      last() const                 { return _last; }
@@ -230,7 +219,7 @@
   void load_indexed (BasicType type);
   void store_indexed(BasicType type);
   void stack_op(Bytecodes::Code code);
-  void arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* lock_stack = NULL);
+  void arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before = NULL);
   void negate_op(ValueType* type);
   void shift_op(ValueType* type, Bytecodes::Code code);
   void logic_op(ValueType* type, Bytecodes::Code code);
@@ -267,12 +256,8 @@
   Instruction* append_split(StateSplit* instr);
 
   // other helpers
-  static bool is_async(Bytecodes::Code code) {
-    assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
-    return _is_async[code];
-  }
   BlockBegin* block_at(int bci)                  { return scope_data()->block_at(bci); }
-  XHandlers* handle_exception(int bci);
+  XHandlers* handle_exception(Instruction* instruction);
   void connect_to_end(BlockBegin* beg);
   void null_check(Value value);
   void eliminate_redundant_phis(BlockBegin* start);
@@ -283,7 +268,28 @@
 
   void kill_all();
 
-  ValueStack* lock_stack();
+  // use of state copy routines (try to minimize unnecessary state
+  // object allocations):
+
+  // - if the instruction unconditionally needs a full copy of the
+  // state (for patching for example), then use copy_state_before*
+
+  // - if the instruction needs a full copy of the state only for
+  // handler generation (Instruction::needs_exception_state() returns
+  // false) then use copy_state_exhandling*
+
+  // - if the instruction needs either a full copy of the state for
+  // handler generation and a least a minimal copy of the state (as
+  // returned by Instruction::exception_state()) for debug info
+  // generation (that is when Instruction::needs_exception_state()
+  // returns true) then use copy_state_for_exception*
+
+  ValueStack* copy_state_before_with_bci(int bci);
+  ValueStack* copy_state_before();
+  ValueStack* copy_state_exhandling_with_bci(int bci);
+  ValueStack* copy_state_exhandling();
+  ValueStack* copy_state_for_exception_with_bci(int bci);
+  ValueStack* copy_state_for_exception();
 
   //
   // Inlining support
@@ -292,9 +298,7 @@
   // accessors
   bool parsing_jsr() const                               { return scope_data()->parsing_jsr();           }
   BlockBegin* continuation() const                       { return scope_data()->continuation();          }
-  ValueStack* continuation_state() const                 { return scope_data()->continuation_state();    }
   BlockBegin* jsr_continuation() const                   { return scope_data()->jsr_continuation();      }
-  int caller_stack_size() const                          { return scope_data()->caller_stack_size();     }
   void set_continuation(BlockBegin* continuation)        { scope_data()->set_continuation(continuation); }
   void set_inline_cleanup_info(BlockBegin* block,
                                Instruction* return_prev,
--- a/src/share/vm/c1/c1_IR.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_IR.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -116,24 +116,6 @@
 
 
 // Implementation of IRScope
-
-BlockBegin* IRScope::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
-  if (entry == NULL) return NULL;
-  assert(entry->is_set(f), "entry/flag mismatch");
-  // create header block
-  BlockBegin* h = new BlockBegin(entry->bci());
-  BlockEnd* g = new Goto(entry, false);
-  h->set_next(g, entry->bci());
-  h->set_end(g);
-  h->set(f);
-  // setup header block end state
-  ValueStack* s = state->copy(); // can use copy since stack is empty (=> no phis)
-  assert(s->stack_is_empty(), "must have empty stack at entry point");
-  g->set_state(s);
-  return h;
-}
-
-
 BlockBegin* IRScope::build_graph(Compilation* compilation, int osr_bci) {
   GraphBuilder gm(compilation, this);
   NOT_PRODUCT(if (PrintValueNumbering && Verbose) gm.print_stats());
@@ -145,12 +127,9 @@
 IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMethod* method, int osr_bci, bool create_graph)
 : _callees(2)
 , _compilation(compilation)
-, _lock_stack_size(-1)
 , _requires_phi_function(method->max_locals())
 {
   _caller             = caller;
-  _caller_bci         = caller == NULL ? -1 : caller_bci;
-  _caller_state       = NULL; // Must be set later if needed
   _level              = caller == NULL ?  0 : caller->level() + 1;
   _method             = method;
   _xhandlers          = new XHandlers(method);
@@ -182,32 +161,6 @@
 }
 
 
-void IRScope::compute_lock_stack_size() {
-  if (!InlineMethodsWithExceptionHandlers) {
-    _lock_stack_size = 0;
-    return;
-  }
-
-  // Figure out whether we have to preserve expression stack elements
-  // for parent scopes, and if so, how many
-  IRScope* cur_scope = this;
-  while (cur_scope != NULL && !cur_scope->xhandlers()->has_handlers()) {
-    cur_scope = cur_scope->caller();
-  }
-  _lock_stack_size = (cur_scope == NULL ? 0 :
-                      (cur_scope->caller_state() == NULL ? 0 :
-                       cur_scope->caller_state()->stack_size()));
-}
-
-int IRScope::top_scope_bci() const {
-  assert(!is_top_scope(), "no correct answer for top scope possible");
-  const IRScope* scope = this;
-  while (!scope->caller()->is_top_scope()) {
-    scope = scope->caller();
-  }
-  return scope->caller_bci();
-}
-
 bool IRScopeDebugInfo::should_reexecute() {
   ciMethod* cur_method = scope()->method();
   int       cur_bci    = bci();
@@ -222,37 +175,24 @@
 // Implementation of CodeEmitInfo
 
 // Stack must be NON-null
-CodeEmitInfo::CodeEmitInfo(int bci, ValueStack* stack, XHandlers* exception_handlers)
+CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers)
   : _scope(stack->scope())
-  , _bci(bci)
   , _scope_debug_info(NULL)
   , _oop_map(NULL)
   , _stack(stack)
   , _exception_handlers(exception_handlers)
-  , _next(NULL)
-  , _id(-1)
   , _is_method_handle_invoke(false) {
   assert(_stack != NULL, "must be non null");
-  assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
 }
 
 
-CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
+CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
   : _scope(info->_scope)
   , _exception_handlers(NULL)
-  , _bci(info->_bci)
   , _scope_debug_info(NULL)
   , _oop_map(NULL)
+  , _stack(stack == NULL ? info->_stack : stack)
   , _is_method_handle_invoke(info->_is_method_handle_invoke) {
-  if (lock_stack_only) {
-    if (info->_stack != NULL) {
-      _stack = info->_stack->copy_locks();
-    } else {
-      _stack = NULL;
-    }
-  } else {
-    _stack = info->_stack;
-  }
 
   // deep copy of exception handlers
   if (info->_exception_handlers != NULL) {
@@ -273,8 +213,6 @@
   assert(_oop_map != NULL, "oop map must already exist");
   assert(opr->is_single_cpu(), "should not call otherwise");
 
-  int frame_size = frame_map()->framesize();
-  int arg_count = frame_map()->oop_map_arg_count();
   VMReg name = frame_map()->regname(opr);
   _oop_map->set_oop(name);
 }
@@ -383,8 +321,7 @@
   void visit(Value* n) {
     // Local instructions and Phis for expression stack values at the
     // start of basic blocks are not added to the instruction list
-    if ((*n)->bci() == -99 && (*n)->as_Local() == NULL &&
-        (*n)->as_Phi() == NULL) {
+    if (!(*n)->is_linked()&& (*n)->can_be_linked()) {
       assert(false, "a node was not appended to the graph");
       Compilation::current()->bailout("a node was not appended to the graph");
     }
@@ -1338,7 +1275,7 @@
     // need to remove this instruction from the instruction stream
     if (n->subst() != n) {
       assert(last != NULL, "must have last");
-      last->set_next(n->next(), n->next()->bci());
+      last->set_next(n->next());
     } else {
       last = n;
     }
--- a/src/share/vm/c1/c1_IR.hpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_IR.hpp	Tue Dec 29 19:08:54 2009 +0100
@@ -132,8 +132,6 @@
   // hierarchy
   Compilation*  _compilation;                    // the current compilation
   IRScope*      _caller;                         // the caller scope, or NULL
-  int           _caller_bci;                     // the caller bci of the corresponding (inlined) invoke, or < 0
-  ValueStack*   _caller_state;                   // the caller state, or NULL
   int           _level;                          // the inlining level
   ciMethod*     _method;                         // the corresponding method
   IRScopeList   _callees;                        // the inlined method scopes
@@ -144,15 +142,9 @@
   bool          _monitor_pairing_ok;             // the monitor pairing info
   BlockBegin*   _start;                          // the start block, successsors are method entries
 
-  // lock stack management
-  int           _lock_stack_size;                // number of expression stack elements which, if present,
-                                                 // must be spilled to the stack because of exception
-                                                 // handling inside inlined methods
-
   BitMap        _requires_phi_function;          // bit is set if phi functions at loop headers are necessary for a local variable
 
   // helper functions
-  BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
   BlockBegin* build_graph(Compilation* compilation, int osr_bci);
 
  public:
@@ -162,33 +154,16 @@
   // accessors
   Compilation*  compilation() const              { return _compilation; }
   IRScope*      caller() const                   { return _caller; }
-  int           caller_bci() const               { return _caller_bci; }
-  ValueStack*   caller_state() const             { return _caller_state; }
   int           level() const                    { return _level; }
   ciMethod*     method() const                   { return _method; }
   int           max_stack() const;               // NOTE: expensive
-  int           lock_stack_size() const          {
-    assert(_lock_stack_size != -1, "uninitialized");
-    return _lock_stack_size;
-  }
   BitMap&       requires_phi_function()          { return _requires_phi_function; }
 
-  // mutators
-  // Needed because caller state is not ready at time of IRScope construction
-  void          set_caller_state(ValueStack* state) { _caller_state = state; }
-  // Needed because caller state changes after IRScope construction.
-  // Computes number of expression stack elements whose state must be
-  // preserved in the case of an exception; these may be seen by
-  // caller scopes. Zero when inlining of methods containing exception
-  // handlers is disabled, otherwise a conservative approximation.
-  void          compute_lock_stack_size();
-
   // hierarchy
   bool          is_top_scope() const             { return _caller == NULL; }
   void          add_callee(IRScope* callee)      { _callees.append(callee); }
   int           number_of_callees() const        { return _callees.length(); }
   IRScope*      callee_no(int i) const           { return _callees.at(i); }
-  int           top_scope_bci() const;
 
   // accessors, graph
   bool          is_valid() const                 { return start() != NULL; }
@@ -266,9 +241,6 @@
   XHandlers*        _exception_handlers;
   OopMap*           _oop_map;
   ValueStack*       _stack;                      // used by deoptimization (contains also monitors
-  int               _bci;
-  CodeEmitInfo*     _next;
-  int               _id;
   bool              _is_method_handle_invoke;    // true if the associated call site is a MethodHandle call site.
 
   FrameMap*     frame_map() const                { return scope()->compilation()->frame_map(); }
@@ -277,23 +249,10 @@
  public:
 
   // use scope from ValueStack
-  CodeEmitInfo(int bci, ValueStack* stack, XHandlers* exception_handlers);
-
-  // used by natives
-  CodeEmitInfo(IRScope* scope, int bci)
-    : _scope(scope)
-    , _bci(bci)
-    , _oop_map(NULL)
-    , _scope_debug_info(NULL)
-    , _stack(NULL)
-    , _exception_handlers(NULL)
-    , _next(NULL)
-    , _id(-1)
-    , _is_method_handle_invoke(false) {
-  }
+  CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers);
 
   // make a copy
-  CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only = false);
+  CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack = NULL);
 
   // accessors
   OopMap* oop_map()                              { return _oop_map; }
@@ -301,17 +260,10 @@
   IRScope* scope() const                         { return _scope; }
   XHandlers* exception_handlers() const          { return _exception_handlers; }
   ValueStack* stack() const                      { return _stack; }
-  int bci() const                                { return _bci; }
 
   void add_register_oop(LIR_Opr opr);
   void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
 
-  CodeEmitInfo* next() const        { return _next; }
-  void set_next(CodeEmitInfo* next) { _next = next; }
-
-  int id() const      { return _id; }
-  void set_id(int id) { _id = id; }
-
   bool     is_method_handle_invoke() const { return _is_method_handle_invoke;     }
   void set_is_method_handle_invoke(bool x) {        _is_method_handle_invoke = x; }
 };
--- a/src/share/vm/c1/c1_Instruction.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_Instruction.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -29,13 +29,6 @@
 // Implementation of Instruction
 
 
-#ifdef ASSERT
-void Instruction::create_hi_word() {
-  assert(type()->is_double_word() && _hi_word == NULL, "only double word has high word");
-  _hi_word = new HiWord(this);
-}
-#endif
-
 Instruction::Condition Instruction::mirror(Condition cond) {
   switch (cond) {
     case eql: return eql;
@@ -63,6 +56,15 @@
   return eql;
 }
 
+void Instruction::update_exception_state(ValueStack* state) {
+  if (state != NULL && (state->kind() == ValueStack::EmptyExceptionState || state->kind() == ValueStack::ExceptionState)) {
+    assert(state->kind() == ValueStack::EmptyExceptionState || Compilation::current()->env()->jvmti_can_access_local_variables(), "unexpected state kind");
+    _exception_state = state;
+  } else {
+    _exception_state = NULL;
+  }
+}
+
 
 Instruction* Instruction::prev(BlockBegin* block) {
   Instruction* p = NULL;
@@ -75,7 +77,24 @@
 }
 
 
+void Instruction::state_values_do(ValueVisitor* f) {
+  if (state_before() != NULL) {
+    state_before()->values_do(f);
+  }
+  if (exception_state() != NULL){
+    exception_state()->values_do(f);
+  }
+}
+
+
 #ifndef PRODUCT
+void Instruction::check_state(ValueStack* state) {
+  if (state != NULL) {
+    state->verify();
+  }
+}
+
+
 void Instruction::print() {
   InstructionPrinter ip;
   print(ip);
@@ -190,35 +209,6 @@
   return NULL;
 }
 
-
-void ArithmeticOp::other_values_do(ValueVisitor* f) {
-  if (lock_stack() != NULL) lock_stack()->values_do(f);
-}
-
-void NullCheck::other_values_do(ValueVisitor* f) {
-  lock_stack()->values_do(f);
-}
-
-void AccessArray::other_values_do(ValueVisitor* f) {
-  if (lock_stack() != NULL) lock_stack()->values_do(f);
-}
-
-
-// Implementation of AccessField
-
-void AccessField::other_values_do(ValueVisitor* f) {
-  if (state_before() != NULL) state_before()->values_do(f);
-  if (lock_stack() != NULL) lock_stack()->values_do(f);
-}
-
-
-// Implementation of StoreIndexed
-
-IRScope* StoreIndexed::scope() const {
-  return lock_stack()->scope();
-}
-
-
 // Implementation of ArithmeticOp
 
 bool ArithmeticOp::is_commutative() const {
@@ -266,13 +256,6 @@
 }
 
 
-// Implementation of CompareOp
-
-void CompareOp::other_values_do(ValueVisitor* f) {
-  if (state_before() != NULL) state_before()->values_do(f);
-}
-
-
 // Implementation of IfOp
 
 bool IfOp::is_commutative() const {
@@ -301,6 +284,7 @@
 
 
 void StateSplit::state_values_do(ValueVisitor* f) {
+  Instruction::state_values_do(f);
   if (state() != NULL) state()->values_do(f);
 }
 
@@ -316,30 +300,17 @@
 }
 
 
-void MonitorEnter::state_values_do(ValueVisitor* f) {
-  StateSplit::state_values_do(f);
-  _lock_stack_before->values_do(f);
-}
-
-
-void Intrinsic::state_values_do(ValueVisitor* f) {
-  StateSplit::state_values_do(f);
-  if (lock_stack() != NULL) lock_stack()->values_do(f);
-}
-
-
 // Implementation of Invoke
 
 
 Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args,
                int vtable_index, ciMethod* target, ValueStack* state_before)
-  : StateSplit(result_type)
+  : StateSplit(result_type, state_before)
   , _code(code)
   , _recv(recv)
   , _args(args)
   , _vtable_index(vtable_index)
   , _target(target)
-  , _state_before(state_before)
 {
   set_flag(TargetIsLoadedFlag,   target->is_loaded());
   set_flag(TargetIsFinalFlag,    target_is_loaded() && target->is_final_method());
@@ -376,7 +347,7 @@
 
 // Implementation of Contant
 intx Constant::hash() const {
-  if (_state == NULL) {
+  if (state_before() == NULL) {
     switch (type()->tag()) {
     case intTag:
       return HASH2(name(), type()->as_IntConstant()->value());
@@ -499,25 +470,6 @@
 }
 
 
-void Constant::other_values_do(ValueVisitor* f) {
-  if (state() != NULL) state()->values_do(f);
-}
-
-
-// Implementation of NewArray
-
-void NewArray::other_values_do(ValueVisitor* f) {
-  if (state_before() != NULL) state_before()->values_do(f);
-}
-
-
-// Implementation of TypeCheck
-
-void TypeCheck::other_values_do(ValueVisitor* f) {
-  if (state_before() != NULL) state_before()->values_do(f);
-}
-
-
 // Implementation of BlockBegin
 
 void BlockBegin::set_end(BlockEnd* end) {
@@ -604,23 +556,14 @@
 // of the inserted block, without recomputing the values of the other blocks
 // in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
 BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
-  // Try to make the bci close to a block with a single pred or sux,
-  // since this make the block layout algorithm work better.
-  int bci = -1;
-  if (sux->number_of_preds() == 1) {
-    bci = sux->bci();
-  } else {
-    bci = end()->bci();
-  }
-
-  BlockBegin* new_sux = new BlockBegin(bci);
+  BlockBegin* new_sux = new BlockBegin(-99);
 
   // mark this block (special treatment when block order is computed)
   new_sux->set(critical_edge_split_flag);
 
   // This goto is not a safepoint.
   Goto* e = new Goto(sux, false);
-  new_sux->set_next(e, bci);
+  new_sux->set_next(e, end()->state()->bci());
   new_sux->set_end(e);
   // setup states
   ValueStack* s = end()->state();
@@ -763,7 +706,7 @@
     }
 
     // copy state because it is altered
-    new_state = new_state->copy();
+    new_state = new_state->copy(ValueStack::BlockBeginState, bci());
 
     // Use method liveness to invalidate dead locals
     MethodLivenessResult liveness = new_state->scope()->method()->liveness_at_bci(bci());
@@ -800,19 +743,9 @@
     // initialize state of block
     set_state(new_state);
 
-  } else if (existing_state->is_same_across_scopes(new_state)) {
+  } else if (existing_state->is_same(new_state)) {
     TRACE_PHI(tty->print_cr("exisiting state found"));
 
-    // Inlining may cause the local state not to match up, so walk up
-    // the new state until we get to the same scope as the
-    // existing and then start processing from there.
-    while (existing_state->scope() != new_state->scope()) {
-      new_state = new_state->caller_state();
-      assert(new_state != NULL, "could not match up scopes");
-
-      assert(false, "check if this is necessary");
-    }
-
     assert(existing_state->scope() == new_state->scope(), "not matching");
     assert(existing_state->locals_size() == new_state->locals_size(), "not matching");
     assert(existing_state->stack_size() == new_state->stack_size(), "not matching");
@@ -969,11 +902,6 @@
 }
 
 
-void BlockEnd::other_values_do(ValueVisitor* f) {
-  if (state_before() != NULL) state_before()->values_do(f);
-}
-
-
 // Implementation of Phi
 
 // Normal phi functions take their operands from the last instruction of the
@@ -1006,11 +934,6 @@
 }
 
 
-// Implementation of Throw
-
-void Throw::state_values_do(ValueVisitor* f) {
-  BlockEnd::state_values_do(f);
-}
 
 void ProfileInvoke::state_values_do(ValueVisitor* f) {
   if (state() != NULL) state()->values_do(f);
--- a/src/share/vm/c1/c1_Instruction.hpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_Instruction.hpp	Tue Dec 29 19:08:54 2009 +0100
@@ -38,7 +38,6 @@
 // serve factoring.
 
 class Instruction;
-class   HiWord;
 class   Phi;
 class   Local;
 class   Constant;
@@ -149,7 +148,6 @@
 
 class InstructionVisitor: public StackObj {
  public:
-          void do_HiWord         (HiWord*          x) { ShouldNotReachHere(); }
   virtual void do_Phi            (Phi*             x) = 0;
   virtual void do_Local          (Local*           x) = 0;
   virtual void do_Constant       (Constant*        x) = 0;
@@ -272,7 +270,9 @@
 class Instruction: public CompilationResourceObj {
  private:
   int          _id;                              // the unique instruction id
-  int          _bci;                             // the instruction bci
+#ifndef PRODUCT
+  int          _printable_bci;                   // the bci of the instruction for printing
+#endif
   int          _use_count;                       // the number of instructions refering to this value (w/o prev/next); only roots can have use count = 0 or > 1
   int          _pin_state;                       // set of PinReason describing the reason for pinning
   ValueType*   _type;                            // the instruction value type
@@ -281,17 +281,18 @@
   LIR_Opr      _operand;                         // LIR specific information
   unsigned int _flags;                           // Flag bits
 
+  ValueStack*  _state_before;                    // Copy of state with input operands still on stack (or NULL)
+  ValueStack*  _exception_state;                 // Copy of state for exception handling
   XHandlers*   _exception_handlers;              // Flat list of exception handlers covering this instruction
 
-#ifdef ASSERT
-  HiWord*      _hi_word;
-#endif
-
   friend class UseCountComputer;
   friend class BlockBegin;
 
+  void update_exception_state(ValueStack* state);
+
+  bool has_printable_bci() const                 { return NOT_PRODUCT(_printable_bci != -99) PRODUCT_ONLY(false); }
+
  protected:
-  void set_bci(int bci)                          { assert(bci == SynchronizationEntryBCI || bci >= 0, "illegal bci"); _bci = bci; }
   void set_type(ValueType* type) {
     assert(type != NULL, "type must exist");
     _type = type;
@@ -325,6 +326,7 @@
     NeedsPatchingFlag,
     ThrowIncompatibleClassChangeErrorFlag,
     ProfileMDOFlag,
+    IsLinkedInBlockFlag,
     InstructionLastFlag
   };
 
@@ -356,31 +358,31 @@
   }
 
   // creation
-  Instruction(ValueType* type, bool type_is_constant = false, bool create_hi = true)
-  : _bci(-99)
-  , _use_count(0)
+  Instruction(ValueType* type, ValueStack* state_before = NULL, bool type_is_constant = false, bool create_hi = true)
+  : _use_count(0)
+#ifndef PRODUCT
+  , _printable_bci(-99)
+#endif
   , _pin_state(0)
   , _type(type)
   , _next(NULL)
   , _subst(NULL)
   , _flags(0)
   , _operand(LIR_OprFact::illegalOpr)
+  , _state_before(state_before)
   , _exception_handlers(NULL)
-#ifdef ASSERT
-  , _hi_word(NULL)
-#endif
   {
+    check_state(state_before);
     assert(type != NULL && (!type->is_constant() || type_is_constant), "type must exist");
-#ifdef ASSERT
-    if (create_hi && type->is_double_word()) {
-      create_hi_word();
-    }
-#endif
+    update_exception_state(_state_before);
   }
 
   // accessors
   int id() const                                 { return _id; }
-  int bci() const                                { return _bci; }
+#ifndef PRODUCT
+  int printable_bci() const                      { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
+  void set_printable_bci(int bci)                { NOT_PRODUCT(_printable_bci = bci;) }
+#endif
   int use_count() const                          { return _use_count; }
   int pin_state() const                          { return _pin_state; }
   bool is_pinned() const                         { return _pin_state != 0 || PinAllInstructions; }
@@ -393,9 +395,13 @@
 
   void set_needs_null_check(bool f)              { set_flag(NeedsNullCheckFlag, f); }
   bool needs_null_check() const                  { return check_flag(NeedsNullCheckFlag); }
+  bool is_linked() const                         { return check_flag(IsLinkedInBlockFlag); }
+  bool can_be_linked()                           { return as_Local() == NULL && as_Phi() == NULL; }
 
   bool has_uses() const                          { return use_count() > 0; }
-  bool is_root() const                           { return is_pinned() || use_count() > 1; }
+  ValueStack* state_before() const               { return _state_before; }
+  ValueStack* exception_state() const            { return _exception_state; }
+  virtual bool needs_exception_state() const     { return true; }
   XHandlers* exception_handlers() const          { return _exception_handlers; }
 
   // manipulation
@@ -403,17 +409,23 @@
   void pin()                                     { _pin_state |= PinUnknown; }
   // DANGEROUS: only used by EliminateStores
   void unpin(PinReason reason)                   { assert((reason & PinUnknown) == 0, "can't unpin unknown state"); _pin_state &= ~reason; }
-  virtual void set_lock_stack(ValueStack* l)     { /* do nothing*/ }
-  virtual ValueStack* lock_stack() const         { return NULL; }
+
+  Instruction* set_next(Instruction* next) {
+    assert(next->has_printable_bci(), "_printable_bci should have been set");
+    assert(next != NULL, "must not be NULL");
+    assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next");
+    assert(next->can_be_linked(), "shouldn't link these instructions into list");
+
+    next->set_flag(Instruction::IsLinkedInBlockFlag, true);
+    _next = next;
+    return next;
+  }
 
   Instruction* set_next(Instruction* next, int bci) {
-    if (next != NULL) {
-      assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next");
-      assert(next->as_Phi() == NULL && next->as_Local() == NULL, "shouldn't link these instructions into list");
-      next->set_bci(bci);
-    }
-    _next = next;
-    return next;
+#ifndef PRODUCT
+    next->set_printable_bci(bci);
+#endif
+    return set_next(next);
   }
 
   void set_subst(Instruction* subst)             {
@@ -423,14 +435,7 @@
     _subst = subst;
   }
   void set_exception_handlers(XHandlers *xhandlers) { _exception_handlers = xhandlers; }
-
-#ifdef ASSERT
-  // HiWord is used for debugging and is allocated early to avoid
-  // allocation at inconvenient points
-  HiWord* hi_word() { return _hi_word; }
-  void create_hi_word();
-#endif
-
+  void set_exception_state(ValueStack* s)        { check_state(s); _exception_state = s; }
 
   // machine-specifics
   void set_operand(LIR_Opr operand)              { assert(operand != LIR_OprFact::illegalOpr, "operand must exist"); _operand = operand; }
@@ -438,7 +443,6 @@
 
   // generic
   virtual Instruction*      as_Instruction()     { return this; } // to satisfy HASHING1 macro
-  virtual HiWord*           as_HiWord()          { return NULL; }
   virtual Phi*           as_Phi()          { return NULL; }
   virtual Local*            as_Local()           { return NULL; }
   virtual Constant*         as_Constant()        { return NULL; }
@@ -493,7 +497,7 @@
   virtual bool can_trap() const                  { return false; }
 
   virtual void input_values_do(ValueVisitor* f)   = 0;
-  virtual void state_values_do(ValueVisitor* f)   { /* usually no state - override on demand */ }
+  virtual void state_values_do(ValueVisitor* f);
   virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
           void       values_do(ValueVisitor* f)   { input_values_do(f); state_values_do(f); other_values_do(f); }
 
@@ -505,6 +509,7 @@
   HASHING1(Instruction, false, id())             // hashing disabled by default
 
   // debugging
+  static void check_state(ValueStack* state)     PRODUCT_RETURN;
   void print()                                   PRODUCT_RETURN;
   void print_line()                              PRODUCT_RETURN;
   void print(InstructionPrinter& ip)             PRODUCT_RETURN;
@@ -541,40 +546,6 @@
 #endif // ASSERT
 
 
-// A HiWord occupies the 'high word' of a 2-word
-// expression stack entry. Hi & lo words must be
-// paired on the expression stack (otherwise the
-// bytecode sequence is illegal). Note that 'hi'
-// refers to the IR expression stack format and
-// does *not* imply a machine word ordering. No
-// HiWords are used in optimized mode for speed,
-// but NULL pointers are used instead.
-
-LEAF(HiWord, Instruction)
- private:
-  Value _lo_word;
-
- public:
-  // creation
-  HiWord(Value lo_word)
-    : Instruction(illegalType, false, false),
-      _lo_word(lo_word) {
-    // hi-words are also allowed for illegal lo-words
-    assert(lo_word->type()->is_double_word() || lo_word->type()->is_illegal(),
-           "HiWord must be used for 2-word values only");
-  }
-
-  // accessors
-  Value lo_word() const                          { return _lo_word->subst(); }
-
-  // for invalidating of HiWords
-  void make_illegal()                            { set_type(illegalType); }
-
-  // generic
-  virtual void input_values_do(ValueVisitor* f)   { ShouldNotReachHere(); }
-};
-
-
 // A Phi is a phi function in the sense of SSA form. It stands for
 // the value of a local variable at the beginning of a join block.
 // A Phi consists of n operands, one for every incoming branch.
@@ -656,31 +627,25 @@
 
 
 LEAF(Constant, Instruction)
-  ValueStack* _state;
-
  public:
   // creation
   Constant(ValueType* type):
-      Instruction(type, true)
-  , _state(NULL) {
+      Instruction(type, NULL, true)
+  {
     assert(type->is_constant(), "must be a constant");
   }
 
-  Constant(ValueType* type, ValueStack* state):
-    Instruction(type, true)
-  , _state(state) {
-    assert(state != NULL, "only used for constants which need patching");
+  Constant(ValueType* type, ValueStack* state_before):
+    Instruction(type, state_before, true)
+  {
+    assert(state_before != NULL, "only used for constants which need patching");
     assert(type->is_constant(), "must be a constant");
     // since it's patching it needs to be pinned
     pin();
   }
 
-  ValueStack* state() const               { return _state; }
-
-  // generic
-  virtual bool can_trap() const                  { return state() != NULL; }
+  virtual bool can_trap() const                  { return state_before() != NULL; }
   virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
-  virtual void other_values_do(ValueVisitor* f);
 
   virtual intx hash() const;
   virtual bool is_equal(Value v) const;
@@ -695,20 +660,16 @@
   Value       _obj;
   int         _offset;
   ciField*    _field;
-  ValueStack* _state_before;                     // state is set only for unloaded or uninitialized fields
-  ValueStack* _lock_stack;                       // contains lock and scope information
   NullCheck*  _explicit_null_check;              // For explicit null check elimination
 
  public:
   // creation
-  AccessField(Value obj, int offset, ciField* field, bool is_static, ValueStack* lock_stack,
+  AccessField(Value obj, int offset, ciField* field, bool is_static,
               ValueStack* state_before, bool is_loaded, bool is_initialized)
-  : Instruction(as_ValueType(field->type()->basic_type()))
+  : Instruction(as_ValueType(field->type()->basic_type()), state_before)
   , _obj(obj)
   , _offset(offset)
   , _field(field)
-  , _lock_stack(lock_stack)
-  , _state_before(state_before)
   , _explicit_null_check(NULL)
   {
     set_needs_null_check(!is_static);
@@ -734,13 +695,11 @@
   bool is_static() const                         { return check_flag(IsStaticFlag); }
   bool is_loaded() const                         { return check_flag(IsLoadedFlag); }
   bool is_initialized() const                    { return check_flag(IsInitializedFlag); }
-  ValueStack* state_before() const               { return _state_before; }
-  ValueStack* lock_stack() const                 { return _lock_stack; }
   NullCheck* explicit_null_check() const         { return _explicit_null_check; }
   bool needs_patching() const                    { return check_flag(NeedsPatchingFlag); }
 
   // manipulation
-  void set_lock_stack(ValueStack* l)             { _lock_stack = l; }
+
   // Under certain circumstances, if a previous NullCheck instruction
   // proved the target object non-null, we can eliminate the explicit
   // null check and do an implicit one, simply specifying the debug
@@ -751,16 +710,15 @@
   // generic
   virtual bool can_trap() const                  { return needs_null_check() || needs_patching(); }
   virtual void input_values_do(ValueVisitor* f)   { f->visit(&_obj); }
-  virtual void other_values_do(ValueVisitor* f);
 };
 
 
 LEAF(LoadField, AccessField)
  public:
   // creation
-  LoadField(Value obj, int offset, ciField* field, bool is_static, ValueStack* lock_stack,
+  LoadField(Value obj, int offset, ciField* field, bool is_static,
             ValueStack* state_before, bool is_loaded, bool is_initialized)
-  : AccessField(obj, offset, field, is_static, lock_stack, state_before, is_loaded, is_initialized)
+  : AccessField(obj, offset, field, is_static, state_before, is_loaded, is_initialized)
   {}
 
   ciType* declared_type() const;
@@ -777,9 +735,9 @@
 
  public:
   // creation
-  StoreField(Value obj, int offset, ciField* field, Value value, bool is_static, ValueStack* lock_stack,
+  StoreField(Value obj, int offset, ciField* field, Value value, bool is_static,
              ValueStack* state_before, bool is_loaded, bool is_initialized)
-  : AccessField(obj, offset, field, is_static, lock_stack, state_before, is_loaded, is_initialized)
+  : AccessField(obj, offset, field, is_static, state_before, is_loaded, is_initialized)
   , _value(value)
   {
     set_flag(NeedsWriteBarrierFlag, as_ValueType(field_type())->is_object());
@@ -799,29 +757,23 @@
 BASE(AccessArray, Instruction)
  private:
   Value       _array;
-  ValueStack* _lock_stack;
 
  public:
   // creation
-  AccessArray(ValueType* type, Value array, ValueStack* lock_stack)
-  : Instruction(type)
+  AccessArray(ValueType* type, Value array, ValueStack* state_before)
+  : Instruction(type, state_before)
   , _array(array)
-  , _lock_stack(lock_stack) {
+  {
     set_needs_null_check(true);
     ASSERT_VALUES
     pin(); // instruction with side effect (null exception or range check throwing)
   }
 
   Value array() const                            { return _array; }
-  ValueStack* lock_stack() const                 { return _lock_stack; }
-
-  // setters
-  void set_lock_stack(ValueStack* l)             { _lock_stack = l; }
 
   // generic
   virtual bool can_trap() const                  { return needs_null_check(); }
   virtual void input_values_do(ValueVisitor* f)   { f->visit(&_array); }
-  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -831,8 +783,8 @@
 
  public:
   // creation
-  ArrayLength(Value array, ValueStack* lock_stack)
-  : AccessArray(intType, array, lock_stack)
+  ArrayLength(Value array, ValueStack* state_before)
+  : AccessArray(intType, array, state_before)
   , _explicit_null_check(NULL) {}
 
   // accessors
@@ -855,8 +807,8 @@
 
  public:
   // creation
-  AccessIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* lock_stack)
-  : AccessArray(as_ValueType(elt_type), array, lock_stack)
+  AccessIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* state_before)
+  : AccessArray(as_ValueType(elt_type), array, state_before)
   , _index(index)
   , _length(length)
   , _elt_type(elt_type)
@@ -883,8 +835,8 @@
 
  public:
   // creation
-  LoadIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* lock_stack)
-  : AccessIndexed(array, index, length, elt_type, lock_stack)
+  LoadIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* state_before)
+  : AccessIndexed(array, index, length, elt_type, state_before)
   , _explicit_null_check(NULL) {}
 
   // accessors
@@ -910,8 +862,8 @@
   int       _profiled_bci;
  public:
   // creation
-  StoreIndexed(Value array, Value index, Value length, BasicType elt_type, Value value, ValueStack* lock_stack)
-  : AccessIndexed(array, index, length, elt_type, lock_stack)
+  StoreIndexed(Value array, Value index, Value length, BasicType elt_type, Value value, ValueStack* state_before)
+  : AccessIndexed(array, index, length, elt_type, state_before)
   , _value(value), _profiled_method(NULL), _profiled_bci(0)
   {
     set_flag(NeedsWriteBarrierFlag, (as_ValueType(elt_type)->is_object()));
@@ -922,7 +874,6 @@
 
   // accessors
   Value value() const                            { return _value; }
-  IRScope* scope() const;                        // the state's scope
   bool needs_write_barrier() const               { return check_flag(NeedsWriteBarrierFlag); }
   bool needs_store_check() const                 { return check_flag(NeedsStoreCheckFlag); }
   // Helpers for methodDataOop profiling
@@ -963,7 +914,12 @@
 
  public:
   // creation
-  Op2(ValueType* type, Bytecodes::Code op, Value x, Value y) : Instruction(type), _op(op), _x(x), _y(y) {
+  Op2(ValueType* type, Bytecodes::Code op, Value x, Value y, ValueStack* state_before = NULL)
+  : Instruction(type, state_before)
+  , _op(op)
+  , _x(x)
+  , _y(y)
+  {
     ASSERT_VALUES
   }
 
@@ -985,28 +941,21 @@
 
 
 LEAF(ArithmeticOp, Op2)
- private:
-  ValueStack* _lock_stack;                       // used only for division operations
  public:
   // creation
-  ArithmeticOp(Bytecodes::Code op, Value x, Value y, bool is_strictfp, ValueStack* lock_stack)
-  : Op2(x->type()->meet(y->type()), op, x, y)
-  ,  _lock_stack(lock_stack) {
+  ArithmeticOp(Bytecodes::Code op, Value x, Value y, bool is_strictfp, ValueStack* state_before)
+  : Op2(x->type()->meet(y->type()), op, x, y, state_before)
+  {
     set_flag(IsStrictfpFlag, is_strictfp);
     if (can_trap()) pin();
   }
 
   // accessors
-  ValueStack* lock_stack() const                 { return _lock_stack; }
   bool        is_strictfp() const                { return check_flag(IsStrictfpFlag); }
 
-  // setters
-  void set_lock_stack(ValueStack* l)             { _lock_stack = l; }
-
   // generic
   virtual bool is_commutative() const;
   virtual bool can_trap() const;
-  virtual void other_values_do(ValueVisitor* f);
   HASHING3(Op2, true, op(), x()->subst(), y()->subst())
 };
 
@@ -1033,21 +982,14 @@
 
 
 LEAF(CompareOp, Op2)
- private:
-  ValueStack* _state_before;                     // for deoptimization, when canonicalizing
  public:
   // creation
   CompareOp(Bytecodes::Code op, Value x, Value y, ValueStack* state_before)
-  : Op2(intType, op, x, y)
-  , _state_before(state_before)
+  : Op2(intType, op, x, y, state_before)
   {}
 
-  // accessors
-  ValueStack* state_before() const               { return _state_before; }
-
   // generic
   HASHING3(Op2, true, op(), x()->subst(), y()->subst())
-  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -1103,11 +1045,13 @@
 LEAF(NullCheck, Instruction)
  private:
   Value       _obj;
-  ValueStack* _lock_stack;
 
  public:
   // creation
-  NullCheck(Value obj, ValueStack* lock_stack) : Instruction(obj->type()->base()), _obj(obj), _lock_stack(lock_stack) {
+  NullCheck(Value obj, ValueStack* state_before)
+  : Instruction(obj->type()->base(), state_before)
+  , _obj(obj)
+  {
     ASSERT_VALUES
     set_can_trap(true);
     assert(_obj->type()->is_object(), "null check must be applied to objects only");
@@ -1116,16 +1060,13 @@
 
   // accessors
   Value obj() const                              { return _obj; }
-  ValueStack* lock_stack() const                 { return _lock_stack; }
 
   // setters
-  void set_lock_stack(ValueStack* l)             { _lock_stack = l; }
   void set_can_trap(bool can_trap)               { set_flag(CanTrapFlag, can_trap); }
 
   // generic
   virtual bool can_trap() const                  { return check_flag(CanTrapFlag); /* null-check elimination sets to false */ }
   virtual void input_values_do(ValueVisitor* f)   { f->visit(&_obj); }
-  virtual void other_values_do(ValueVisitor* f);
   HASHING1(NullCheck, true, obj()->subst())
 };
 
@@ -1139,7 +1080,10 @@
 
  public:
   // creation
-  StateSplit(ValueType* type) : Instruction(type), _state(NULL) {
+  StateSplit(ValueType* type, ValueStack* state_before = NULL)
+  : Instruction(type, state_before)
+  , _state(NULL)
+  {
     pin(PinStateSplitConstructor);
   }
 
@@ -1148,7 +1092,7 @@
   IRScope* scope() const;                        // the state's scope
 
   // manipulation
-  void set_state(ValueStack* state)              { _state = state; }
+  void set_state(ValueStack* state)              { assert(_state == NULL, "overwriting existing state"); check_state(state); _state = state; }
 
   // generic
   virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
@@ -1164,7 +1108,6 @@
   BasicTypeList*  _signature;
   int             _vtable_index;
   ciMethod*       _target;
-  ValueStack*     _state_before;  // Required for deoptimization.
 
  public:
   // creation
@@ -1180,7 +1123,6 @@
   int vtable_index() const                       { return _vtable_index; }
   BasicTypeList* signature() const               { return _signature; }
   ciMethod* target() const                       { return _target; }
-  ValueStack* state_before() const               { return _state_before; }
 
   // Returns false if target is not loaded
   bool target_is_final() const                   { return check_flag(TargetIsFinalFlag); }
@@ -1191,6 +1133,8 @@
   // JSR 292 support
   bool is_invokedynamic() const                  { return code() == Bytecodes::_invokedynamic; }
 
+  virtual bool needs_exception_state() const     { return false; }
+
   // generic
   virtual bool can_trap() const                  { return true; }
   virtual void input_values_do(ValueVisitor* f) {
@@ -1208,11 +1152,16 @@
 
  public:
   // creation
-  NewInstance(ciInstanceKlass* klass) : StateSplit(instanceType), _klass(klass) {}
+  NewInstance(ciInstanceKlass* klass, ValueStack* state_before)
+  : StateSplit(instanceType, state_before)
+  , _klass(klass)
+  {}
 
   // accessors
   ciInstanceKlass* klass() const                 { return _klass; }
 
+  virtual bool needs_exception_state() const     { return false; }
+
   // generic
   virtual bool can_trap() const                  { return true; }
   ciType* exact_type() const;
@@ -1222,22 +1171,24 @@
 BASE(NewArray, StateSplit)
  private:
   Value       _length;
-  ValueStack* _state_before;
 
  public:
   // creation
-  NewArray(Value length, ValueStack* state_before) : StateSplit(objectType), _length(length), _state_before(state_before) {
+  NewArray(Value length, ValueStack* state_before)
+  : StateSplit(objectType, state_before)
+  , _length(length)
+  {
     // Do not ASSERT_VALUES since length is NULL for NewMultiArray
   }
 
   // accessors
-  ValueStack* state_before() const               { return _state_before; }
   Value length() const                           { return _length; }
 
+  virtual bool needs_exception_state() const     { return false; }
+
   // generic
   virtual bool can_trap() const                  { return true; }
   virtual void input_values_do(ValueVisitor* f)   { StateSplit::input_values_do(f); f->visit(&_length); }
-  virtual void other_values_do(ValueVisitor* f);
 };
 
 
@@ -1247,7 +1198,10 @@
 
  public:
   // creation
-  NewTypeArray(Value length, BasicType elt_type) : NewArray(length, NULL), _elt_type(elt_type) {}
+  NewTypeArray(Value length, BasicType elt_type, ValueStack* state_before)
+  : NewArray(length, state_before)
+  , _elt_type(elt_type)
+  {}
 
   // accessors
   BasicType elt_type() const                     { return _elt_type; }
@@ -1303,7 +1257,6 @@
  private:
   ciKlass*    _klass;
   Value       _obj;
-  ValueStack* _state_before;
 
   ciMethod* _profiled_method;
   int       _profiled_bci;
@@ -1311,14 +1264,13 @@
  public:
   // creation
   TypeCheck(ciKlass* klass, Value obj, ValueType* type, ValueStack* state_before)
-  : StateSplit(type), _klass(klass), _obj(obj), _state_before(state_before),
+  : StateSplit(type, state_before), _klass(klass), _obj(obj),
     _profiled_method(NULL), _profiled_bci(0) {
     ASSERT_VALUES
     set_direct_compare(false);
   }
 
   // accessors
-  ValueStack* state_before() const               { return _state_before; }
   ciKlass* klass() const                         { return _klass; }
   Value obj() const                              { return _obj; }
   bool is_loaded() const                         { return klass() != NULL; }
@@ -1330,7 +1282,6 @@
   // generic
   virtual bool can_trap() const                  { return true; }
   virtual void input_values_do(ValueVisitor* f)   { StateSplit::input_values_do(f); f->visit(&_obj); }
-  virtual void other_values_do(ValueVisitor* f);
 
   // Helpers for methodDataOop profiling
   void set_should_profile(bool value)                { set_flag(ProfileMDOFlag, value); }
@@ -1364,6 +1315,8 @@
  public:
   // creation
   InstanceOf(ciKlass* klass, Value obj, ValueStack* state_before) : TypeCheck(klass, obj, intType, state_before) {}
+
+  virtual bool needs_exception_state() const     { return false; }
 };
 
 
@@ -1374,8 +1327,8 @@
 
  public:
   // creation
-  AccessMonitor(Value obj, int monitor_no)
-  : StateSplit(illegalType)
+  AccessMonitor(Value obj, int monitor_no, ValueStack* state_before = NULL)
+  : StateSplit(illegalType, state_before)
   , _obj(obj)
   , _monitor_no(monitor_no)
   {
@@ -1393,22 +1346,14 @@
 
 
 LEAF(MonitorEnter, AccessMonitor)
- private:
-  ValueStack* _lock_stack_before;
-
  public:
   // creation
-  MonitorEnter(Value obj, int monitor_no, ValueStack* lock_stack_before)
-  : AccessMonitor(obj, monitor_no)
-  , _lock_stack_before(lock_stack_before)
+  MonitorEnter(Value obj, int monitor_no, ValueStack* state_before)
+  : AccessMonitor(obj, monitor_no, state_before)
   {
     ASSERT_VALUES
   }
 
-  // accessors
-  ValueStack* lock_stack_before() const          { return _lock_stack_before; }
-  virtual void state_values_do(ValueVisitor* f);
-
   // generic
   virtual bool can_trap() const                  { return true; }
 };
@@ -1417,7 +1362,11 @@
 LEAF(MonitorExit, AccessMonitor)
  public:
   // creation
-  MonitorExit(Value obj, int monitor_no) : AccessMonitor(obj, monitor_no) {}
+  MonitorExit(Value obj, int monitor_no)
+  : AccessMonitor(obj, monitor_no, NULL)
+  {
+    ASSERT_VALUES
+  }
 };
 
 
@@ -1425,7 +1374,6 @@
  private:
   vmIntrinsics::ID _id;
   Values*          _args;
-  ValueStack*      _lock_stack;
   Value            _recv;
 
  public:
@@ -1440,13 +1388,12 @@
             vmIntrinsics::ID id,
             Values* args,
             bool has_receiver,
-            ValueStack* lock_stack,
+            ValueStack* state_before,
             bool preserves_state,
             bool cantrap = true)
-  : StateSplit(type)
+  : StateSplit(type, state_before)
   , _id(id)
   , _args(args)
-  , _lock_stack(lock_stack)
   , _recv(NULL)
   {
     assert(args != NULL, "args must exist");
@@ -1468,7 +1415,6 @@
   vmIntrinsics::ID id() const                    { return _id; }
   int number_of_arguments() const                { return _args->length(); }
   Value argument_at(int i) const                 { return _args->at(i); }
-  ValueStack* lock_stack() const                 { return _lock_stack; }
 
   bool has_receiver() const                      { return (_recv != NULL); }
   Value receiver() const                         { assert(has_receiver(), "must have receiver"); return _recv; }
@@ -1480,8 +1426,6 @@
     StateSplit::input_values_do(f);
     for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
   }
-  virtual void state_values_do(ValueVisitor* f);
-
 };
 
 
@@ -1490,6 +1434,7 @@
 LEAF(BlockBegin, StateSplit)
  private:
   int        _block_id;                          // the unique block id
+  int        _bci;                               // start-bci of block
   int        _depth_first_number;                // number of this block in a depth-first ordering
   int        _linear_scan_number;                // number of this block in linear-scan ordering
   int        _loop_depth;                        // the loop nesting level of this block
@@ -1546,6 +1491,7 @@
   // creation
   BlockBegin(int bci)
   : StateSplit(illegalType)
+  , _bci(bci)
   , _depth_first_number(-1)
   , _linear_scan_number(-1)
   , _loop_depth(0)
@@ -1570,11 +1516,14 @@
   , _total_preds(0)
   , _stores_to_locals()
   {
-    set_bci(bci);
+#ifndef PRODUCT
+    set_printable_bci(bci);
+#endif
   }
 
   // accessors
   int block_id() const                           { return _block_id; }
+  int bci() const                                { return _bci; }
   BlockList* successors()                        { return &_successors; }
   BlockBegin* dominator() const                  { return _dominator; }
   int loop_depth() const                         { return _loop_depth; }
@@ -1596,7 +1545,6 @@
   BitMap& stores_to_locals()                     { return _stores_to_locals; }
 
   // manipulation
-  void set_bci(int bci)                          { Instruction::set_bci(bci); }
   void set_dominator(BlockBegin* dom)            { _dominator = dom; }
   void set_loop_depth(int d)                     { _loop_depth = d; }
   void set_depth_first_number(int dfn)           { _depth_first_number = dfn; }
@@ -1694,7 +1642,6 @@
  private:
   BlockBegin* _begin;
   BlockList*  _sux;
-  ValueStack* _state_before;
 
  protected:
   BlockList* sux() const                         { return _sux; }
@@ -1710,24 +1657,20 @@
  public:
   // creation
   BlockEnd(ValueType* type, ValueStack* state_before, bool is_safepoint)
-  : StateSplit(type)
+  : StateSplit(type, state_before)
   , _begin(NULL)
   , _sux(NULL)
-  , _state_before(state_before) {
+  {
     set_flag(IsSafepointFlag, is_safepoint);
   }
 
   // accessors
-  ValueStack* state_before() const               { return _state_before; }
   bool is_safepoint() const                      { return check_flag(IsSafepointFlag); }
   BlockBegin* begin() const                      { return _begin; }
 
   // manipulation
   void set_begin(BlockBegin* begin);
 
-  // generic
-  virtual void other_values_do(ValueVisitor* f);
-
   // successors
   int number_of_sux() const                      { return _sux != NULL ? _sux->length() : 0; }
   BlockBegin* sux_at(int i) const                { return _sux->at(i); }
@@ -1919,6 +1862,8 @@
   Value tag() const                              { return _tag; }
   int length() const                             { return number_of_sux() - 1; }
 
+  virtual bool needs_exception_state() const     { return false; }
+
   // generic
   virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_tag); }
 };
@@ -1996,7 +1941,6 @@
   // generic
   virtual bool can_trap() const                  { return true; }
   virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_exception); }
-  virtual void state_values_do(ValueVisitor* f);
 };
 
 
@@ -2091,7 +2035,6 @@
 
   // generic
   virtual void input_values_do(ValueVisitor* f)   { }
-  virtual void other_values_do(ValueVisitor* f)   { }
 };
 
 
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -316,7 +316,7 @@
 void InstructionPrinter::print_line(Instruction* instr) {
   // print instruction data on one line
   if (instr->is_pinned()) output()->put('.');
-  fill_to(bci_pos  ); output()->print("%d", instr->bci());
+  fill_to(bci_pos  ); output()->print("%d", instr->printable_bci());
   fill_to(use_pos  ); output()->print("%d", instr->use_count());
   fill_to(temp_pos ); print_temp(instr);
   fill_to(instr_pos); print_instr(instr);
@@ -569,7 +569,7 @@
   if (printed_flag) output()->print(") ");
 
   // print block bci range
-  output()->print("[%d, %d]", x->bci(), (end == NULL ? -1 : end->bci()));
+  output()->print("[%d, %d]", x->bci(), (end == NULL ? -1 : end->printable_bci()));
 
   // print block successors
   if (end != NULL && end->number_of_sux() > 0) {
--- a/src/share/vm/c1/c1_LIR.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_LIR.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -1520,7 +1520,7 @@
   if (x->is_set(BlockBegin::linear_scan_loop_end_flag))    tty->print("le ");
 
   // print block bci range
-  tty->print("[%d, %d] ", x->bci(), (end == NULL ? -1 : end->bci()));
+  tty->print("[%d, %d] ", x->bci(), (end == NULL ? -1 : end->printable_bci()));
 
   // print predecessors and successors
   if (x->number_of_preds() > 0) {
@@ -1576,7 +1576,7 @@
   }
   out->print(name()); out->print(" ");
   print_instr(out);
-  if (info() != NULL) out->print(" [bci:%d]", info()->bci());
+  if (info() != NULL) out->print(" [bci:%d]", info()->stack()->bci());
 #ifdef ASSERT
   if (Verbose && _file != NULL) {
     out->print(" (%s:%d)", _file, _line);
@@ -1781,7 +1781,7 @@
     out->print("[");
     stub()->print_name(out);
     out->print(": 0x%x]", stub());
-    if (stub()->info() != NULL) out->print(" [bci:%d]", stub()->info()->bci());
+    if (stub()->info() != NULL) out->print(" [bci:%d]", stub()->info()->stack()->bci());
   } else {
     out->print("[label:0x%x] ", label());
   }
@@ -1896,7 +1896,7 @@
   tmp2()->print(out);                    out->print(" ");
   tmp3()->print(out);                    out->print(" ");
   result_opr()->print(out);              out->print(" ");
-  if (info_for_exception() != NULL) out->print(" [bci:%d]", info_for_exception()->bci());
+  if (info_for_exception() != NULL) out->print(" [bci:%d]", info_for_exception()->stack()->bci());
 }
 
 
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -35,7 +35,7 @@
   append_patching_stub(patch);
 
 #ifdef ASSERT
-  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
+  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
   if (patch->id() == PatchingStub::access_field_id) {
     switch (code) {
       case Bytecodes::_putstatic:
@@ -221,7 +221,7 @@
 #ifndef PRODUCT
   if (CommentedAssembly) {
     stringStream st;
-    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
+    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
     _masm->block_comment(st.as_string());
   }
 #endif
@@ -312,7 +312,7 @@
 static ValueStack* debug_info(Instruction* ins) {
   StateSplit* ss = ins->as_StateSplit();
   if (ss != NULL) return ss->state();
-  return ins->lock_stack();
+  return ins->state_before();
 }
 
 void LIR_Assembler::process_debug_info(LIR_Op* op) {
@@ -327,8 +327,7 @@
   if (vstack == NULL)  return;
   if (_pending_non_safepoint != NULL) {
     // Got some old debug info.  Get rid of it.
-    if (_pending_non_safepoint->bci() == src->bci() &&
-        debug_info(_pending_non_safepoint) == vstack) {
+    if (debug_info(_pending_non_safepoint) == vstack) {
       _pending_non_safepoint_offset = pc_offset;
       return;
     }
@@ -358,7 +357,7 @@
     ValueStack* tc = t->caller_state();
     if (tc == NULL)  return s;
     t = tc;
-    bci_result = s->scope()->caller_bci();
+    bci_result = tc->bci();
     s = s->caller_state();
   }
 }
@@ -366,7 +365,7 @@
 void LIR_Assembler::record_non_safepoint_debug_info() {
   int         pc_offset = _pending_non_safepoint_offset;
   ValueStack* vstack    = debug_info(_pending_non_safepoint);
-  int         bci       = _pending_non_safepoint->bci();
+  int         bci       = vstack->bci();
 
   DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
   assert(debug_info->recording_non_safepoints(), "sanity");
@@ -380,7 +379,7 @@
     if (s == NULL)  break;
     IRScope* scope = s->scope();
     //Always pass false for reexecute since these ScopeDescs are never used for deopt
-    debug_info->describe_scope(pc_offset, scope->method(), s_bci, false/*reexecute*/);
+    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
   }
 
   debug_info->end_non_safepoint(pc_offset);
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -386,18 +386,26 @@
 
 
 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
-  int index;
-  Value value;
-  for_each_stack_value(state, index, value) {
-    assert(value->subst() == value, "missed substition");
-    if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
-      walk(value);
-      assert(value->operand()->is_valid(), "must be evaluated now");
+  assert(state != NULL, "state must be defined");
+
+  ValueStack* s = state;
+  for_each_state(s) {
+    if (s->kind() == ValueStack::EmptyExceptionState) {
+      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
+      continue;
     }
-  }
-  ValueStack* s = state;
-  int bci = x->bci();
-  for_each_state(s) {
+
+    int index;
+    Value value;
+    for_each_stack_value(s, index, value) {
+      assert(value->subst() == value, "missed substitution");
+      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
+        walk(value);
+        assert(value->operand()->is_valid(), "must be evaluated now");
+      }
+    }
+
+    int bci = s->bci();
     IRScope* scope = s->scope();
     ciMethod* method = scope->method();
 
@@ -428,15 +436,14 @@
         }
       }
     }
-    bci = scope->caller_bci();
   }
 
-  return new CodeEmitInfo(x->bci(), state, ignore_xhandler ? NULL : x->exception_handlers());
+  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
 }
 
 
 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
-  return state_for(x, x->lock_stack());
+  return state_for(x, x->exception_state());
 }
 
 
@@ -900,18 +907,14 @@
       Value sux_value;
       int index;
 
+      assert(cur_state->scope() == sux_state->scope(), "not matching");
+      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
+      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
+
       for_each_stack_value(sux_state, index, sux_value) {
         move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
       }
 
-      // Inlining may cause the local state not to match up, so walk up
-      // the caller state until we get to the same scope as the
-      // successor and then start processing from there.
-      while (cur_state->scope() != sux_state->scope()) {
-        cur_state = cur_state->caller_state();
-        assert(cur_state != NULL, "scopes don't match up");
-      }
-
       for_each_local_value(sux_state, index, sux_value) {
         move_to_phi(&resolver, cur_state->local_at(index), sux_value);
       }
@@ -1023,10 +1026,10 @@
 
 // Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
 void LIRGenerator::do_Constant(Constant* x) {
-  if (x->state() != NULL) {
+  if (x->state_before() != NULL) {
     // Any constant with a ValueStack requires patching so emit the patch here
     LIR_Opr reg = rlock_result(x);
-    CodeEmitInfo* info = state_for(x, x->state());
+    CodeEmitInfo* info = state_for(x, x->state_before());
     __ oop2reg_patch(NULL, reg, info);
   } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
     if (!x->is_pinned()) {
@@ -1102,7 +1105,7 @@
   // need to perform the null check on the rcvr
   CodeEmitInfo* info = NULL;
   if (x->needs_null_check()) {
-    info = state_for(x, x->state()->copy_locks());
+    info = state_for(x);
   }
   __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
   __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
@@ -1481,7 +1484,7 @@
   } else if (x->needs_null_check()) {
     NullCheck* nc = x->explicit_null_check();
     if (nc == NULL) {
-      info = state_for(x, x->lock_stack());
+      info = state_for(x);
     } else {
       info = state_for(nc);
     }
@@ -1509,10 +1512,12 @@
 
   set_no_result(x);
 
+#ifndef PRODUCT
   if (PrintNotLoaded && needs_patching) {
     tty->print_cr("   ###class not loaded at store_%s bci %d",
-                  x->is_static() ?  "static" : "field", x->bci());
+                  x->is_static() ?  "static" : "field", x->printable_bci());
   }
+#endif
 
   if (x->needs_null_check() &&
       (needs_patching ||
@@ -1575,7 +1580,7 @@
   } else if (x->needs_null_check()) {
     NullCheck* nc = x->explicit_null_check();
     if (nc == NULL) {
-      info = state_for(x, x->lock_stack());
+      info = state_for(x);
     } else {
       info = state_for(nc);
     }
@@ -1585,10 +1590,12 @@
 
   object.load_item();
 
+#ifndef PRODUCT
   if (PrintNotLoaded && needs_patching) {
     tty->print_cr("   ###class not loaded at load_%s bci %d",
-                  x->is_static() ?  "static" : "field", x->bci());
+                  x->is_static() ?  "static" : "field", x->printable_bci());
   }
+#endif
 
   if (x->needs_null_check() &&
       (needs_patching ||
@@ -1781,7 +1788,7 @@
   if (GenerateCompilerNullChecks &&
       (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
     // if the exception object wasn't created using new then it might be null.
-    __ null_check(exception_opr, new CodeEmitInfo(info, true));
+    __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
   }
 
   if (compilation()->env()->jvmti_can_post_on_exceptions()) {
@@ -2127,7 +2134,6 @@
   int lo_key = x->lo_key();
   int hi_key = x->hi_key();
   int len = x->length();
-  CodeEmitInfo* info = state_for(x, x->state());
   LIR_Opr value = tag.result();
   if (UseTableRanges) {
     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
@@ -2186,7 +2192,7 @@
 
     // increment backedge counter if needed
     CodeEmitInfo* info = state_for(x, state);
-    increment_backedge_counter(info, info->bci());
+    increment_backedge_counter(info, info->stack()->bci());
     CodeEmitInfo* safepoint_info = state_for(x, state);
     __ safepoint(safepoint_poll_register(), safepoint_info);
   }
@@ -2293,7 +2299,7 @@
       LIR_Opr lock = new_register(T_INT);
       __ load_stack_address_monitor(0, lock);
 
-      CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
+      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
 
       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
@@ -2303,7 +2309,7 @@
 
   // increment invocation counters if needed
   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
-    CodeEmitInfo* info = new CodeEmitInfo(InvocationEntryBci, scope()->start()->state(), NULL);
+    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state(), NULL);
     increment_invocation_counter(info);
   }
 
@@ -2463,7 +2469,7 @@
       break;
     case Bytecodes::_invokedynamic: {
       ciBytecodeStream bcs(x->scope()->method());
-      bcs.force_bci(x->bci());
+      bcs.force_bci(x->state()->bci());
       assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
       ciCPCache* cpcache = bcs.get_cpcache();
 
--- a/src/share/vm/c1/c1_LinearScan.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -2274,8 +2274,8 @@
 }
 
 void check_stack_depth(CodeEmitInfo* info, int stack_end) {
-  if (info->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
-    Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
+  if (info->stack()->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
+    Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
     switch (code) {
       case Bytecodes::_ifnull    : // fall through
       case Bytecodes::_ifnonnull : // fall through
@@ -2379,7 +2379,7 @@
 
   // add oops from lock stack
   assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
-  int locks_count = info->stack()->locks_size();
+  int locks_count = info->stack()->total_locks_size();
   for (int i = 0; i < locks_count; i++) {
     map->set_oop(frame_map()->monitor_object_regname(i));
   }
@@ -2762,19 +2762,13 @@
 }
 
 
-IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state, int cur_bci, int stack_end, int locks_end) {
+IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state) {
   IRScopeDebugInfo* caller_debug_info = NULL;
-  int stack_begin, locks_begin;
-
-  ValueStack* caller_state = cur_scope->caller_state();
+
+  ValueStack* caller_state = cur_state->caller_state();
   if (caller_state != NULL) {
     // process recursively to compute outermost scope first
-    stack_begin = caller_state->stack_size();
-    locks_begin = caller_state->locks_size();
-    caller_debug_info = compute_debug_info_for_scope(op_id, cur_scope->caller(), caller_state, innermost_state, cur_scope->caller_bci(), stack_begin, locks_begin);
-  } else {
-    stack_begin = 0;
-    locks_begin = 0;
+    caller_debug_info = compute_debug_info_for_scope(op_id, cur_scope->caller(), caller_state, innermost_state);
   }
 
   // initialize these to null.
@@ -2785,7 +2779,7 @@
   GrowableArray<MonitorValue*>* monitors    = NULL;
 
   // describe local variable values
-  int nof_locals = cur_scope->method()->max_locals();
+  int nof_locals = cur_state->locals_size();
   if (nof_locals > 0) {
     locals = new GrowableArray<ScopeValue*>(nof_locals);
 
@@ -2800,45 +2794,41 @@
     }
     assert(locals->length() == cur_scope->method()->max_locals(), "wrong number of locals");
     assert(locals->length() == cur_state->locals_size(), "wrong number of locals");
-  }
-
+  } else if (cur_scope->method()->max_locals() > 0) {
+    assert(cur_state->kind() == ValueStack::EmptyExceptionState, "should be");
+    nof_locals = cur_scope->method()->max_locals();
+    locals = new GrowableArray<ScopeValue*>(nof_locals);
+    for(int i = 0; i < nof_locals; i++) {
+      locals->append(&_illegal_value);
+    }
+  }
 
   // describe expression stack
-  //
-  // When we inline methods containing exception handlers, the
-  // "lock_stacks" are changed to preserve expression stack values
-  // in caller scopes when exception handlers are present. This
-  // can cause callee stacks to be smaller than caller stacks.
-  if (stack_end > innermost_state->stack_size()) {
-    stack_end = innermost_state->stack_size();
-  }
-
-
-
-  int nof_stack = stack_end - stack_begin;
+  int nof_stack = cur_state->stack_size();
   if (nof_stack > 0) {
     expressions = new GrowableArray<ScopeValue*>(nof_stack);
 
-    int pos = stack_begin;
-    while (pos < stack_end) {
-      Value expression = innermost_state->stack_at_inc(pos);
+    int pos = 0;
+    while (pos < nof_stack) {
+      Value expression = cur_state->stack_at_inc(pos);
       append_scope_value(op_id, expression, expressions);
 
-      assert(expressions->length() + stack_begin == pos, "must match");
-    }
+      assert(expressions->length() == pos, "must match");
+    }
+    assert(expressions->length() == cur_state->stack_size(), "wrong number of stack entries");
   }
 
   // describe monitors
-  assert(locks_begin <= locks_end, "error in scope iteration");
-  int nof_locks = locks_end - locks_begin;
+  int nof_locks = cur_state->locks_size();
   if (nof_locks > 0) {
+    int lock_offset = cur_state->caller_state() != NULL ? cur_state->caller_state()->total_locks_size() : 0;
     monitors = new GrowableArray<MonitorValue*>(nof_locks);
-    for (int i = locks_begin; i < locks_end; i++) {
-      monitors->append(location_for_monitor_index(i));
-    }
-  }
-
-  return new IRScopeDebugInfo(cur_scope, cur_bci, locals, expressions, monitors, caller_debug_info);
+    for (int i = 0; i < nof_locks; i++) {
+      monitors->append(location_for_monitor_index(lock_offset + i));
+    }
+  }
+
+  return new IRScopeDebugInfo(cur_scope, cur_state->bci(), locals, expressions, monitors, caller_debug_info);
 }
 
 
@@ -2850,17 +2840,14 @@
 
   assert(innermost_scope != NULL && innermost_state != NULL, "why is it missing?");
 
-  int stack_end = innermost_state->stack_size();
-  int locks_end = innermost_state->locks_size();
-
-  DEBUG_ONLY(check_stack_depth(info, stack_end));
+  DEBUG_ONLY(check_stack_depth(info, innermost_state->stack_size()));
 
   if (info->_scope_debug_info == NULL) {
     // compute debug information
-    info->_scope_debug_info = compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state, info->bci(), stack_end, locks_end);
+    info->_scope_debug_info = compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state);
   } else {
     // debug information already set. Check that it is correct from the current point of view
-    DEBUG_ONLY(assert_equal(info->_scope_debug_info, compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state, info->bci(), stack_end, locks_end)));
+    DEBUG_ONLY(assert_equal(info->_scope_debug_info, compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state)));
   }
 }
 
--- a/src/share/vm/c1/c1_LinearScan.hpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_LinearScan.hpp	Tue Dec 29 19:08:54 2009 +0100
@@ -346,7 +346,7 @@
   int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
   int append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values);
 
-  IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state, int cur_bci, int stack_end, int locks_end);
+  IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state);
   void compute_debug_info(CodeEmitInfo* info, int op_id);
 
   void assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw);
--- a/src/share/vm/c1/c1_Optimizer.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_Optimizer.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -140,25 +140,27 @@
     //    with an IfOp followed by a Goto
     // cut if_ away and get node before
     Instruction* cur_end = if_->prev(block);
-    int bci = if_->bci();
 
     // append constants of true- and false-block if necessary
     // clone constants because original block must not be destroyed
     assert((t_value != f_const && f_value != t_const) || t_const == f_const, "mismatch");
     if (t_value == t_const) {
       t_value = new Constant(t_const->type());
-      cur_end = cur_end->set_next(t_value, bci);
+      NOT_PRODUCT(t_value->set_printable_bci(if_->printable_bci()));
+      cur_end = cur_end->set_next(t_value);
     }
     if (f_value == f_const) {
       f_value = new Constant(f_const->type());
-      cur_end = cur_end->set_next(f_value, bci);
+      NOT_PRODUCT(f_value->set_printable_bci(if_->printable_bci()));
+      cur_end = cur_end->set_next(f_value);
     }
 
     // it is very unlikely that the condition can be statically decided
     // (this was checked previously by the Canonicalizer), so always
     // append IfOp
     Value result = new IfOp(if_->x(), if_->cond(), if_->y(), t_value, f_value);
-    cur_end = cur_end->set_next(result, bci);
+    NOT_PRODUCT(result->set_printable_bci(if_->printable_bci()));
+    cur_end = cur_end->set_next(result);
 
     // append Goto to successor
     ValueStack* state_before = if_->is_safepoint() ? if_->state_before() : NULL;
@@ -167,16 +169,15 @@
     // prepare state for Goto
     ValueStack* goto_state = if_->state();
     while (sux_state->scope() != goto_state->scope()) {
-      goto_state = goto_state->pop_scope();
+      goto_state = goto_state->caller_state();
       assert(goto_state != NULL, "states do not match up");
     }
-    goto_state = goto_state->copy();
+    goto_state = goto_state->copy(ValueStack::StateAfter, goto_state->bci());
     goto_state->push(result->type(), result);
-    assert(goto_state->is_same_across_scopes(sux_state), "states must match now");
+    assert(goto_state->is_same(sux_state), "states must match now");
     goto_->set_state(goto_state);
 
-    // Steal the bci for the goto from the sux
-    cur_end = cur_end->set_next(goto_, sux->bci());
+    cur_end = cur_end->set_next(goto_, goto_state->bci());
 
     // Adjust control flow graph
     BlockBegin::disconnect_edge(block, t_block);
@@ -251,10 +252,8 @@
         // no phi functions must be present at beginning of sux
         ValueStack* sux_state = sux->state();
         ValueStack* end_state = end->state();
-        while (end_state->scope() != sux_state->scope()) {
-          // match up inlining level
-          end_state = end_state->pop_scope();
-        }
+
+        assert(end_state->scope() == sux_state->scope(), "scopes must match");
         assert(end_state->stack_size() == sux_state->stack_size(), "stack not equal");
         assert(end_state->locals_size() == sux_state->locals_size(), "locals not equal");
 
@@ -273,7 +272,7 @@
         Instruction* prev = end->prev(block);
         Instruction* next = sux->next();
         assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd");
-        prev->set_next(next, next->bci());
+        prev->set_next(next);
         sux->disconnect_from_graph();
         block->set_end(sux->end());
         // add exception handlers of deleted block, if any
@@ -337,7 +336,8 @@
                   newif->set_state(if_->state()->copy());
 
                   assert(prev->next() == if_, "must be guaranteed by above search");
-                  prev->set_next(newif, if_->bci());
+                  NOT_PRODUCT(newif->set_printable_bci(if_->printable_bci()));
+                  prev->set_next(newif);
                   block->set_end(newif);
 
                   _merge_count++;
@@ -705,7 +705,7 @@
     // visiting instructions which are references in other blocks or
     // visiting instructions more than once.
     mark_visitable(instr);
-    if (instr->is_root() || instr->can_trap() || (instr->as_NullCheck() != NULL)) {
+    if (instr->is_pinned() || instr->can_trap() || (instr->as_NullCheck() != NULL)) {
       mark_visited(instr);
       instr->input_values_do(this);
       instr->visit(&_visitor);
--- a/src/share/vm/c1/c1_ValueStack.cpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_ValueStack.cpp	Tue Dec 29 19:08:54 2009 +0100
@@ -28,55 +28,60 @@
 
 // Implementation of ValueStack
 
-ValueStack::ValueStack(IRScope* scope, int locals_size, int max_stack_size)
+ValueStack::ValueStack(IRScope* scope, ValueStack* caller_state)
 : _scope(scope)
-, _locals(locals_size, NULL)
-, _stack(max_stack_size)
-, _lock_stack(false)
-, _locks(1)
+, _caller_state(caller_state)
+, _bci(-99)
+, _kind(Parsing)
+, _locals(scope->method()->max_locals(), NULL)
+, _stack(scope->method()->max_stack())
+, _locks()
 {
-  assert(scope != NULL, "scope must exist");
+  verify();
 }
 
-ValueStack* ValueStack::copy() {
-  ValueStack* s = new ValueStack(scope(), locals_size(), max_stack_size());
-  s->_stack.appendAll(&_stack);
-  s->_locks.appendAll(&_locks);
-  s->replace_locals(this);
-  return s;
+
+ValueStack::ValueStack(ValueStack* copy_from, Kind kind, int bci)
+  : _scope(copy_from->scope())
+  , _caller_state(copy_from->caller_state())
+  , _bci(bci)
+  , _kind(kind)
+  , _locals()
+  , _stack()
+  , _locks(copy_from->locks_size())
+{
+  assert(kind != EmptyExceptionState || !Compilation::current()->env()->jvmti_can_access_local_variables(), "need locals");
+  if (kind != EmptyExceptionState) {
+    // only allocate space if we need to copy the locals-array
+    _locals = Values(copy_from->locals_size());
+    _locals.appendAll(&copy_from->_locals);
+  }
+
+  if (kind != ExceptionState && kind != EmptyExceptionState) {
+    if (kind == Parsing) {
+      // stack will be modified, so reserve enough space to avoid resizing
+      _stack = Values(scope()->method()->max_stack());
+    } else {
+      // stack will not be modified, so do not waste space
+      _stack = Values(copy_from->stack_size());
+    }
+    _stack.appendAll(&copy_from->_stack);
+  }
+
+  _locks.appendAll(&copy_from->_locks);
+
+  verify();
 }
 
 
-ValueStack* ValueStack::copy_locks() {
-  int sz = scope()->lock_stack_size();
-  if (stack_size() == 0) {
-    sz = 0;
-  }
-  ValueStack* s = new ValueStack(scope(), locals_size(), sz);
-  s->_lock_stack = true;
-  s->_locks.appendAll(&_locks);
-  s->replace_locals(this);
-  if (sz > 0) {
-    assert(sz <= stack_size(), "lock stack underflow");
-    for (int i = 0; i < sz; i++) {
-      s->_stack.append(_stack[i]);
-    }
-  }
-  return s;
-}
+bool ValueStack::is_same(ValueStack* s) {
+  if (scope() != s->scope()) return false;
+  if (caller_state() != s->caller_state()) return false;
 
-bool ValueStack::is_same(ValueStack* s) {
-  assert(s != NULL, "state must exist");
-  assert(scope      () == s->scope      (), "scopes       must correspond");
-  assert(locals_size() == s->locals_size(), "locals sizes must correspond");
-  return is_same_across_scopes(s);
-}
+  if (locals_size() != s->locals_size()) return false;
+  if (stack_size() != s->stack_size()) return false;
+  if (locks_size() != s->locks_size()) return false;
 
-
-bool ValueStack::is_same_across_scopes(ValueStack* s) {
-  assert(s != NULL, "state must exist");
-  assert(stack_size () == s->stack_size (), "stack  sizes must correspond");
-  assert(locks_size () == s->locks_size (), "locks  sizes must correspond");
   // compare each stack element with the corresponding stack element of s
   int index;
   Value value;
@@ -89,12 +94,6 @@
   return true;
 }
 
-
-ValueStack* ValueStack::caller_state() const {
-  return scope()->caller_state();
-}
-
-
 void ValueStack::clear_locals() {
   for (int i = _locals.length() - 1; i >= 0; i--) {
     _locals.at_put(i, NULL);
@@ -102,13 +101,6 @@
 }
 
 
-void ValueStack::replace_locals(ValueStack* with) {
-  assert(locals_size() == with->locals_size(), "number of locals must match");
-  for (int i = locals_size() - 1; i >= 0; i--) {
-    _locals.at_put(i, with->_locals.at(i));
-  }
-}
-
 void ValueStack::pin_stack_for_linear_scan() {
   for_each_state_value(this, v,
     if (v->as_Constant() == NULL && v->as_Local() == NULL) {
@@ -123,33 +115,25 @@
   for (int i = 0; i < list.length(); i++) {
     Value* va = list.adr_at(i);
     Value v0 = *va;
-    if (v0 != NULL) {
-      if (!v0->type()->is_illegal()) {
-        assert(v0->as_HiWord() == NULL, "should never see HiWord during traversal");
-        f->visit(va);
+    if (v0 != NULL && !v0->type()->is_illegal()) {
+      f->visit(va);
 #ifdef ASSERT
-        Value v1 = *va;
-        if (v0 != v1) {
-          assert(v1->type()->is_illegal() || v0->type()->tag() == v1->type()->tag(), "types must match");
-          if (v0->type()->is_double_word()) {
-            list.at_put(i + 1, v0->hi_word());
-          }
-        }
+      Value v1 = *va;
+      assert(v1->type()->is_illegal() || v0->type()->tag() == v1->type()->tag(), "types must match");
+      assert(!v1->type()->is_double_word() || list.at(i + 1) == NULL, "hi-word of doubleword value must be NULL");
 #endif
-        if (v0->type()->is_double_word()) i++;
-      }
+      if (v0->type()->is_double_word()) i++;
     }
   }
 }
 
 
 void ValueStack::values_do(ValueVisitor* f) {
-  apply(_stack, f);
-  apply(_locks, f);
-
   ValueStack* state = this;
   for_each_state(state) {
     apply(state->_locals, f);
+    apply(state->_stack, f);
+    apply(state->_locks, f);
   }
 }
 
@@ -164,52 +148,26 @@
 }
 
 
-int ValueStack::lock(IRScope* scope, Value obj) {
+int ValueStack::total_locks_size() const {
+  int num_locks = 0;
+  const ValueStack* state = this;
+  for_each_state(state) {
+    num_locks += state->locks_size();
+  }
+  return num_locks;
+}
+
+int ValueStack::lock(Value obj) {
   _locks.push(obj);
-  scope->set_min_number_of_locks(locks_size());
-  return locks_size() - 1;
+  int num_locks = total_locks_size();
+  scope()->set_min_number_of_locks(num_locks);
+  return num_locks - 1;
 }
 
 
 int ValueStack::unlock() {
   _locks.pop();
-  return locks_size();
-}
-
-
-ValueStack* ValueStack::push_scope(IRScope* scope) {
-  assert(scope->caller() == _scope, "scopes must have caller/callee relationship");
-  ValueStack* res = new ValueStack(scope,
-                                   scope->method()->max_locals(),
-                                   max_stack_size() + scope->method()->max_stack());
-  // Preserves stack and monitors.
-  res->_stack.appendAll(&_stack);
-  res->_locks.appendAll(&_locks);
-  assert(res->_stack.size() <= res->max_stack_size(), "stack overflow");
-  return res;
-}
-
-
-ValueStack* ValueStack::pop_scope() {
-  assert(_scope->caller() != NULL, "scope must have caller");
-  IRScope* scope = _scope->caller();
-  int max_stack = max_stack_size() - _scope->method()->max_stack();
-  assert(max_stack >= 0, "stack underflow");
-  ValueStack* res = new ValueStack(scope,
-                                   scope->method()->max_locals(),
-                                   max_stack);
-  // Preserves stack and monitors. Restores local and store state from caller scope.
-  res->_stack.appendAll(&_stack);
-  res->_locks.appendAll(&_locks);
-  ValueStack* caller = caller_state();
-  if (caller != NULL) {
-    for (int i = 0; i < caller->_locals.length(); i++) {
-      res->_locals.at_put(i, caller->_locals.at(i));
-    }
-    assert(res->_locals.length() == res->scope()->method()->max_locals(), "just checking");
-  }
-  assert(res->_stack.size() <= res->max_stack_size(), "stack overflow");
-  return res;
+  return total_locks_size();
 }
 
 
@@ -220,11 +178,7 @@
   Value phi = new Phi(t, b, -index - 1);
   _stack[index] = phi;
 
-#ifdef ASSERT
-  if (t->is_double_word()) {
-    _stack[index + 1] = phi->hi_word();
-  }
-#endif
+  assert(!t->is_double_word() || _stack.at(index + 1) == NULL, "hi-word of doubleword value must be NULL");
 }
 
 void ValueStack::setup_phi_for_local(BlockBegin* b, int index) {
@@ -236,7 +190,9 @@
 }
 
 #ifndef PRODUCT
+
 void ValueStack::print() {
+  scope()->method()->print_name();
   if (stack_is_empty()) {
     tty->print_cr("empty stack");
   } else {
@@ -244,18 +200,20 @@
     for (int i = 0; i < stack_size();) {
       Value t = stack_at_inc(i);
       tty->print("%2d  ", i);
+      tty->print("%c%d ", t->type()->tchar(), t->id());
       ip.print_instr(t);
       tty->cr();
     }
   }
   if (!no_active_locks()) {
     InstructionPrinter ip;
-    for (int i = 0; i < locks_size(); i--) {
+    for (int i = 0; i < locks_size(); i++) {
       Value t = lock_at(i);
       tty->print("lock %2d  ", i);
       if (t == NULL) {
         tty->print("this");
       } else {
+        tty->print("%c%d ", t->type()->tchar(), t->id());
         ip.print_instr(t);
       }
       tty->cr();
@@ -270,16 +228,55 @@
         tty->print("null");
         i ++;
       } else {
+        tty->print("%c%d ", l->type()->tchar(), l->id());
         ip.print_instr(l);
         if (l->type()->is_illegal() || l->type()->is_single_word()) i ++; else i += 2;
       }
       tty->cr();
     }
   }
+
+  if (caller_state() != NULL) {
+    caller_state()->print();
+  }
 }
 
 
 void ValueStack::verify() {
-  Unimplemented();
+  assert(scope() != NULL, "scope must exist");
+  if (caller_state() != NULL) {
+    assert(caller_state()->scope() == scope()->caller(), "invalid caller scope");
+    caller_state()->verify();
+  }
+
+  if (kind() == Parsing) {
+    assert(bci() == -99, "bci not defined during parsing");
+  } else {
+    assert(bci() >= -1, "bci out of range");
+    assert(bci() < scope()->method()->code_size(), "bci out of range");
+    assert(bci() == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(bci())), "make sure bci points at a real bytecode");
+    assert(scope()->method()->liveness_at_bci(bci()).is_valid(), "liveness at bci must be valid");
+  }
+
+  int i;
+  for (i = 0; i < stack_size(); i++) {
+    Value v = _stack.at(i);
+    if (v == NULL) {
+      assert(_stack.at(i - 1)->type()->is_double_word(), "only hi-words are NULL on stack");
+    } else if (v->type()->is_double_word()) {
+      assert(_stack.at(i + 1) == NULL, "hi-word must be NULL");
+    }
+  }
+
+  for (i = 0; i < locals_size(); i++) {
+    Value v = _locals.at(i);
+    if (v != NULL && v->type()->is_double_word()) {
+      assert(_locals.at(i + 1) == NULL, "hi-word must be NULL");
+    }
+  }
+
+  for_each_state_value(this, v,
+    assert(v != NULL, "just test if state-iteration succeeds");
+  );
 }
 #endif // PRODUCT
--- a/src/share/vm/c1/c1_ValueStack.hpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_ValueStack.hpp	Tue Dec 29 19:08:54 2009 +0100
@@ -23,9 +23,23 @@
  */
 
 class ValueStack: public CompilationResourceObj {
+ public:
+  enum Kind {
+    Parsing,             // During abstract interpretation in GraphBuilder
+    CallerState,         // Caller state when inlining
+    StateBefore,         // Before execution of instruction
+    StateAfter,          // After execution of instruction
+    ExceptionState,      // Exception handling of instruction
+    EmptyExceptionState, // Exception handling of instructions not covered by an xhandler
+    BlockBeginState      // State of BlockBegin instruction with phi functions of this block
+  };
+
  private:
   IRScope* _scope;                               // the enclosing scope
-  bool     _lock_stack;                          // indicates that this ValueStack is for an exception site
+  ValueStack* _caller_state;
+  int      _bci;
+  Kind     _kind;
+
   Values   _locals;                              // the locals
   Values   _stack;                               // the expression stack
   Values   _locks;                               // the monitor stack (holding the locked values)
@@ -36,100 +50,79 @@
   }
 
   Value check(ValueTag tag, Value t, Value h) {
-    assert(h->as_HiWord()->lo_word() == t, "incorrect stack pair");
+    assert(h == NULL, "hi-word of doubleword value must be NULL");
     return check(tag, t);
   }
 
   // helper routine
   static void apply(Values list, ValueVisitor* f);
 
+  // for simplified copying
+  ValueStack(ValueStack* copy_from, Kind kind, int bci);
+
  public:
   // creation
-  ValueStack(IRScope* scope, int locals_size, int max_stack_size);
+  ValueStack(IRScope* scope, ValueStack* caller_state);
 
-  // merging
-  ValueStack* copy();                            // returns a copy of this w/ cleared locals
-  ValueStack* copy_locks();                      // returns a copy of this w/ cleared locals and stack
-                                                 // Note that when inlining of methods with exception
-                                                 // handlers is enabled, this stack may have a
-                                                 // non-empty expression stack (size defined by
-                                                 // scope()->lock_stack_size())
+  ValueStack* copy()                             { return new ValueStack(this, _kind, _bci); }
+  ValueStack* copy(Kind new_kind, int new_bci)   { return new ValueStack(this, new_kind, new_bci); }
+  ValueStack* copy_for_parsing()                 { return new ValueStack(this, Parsing, -99); }
+
+  void set_caller_state(ValueStack* s)           { assert(kind() == EmptyExceptionState, "only EmptyExceptionStates can be modified"); _caller_state = s; }
+
   bool is_same(ValueStack* s);                   // returns true if this & s's types match (w/o checking locals)
-  bool is_same_across_scopes(ValueStack* s);     // same as is_same but returns true even if stacks are in different scopes (used for block merging w/inlining)
 
   // accessors
   IRScope* scope() const                         { return _scope; }
-  bool is_lock_stack() const                     { return _lock_stack; }
+  ValueStack* caller_state() const               { return _caller_state; }
+  int bci() const                                { return _bci; }
+  Kind kind() const                              { return _kind; }
+
   int locals_size() const                        { return _locals.length(); }
   int stack_size() const                         { return _stack.length(); }
   int locks_size() const                         { return _locks.length(); }
-  int max_stack_size() const                     { return _stack.capacity(); }
   bool stack_is_empty() const                    { return _stack.is_empty(); }
   bool no_active_locks() const                   { return _locks.is_empty(); }
-  ValueStack* caller_state() const;
+  int total_locks_size() const;
 
   // locals access
   void clear_locals();                           // sets all locals to NULL;
 
-  // Kill local i.  Also kill local i+1 if i was a long or double.
   void invalidate_local(int i) {
-    Value x = _locals.at(i);
-    if (x != NULL && x->type()->is_double_word()) {
-      assert(_locals.at(i + 1)->as_HiWord()->lo_word() == x, "locals inconsistent");
-      _locals.at_put(i + 1, NULL);
-    }
+    assert(_locals.at(i)->type()->is_single_word() ||
+           _locals.at(i + 1) == NULL, "hi-word of doubleword value must be NULL");
     _locals.at_put(i, NULL);
   }
 
-
-  Value load_local(int i) const {
+  Value local_at(int i) const {
     Value x = _locals.at(i);
-    if (x != NULL && x->type()->is_illegal()) return NULL;
-    assert(x == NULL || x->as_HiWord() == NULL, "index points to hi word");
-    assert(x == NULL || x->type()->is_illegal() || x->type()->is_single_word() || x == _locals.at(i+1)->as_HiWord()->lo_word(), "locals inconsistent");
+    assert(x == NULL || x->type()->is_single_word() ||
+           _locals.at(i + 1) == NULL, "hi-word of doubleword value must be NULL");
     return x;
   }
 
-  Value local_at(int i) const { return _locals.at(i); }
-
-  // Store x into local i.
   void store_local(int i, Value x) {
-    // Kill the old value
-    invalidate_local(i);
-    _locals.at_put(i, x);
-
-    // Writing a double word can kill other locals
-    if (x != NULL && x->type()->is_double_word()) {
-      // If x + i was the start of a double word local then kill i + 2.
-      Value x2 = _locals.at(i + 1);
-      if (x2 != NULL && x2->type()->is_double_word()) {
-        _locals.at_put(i + 2, NULL);
-      }
-
-      // If x is a double word local, also update i + 1.
-#ifdef ASSERT
-      _locals.at_put(i + 1, x->hi_word());
-#else
-      _locals.at_put(i + 1, NULL);
-#endif
-    }
-    // If x - 1 was the start of a double word local then kill i - 1.
+    // When overwriting local i, check if i - 1 was the start of a
+    // double word local and kill it.
     if (i > 0) {
       Value prev = _locals.at(i - 1);
       if (prev != NULL && prev->type()->is_double_word()) {
         _locals.at_put(i - 1, NULL);
       }
     }
+
+    _locals.at_put(i, x);
+    if (x->type()->is_double_word()) {
+      // hi-word of doubleword value is always NULL
+      _locals.at_put(i + 1, NULL);
+    }
   }
 
-  void replace_locals(ValueStack* with);
-
   // stack access
   Value stack_at(int i) const {
     Value x = _stack.at(i);
-    assert(x->as_HiWord() == NULL, "index points to hi word");
     assert(x->type()->is_single_word() ||
-           x->subst() == _stack.at(i+1)->as_HiWord()->lo_word(), "stack inconsistent");
+           _stack.at(i + 1) == NULL, "hi-word of doubleword value must be NULL");
     return x;
   }
 
@@ -146,7 +139,6 @@
   void values_do(ValueVisitor* f);
 
   // untyped manipulation (for dup_x1, etc.)
-  void clear_stack()                             { _stack.clear(); }
   void truncate_stack(int size)                  { _stack.trunc_to(size); }
   void raw_push(Value t)                         { _stack.push(t); }
   Value raw_pop()                                { return _stack.pop(); }
@@ -156,15 +148,8 @@
   void fpush(Value t)                            { _stack.push(check(floatTag  , t)); }
   void apush(Value t)                            { _stack.push(check(objectTag , t)); }
   void rpush(Value t)                            { _stack.push(check(addressTag, t)); }
-#ifdef ASSERT
-  // in debug mode, use HiWord for 2-word values
-  void lpush(Value t)                            { _stack.push(check(longTag   , t)); _stack.push(new HiWord(t)); }
-  void dpush(Value t)                            { _stack.push(check(doubleTag , t)); _stack.push(new HiWord(t)); }
-#else
-  // in optimized mode, use NULL for 2-word values
   void lpush(Value t)                            { _stack.push(check(longTag   , t)); _stack.push(NULL); }
   void dpush(Value t)                            { _stack.push(check(doubleTag , t)); _stack.push(NULL); }
-#endif // ASSERT
 
   void push(ValueType* type, Value t) {
     switch (type->tag()) {
@@ -182,15 +167,8 @@
   Value fpop()                                   { return check(floatTag  , _stack.pop()); }
   Value apop()                                   { return check(objectTag , _stack.pop()); }
   Value rpop()                                   { return check(addressTag, _stack.pop()); }
-#ifdef ASSERT
-  // in debug mode, check for HiWord consistency
   Value lpop()                                   { Value h = _stack.pop(); return check(longTag  , _stack.pop(), h); }
   Value dpop()                                   { Value h = _stack.pop(); return check(doubleTag, _stack.pop(), h); }
-#else
-  // in optimized mode, ignore HiWord since it is NULL
-  Value lpop()                                   { _stack.pop(); return check(longTag  , _stack.pop()); }
-  Value dpop()                                   { _stack.pop(); return check(doubleTag, _stack.pop()); }
-#endif // ASSERT
 
   Value pop(ValueType* type) {
     switch (type->tag()) {
@@ -208,16 +186,10 @@
   Values* pop_arguments(int argument_size);
 
   // locks access
-  int lock  (IRScope* scope, Value obj);
+  int lock  (Value obj);
   int unlock();
   Value lock_at(int i) const                     { return _locks.at(i); }
 
-  // Inlining support
-  ValueStack* push_scope(IRScope* scope);         // "Push" new scope, returning new resulting stack
-                                                  // Preserves stack and locks, destroys locals
-  ValueStack* pop_scope();                        // "Pop" topmost scope, returning new resulting stack
-                                                  // Preserves stack and locks, destroys locals
-
   // SSA form IR support
   void setup_phi_for_stack(BlockBegin* b, int index);
   void setup_phi_for_local(BlockBegin* b, int index);
@@ -298,16 +270,18 @@
 {                                                                                              \
   int cur_index;                                                                               \
   ValueStack* cur_state = v_state;                                                             \
-  Value v_value;                                                                                 \
-  {                                                                                            \
-    for_each_stack_value(cur_state, cur_index, v_value) {                                      \
-      v_code;                                                                                  \
+  Value v_value;                                                                               \
+  for_each_state(cur_state) {                                                                  \
+    {                                                                                            \
+      for_each_local_value(cur_state, cur_index, v_value) {                                      \
+        v_code;                                                                                  \
+      }                                                                                          \
     }                                                                                          \
-  }                                                                                            \
-  for_each_state(cur_state) {                                                                  \
-    for_each_local_value(cur_state, cur_index, v_value) {                                      \
-      v_code;                                                                                  \
-    }                                                                                          \
+    {                                                                                            \
+      for_each_stack_value(cur_state, cur_index, v_value) {                                      \
+        v_code;                                                                                  \
+      }                                                                                          \
+    }                                                                                            \
   }                                                                                            \
 }
 
--- a/src/share/vm/c1/c1_globals.hpp	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/c1/c1_globals.hpp	Tue Dec 29 19:08:54 2009 +0100
@@ -216,9 +216,6 @@
   develop(bool, DeoptC1, true,                                              \
           "Use deoptimization in C1")                                       \
                                                                             \
-  develop(bool, DeoptOnAsyncException, true,                                \
-          "Deoptimize upon Thread.stop(); improves precision of IR")        \
-                                                                            \
   develop(bool, PrintBailouts, false,                                       \
           "Print bailout and its reason")                                   \
                                                                             \
--- a/src/share/vm/includeDB_compiler1	Wed Sep 22 23:51:03 2010 -0700
+++ b/src/share/vm/includeDB_compiler1	Tue Dec 29 19:08:54 2009 +0100
@@ -448,3 +448,7 @@
 top.hpp                                 c1_globals.hpp
 
 vmStructs.hpp                           c1_Runtime1.hpp
+
+c1_Canonicalizer.cpp                    c1_ValueStack.hpp
+
+c1_LIR.cpp                              c1_ValueStack.hpp