changeset 460:4799b4508e19

meth: coordinate ports (x86_{32,64}, sparc); work through cleanups; fix bugs
author jrose
date Sat, 07 Jul 2012 05:52:26 -0700
parents 649f4375edec
children b06d7d2b0f2f
files meth-lazy-7023639.jit.patch meth-lazy-7023639.patch
diffstat 2 files changed, 693 insertions(+), 474 deletions(-)
--- a/meth-lazy-7023639.jit.patch	Fri Jul 06 17:06:21 2012 -0700
+++ b/meth-lazy-7023639.jit.patch	Sat Jul 07 05:52:26 2012 -0700
@@ -1516,24 +1516,6 @@
 diff --git a/src/share/vm/ci/ciMethod.cpp b/src/share/vm/ci/ciMethod.cpp
 --- a/src/share/vm/ci/ciMethod.cpp
 +++ b/src/share/vm/ci/ciMethod.cpp
-@@ -791,14 +791,14 @@
- }
- 
- // ------------------------------------------------------------------
--// ciMethod::has_member_arg
-+// ciMethod::has_member_appendix
- //
- // Return true if the method is a linker intrinsic like _linkToVirtual.
- // These are built by the JVM.
--bool ciMethod::has_member_arg() const {
-+bool ciMethod::has_member_appendix() const {
-   vmIntrinsics::ID iid = _intrinsic_id;  // do not check if loaded
-   return (MethodHandles::is_signature_polymorphic(iid) &&
--          MethodHandles::has_member_arg(iid));
-+          MethodHandles::has_member_appendix(iid));
- }
- 
- // ------------------------------------------------------------------
 @@ -1023,28 +1023,13 @@
  // ------------------------------------------------------------------
  // ciMethod::code_size_for_inlining
@@ -1633,15 +1615,6 @@
  
    int comp_level();
    int highest_osr_comp_level();
-@@ -260,7 +265,7 @@
-   // JSR 292 support
-   bool is_method_handle_intrinsic()  const;
-   bool is_compiled_lambda_form() const;
--  bool has_member_arg() const;
-+  bool has_member_appendix() const;
- 
-   // What kind of ciObject is this?
-   bool is_method()                               { return true; }
 diff --git a/src/share/vm/ci/ciMethodHandle.cpp b/src/share/vm/ci/ciMethodHandle.cpp
 --- a/src/share/vm/ci/ciMethodHandle.cpp
 +++ b/src/share/vm/ci/ciMethodHandle.cpp
@@ -2134,7 +2107,7 @@
 @@ -380,9 +381,8 @@
    if (pool->has_preresolution()
        || (resolved_klass() == SystemDictionary::MethodHandle_klass() &&
-           MethodHandles::is_signature_polymorphic(resolved_klass(), method_name))) {
+           MethodHandles::is_signature_polymorphic_name(resolved_klass(), method_name))) {
 -    oop appendix = NULL;
 -    methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index, &appendix);
 -    if (result_oop != NULL && appendix == NULL) {
@@ -2170,7 +2143,7 @@
 +
 +
 +bool constantPoolOopDesc::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) {
-+  if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
++  if (cpool->cache() == NULL)  return false;  // nothing to load yet
 +  // XXX Is there a simpler way to get to the secondary entry?
 +  ConstantPoolCacheEntry* e;
 +  if (constantPoolCacheOopDesc::is_secondary_index(which)) {
@@ -2181,7 +2154,7 @@
 +      if (PrintMiscellaneous && (Verbose||WizardMode)) {
 +        tty->print_cr("bad operand %d in:", which); cpool->print();
 +      }
-+      return NULL;
++      return false;
 +    }
 +    e = cpool->cache()->entry_at(cache_index);
 +  }
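
The pair of return-value fixes above (return NULL -> return false) is worth a second look: has_appendix_at_if_loaded returns bool, and in C++ a returned NULL merely converts to false, so the old code compiled but read as if a pointer were being returned. A minimal standalone illustration, not HotSpot code:

    // Illustrative only: NULL converts silently to bool in C++.
    bool cache_ready(const void* cache) {
      if (cache == NULL)  return NULL;   // legal, but misleading; yields false
      return true;                       // the hunk above spells such returns 'false'
    }
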
@@ -2410,19 +2383,6 @@
  }
  
  
-@@ -877,10 +876,10 @@
-           MethodHandles::is_signature_polymorphic_intrinsic(iid));
- }
- 
--bool methodOopDesc::has_member_arg() const {
-+bool methodOopDesc::has_member_appendix() const {
-   vmIntrinsics::ID iid = intrinsic_id();
-   return (MethodHandles::is_signature_polymorphic(iid) &&
--          MethodHandles::has_member_arg(iid));
-+          MethodHandles::has_member_appendix(iid));
- }
- 
- // Make an instance of a signature-polymorphic internal MH primitive.
 @@ -964,7 +963,7 @@
  }
  
@@ -2432,15 +2392,16 @@
                                                  u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) {
    // Code below does not work for native methods - they should never get rewritten anyway
    assert(!m->is_native(), "cannot rewrite native methods");
-@@ -1123,6 +1122,12 @@
+@@ -1119,11 +1118,12 @@
  
  // These two methods are static since a GC may move the methodOopDesc
  bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
+-  if (THREAD->is_Compiler_thread())
 +  if (THREAD->is_Compiler_thread()) {
-+    // There is nothing useful this routine can do.
-+    // Hopefully, the signature contains only well-known classes.
-+    // We could scan for this and return true/false, but the caller won't care.
-+    return false;
+     // There is nothing useful this routine can do from within the Compile thread.
+     // Hopefully, the signature contains only well-known classes.
+     // We could scan for this and return true/false, but the caller won't care.
+     return false;
 +  }
    bool sig_is_loaded = true;
    Handle class_loader(THREAD, instanceKlass::cast(m->method_holder())->class_loader());
@@ -2492,15 +2453,6 @@
  
    // returns true if the method has any monitors.
    bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
-@@ -592,7 +587,7 @@
-   // JSR 292 support
-   bool is_method_handle_intrinsic() const;          // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
-   bool is_compiled_lambda_form() const;             // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
--  bool has_member_arg() const;                      // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
-+  bool has_member_appendix() const;                 // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
-   static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
-                                                    Symbol* signature, //anything at all
-                                                    TRAPS);
 @@ -647,8 +642,10 @@
    bool jfr_towrite()                 { return _jfr_towrite; }
    void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
@@ -3521,7 +3473,7 @@
 +      } else {
 +        // Case 2: Here we are *after* the invoke (in the callee) and need to
 +        //         remove any appendix arguments that were popped.
-+        inputs = callee->invoke_arg_size(code) - (callee->has_member_appendix() ? 1 : 0);
++        inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0);
 +      }
 +      int size = callee->return_type()->size();
        depth = size - inputs;
@@ -3690,13 +3642,7 @@
 diff --git a/src/share/vm/prims/methodHandles.hpp b/src/share/vm/prims/methodHandles.hpp
 --- a/src/share/vm/prims/methodHandles.hpp
 +++ b/src/share/vm/prims/methodHandles.hpp
-@@ -98,11 +98,19 @@
-             iid <= vmIntrinsics::LAST_MH_SIG_POLY);
-   }
- 
--  static bool has_member_arg(vmIntrinsics::ID iid) {
-+  static bool has_member_appendix(vmIntrinsics::ID iid) {
-     assert(is_signature_polymorphic(iid), "");
+@@ -104,6 +104,14 @@
      return (iid >= vmIntrinsics::_linkToVirtual &&
              iid <= vmIntrinsics::_linkToInterface);
    }
@@ -3704,13 +3650,13 @@
 +    if ((klass == vmSymbols::java_lang_invoke_MethodHandle()) &&
 +        is_signature_polymorphic_name(name)) {
 +      vmIntrinsics::ID iid = signature_polymorphic_name_id(name);
-+      return has_member_appendix(iid);
++      return has_member_arg(iid);
 +    }
 +    return false;
 +  }
  
    static Symbol* signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid);
-   static vmIntrinsics::ID signature_polymorphic_name_id(Symbol* name);
+   static int signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid);
 diff --git a/src/share/vm/runtime/fieldDescriptor.hpp b/src/share/vm/runtime/fieldDescriptor.hpp
 --- a/src/share/vm/runtime/fieldDescriptor.hpp
 +++ b/src/share/vm/runtime/fieldDescriptor.hpp
@@ -3887,9 +3833,9 @@
 +      Bytecode_invoke inv(caller, elem->bci());
 +      // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
 +      // NOTE:  Use machinery here that avoids resolving of any kind.
-+      const bool has_member_appendix =
++      const bool has_member_arg =
 +          !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
-+      callee_parameters = callee->size_of_parameters() + (has_member_appendix ? 1 : 0);
++      callee_parameters = callee->size_of_parameters() + (has_member_arg ? 1 : 0);
 +      callee_locals     = callee->max_locals();
 +    }
 +    elem->unpack_on_stack(caller_actual_parameters,
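
The deoptimization hunk above is the model use of the restored has_member_arg name: it counts the trailing MemberName argument of a linkTo* linker without triggering any resolution. Condensed into a sketch (signatures as introduced by this patch; illustrative only):

    // Sketch: extra argument accounting during deopt, assuming the
    // has_member_arg(Symbol* klass, Symbol* name) overload added above.
    Bytecode_invoke inv(caller, bci);
    int extra = (!inv.is_invokedynamic() &&
                 MethodHandles::has_member_arg(inv.klass(), inv.name())) ? 1 : 0;
    int callee_parameters = callee->size_of_parameters() + extra;  // appendix included
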
--- a/meth-lazy-7023639.patch	Fri Jul 06 17:06:21 2012 -0700
+++ b/meth-lazy-7023639.patch	Sat Jul 07 05:52:26 2012 -0700
@@ -5,7 +5,7 @@
 diff --git a/src/cpu/sparc/vm/assembler_sparc.cpp b/src/cpu/sparc/vm/assembler_sparc.cpp
 --- a/src/cpu/sparc/vm/assembler_sparc.cpp
 +++ b/src/cpu/sparc/vm/assembler_sparc.cpp
-@@ -2998,26 +2998,55 @@
+@@ -2998,26 +2998,60 @@
  }
  
  
@@ -13,15 +13,20 @@
 +void MacroAssembler::lookup_virtual_method(Register recv_klass,
 +                                           RegisterOrConstant vtable_index,
 +                                           Register method_result) {
-+  const int base = instanceKlass::vtable_start_offset() * wordSize;
-+  assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
-+  if (vtable_index.is_constant()) {
-+    add(recv_klass, vtable_index.as_constant() << LogBytesPerWord, recv_klass);
-+  } else {
-+    sll_ptr(vtable_index.as_register(), LogBytesPerWord, vtable_index.as_register());
-+    add(recv_klass, vtable_index.as_register(), recv_klass);
-+  }
-+  Address vtable_entry_addr(recv_klass, base + vtableEntry::method_offset_in_bytes());
++  assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
++  Register sethi_temp = method_result;
++  const int base = (instanceKlass::vtable_start_offset() * wordSize +
++                    // method pointer offset within the vtable entry:
++                    vtableEntry::method_offset_in_bytes());
++  RegisterOrConstant vtable_offset = vtable_index;
++  // Each of the following three lines potentially generates an instruction.
++  // But the total number of address formation instructions will always be
++  // at most two, and will often be zero.  In any case, it will be optimal.
++  // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
++  // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
++  vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
++  vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
++  Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
 +  ld_ptr(vtable_entry_addr, method_result);
 +}
 +
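
The rewritten lookup_virtual_method centers on one address computation; the comments above describe how few instructions it costs, and the value it forms is easier to see in scalar form. A sketch of the arithmetic only (HotSpot accessors as named above, not the emitted SPARC code):

    // Byte offset, relative to the receiver's klass, of the methodOop slot
    // for a given vtable index.
    int vtable_entry_offset(int vtable_index) {
      const int base = instanceKlass::vtable_start_offset() * wordSize
                     + vtableEntry::method_offset_in_bytes();
      return base + vtable_index * vtableEntry::size() * wordSize;
      // method_result = *(methodOop*)((address)recv_klass + offset)
    }
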
@@ -67,7 +72,7 @@
    restore();
    ba_short(L_success);
  
-@@ -3234,54 +3263,6 @@
+@@ -3234,54 +3268,6 @@
  }
  
  
@@ -263,7 +268,7 @@
 diff --git a/src/cpu/sparc/vm/methodHandles_sparc.cpp b/src/cpu/sparc/vm/methodHandles_sparc.cpp
 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp
 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp
-@@ -31,300 +31,19 @@
+@@ -31,452 +31,37 @@
  
  #ifdef PRODUCT
  #define BLOCK_COMMENT(str) /* nothing */
@@ -569,7 +574,8 @@
  void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
    if (VerifyMethodHandles)
      verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg,
-@@ -332,151 +51,17 @@
+-                 "AMH argument is a Class");
++                 "MH argument is a Class");
    __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
  }
  
@@ -744,7 +750,7 @@
    Label L_ok, L_bad;
    BLOCK_COMMENT("verify_klass {");
    __ verify_oop(obj_reg);
-@@ -498,19 +91,52 @@
+@@ -498,538 +91,405 @@
    __ set(ExternalAddress(klass_addr), temp2_reg);
    __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
    __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
@@ -759,21 +765,10 @@
 +
 +void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
 +  Label L;
-+  __ ld(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp);
++  __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp);
 +  __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, temp);
 +  __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK,  temp);
-+  if (ref_kind < (1<<BitsPerByte)) {
-+    __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L);
-+  } else {
-+    // hack for >=2 ref_kinds
-+    int ref_kinds = (ref_kind >> BitsPerByte);
-+    for (int rk2 = 0; ref_kinds != 0; rk2++) {
-+      if ((ref_kinds & nth_bit(rk2)) != 0) {
-+        __ cmp_and_br_short(temp, rk2, Assembler::equal, Assembler::pt, L);
-+        ref_kinds -= nth_bit(rk2);
-+      }
-+    }
-+  }
++  __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L);
 +  { char* buf = NEW_C_HEAP_ARRAY(char, 100);
 +    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
 +    if (ref_kind == JVM_REF_invokeVirtual ||
@@ -795,18 +790,42 @@
    __ verify_oop(method);
 -  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
 -  if (JvmtiExport::can_post_interpreter_events()) {
++
++  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
++    Label run_compiled_code;
+     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+     // compiled code in threads for which the event is enabled.  Check here for
+     // interp_only_mode if these events CAN be enabled.
+     __ verify_thread();
+-    Label skip_compiled_code;
+-
+     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
+     __ ld(interp_only, temp);
+     __ tst(temp);
+-    __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
+-    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
+-    __ bind(skip_compiled_code);
++    __ br(Assembler::zero, true, Assembler::pt, run_compiled_code);
++    __ delayed()->nop();
++    __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
++    __ jmp(target, 0);
++    __ delayed()->nop();
++    __ BIND(run_compiled_code);
++    // Note: we could fill some delay slots here, but
++    // it doesn't matter, since this is interpreter code.
+   }
++
 +  const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() :
 +                                                     methodOopDesc::from_interpreted_offset();
 +  __ ld_ptr(G5_method, in_bytes(entry_offset), target);
-+  if (JvmtiExport::can_post_interpreter_events() && !for_compiler_entry) {
-     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
-     // compiled code in threads for which the event is enabled.  Check here for
-     // interp_only_mode if these events CAN be enabled.
-@@ -528,500 +154,352 @@
+   __ jmp(target, 0);
    __ delayed()->nop();
  }
  
-+void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, Register recv, Register method_temp, Register temp2, Register temp3) {
++void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
++                                        Register recv, Register method_temp,
++                                        Register temp2, Register temp3,
++                                        bool for_compiler_entry) {
 +  BLOCK_COMMENT("jump_to_lambda_form {");
 +  // This is the initial entry point of a lazy method handle.
 +  // After type checking, it picks up the invoker from the LambdaForm.
@@ -824,14 +843,14 @@
 +  __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())),     method_temp);
 +  __ verify_oop(method_temp);
 +
-+  if (VerifyMethodHandles) {
++  if (VerifyMethodHandles && !for_compiler_entry) {
 +    // make sure recv is already on stack
 +    __ load_sized_value(Address(method_temp, methodOopDesc::size_of_parameters_offset()),
 +                        temp2,
 +                        sizeof(u2), /*is_signed*/ false);
 +    // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
++    Label L;
 +    __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2);
-+    Label L;
 +    __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L);
 +    __ STOP("receiver not on stack");
 +    __ BIND(L);
@@ -844,6 +863,7 @@
  
  // Code generation
 -address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
+-  // I5_savedSP/O5_savedSP: sender SP (must preserve)
 +address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
 +                                                                vmIntrinsics::ID iid) {
 +  const bool not_for_compiler_entry = false;  // this is the interpreter entry
@@ -857,7 +877,8 @@
 +    return NULL;
 +  }
 +
-   // I5_savedSP/O5_savedSP: sender SP (must preserve)
++  // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted)
++  // G5_method:  methodOop
    // G4 (Gargs): incoming argument list (must preserve)
 -  // G5_method:  invoke methodOop
 -  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
@@ -868,7 +889,6 @@
 -  Register O3_scratch = O3;
 -  Register O4_argslot = O4;
 -  Register O4_argbase = O4;
-+  // G5_method:  methodOop
 +  // O0: used as temp to hold mh or receiver
 +  // O1, O4: garbage temps, blown away
 +  Register O1_scratch    = O1;
@@ -931,6 +951,7 @@
 +                      sizeof(u2), /*is_signed*/ false);
 +  // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
 +  Address G4_first_arg_addr = __ argument_address(O4_param_size, O1_scratch, -1);
++  DEBUG_ONLY(O4_param_size = noreg);
  
 -  trace_method_handle(_masm, "invokeExact");
 +  Register O0_mh = noreg;
@@ -939,7 +960,7 @@
 +  }
  
 -  __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
-+  // O4_param_size is live!
++  // G4_first_arg_addr is live!
  
 -  // Nobody uses the MH receiver slot after this.  Make sure.
 -  DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr));
@@ -988,9 +1009,7 @@
 +    Register G5_member = G5_method;  // MemberName ptr; incoming method ptr is dead now
 +    __ ld_ptr(__ argument_address(constant(0)), G5_member);
 +    __ add(Gargs, Interpreter::stackElementSize, Gargs);
-+    __ dec(O4_param_size);  // decrease parameter size (not really live at this point...)
 +    generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry);
-+
    }
 -#endif //ASSERT
  
@@ -1150,14 +1169,19 @@
 +      // same as TemplateTable::invokevirtual,
 +      // minus the CP setup and profiling:
 +
++      if (VerifyMethodHandles) {
++        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
++      }
++
 +      // pick out the vtable index from the MemberName, and then we can discard it:
 +      Register temp2_index = temp2;
 +      __ ld_ptr(member_vmindex, temp2_index);
-+      Label L_skip_vtable;
-+      __ cmp_and_br_short(temp2_index, (int) 0, Assembler::less, Assembler::pn, L_skip_vtable);
 +
 +      if (VerifyMethodHandles) {
-+        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
++        Label L_index_ok;
++        __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok);
++        __ STOP("no virtual index");
++        __ BIND(L_index_ok);
 +      }
 +
 +      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
@@ -1165,22 +1189,6 @@
 +
 +      // get target methodOop & entry point
 +      __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method);
-+      __ verify_oop(G5_method);
-+      jump_from_method_handle(_masm, G5_method, temp1, temp3, for_compiler_entry);
-+
-+      __ BIND(L_skip_vtable);
-+      if (VerifyMethodHandles) {
-+        Label L;
-+        __ cmp_and_br_short(temp2_index, (int) methodOopDesc::nonvirtual_vtable_index, Assembler::equal, Assembler::pt, L);
-+        __ STOP("invalid vtable index for MH.invokeVirtual");
-+        __ bind(L);
-+        // The MemberName.ref_kind should be invokeSpecial.
-+        // But allow some wiggle room.
-+        int ref_kinds = ((1 << JVM_REF_invokeVirtual) |
-+                         (1 << JVM_REF_invokeSpecial));
-+        verify_ref_kind(_masm, ref_kinds << BitsPerByte, member_reg, temp3);
-+      }
-+      __ load_heap_oop(member_vmtarget, G5_method);
 +      method_is_live = true;
 +      break;
      }
@@ -1239,7 +1247,7 @@
 +    }
 +
 +    if (method_is_live) {
-+      // live at this point:  G5_method
++      // live at this point:  G5_method, O5_savedSP (if interpreted)
 +
 +      // After figuring out which concrete method to call, jump into it.
 +      // Note that this works in the interpreter with no data motion.
@@ -1269,14 +1277,7 @@
 -    return temp_reg;
 -  }
 -}
-+#ifndef PRODUCT
-+enum {
-+  ARG_LIMIT = 255, SLOP = 45,
-+  // use this parameter for checking for garbage stack movements:
-+  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
-+  // the slop defends against false alarms due to fencepost errors
-+};
- 
+-
 -// Helper to insert argument slots into the stack.
 -// arg_slots must be a multiple of stack_move_unit() and < 0
 -// argslot_reg is decremented to point to the new (shifted) location of the argslot
@@ -1593,7 +1594,7 @@
 -  BLOCK_COMMENT("} move_return_value");
 -}
 -
--#ifndef PRODUCT
+ #ifndef PRODUCT
 -void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no)  {
 -    RicochetFrame* rf = new RicochetFrame(*fr);
 -
@@ -1616,23 +1617,47 @@
  void trace_method_handle_stub(const char* adaptername,
                                oopDesc* mh,
                                intptr_t* saved_sp,
-@@ -1098,9 +576,12 @@
+                               intptr_t* args,
+                               intptr_t* tracing_fp) {
+-  bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh
+-
+-  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args);
++  bool has_mh = (strstr(adaptername, "/static") == NULL &&
++                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
++  const char* mh_reg_name = has_mh ? "G3_mh" : "G3";
++  tty->print_cr("MH %s %s="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT,
++                adaptername, mh_reg_name,
++                (intptr_t) mh, saved_sp, args);
+ 
+   if (Verbose) {
+     // dumping last frame with frame::describe
+@@ -1090,6 +550,7 @@
+ 
+     // mark saved_sp, if seems valid (may not be valid for some adapters)
+     intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
++    const int ARG_LIMIT = 255, SLOP = 45, UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP);
+     if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
+       values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
+     }
+@@ -1097,10 +558,13 @@
+     // Note: the unextended_sp may not be correct
      tty->print_cr("  stack layout:");
      values.print(p);
-   }
+-  }
 -
 -  if (has_mh) {
 -    print_method_handle(mh);
-+  if (has_mh && mh->is_oop()) {
-+    mh->print();
-+    if (java_lang_invoke_MethodHandle::is_instance(mh)) {
-+      if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
-+        java_lang_invoke_MethodHandle::form(mh)->print();
-+    }
-   }
- }
- 
-@@ -1143,1260 +624,3 @@
++    if (has_mh && mh->is_oop()) {
++      mh->print();
++      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
++        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
++          java_lang_invoke_MethodHandle::form(mh)->print();
++      }
++    }
+   }
+ }
+ 
+@@ -1143,1260 +607,3 @@
    BLOCK_COMMENT("} trace_method_handle");
  }
  #endif // PRODUCT
@@ -3083,7 +3108,7 @@
  
    static void verify_klass(MacroAssembler* _masm,
                             Register obj_reg, KlassHandle klass,
-@@ -223,8 +46,19 @@
+@@ -223,8 +46,17 @@
                   "reference is a MH");
    }
  
@@ -3096,14 +3121,157 @@
 +                                      Register temp, Register temp2,
 +                                      bool for_compiler_entry);
 +
-+  static void throw_exception_from_method_handle(MacroAssembler* _masm,
-+                                                 RegisterOrConstant code_reg,
-+                                                 Register actual_reg,
-+                                                 Register required_reg);
-+
-+  static void jump_to_lambda_form(MacroAssembler* _masm, Register recv, Register method_temp, Register temp2, Register temp3, bool for_compiler_entry);
++  static void jump_to_lambda_form(MacroAssembler* _masm,
++                                  Register recv, Register method_temp,
++                                  Register temp2, Register temp3,
++                                  bool for_compiler_entry);
  
    static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
+diff --git a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
++++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+@@ -822,6 +822,7 @@
+     bool G1_forced = false;
+ #endif // ASSERT
+     if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
++@@@
+ #ifdef _LP64
+       Register ld_off = Rdisp;
+       __ set(reg2offset(r_1) + extraspace + bias, ld_off);
+@@ -1937,20 +1938,131 @@
+   __ bind(done);
+ }
+ 
++static void gen_special_dispatch(MacroAssembler* masm,
++                                 int total_args_passed,
++                                 int comp_args_on_stack,
++                                 vmIntrinsics::ID special_dispatch,
++                                 const BasicType* sig_bt,
++                                 const VMRegPair* regs) {
++
++  // Now write the args into the outgoing interpreter space
++  bool     has_receiver   = false;
++  Register receiver_reg   = noreg;
++  int      member_arg_pos = -1;
++  Register member_reg     = noreg;
++  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
++  if (ref_kind != 0) {
++    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
++    member_reg = G5_method;  // known to be free at this point
++    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
++  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
++    has_receiver = true;
++  } else {
++    guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
++  }
++
++  if (member_reg != noreg) {
++    // Load the member_arg into register, if necessary.
++    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
++    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
++    VMReg r = regs[member_arg_pos].first();
++    assert(r->is_valid(), "bad member arg");
++    if (r->is_stack()) {
++      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
++      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
++      __ ld_ptr(SP, ld_off, member_reg);
++    } else {
++      // no data motion is needed
++      member_reg = r->as_Register();
++    }
++  }
++
++  if (has_receiver) {
++    // Make sure the receiver is loaded into a register.
++    assert(total_args_passed > 0, "oob");
++    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
++    VMReg r = regs[0].first();
++    assert(r->is_valid(), "bad receiver arg");
++    if (r->is_stack()) {
++      // Porting note:  This assumes that compiled calling conventions always
++      // pass the receiver oop in a register.  If this is not true on some
++      // platform, pick a temp and load the receiver from stack.
++      assert(false, "receiver always in a register");
++      receiver_reg = G3_scratch;  // known to be free at this point
++      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
++      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
++      __ ld_ptr(SP, ld_off, receiver_reg);
++    } else {
++      // no data motion is needed
++      receiver_reg = r->as_Register();
++    }
++  }
++
++  // Figure out which address we are really jumping to:
++  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
++                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
++}
++
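
gen_special_dispatch keys everything off signature_polymorphic_intrinsic_ref_kind: a nonzero ref_kind means a linkTo* linker carrying a trailing MemberName, and ref_kind_has_receiver then decides whether argument 0 is a receiver. For orientation, a sketch of that predicate, assuming the standard JVM_REF_* numbering from jvm.h (not quoted from this patch):

    // getField=1, getStatic=2, putField=3, putStatic=4, invokeVirtual=5,
    // invokeStatic=6, invokeSpecial=7, newInvokeSpecial=8, invokeInterface=9:
    // the receiver-taking kinds are the odd-numbered ones.
    bool ref_kind_has_receiver(int ref_kind) {
      return (ref_kind & 1) != 0;
    }
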
+ // ---------------------------------------------------------------------------
+ // Generate a native wrapper for a given method.  The method takes arguments
+ // in the Java compiled code convention, marshals them to the native
+ // convention (handlizes oops, etc), transitions to native, makes the call,
+ // returns to java state (possibly blocking), unhandlizes any result and
+ // returns.
++//
++// Critical native functions are a shorthand for the use of
++// GetPrimitiveArrayCritical and disallow the use of any other JNI
++// functions.  The wrapper is expected to unpack the arguments before
++// passing them to the callee and perform checks before and after the
++// native call to ensure that the GC_locker
++// lock_critical/unlock_critical semantics are followed.  Some other
++// parts of JNI setup are skipped, like the tear down of the JNI handle
++// block and the check for pending exceptions, since it's impossible for them
++// to be thrown.
++//
++// They are roughly structured like this:
++//    if (GC_locker::needs_gc())
++//      SharedRuntime::block_for_jni_critical();
++//    transition to thread_in_native
++//    unpack array arguments and call native entry point
++//    check for safepoint in progress
++//    check if any thread suspend flags are set
++//      call into JVM and possibly unlock the JNI critical
++//      if a GC was suppressed while in the critical native.
++//    transition back to thread_in_Java
++//    return to caller
++//
+ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
+                                                 methodHandle method,
+                                                 int compile_id,
+                                                 int total_in_args,
+                                                 int comp_args_on_stack, // in VMRegStackSlots
+-                                                BasicType *in_sig_bt,
+-                                                VMRegPair *in_regs,
++                                                BasicType* in_sig_bt,
++                                                VMRegPair* in_regs,
+                                                 BasicType ret_type) {
++  if (method->is_method_handle_intrinsic()) {
++    vmIntrinsics::ID iid = method->intrinsic_id();
++    intptr_t start = (intptr_t)__ pc();
++    int vep_offset = ((intptr_t)__ pc()) - start;
++    gen_special_dispatch(masm,
++                         total_in_args,
++                         comp_args_on_stack,
++                         method->intrinsic_id(),
++                         in_sig_bt,
++                         in_regs);
++    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
++    __ flush();
++    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
++    return nmethod::new_native_nmethod(method,
++                                       compile_id,
++                                       masm->code(),
++                                       vep_offset,
++                                       frame_complete,
++                                       stack_slots / VMRegImpl::slots_per_word,
++                                       in_ByteSize(-1),
++                                       in_ByteSize(-1),
++                                       (OopMapSet*)NULL);
++  }
+   bool is_critical_native = true;
+   address native_func = method->critical_native_function();
+   if (native_func == NULL) {
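
Note the shape this gives generate_native_wrapper: method handle intrinsics now take an early exit through gen_special_dispatch and get a frameless, oop-map-free stub, while ordinary natives continue into the critical/JNI machinery below. Schematically (a control-flow sketch with hypothetical helper names, not the real signatures):

    nmethod* generate_native_wrapper_sketch(methodHandle method) {
      if (method->is_method_handle_intrinsic()) {
        // no frame, no oop map, no out-preserve slots:
        // the stub only shuffles registers and jumps to the target
        return special_dispatch_stub(method);   // hypothetical helper
      }
      return ordinary_jni_wrapper(method);      // hypothetical helper
    }
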
 diff --git a/src/cpu/sparc/vm/stubGenerator_sparc.cpp b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp
 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
@@ -3189,7 +3357,7 @@
    }
    // first time invocation - must resolve first
    __ call_VM(noreg, entry, O1);
-@@ -2139,48 +2144,52 @@
+@@ -2139,48 +2144,54 @@
  }
  
  void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
@@ -3257,6 +3425,8 @@
 -  if (Ritable_index != noreg) {
 -    __ ld_ptr(Rcache, index_offset, Ritable_index);
 +  if (itable_index != noreg) {
++    // pick up itable index from f2 also:
++    assert(byte_no == f1_byte, "already picked up f1");
 +    __ ld_ptr(Address(cache, index_offset), itable_index);
    }
 -  __ ld_ptr(Rcache, flags_offset, Rflags);
@@ -3264,7 +3434,7 @@
  }
  
  // The Rcache register must be set before call
-@@ -2272,7 +2281,7 @@
+@@ -2272,7 +2283,7 @@
  
    if (__ membar_has_effect(membar_bits)) {
      // Get volatile flag
@@ -3273,7 +3443,7 @@
      __ and3(Rflags, Lscratch, Lscratch);
    }
  
-@@ -2280,9 +2289,9 @@
+@@ -2280,9 +2291,9 @@
  
    // compute field type
    Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
@@ -3286,7 +3456,7 @@
  
    // Check atos before itos for getstatic, more likely (in Queens at least)
    __ cmp(Rflags, atos);
-@@ -2445,7 +2454,7 @@
+@@ -2445,7 +2456,7 @@
    if (__ membar_has_effect(membar_bits)) {
      // Get volatile flag
      __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
@@ -3295,7 +3465,7 @@
    }
  
    switch (bytecode()) {
-@@ -2569,9 +2578,9 @@
+@@ -2569,9 +2580,9 @@
        Label two_word, valsizeknown;
        __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
        __ mov(Lesp, G4_scratch);
@@ -3308,7 +3478,7 @@
        __ cmp(Rflags, ltos);
        __ br(Assembler::equal, false, Assembler::pt, two_word);
        __ delayed()->cmp(Rflags, dtos);
-@@ -2625,7 +2634,7 @@
+@@ -2625,7 +2636,7 @@
  
    Label notVolatile, checkVolatile, exit;
    if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
@@ -3317,7 +3487,7 @@
      __ and3(Rflags, Lscratch, Lscratch);
  
      if (__ membar_has_effect(read_bits)) {
-@@ -2635,9 +2644,9 @@
+@@ -2635,9 +2646,9 @@
      }
    }
  
@@ -3330,7 +3500,7 @@
  
    // compute field type
    Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
-@@ -2833,7 +2842,7 @@
+@@ -2833,7 +2844,7 @@
    Label notVolatile, checkVolatile, exit;
    if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
      __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
@@ -3339,7 +3509,7 @@
      __ and3(Rflags, Lscratch, Lscratch);
      if (__ membar_has_effect(read_bits)) {
        __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
-@@ -2916,7 +2925,7 @@
+@@ -2916,7 +2927,7 @@
  
      // Test volatile
      Label notVolatile;
@@ -3348,7 +3518,7 @@
      __ btst(Rflags, Lscratch);
      __ br(Assembler::zero, false, Assembler::pt, notVolatile);
      __ delayed()->nop();
-@@ -2936,6 +2945,72 @@
+@@ -2936,27 +2947,82 @@
    ShouldNotReachHere();
  }
  
@@ -3390,6 +3560,9 @@
 +    __ btst(flags, temp);
 +    __ br(Assembler::zero, false, Assembler::pt, L_no_push);
 +    __ delayed()->nop();
++    // Push the appendix as a trailing parameter.
++    // This must be done before we get the receiver,
++    // since the parameter_size includes it.
 +    __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
 +    __ bind(L_no_push);
 +  }
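
The three-line comment added above records the key ordering invariant: the appendix must be pushed before the receiver is located, because parameter_size already counts the appendix. The test guarding L_no_push reduces to a flag probe; a sketch (the appendix bit's exact home in the cp-cache flags word is assumed from this patch series, not spelled out in the hunk):

    // Illustrative only: push the appendix iff the call site has one.
    bool has_appendix = (flags & (1 << ConstantPoolCacheEntry::has_appendix_shift)) != 0;
    if (has_appendix) {
      push_ptr(appendix);   // trailing parameter (MethodType, CallSite, etc.)
    }
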
@@ -3405,7 +3578,7 @@
 +  __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
 +  // Make sure we don't need to mask flags after the above shift
 +  ConstantPoolCacheEntry::verify_tos_state_shift();
-+  // get return address
++  // load return address
 +  {
 +    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
 +        (address)Interpreter::return_5_addrs_by_index_table() :
@@ -3421,7 +3594,29 @@
  void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
    Register Rtemp = G4_scratch;
    Register Rcall = Rindex;
-@@ -2974,7 +3049,7 @@
+   assert_different_registers(Rcall, G5_method, Gargs, Rret);
+ 
+   // get target methodOop & entry point
+-  const int base = instanceKlass::vtable_start_offset() * wordSize;
+-  if (vtableEntry::size() % 3 == 0) {
+-    // scale the vtable index by 12:
+-    int one_third = vtableEntry::size() / 3;
+-    __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
+-    __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
+-    __ add(Rindex, Rtemp, Rindex);
+-  } else {
+-    // scale the vtable index by 8:
+-    __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
+-  }
+-
+-  __ add(Rrecv, Rindex, Rrecv);
+-  __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);
+-
++  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
+   __ call_from_interpreter(Rcall, Gargs, Rret);
+ }
+ 
+@@ -2974,7 +3040,7 @@
    __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  
    // Check for vfinal
@@ -3430,7 +3625,7 @@
    __ btst(Rret, G4_scratch);
    __ br(Assembler::zero, false, Assembler::pt, notFinal);
    __ delayed()->and3(Rret, 0xFF, G4_scratch);      // gets number of parameters
-@@ -2993,9 +3068,9 @@
+@@ -2993,9 +3059,9 @@
    // get return address
    AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
    __ set(table, Rtemp);
@@ -3443,7 +3638,7 @@
    __ sll(Rret,  LogBytesPerWord, Rret);
    __ ld_ptr(Rtemp, Rret, Rret);         // get return address
  
-@@ -3036,9 +3111,9 @@
+@@ -3036,9 +3102,9 @@
    // get return address
    AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
    __ set(table, Rtemp);
@@ -3456,7 +3651,7 @@
    __ sll(Rret,  LogBytesPerWord, Rret);
    __ ld_ptr(Rtemp, Rret, Rret);         // get return address
  
-@@ -3047,65 +3122,37 @@
+@@ -3047,65 +3113,37 @@
    __ call_from_interpreter(Rscratch, Gargs, Rret);
  }
  
@@ -3538,7 +3733,7 @@
    __ call_from_interpreter(Rscratch, Gargs, Rret);
  }
  
-@@ -3122,7 +3169,7 @@
+@@ -3122,7 +3160,7 @@
    Label notFinal;
  
    // Check for vfinal
@@ -3547,7 +3742,7 @@
    __ btst(Rflags, Rscratch);
    __ br(Assembler::zero, false, Assembler::pt, notFinal);
    __ delayed()->nop();
-@@ -3144,36 +3191,20 @@
+@@ -3144,36 +3182,20 @@
    transition(vtos, vtos);
    assert(byte_no == f1_byte, "use this argument");
  
@@ -3594,7 +3789,7 @@
    __ verify_oop(RklassOop);
  
    // Special case of invokeinterface called for virtual method of
-@@ -3181,7 +3212,7 @@
+@@ -3181,7 +3203,7 @@
    // This code isn't produced by javac, but could be produced by
    // another compliant java compiler.
    Label notMethod;
@@ -3603,7 +3798,7 @@
    __ btst(Rflags, Rscratch);
    __ br(Assembler::zero, false, Assembler::pt, notMethod);
    __ delayed()->nop();
-@@ -3260,13 +3291,42 @@
+@@ -3260,13 +3282,42 @@
  
    __ verify_oop(G5_method);
    __ call_from_interpreter(Rcall, Gargs, Rret);
@@ -3648,7 +3843,7 @@
  
    if (!EnableInvokeDynamic) {
      // We should not encounter this bytecode if !EnableInvokeDynamic.
-@@ -3279,42 +3339,24 @@
+@@ -3279,42 +3330,24 @@
      return;
    }
  
@@ -3664,6 +3859,7 @@
 -  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
 -                             /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
 -  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
+-
 +  const Register Rret        = Lscratch;
 +  const Register G4_callsite = G4_scratch;
 +  const Register Rscratch    = G3_scratch;
@@ -3674,7 +3870,8 @@
 +  // G5: MH.linkToCallSite method (from f2)
 +
 +  // Note:  G4_callsite is already pushed by prepare_invoke
- 
++
++  // %%% should make a type profile for any invokedynamic that takes a ref argument
    // profile this call
    __ profile_call(O4);
  
@@ -3700,7 +3897,6 @@
 -  __ delayed()->mov(SP, Llast_SP);
 +  // do the call
 +  __ verify_oop(G5_method);
-+  __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
 +  __ call_from_interpreter(Rscratch, Gargs, Rret);
  }
  
@@ -3722,6 +3918,33 @@
    // helper function
    static void invokevfinal_helper(Register Rcache, Register Rret);
    static void invokeinterface_object_method(Register RklassOop, Register Rcall,
+diff --git a/src/cpu/sparc/vm/vtableStubs_sparc.cpp b/src/cpu/sparc/vm/vtableStubs_sparc.cpp
+--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp
++++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp
+@@ -70,7 +70,6 @@
+   __ load_klass(O0, G3_scratch);
+ 
+   // set methodOop (in case of interpreted method), and destination address
+-  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
+ #ifndef PRODUCT
+   if (DebugVtables) {
+     Label L;
+@@ -82,13 +81,8 @@
+     __ bind(L);
+   }
+ #endif
+-  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
+-  if (Assembler::is_simm13(v_off)) {
+-    __ ld_ptr(G3, v_off, G5_method);
+-  } else {
+-    __ set(v_off,G5);
+-    __ ld_ptr(G3, G5, G5_method);
+-  }
++
++  __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);
+ 
+ #ifndef PRODUCT
+   if (DebugVtables) {
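
With this hunk the vtable stub becomes the third SPARC call site, after assembler_sparc.cpp and templateTable_sparc.cpp above, to share the new MacroAssembler::lookup_virtual_method helper instead of open-coding the scaled load. Typical use, as in the stub here:

    __ load_klass(O0, G3_scratch);                                  // receiver klass
    __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);  // -> methodOop
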
 diff --git a/src/cpu/x86/vm/assembler_x86.cpp b/src/cpu/x86/vm/assembler_x86.cpp
 --- a/src/cpu/x86/vm/assembler_x86.cpp
 +++ b/src/cpu/x86/vm/assembler_x86.cpp
@@ -3960,25 +4183,22 @@
    // Test sub_klass against super_klass, with fast and slow paths.
  
    // The fast path produces a tri-state answer: yes / no / maybe-slow.
-@@ -2149,12 +2155,16 @@
+@@ -2147,15 +2153,8 @@
+                            Label& L_success);
+ 
    // method handles (JSR 292)
-   void check_method_handle_type(Register mtype_reg, Register mh_reg,
-                                 Register temp_reg,
+-  void check_method_handle_type(Register mtype_reg, Register mh_reg,
+-                                Register temp_reg,
 -                                Label& wrong_method_type);
-+                                Label& wrong_method_type,
-+                                bool check_layout_only = false);
-   void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
-                                   Register temp_reg);
-   void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
+-  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
+-                                  Register temp_reg);
+-  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
    Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
--
-+  // Insert 'count' argument at 'cursor', which is presumed to be just above RSP.
-+  // Update (decrement) 'cursor' so it points at the empty space.
-+  void insert_arguments_at(Register cursor, int count,
-+                           Register temp1, Register temp2);
- 
+ 
+-
    //----
    void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
+ 
 diff --git a/src/cpu/x86/vm/frame_x86.cpp b/src/cpu/x86/vm/frame_x86.cpp
 --- a/src/cpu/x86/vm/frame_x86.cpp
 +++ b/src/cpu/x86/vm/frame_x86.cpp
@@ -4637,7 +4857,7 @@
    __ load_klass(temp, obj);
    __ cmpptr(temp, ExternalAddress((address) klass_addr));
    __ jcc(Assembler::equal, L_ok);
-@@ -541,17 +87,50 @@
+@@ -541,17 +87,40 @@
    __ movptr(temp, Address(temp, super_check_offset));
    __ cmpptr(temp, ExternalAddress((address) klass_addr));
    __ jcc(Assembler::equal, L_ok);
@@ -4657,20 +4877,8 @@
 +  __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
 +  __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
 +  __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
-+  if (ref_kind < (1<<BitsPerByte)) {
-+    __ cmpl(temp, ref_kind);
-+    __ jcc(Assembler::equal, L);
-+  } else {
-+    // hack for >=2 ref_kinds
-+    int ref_kinds = (ref_kind >> BitsPerByte);
-+    for (int rk2 = 0; ref_kinds != 0; rk2++) {
-+      if ((ref_kinds & nth_bit(rk2)) != 0) {
-+        __ cmpl(temp, rk2);
-+        __ jcc(Assembler::equal, L);
-+        ref_kinds -= nth_bit(rk2);
-+      }
-+    }
-+  }
++  __ cmpl(temp, ref_kind);
++  __ jcc(Assembler::equal, L);
 +  { char* buf = NEW_C_HEAP_ARRAY(char, 100);
 +    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
 +    if (ref_kind == JVM_REF_invokeVirtual ||
@@ -4688,16 +4896,22 @@
 -  if (JvmtiExport::can_post_interpreter_events()) {
 +void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
 +                                            bool for_compiler_entry) {
-+  assert(method == rbx, "method must be in rbx for correct interpreter linkage");
-+  if (JvmtiExport::can_post_interpreter_events() && !for_compiler_entry) {
++  assert(method == rbx, "interpreter calling convention");
++  __ verify_oop(method);
++
++  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
      Label run_compiled_code;
      // JVMTI events, such as single-stepping, are implemented partly by avoiding running
      // compiled code in threads for which the event is enabled.  Check here for
-@@ -569,458 +148,368 @@
+@@ -567,462 +136,364 @@
+     __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
+     __ jccb(Assembler::zero, run_compiled_code);
      __ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
-     __ bind(run_compiled_code);
+-    __ bind(run_compiled_code);
++    __ BIND(run_compiled_code);
    }
 -  __ jmp(Address(method, methodOopDesc::from_interpreted_offset()));
++
 +  const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() :
 +                                                     methodOopDesc::from_interpreted_offset();
 +  __ jmp(Address(method, entry_offset));
@@ -4705,7 +4919,8 @@
  
 +void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
 +                                        Register recv, Register method_temp,
-+                                        Register temp2, bool for_compiler_entry) {
++                                        Register temp2,
++                                        bool for_compiler_entry) {
 +  BLOCK_COMMENT("jump_to_lambda_form {");
 +  // This is the initial entry point of a lazy method handle.
 +  // After type checking, it picks up the invoker from the LambdaForm.
@@ -4733,8 +4948,8 @@
 +    __ cmpptr(recv, __ argument_address(temp2, -1));
 +    __ jcc(Assembler::equal, L);
 +    __ movptr(rax, __ argument_address(temp2, -1));
-+    __ STOP("receiver not on stack (bad value in rax)");
-+    __ bind(L);
++    __ STOP("receiver not on stack");
++    __ BIND(L);
 +  }
 +
 +  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
@@ -4757,13 +4972,14 @@
 +    return NULL;
 +  }
 +
++  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
    // rbx: methodOop
 -  // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
-+  // rcx: receiver method handle (optional; must load from sp[rbx.size_of_parameters])
-+  // rax or rbx: extra argument to strip (optional, must load from sp[1])
-   // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
+-  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
 -  // rdx, rdi: garbage temp, blown away
-+  // rdx, rdi: garbage temps, blown away
++  // rdx: argument locator (parameter slot count, added to rsp)
++  // rcx: used as temp to hold mh or receiver
++  // rax, rdi: garbage temps, blown away
 +  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
 +  Register rax_temp   = rax;
 +  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
@@ -4832,7 +5048,7 @@
 +  }
  
 -  __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
-+  // rdx_argp is live!
++  // rdx_first_arg_addr is live!
  
 -  // Nobody uses the MH receiver slot after this.  Make sure.
 -  DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999));
@@ -4849,6 +5065,7 @@
 +        suffix = "/private";
 +    }
 +    jio_snprintf(qname, 100, "MethodHandle::interpreter_entry::%s%s", name, suffix);
++    // note: the stub looks for mh in rcx
 +    trace_method_handle(_masm, qname);
 +  }
  
@@ -4871,17 +5088,17 @@
 +      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
 +      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
 +    }
++    DEBUG_ONLY(rdx_argp = noreg);
 +    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
 +    __ pop(rax_temp);           // return address
 +    __ pop(rbx_member);         // extract last argument
 +    __ push(rax_temp);          // re-push return address
-+    // rbx: trailing MemberName
 +    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
++  }
  
 -  // Stub wants expected type in rax and the actual type in rcx
 -  __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry()));
-+  }
- 
+-
 -  // for invokeGeneric (only), apply argument and result conversions on the fly
 -  __ bind(invoke_generic_slow_path);
 -#ifdef ASSERT
@@ -4976,10 +5193,10 @@
 +  Register temp2 = rscratch2;
 +  Register temp3 = rax;
 +  if (for_compiler_entry) {
++    assert(receiver_reg == j_rarg0, "only valid assignment");
 +    assert_different_registers(temp1,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
 +    assert_different_registers(temp2,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
 +    assert_different_registers(temp3,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
-+    assert_different_registers(receiver_reg,          j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    }
 -
 -  // Now move the argslot down, to point to the opened-up space.
@@ -5131,10 +5348,10 @@
 +  Register temp2 = rdi;
 +  Register temp3 = rax;
 +  if (for_compiler_entry) {
++    assert(receiver_reg == rcx, "only valid assignment");
 +    assert_different_registers(temp1,        rcx, rdx);
 +    assert_different_registers(temp2,        rcx, rdx);
 +    assert_different_registers(temp3,        rcx, rdx);
-+    assert_different_registers(receiver_reg,      rdx);
    }
  #endif
 -  __ cmpptr(rbx_bottom, rax_top);
@@ -5263,15 +5480,20 @@
 +      // same as TemplateTable::invokevirtual,
 +      // minus the CP setup and profiling:
 +
++      if (VerifyMethodHandles) {
++        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
++      }
++
 +      // pick out the vtable index from the MemberName, and then we can discard it:
 +      Register temp2_index = temp2;
 +      __ movptr(temp2_index, member_vmindex);
-+      Label L_skip_vtable;
-+      __ cmpl(temp2_index, 0);
-+      __ jcc(Assembler::less, L_skip_vtable);
 +
 +      if (VerifyMethodHandles) {
-+        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
++        Label L_index_ok;
++        __ cmpl(temp2_index, 0);
++        __ jcc(Assembler::greaterEqual, L_index_ok);
++        __ STOP("no virtual index");
++        __ BIND(L_index_ok);
 +      }
 +
 +      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
@@ -5279,23 +5501,6 @@
 +
 +      // get target methodOop & entry point
 +      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
-+      __ verify_oop(rbx_method);
-+      jump_from_method_handle(_masm, rbx_method, temp3, for_compiler_entry);
-+
-+      __ BIND(L_skip_vtable);
-+      if (VerifyMethodHandles) {
-+        Label L;
-+        __ cmpl(temp2_index, (int) methodOopDesc::nonvirtual_vtable_index);
-+        __ jcc(Assembler::equal, L);
-+        __ STOP("invalid vtable index for MH.invokeVirtual");
-+        __ bind(L);
-+        // The MemberName.ref_kind should be invokeSpecial.
-+        // But allow some wiggle room.
-+        int ref_kinds = ((1 << JVM_REF_invokeVirtual) |
-+                         (1 << JVM_REF_invokeSpecial));
-+        verify_ref_kind(_masm, ref_kinds << BitsPerByte, member_reg, temp3);
-+      }
-+      __ load_heap_oop(rbx_method, member_vmtarget);
 +      method_is_live = true;
 +      break;
 +    }
@@ -5346,7 +5551,7 @@
 +    }
 +
 +    if (method_is_live) {
-+      // live at this point:  rbx_method, rsi/r13
++      // live at this point:  rbx_method, rsi/r13 (if interpreted)
 +
 +      // After figuring out which concrete method to call, jump into it.
 +      // Note that this works in the interpreter with no data motion.
@@ -5469,12 +5674,17 @@
                                intptr_t* entry_sp) {
    // called as a leaf from native code: do not block the JVM!
 -  bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have rcx_mh
-+  bool has_mh = (strstr(adaptername, "return/") == NULL &&  // return adapters don't have rcx_mh
++  bool has_mh = (strstr(adaptername, "/static") == NULL &&
 +                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
    const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
-   tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, mh, entry_sp);
- 
-@@ -1086,12 +575,18 @@
+-  tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, mh, entry_sp);
++  tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
++                adaptername, mh_reg_name,
++                mh, entry_sp);
+ 
+   if (Verbose) {
+     tty->print_cr("Registers:");
+@@ -1086,12 +557,18 @@
          values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
          values.describe(-1, dump_sp, "sp for #1");
        }
@@ -5495,7 +5705,7 @@
    }
  }
  
-@@ -1159,1363 +654,3 @@
+@@ -1159,1363 +636,3 @@
  }
  #endif //PRODUCT
  
@@ -7152,8 +7362,8 @@
 diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
 --- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
-@@ -867,6 +867,68 @@
-   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+@@ -1293,6 +1293,66 @@
+   __ bind(done);
  }
  
 +static void gen_special_dispatch(MacroAssembler* masm,
@@ -7171,7 +7381,7 @@
 +  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
 +  if (ref_kind != 0) {
 +    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
-+    member_reg = rbx;
++    member_reg = rbx;  // known to be free at this point
 +    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
 +  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
 +    has_receiver = true;
@@ -7186,7 +7396,6 @@
 +    VMReg r = regs[member_arg_pos].first();
 +    assert(r->is_valid(), "bad member arg");
 +    if (r->is_stack()) {
-+      member_reg = rbx;  // known to be free at this point
 +      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
 +    } else {
 +      // no data motion is needed
@@ -7203,9 +7412,9 @@
 +    if (r->is_stack()) {
 +      // Porting note:  This assumes that compiled calling conventions always
 +      // pass the receiver oop in a register.  If this is not true on some
-+      // platform, pick a temp and load the reciever from stack.
++      // platform, pick a temp and load the receiver from stack.
 +      assert(false, "receiver always in a register");
-+      receiver_reg = rax;  // known to be free at this point
++      receiver_reg = rcx;  // known to be free at this point
 +      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
 +    } else {
 +      // no data motion is needed
@@ -7217,11 +7426,10 @@
 +  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
 +                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 +}
-+
- int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
-                                          VMRegPair *regs,
-                                          int total_args_passed) {
-@@ -1323,14 +1385,37 @@
+ 
+ // ---------------------------------------------------------------------------
+ // Generate a native wrapper for a given method.  The method takes arguments
+@@ -1323,14 +1383,37 @@
  //    transition back to thread_in_Java
  //    return to caller
  //
@@ -7262,7 +7470,7 @@
    bool is_critical_native = true;
    address native_func = method->critical_native_function();
    if (native_func == NULL) {
-@@ -1436,7 +1521,7 @@
+@@ -1436,7 +1519,7 @@
        if (in_regs[i].first()->is_Register()) {
          const Register reg = in_regs[i].first()->as_Register();
          switch (in_sig_bt[i]) {
@@ -7274,76 +7482,7 @@
 diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
-@@ -910,6 +910,68 @@
-   return stk_args;
- }
- 
-+static void gen_special_dispatch(MacroAssembler* masm,
-+                                 int total_args_passed,
-+                                 int comp_args_on_stack,
-+                                 vmIntrinsics::ID special_dispatch,
-+                                 const BasicType* sig_bt,
-+                                 const VMRegPair* regs) {
-+
-+  // Now write the args into the outgoing interpreter space
-+  bool     has_receiver   = false;
-+  Register receiver_reg   = noreg;
-+  int      member_arg_pos = -1;
-+  Register member_reg     = noreg;
-+  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
-+  if (ref_kind != 0) {
-+    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
-+    member_reg = rbx;
-+    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
-+  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
-+    has_receiver = true;
-+  } else {
-+    guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
-+  }
-+
-+  if (member_reg != noreg) {
-+    // Load the member_arg into register, if necessary.
-+    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
-+    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
-+    VMReg r = regs[member_arg_pos].first();
-+    assert(r->is_valid(), "bad member arg");
-+    if (r->is_stack()) {
-+      member_reg = rbx;  // known to be free at this point
-+      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
-+    } else {
-+      // no data motion is needed
-+      member_reg = r->as_Register();
-+    }
-+  }
-+
-+  if (has_receiver) {
-+    // Make sure the receiver is loaded into a register.
-+    assert(total_args_passed > 0, "oob");
-+    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
-+    VMReg r = regs[0].first();
-+    assert(r->is_valid(), "bad receiver arg");
-+    if (r->is_stack()) {
-+      // Porting note:  This assumes that compiled calling conventions always
-+      // pass the receiver oop in a register.  If this is not true on some
-+      // platform, pick a temp and load the reciever from stack.
-+      assert(false, "receiver always in a register");
-+      receiver_reg = rax;  // known to be free at this point
-+      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
-+    } else {
-+      // no data motion is needed
-+      receiver_reg = r->as_Register();
-+    }
-+  }
-+
-+  // Figure out which address we are really jumping to:
-+  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
-+                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
-+}
-+
- // On 64 bit we will store integer like items to the stack as
- // 64 bits items (sparc abi) even though java would only store
- // 32bits for a parameter. On 32bit it will simply be 32 bits
-@@ -1366,6 +1428,14 @@
+@@ -1366,6 +1366,14 @@
  }
  
  
@@ -7358,7 +7497,74 @@
  class ComputeMoveOrder: public StackObj {
    class MoveOperation: public ResourceObj {
      friend class ComputeMoveOrder;
-@@ -1539,14 +1609,60 @@
+@@ -1532,6 +1540,66 @@
+   }
+ };
+ 
++static void gen_special_dispatch(MacroAssembler* masm,
++                                 int total_args_passed,
++                                 int comp_args_on_stack,
++                                 vmIntrinsics::ID special_dispatch,
++                                 const BasicType* sig_bt,
++                                 const VMRegPair* regs) {
++
++  // Now write the args into the outgoing interpreter space
++  bool     has_receiver   = false;
++  Register receiver_reg   = noreg;
++  int      member_arg_pos = -1;
++  Register member_reg     = noreg;
++  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
++  if (ref_kind != 0) {
++    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
++    member_reg = rbx;  // known to be free at this point
++    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
++  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
++    has_receiver = true;
++  } else {
++    guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
++  }
++
++  if (member_reg != noreg) {
++    // Load the member_arg into register, if necessary.
++    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
++    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
++    VMReg r = regs[member_arg_pos].first();
++    assert(r->is_valid(), "bad member arg");
++    if (r->is_stack()) {
++      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
++    } else {
++      // no data motion is needed
++      member_reg = r->as_Register();
++    }
++  }
++
++  if (has_receiver) {
++    // Make sure the receiver is loaded into a register.
++    assert(total_args_passed > 0, "oob");
++    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
++    VMReg r = regs[0].first();
++    assert(r->is_valid(), "bad receiver arg");
++    if (r->is_stack()) {
++      // Porting note:  This assumes that compiled calling conventions always
++      // pass the receiver oop in a register.  If this is not true on some
++      // platform, pick a temp and load the receiver from stack.
++      assert(false, "receiver always in a register");
++      receiver_reg = rcx;  // known to be free at this point
++      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
++    } else {
++      // no data motion is needed
++      receiver_reg = r->as_Register();
++    }
++  }
++
++  // Figure out which address we are really jumping to:
++  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
++                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
++}
+ 
+ // ---------------------------------------------------------------------------
+ // Generate a native wrapper for a given method.  The method takes arguments
+@@ -1539,14 +1607,60 @@
  // convention (handlizes oops, etc), transitions to native, makes the call,
  // returns to java state (possibly blocking), unhandlizes any result and
  // returns.
@@ -7422,7 +7628,7 @@
    bool is_critical_native = true;
    address native_func = method->critical_native_function();
    if (native_func == NULL) {
-@@ -1658,7 +1774,7 @@
+@@ -1658,7 +1772,7 @@
            case T_SHORT:
            case T_CHAR:
            case T_INT:  single_slots++; break;
@@ -7699,7 +7905,8 @@
 +                                   Register flags    // if caller wants to test it
 +                                   ) {
    // determine flags
-   Bytecodes::Code code = bytecode();
+-  Bytecodes::Code code = bytecode();
++  const Bytecodes::Code code = bytecode();
    const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
    const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
 +  const bool is_invokehandle     = code == Bytecodes::_invokehandle;
@@ -8228,7 +8435,8 @@
 +                                   Register flags    // if caller wants to test it
 +                                   ) {
    // determine flags
-   Bytecodes::Code code = bytecode();
+-  Bytecodes::Code code = bytecode();
++  const Bytecodes::Code code = bytecode();
    const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
    const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
 +  const bool is_invokehandle     = code == Bytecodes::_invokehandle;
@@ -9030,7 +9238,7 @@
    if (cpool->has_preresolution()
        || (holder == ciEnv::MethodHandle_klass() &&
 -          methodOopDesc::is_method_handle_invoke_name(name_sym))) {
-+          MethodHandles::is_signature_polymorphic(holder->get_klassOop(), name_sym))) {
++          MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) {
      // Short-circuit lookups for JSR 292-related call sites.
      // That is, do not rely only on name-based lookups, because they may fail
      // if the names are not resolvable in the boot class loader (7056328).
@@ -11558,7 +11766,8 @@
 +         "linkMethod must return one of these");
    int vtable_index = methodOopDesc::nonvirtual_vtable_index;
    assert(resolved_method->vtable_index() == vtable_index, "");
-   set_common(resolved_klass, KlassHandle(), resolved_method, resolved_method, vtable_index, CHECK);
+-  set_common(resolved_klass, KlassHandle(), resolved_method, resolved_method, vtable_index, CHECK);
++  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
 +  _resolved_appendix = resolved_appendix;
  }
  
@@ -11735,7 +11944,7 @@
 -          methodOopDesc::is_method_handle_invoke_name(method_name))) {
 -    methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index);
 -    if (result_oop != NULL) {
-+          MethodHandles::is_signature_polymorphic(resolved_klass(), method_name))) {
++          MethodHandles::is_signature_polymorphic_name(resolved_klass(), method_name))) {
 +    oop appendix = NULL;
 +    methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index, &appendix);
 +    if (result_oop != NULL && appendix == NULL) {
@@ -12071,8 +12280,8 @@
 +      assert(status >= -1 && status <= 1, "oob tri-state");
 +      if (status == 0) {
 +        if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() &&
-+            SystemDictionary::MethodHandle_klass() != NULL &&
-+            MethodHandles::is_signature_polymorphic(SystemDictionary::MethodHandle_klass(), _pool->name_ref_at(cp_index)))
++            MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(),
++                                                         _pool->name_ref_at(cp_index)))
 +          status = +1;
 +        else
 +          status = -1;
@@ -13435,7 +13644,7 @@
    vmSymbols::SID   sig_id = vmSymbols::find_sid(signature());
    if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
        && sig_id == vmSymbols::NO_SID)  return;
-@@ -1167,21 +1103,14 @@
+@@ -1167,21 +1103,10 @@
  
    // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*.
    case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle):
@@ -13444,12 +13653,7 @@
 -    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
 -      if (!AllowInvokeGeneric)  break;
 -    case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):
-+    if (!is_native())  break;
-+    id = MethodHandles::signature_polymorphic_name_id(name());
-+    if (id == vmIntrinsics::_none &&
-+        name()->starts_with("invoke") &&
-+        MethodHandles::is_signature_polymorphic(method_holder(), name()))
-       id = vmIntrinsics::_invokeGeneric;
+-      id = vmIntrinsics::_invokeGeneric;
 -      break;
 -    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
 -      id = vmIntrinsics::_invokeExact;
@@ -13459,11 +13663,25 @@
 -  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InvokeDynamic):
 -    if (!is_static() || !is_native())  break;
 -    id = vmIntrinsics::_invokeDynamic;
++    if (!is_native())  break;
++    id = MethodHandles::signature_polymorphic_name_id(method_holder(), name());
 +    if (is_static() != MethodHandles::is_signature_polymorphic_static(id))
 +      id = vmIntrinsics::_none;
      break;
    }
  
+@@ -1194,6 +1119,11 @@
+ 
+ // These two methods are static since a GC may move the methodOopDesc
+ bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
++  if (THREAD->is_Compiler_thread())
++    // There is nothing useful this routine can do from within the Compile thread.
++    // Hopefully, the signature contains only well-known classes.
++    // We could scan for this and return true/false, but the caller won't care.
++    return false;
+   bool sig_is_loaded = true;
+   Handle class_loader(THREAD, instanceKlass::cast(m->method_holder())->class_loader());
+   Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain());
 diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp
 --- a/src/share/vm/oops/methodOop.hpp
 +++ b/src/share/vm/oops/methodOop.hpp
@@ -16829,7 +17047,7 @@
  };
  
  Handle MethodHandles::new_MemberName(TRAPS) {
-@@ -485,72 +123,217 @@
+@@ -485,72 +123,265 @@
    return Handle(THREAD, k->allocate_instance(THREAD));
  }
  
@@ -16949,6 +17167,31 @@
 +  return mname_oop;
 +}
 +
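++// Capture a freshly resolved CallInfo into a MemberName.  A resolution that
++// produced an appendix argument cannot be represented here and is rejected.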
++Handle MethodHandles::init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS) {
++  Handle empty;
++  if (info.resolved_appendix().not_null()) {
++    // The resolved MemberName must not be accompanied by an appendix argument,
++    // since there is no way to bind this value into the MemberName.
++    // Caller is responsible to prevent this from happening.
++    THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
++  }
++  methodHandle m = info.resolved_method();
++  KlassHandle defc = info.resolved_klass();
++  int vmindex = -1;
++  if (defc->is_interface() && Klass::cast(m->method_holder())->is_interface()) {
++    // LinkResolver does not report itable indexes!  (fix this?)
++    vmindex = klassItable::compute_itable_index(m());
++  } else if (m->can_be_statically_bound()) {
++    // LinkResolver reports vtable index even for final methods!
++    vmindex = methodOopDesc::nonvirtual_vtable_index;
++  } else {
++    vmindex = info.vtable_index();
++  }
++  oop res = init_method_MemberName(mname_oop, m(), (vmindex >= 0), defc());
++  assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), "");
++  return Handle(THREAD, res);
++}
++
 +oop MethodHandles::init_field_MemberName(oop mname_oop, klassOop field_holder,
 +                                         AccessFlags mods, oop type, oop name,
 +                                         intptr_t offset, bool is_setter) {
@@ -16987,13 +17230,40 @@
 -  java_lang_invoke_MemberName::set_vmindex(mname_oop,  vmindex);
 -  java_lang_invoke_MemberName::set_flags(mname_oop,    flags);
 -  java_lang_invoke_MemberName::set_clazz(mname_oop,    Klass::cast(field_holder)->java_mirror());
++Handle MethodHandles::init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS) {
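++  // Not yet implemented: answer a null Handle for now.
++  // The disabled block below sketches the intended conversion.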
++  return Handle();
++#if 0
++  KlassHandle field_holder = info.klass();
++  intptr_t    field_offset = info.field_offset();
++  return init_field_MemberName(mname_oop, field_holder(),
++                               info.access_flags(),
++                               type, name,
++                               field_offset, false /*is_setter*/);
++#endif
+ }
+ 
+ 
+-methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) {
+-  methodHandle empty;
+-  int flags  = java_lang_invoke_MemberName::flags(mname);
+-  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return empty;  // not invocable
+-  oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname);
+-  int vmindex  = java_lang_invoke_MemberName::vmindex(mname);
+-  if (vmindex == VM_INDEX_UNINITIALIZED)  return empty;  // not resolved
+-  methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
+-  oop clazz = java_lang_invoke_MemberName::clazz(mname);
+-  if (clazz != NULL && java_lang_Class::is_instance(clazz)) {
+-    klassOop klass = java_lang_Class::as_klassOop(clazz);
+-    if (klass != NULL)  receiver_limit_result = klass;
+-  }
+-  return m;
 +// JVM 2.9 Special Methods:
 +// A method is signature polymorphic if and only if all of the following conditions hold :
 +// * It is declared in the java.lang.invoke.MethodHandle class.
 +// * It has a single formal parameter of type Object[].
 +// * It has a return type of Object.
 +// * It has the ACC_VARARGS and ACC_NATIVE flags set.
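++// For example, MethodHandle.invokeExact meets all four conditions; the JDK declares it as:
++//   public final native @PolymorphicSignature Object invokeExact(Object... args) throws Throwable;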
-+bool MethodHandles::is_signature_polymorphic(klassOop klass, Symbol* name) {
++bool MethodHandles::is_method_handle_invoke_name(klassOop klass, Symbol* name) {
 +  if (klass == NULL)
 +    return false;
 +  // The following test will fail spuriously during bootstrap of MethodHandle itself:
@@ -17009,19 +17279,7 @@
 +  return (flags & required) == required;
  }
  
- 
--methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) {
--  methodHandle empty;
--  int flags  = java_lang_invoke_MemberName::flags(mname);
--  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return empty;  // not invocable
--  oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname);
--  int vmindex  = java_lang_invoke_MemberName::vmindex(mname);
--  if (vmindex == VM_INDEX_UNINITIALIZED)  return empty;  // not resolved
--  methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
--  oop clazz = java_lang_invoke_MemberName::clazz(mname);
--  if (clazz != NULL && java_lang_Class::is_instance(clazz)) {
--    klassOop klass = java_lang_Class::as_klassOop(clazz);
--    if (klass != NULL)  receiver_limit_result = klass;
++
 +Symbol* MethodHandles::signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid) {
 +  assert(is_signature_polymorphic_intrinsic(iid), err_msg("iid=%d", iid));
 +  switch (iid) {
@@ -17030,8 +17288,7 @@
 +  case vmIntrinsics::_linkToStatic:     return vmSymbols::linkToStatic_name();
 +  case vmIntrinsics::_linkToSpecial:    return vmSymbols::linkToSpecial_name();
 +  case vmIntrinsics::_linkToInterface:  return vmSymbols::linkToInterface_name();
-   }
--  return m;
++  }
 +  assert(false, "");
 +  return 0;
 +}
@@ -17062,21 +17319,31 @@
 +  case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToSpecial_name):    return vmIntrinsics::_linkToSpecial;
 +  case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToInterface_name):  return vmIntrinsics::_linkToInterface;
 +  }
-+  if (name->starts_with("invoke")) {
-+    // FIXME: merge with the copy in methodOop.cpp
-+    klassOop mh_klass = SystemDictionary::well_known_klass(
++
++  // Cover the case of invokeExact and any future variants of invokeFoo.
++  klassOop mh_klass = SystemDictionary::well_known_klass(
 +                              SystemDictionary::WK_KLASS_ENUM_NAME(MethodHandle_klass) );
-+    if (is_signature_polymorphic(mh_klass, name)) {
-+      // Cover the case of invokeExact and any future variants of invokeFoo.
-+      return vmIntrinsics::_invokeGeneric;
-+    }
-+  }
++  if (mh_klass != NULL && is_method_handle_invoke_name(mh_klass, name))
++    return vmIntrinsics::_invokeGeneric;
 +
 +  // Note: The pseudo-intrinsic _compiledLambdaForm is never linked against.
 +  // Instead it is used to mark lambda forms bound to invokehandle or invokedynamic.
 +  return vmIntrinsics::_none;
- }
- 
++}
++
++vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(klassOop klass, Symbol* name) {
++  if (klass != NULL &&
++      Klass::cast(klass)->name() == vmSymbols::java_lang_invoke_MethodHandle()) {
++    vmIntrinsics::ID iid = signature_polymorphic_name_id(name);
++    if (iid != vmIntrinsics::_none)
++      return iid;
++    if (is_method_handle_invoke_name(klass, name))
++      return vmIntrinsics::_invokeGeneric;
++  }
++  return vmIntrinsics::_none;
++}
++
++
  // convert the external string or reflective type to an internal signature
 -Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) {
 +Symbol* MethodHandles::lookup_signature(oop type_str, bool intern_if_not_found, TRAPS) {
@@ -17091,7 +17358,7 @@
        return java_lang_String::as_symbol(type_str, CHECK_NULL);
      } else {
        return java_lang_String::as_symbol_or_null(type_str);
-@@ -560,121 +342,289 @@
+@@ -560,121 +391,238 @@
    }
  }
  
@@ -17322,7 +17589,6 @@
    case IS_METHOD:
      {
        CallInfo result;
-+      DEBUG_ONLY(int resolved_ref_kind = 0);
 +      bool do_dispatch = true;  // default, neutral setting
        {
 -        EXCEPTION_MARK;
@@ -17330,27 +17596,22 @@
 +        assert(!HAS_PENDING_EXCEPTION, "");
 +        if (ref_kind == JVM_REF_invokeStatic) {
 +          //do_dispatch = false;  // no need, since statics are never dispatched
-+          DEBUG_ONLY(resolved_ref_kind = JVM_REF_invokeStatic);
            LinkResolver::resolve_static_call(result,
                          defc, name, type, KlassHandle(), false, false, THREAD);
 -        } else if (defc->is_interface()) {
 +        } else if (ref_kind == JVM_REF_invokeInterface) {
-+          DEBUG_ONLY(resolved_ref_kind = JVM_REF_invokeInterface);
            LinkResolver::resolve_interface_call(result, Handle(), defc,
                          defc, name, type, KlassHandle(), false, false, THREAD);
 -        } else {
 +        } else if (mh_invoke_id != vmIntrinsics::_none) {
-+          DEBUG_ONLY(resolved_ref_kind = JVM_REF_invokeVirtual);
 +          assert(!is_signature_polymorphic_static(mh_invoke_id), "");
 +          LinkResolver::resolve_handle_call(result,
 +                        defc, name, type, KlassHandle(), THREAD);
 +        } else if (ref_kind == JVM_REF_invokeSpecial) {
 +          do_dispatch = false;  // force non-virtual linkage
-+          DEBUG_ONLY(resolved_ref_kind = JVM_REF_invokeSpecial);
 +          LinkResolver::resolve_special_call(result,
 +                        defc, name, type, KlassHandle(), false, THREAD);
 +        } else if (ref_kind == JVM_REF_invokeVirtual) {
-+          DEBUG_ONLY(resolved_ref_kind = JVM_REF_invokeVirtual);
            LinkResolver::resolve_virtual_call(result, Handle(), defc,
                          defc, name, type, KlassHandle(), false, false, THREAD);
 +        } else {
@@ -17362,46 +17623,16 @@
 +          return empty;
          }
        }
-+      if (result.resolved_appendix().not_null()) {
-+        // The resolved MemberName must not be accompanied by an appendix argument,
-+        // since there is no way to bind this value into the MemberName.
-+        // Caller is responsible to prevent this from happening.
-+        THROW_MSG_(vmSymbols::java_lang_InternalError(), vmIntrinsics::name_at(mh_invoke_id), empty);
-+      }
-       methodHandle m = result.resolved_method();
+-      methodHandle m = result.resolved_method();
 -      oop vmtarget = NULL;
-+      mname = Handle(THREAD, init_method_MemberName(mname(), m(), do_dispatch, defc()));
-+#ifdef ASSERT_QQ_disabled //@@
-+      // Make sure the internal logic of init_method_MemberName matches what the LinkResolver did.
-+      assert(defc->is_subtype_of(m->method_holder()),
-+             "interface holder clazz consistent with method holder");
-       int vmindex = methodOopDesc::nonvirtual_vtable_index;
+-      int vmindex = methodOopDesc::nonvirtual_vtable_index;
 -      if (defc->is_interface()) {
-+      if (Klass::cast(m->method_holder())->is_interface()) {
-+        /*
-+//@@
-+  ResourceMark rm(THREAD);
-+  klassVtable *vt = instanceKlass::cast(klass())->vtable();
-+  return vt->index_of_miranda(name, signature);
-+        */
-         vmindex = klassItable::compute_itable_index(m());
-         assert(vmindex >= 0, "");
+-        vmindex = klassItable::compute_itable_index(m());
+-        assert(vmindex >= 0, "");
 -      } else if (result.has_vtable_index()) {
-+        assert(resolved_ref_kind == JVM_REF_invokeInterface, "");//@@
-+        assert(defc() == m->method_holder(), "interface holder clazz is exact");//@@
-+      } else if (result.has_vtable_index() && !m->can_be_statically_bound()) {
-         vmindex = result.vtable_index();
-         assert(vmindex >= 0, "");
-+        if (resolved_ref_kind == JVM_REF_invokeInterface)
-+          resolved_ref_kind = JVM_REF_invokeVirtual;  // Corner case.
-+        assert(resolved_ref_kind == JVM_REF_invokeVirtual, "");
-+      } else {
-+        // Come here if either (1) m can be statically bound,
-+        // or (2) m has a vtable index but do_dispatch was false.
-+        if (resolved_ref_kind == JVM_REF_invokeInterface || resolved_ref_kind == JVM_REF_invokeVirtual)
-+          resolved_ref_kind = JVM_REF_invokeSpecial;  // Corner cases.
-+        assert(resolved_ref_kind == JVM_REF_invokeSpecial || resolved_ref_kind == JVM_REF_invokeStatic, "");
-       }
+-        vmindex = result.vtable_index();
+-        assert(vmindex >= 0, "");
+-      }
 -      assert(vmindex != VM_INDEX_UNINITIALIZED, "");
 -      if (vmindex < 0) {
 -        assert(result.is_statically_bound(), "");
@@ -17417,15 +17648,7 @@
 -      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
 -             "properly stored for later decoding");
 -      return;
-+      int resolved_flags = (IS_METHOD | (resolved_ref_kind << REFERENCE_KIND_SHIFT)
-+                            | (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS));
-+      int actual_flags   = java_lang_invoke_MemberName::flags(mname());
-+      int actual_vmindex = java_lang_invoke_MemberName::vmindex(mname());
-+      if (resolved_flags != actual_flags || vmindex != actual_vmindex)  m->print();
-+      assert(resolved_flags == actual_flags, err_msg("resolved=%x actual=%x", resolved_flags, actual_flags));//@@
-+      assert(vmindex == actual_vmindex, err_msg("vmindex=%x actual=%x", vmindex, actual_vmindex));
-+#endif //ASSERT
-+      return mname;
++      return init_method_MemberName(mname(), result, THREAD);
      }
    case IS_CONSTRUCTOR:
      {
@@ -17436,7 +17659,7 @@
          if (name == vmSymbols::object_initializer_name()) {
            LinkResolver::resolve_special_call(result,
                          defc, name, type, KlassHandle(), false, THREAD);
-@@ -682,22 +632,24 @@
+@@ -682,22 +630,11 @@
            break;                // will throw after end of switch
          }
          if (HAS_PENDING_EXCEPTION) {
@@ -17446,12 +17669,9 @@
          }
        }
        assert(result.is_statically_bound(), "");
-       methodHandle m = result.resolved_method();
+-      methodHandle m = result.resolved_method();
 -      oop vmtarget = m();
-+      mname = Handle(THREAD, init_method_MemberName(mname(), m(), false, NULL));
-+#ifdef ASSERT
-+      assert(defc() == m->method_holder(), "constructor holder clazz is exact");
-       int vmindex  = methodOopDesc::nonvirtual_vtable_index;
+-      int vmindex  = methodOopDesc::nonvirtual_vtable_index;
 -      int mods     = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS);
 -      java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget);
 -      java_lang_invoke_MemberName::set_vmindex(mname(),  vmindex);
@@ -17460,19 +17680,11 @@
 -      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
 -             "properly stored for later decoding");
 -      return;
-+      int resolved_flags = (IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT)
-+                            | (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS));
-+      int actual_flags   = java_lang_invoke_MemberName::flags(mname());
-+      int actual_vmindex = java_lang_invoke_MemberName::vmindex(mname());
-+      if (resolved_flags != actual_flags || vmindex != actual_vmindex)  m->print();
-+      assert(resolved_flags == actual_flags, err_msg("resolved=%x actual=%x", resolved_flags, actual_flags));
-+      assert(vmindex == actual_vmindex, err_msg("vmindex=%x actual=%x", vmindex, actual_vmindex));
-+#endif //ASSERT
-+      return mname;
++      return init_method_MemberName(mname(), result, THREAD);
      }
    case IS_FIELD:
      {
-@@ -705,54 +657,20 @@
+@@ -705,54 +642,20 @@
        fieldDescriptor fd; // find_field initializes fd if found
        KlassHandle sel_klass(THREAD, instanceKlass::cast(defc())->find_field(name, type, &fd));
        // check if field exists; i.e., if a klass containing the field def has been selected
@@ -17537,7 +17749,7 @@
  }
  
  // Conversely, a member name which is only initialized from JVM internals
-@@ -763,7 +681,7 @@
+@@ -763,7 +666,7 @@
    assert(java_lang_invoke_MemberName::is_instance(mname()), "");
    oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname());
    int vmindex  = java_lang_invoke_MemberName::vmindex(mname());
@@ -17546,7 +17758,7 @@
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to expand");
    }
  
-@@ -784,14 +702,12 @@
+@@ -784,14 +687,12 @@
    case IS_METHOD:
    case IS_CONSTRUCTOR:
      {
@@ -17564,7 +17776,7 @@
          java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror());
        }
        if (!have_name) {
-@@ -808,9 +724,10 @@
+@@ -808,9 +709,10 @@
    case IS_FIELD:
      {
        // This is taken from LinkResolver::resolve_field, sans access checks.
@@ -17576,7 +17788,7 @@
        bool is_static = ((flags & JVM_ACC_STATIC) != 0);
        fieldDescriptor fd; // find_field initializes fd if found
        if (!defc->find_field_from_offset(vmindex, is_static, &fd))
-@@ -824,7 +741,11 @@
+@@ -824,7 +726,11 @@
          java_lang_invoke_MemberName::set_name(mname(), name());
        }
        if (!have_type) {
@@ -17589,7 +17801,7 @@
          java_lang_invoke_MemberName::set_type(mname(), type());
        }
        return;
-@@ -882,7 +803,13 @@
+@@ -882,7 +788,13 @@
          oop result = results->obj_at(rfill++);
          if (!java_lang_invoke_MemberName::is_instance(result))
            return -99;  // caller bug!
@@ -17604,7 +17816,7 @@
        } else if (++overflow >= overflow_limit) {
          match_flags = 0; break; // got tired of looking at overflow
        }
-@@ -930,7 +857,9 @@
+@@ -930,7 +842,9 @@
          oop result = results->obj_at(rfill++);
          if (!java_lang_invoke_MemberName::is_instance(result))
            return -99;  // caller bug!
@@ -17615,7 +17827,7 @@
        } else if (++overflow >= overflow_limit) {
          match_flags = 0; break; // got tired of looking at overflow
        }
-@@ -941,1925 +870,16 @@
+@@ -941,1925 +855,16 @@
    return rfill + overflow;
  }
  
@@ -19542,7 +19754,7 @@
    case MethodHandles::GC_COUNT_GWT:
  #ifdef COMPILER2
      return true;
-@@ -2872,64 +892,54 @@
+@@ -2872,64 +877,54 @@
  JVM_END
  
  #ifndef PRODUCT
@@ -19636,28 +19848,53 @@
      int con = con_values[which];
      objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh));
      if (box.not_null() && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
-@@ -2984,10 +994,64 @@
+@@ -2965,13 +960,14 @@
+ JVM_END
+ 
+ // void resolve(MemberName self, Class<?> caller)
+-JVM_ENTRY(void, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
+-  if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); }
++JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
++  if (mname_jh == NULL) { THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "mname is null"); }
+   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
+ 
+   // The trusted Java code that calls this method should already have performed
+   // access checks on behalf of the given caller.  But, we can verify this.
+-  if (VerifyMethodHandles && caller_jh != NULL) {
++  if (VerifyMethodHandles && caller_jh != NULL &&
++      java_lang_invoke_MemberName::clazz(mname()) != NULL) {
+     klassOop reference_klass = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(mname()));
+     if (reference_klass != NULL) {
+       // Emulate LinkResolver::check_klass_accessability.
+@@ -2979,15 +975,97 @@
+       if (!Reflection::verify_class_access(caller,
+                                            reference_klass,
+                                            true)) {
+-        THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name());
++        THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name());
+       }
      }
    }
  
 -  MethodHandles::resolve_MemberName(mname, CHECK);
-+  Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK);
++  Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK_NULL);
 +  if (resolved.is_null()) {
 +    int flags = java_lang_invoke_MemberName::flags(mname());
 +    int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
 +    if (!MethodHandles::ref_kind_is_valid(ref_kind)) {
-+      THROW_MSG(vmSymbols::java_lang_InternalError(), "obsolete MemberName format");
++      THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "obsolete MemberName format");
 +    }
 +    if ((flags & ALL_KINDS) == IS_FIELD) {
-+      THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), "field resolution failed");
++      THROW_MSG_NULL(vmSymbols::java_lang_NoSuchFieldError(), "field resolution failed");
 +    } else if ((flags & ALL_KINDS) == IS_METHOD ||
 +               (flags & ALL_KINDS) == IS_CONSTRUCTOR) {
-+      THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), "method resolution failed");
++      THROW_MSG_NULL(vmSymbols::java_lang_NoSuchMethodError(), "method resolution failed");
 +    } else {
-+      THROW_MSG(vmSymbols::java_lang_LinkageError(), "resolution failed");
-+    }
-+  }
-+  // otherwise, ignore possibly updated value of resolved mname
++      THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "resolution failed");
++    }
++  }
++
++  return JNIHandles::make_local(THREAD, resolved());
  }
  JVM_END
  
@@ -19698,11 +19935,38 @@
 +}
 +JVM_END
 +
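++//  static native Object getMemberVMInfo(MemberName self);
++//  Returns the {vmindex, vmtarget} pair boxed into an Object[2].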
++JVM_ENTRY(jobject, MHN_getMemberVMInfo(JNIEnv *env, jobject igcls, jobject mname_jh)) {
++  if (mname_jh == NULL)  return NULL;
++  Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
++  intptr_t vmindex  = java_lang_invoke_MemberName::vmindex(mname());
++  Handle   vmtarget = java_lang_invoke_MemberName::vmtarget(mname());
++  objArrayHandle result = oopFactory::new_objArray(SystemDictionary::Object_klass(), 2, CHECK_NULL);
++  jvalue vmindex_value; vmindex_value.j = (long)vmindex;
++  oop x = java_lang_boxing_object::create(T_LONG, &vmindex_value, CHECK_NULL);
++  result->obj_at_put(0, x);
++  x = NULL;
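++  // vmtarget gets the same boxing treatment: instances pass through, klasses are
++  // exposed as their Java mirrors, and metadata is wrapped in a fresh MemberName.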
++  if (vmtarget.is_null() || vmtarget->is_instance()) {
++    x = vmtarget();
++  } else if (vmtarget->is_klass()) {
++    x = Klass::cast((klassOop) vmtarget())->java_mirror();
++  } else {
++    Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL);
++    if (vmtarget->is_method())
++      x = MethodHandles::init_method_MemberName(mname2(), methodOop(vmtarget()), false, NULL);
++    else
++      x = MethodHandles::init_MemberName(mname2(), vmtarget());
++  }
++  result->obj_at_put(1, x);
++  return JNIHandles::make_local(env, result());
++}
++JVM_END
++
++
 +
  //  static native int getMembers(Class<?> defc, String matchName, String matchSig,
  //          int matchFlags, Class<?> caller, int skip, MemberName[] results);
  JVM_ENTRY(jint, MHN_getMembers(JNIEnv *env, jobject igcls,
-@@ -3053,45 +1117,6 @@
+@@ -3053,45 +1131,6 @@
  }
  JVM_END
  
@@ -19748,7 +20012,7 @@
  JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) {
      TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL);
      THROW_MSG_NULL(UOE_name, "MethodHandle.invoke cannot be invoked reflectively");
-@@ -3121,39 +1146,29 @@
+@@ -3121,39 +1160,30 @@
  #define MT    JLINV"MethodType;"
  #define MH    JLINV"MethodHandle;"
  #define MEM   JLINV"MemberName;"
@@ -19769,8 +20033,9 @@
 +static JNINativeMethod required_methods_JDK8[] = {
    {CC"init",                      CC"("MEM""OBJ")V",                     FN_PTR(MHN_init_Mem)},
    {CC"expand",                    CC"("MEM")V",                          FN_PTR(MHN_expand_Mem)},
-   {CC"resolve",                   CC"("MEM""CLS")V",                     FN_PTR(MHN_resolve_Mem)},
+-  {CC"resolve",                   CC"("MEM""CLS")V",                     FN_PTR(MHN_resolve_Mem)},
 -  {CC"getTarget",                 CC"("MH"I)"OBJ,                        FN_PTR(MHN_getTarget)},
++  {CC"resolve",                   CC"("MEM""CLS")"MEM,                   FN_PTR(MHN_resolve_Mem)},
    {CC"getConstant",               CC"(I)I",                              FN_PTR(MHN_getConstant)},
    //  static native int getNamedCon(int which, Object[] name)
    {CC"getNamedCon",               CC"(I["OBJ")I",                        FN_PTR(MHN_getNamedCon)},
@@ -19786,7 +20051,8 @@
 -  {CC"setCallSiteTargetVolatile", CC"("CS""MH")V",                       FN_PTR(MHN_setCallSiteTargetVolatile)}
 +  {CC"setCallSiteTargetVolatile", CC"("CS""MH")V",                       FN_PTR(MHN_setCallSiteTargetVolatile)},
 +  {CC"staticFieldOffset",         CC"("MEM")J",                          FN_PTR(MHN_staticFieldOffset)},
-+  {CC"staticFieldBase"  ,         CC"("MEM")"OBJ,                        FN_PTR(MHN_staticFieldBase)}
++  {CC"staticFieldBase",           CC"("MEM")"OBJ,                        FN_PTR(MHN_staticFieldBase)},
++  {CC"getMemberVMInfo",           CC"("MEM")"OBJ,                        FN_PTR(MHN_getMemberVMInfo)}
  };
  
  static JNINativeMethod invoke_methods[] = {
@@ -19794,7 +20060,7 @@
    {CC"invoke",                    CC"(["OBJ")"OBJ,                       FN_PTR(MH_invoke_UOE)},
    {CC"invokeExact",               CC"(["OBJ")"OBJ,                       FN_PTR(MH_invokeExact_UOE)}
  };
-@@ -3161,8 +1176,6 @@
+@@ -3161,8 +1191,6 @@
  // This one function is exported, used by NativeLookup.
  
  JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) {
@@ -19803,7 +20069,7 @@
    if (!EnableInvokeDynamic) {
      warning("JSR 292 is disabled in this JVM.  Use -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic to enable.");
      return;  // bind nothing
-@@ -3171,36 +1184,32 @@
+@@ -3171,36 +1199,32 @@
    assert(!MethodHandles::enabled(), "must not be enabled");
    bool enable_MH = true;
  
@@ -19860,7 +20126,7 @@
 diff --git a/src/share/vm/prims/methodHandles.hpp b/src/share/vm/prims/methodHandles.hpp
 --- a/src/share/vm/prims/methodHandles.hpp
 +++ b/src/share/vm/prims/methodHandles.hpp
-@@ -33,523 +33,34 @@
+@@ -33,523 +33,36 @@
  
  class MacroAssembler;
  class Label;
@@ -20388,11 +20654,13 @@
 +  static oop init_field_MemberName(oop mname_oop, klassOop field_holder,
 +                                   AccessFlags mods, oop type, oop name,
 +                                   intptr_t offset, bool is_setter = false);
++  static Handle init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS);
++  static Handle init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS);
 +  static int method_ref_kind(methodOop m, bool do_dispatch_if_possible = true);
    static int find_MemberNames(klassOop k, Symbol* name, Symbol* sig,
                                int mflags, klassOop caller,
                                int skip, objArrayOop results);
-@@ -559,169 +70,96 @@
+@@ -559,169 +72,101 @@
    // Generate MethodHandles adapters.
    static void generate_adapters();
  
@@ -20412,7 +20680,6 @@
 -  static int argument_slot_count(oop method_type) { return argument_slot(method_type, -1); }
 -  static int argument_slot_to_argnum(oop method_type, int argslot);
 +  // Queries
-+  static bool is_signature_polymorphic(klassOop klass, Symbol* name);
 +  static bool is_signature_polymorphic(vmIntrinsics::ID iid) {
 +    return (iid >= vmIntrinsics::FIRST_MH_SIG_POLY &&
 +            iid <= vmIntrinsics::LAST_MH_SIG_POLY);
@@ -20451,11 +20718,17 @@
 +  }
 +
 +  static Symbol* signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid);
++  static int signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid);
++
++  static vmIntrinsics::ID signature_polymorphic_name_id(klassOop klass, Symbol* name);
 +  static vmIntrinsics::ID signature_polymorphic_name_id(Symbol* name);
 +  static bool is_signature_polymorphic_name(Symbol* name) {
 +    return signature_polymorphic_name_id(name) != vmIntrinsics::_none;
 +  }
-+  static int signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid);
++  static bool is_method_handle_invoke_name(klassOop klass, Symbol* name);
++  static bool is_signature_polymorphic_name(klassOop klass, Symbol* name) {
++    return signature_polymorphic_name_id(klass, name) != vmIntrinsics::_none;
++  }
 +
    enum {
      // format of query to getConstant:
@@ -20642,7 +20915,7 @@
  
  #ifdef TARGET_ARCH_x86
  # include "methodHandles_x86.hpp"
-@@ -738,63 +176,11 @@
+@@ -738,63 +183,11 @@
  #ifdef TARGET_ARCH_ppc
  # include "methodHandles_ppc.hpp"
  #endif