changeset 459:649f4375edec

meth-lazy: compiler snapshot
author twisti
date Fri, 06 Jul 2012 17:06:21 -0700
parents b43840515862
children 4799b4508e19
files meth-lazy-7023639.jit.patch
diffstat 1 files changed, 1183 insertions(+), 398 deletions(-) [+]
line wrap: on
line diff
--- a/meth-lazy-7023639.jit.patch	Wed Jul 04 16:10:00 2012 -0700
+++ b/meth-lazy-7023639.jit.patch	Fri Jul 06 17:06:21 2012 -0700
@@ -373,31 +373,17 @@
        ShouldNotReachHere();
        break;
    }
-@@ -1604,31 +1644,55 @@
+@@ -1604,31 +1644,50 @@
  
  
  void GraphBuilder::invoke(Bytecodes::Code code) {
 +  const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
    bool will_link;
 -  ciMethod* target = stream()->get_method(will_link);
-+  ciObject* extra_arg = NULL;
-+  ciMethod*             target = stream()->get_method(will_link, &extra_arg);
++  ciMethod*             target = stream()->get_method(will_link);
 +  ciKlass*              holder = stream()->get_declared_method_holder();
 +  const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
 +
-+  // We require the debug info to be the "state before" because
-+  // invokedynamics may deoptimize.
-+  ValueStack* state_before = is_invokedynamic ? copy_state_before() : copy_state_exhandling();
-+
-+  // Push extra argument, if there is one.
-+  if (extra_arg) {
-+//    tty->print("callee: "); target->print(); tty->cr();
-+//    tty->print_cr("code: %s", Bytecodes::name(code));
-+//    tty->print("extra_arg: "); extra_arg->print(); tty->cr();
-+    Value arg = append(new Constant(new ObjectConstant(extra_arg)));
-+    apush(arg);
-+  }
-+
    // we have to make sure the argument size (incl. the receiver)
    // is correct for compilation (the call would fail later during
    // linkage anyway) - was bug (gri 7/28/99)
@@ -428,24 +414,39 @@
 +  // Some methods are obviously bindable without any type checks so
 +  // convert them directly to an invokespecial or invokestatic.
 +  if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
-+      if (bc_raw == Bytecodes::_invokevirtual) { code = Bytecodes::_invokespecial; } else
-+      if (bc_raw == Bytecodes::_invokehandle ) { code = Bytecodes::_invokestatic;  }
++    switch (bc_raw) {
++    case Bytecodes::_invokevirtual:  code = Bytecodes::_invokespecial;  break;
++    case Bytecodes::_invokehandle:   code = Bytecodes::_invokestatic;   break;
++    }
    }
  
 -  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
--
++  // Push appendix argument (MethodType, CallSite, etc.), if one.
++  if (stream()->has_appendix()) {
++    ciObject* appendix = stream()->get_appendix();
++    Value arg = append(new Constant(new ObjectConstant(appendix)));
++    apush(arg);
++  }
+ 
    // NEEDS_CLEANUP
 -  // I've added the target-is_loaded() test below but I don't really understand
 +  // I've added the target->is_loaded() test below but I don't really understand
    // how klass->is_loaded() can be true and yet target->is_loaded() is false.
    // this happened while running the JCK invokevirtual tests under doit.  TKR
    ciMethod* cha_monomorphic_target = NULL;
-@@ -1768,20 +1832,12 @@
-           target->is_compiled_lambda_form()) {
+@@ -1763,25 +1822,15 @@
+         code == Bytecodes::_invokedynamic) {
+       ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
+       bool success = false;
+-      if (// %%% FIXME: Are both of these relevant?
+-          target->is_method_handle_intrinsic() ||
+-          target->is_compiled_lambda_form()) {
++      if (target->is_method_handle_intrinsic()) {
          // method handle invokes
-         success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target);
+-        success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target);
 -      }
 -      if (!success) {
++        success = for_method_handle_inline(target);
 +      } else {
          // static binding => check if callee is ok
 -        success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), better_receiver);
@@ -463,7 +464,7 @@
        clear_inline_bailout();
        if (success) {
          // Register dependence if JVMTI has either breakpoint
-@@ -1792,8 +1848,13 @@
+@@ -1792,8 +1841,13 @@
          }
          return;
        }
@@ -477,18 +478,7 @@
    // If we attempted an inline which did not succeed because of a
    // bailout during construction of the callee graph, the entire
    // compilation has to be aborted. This is fairly rare and currently
-@@ -1813,10 +1874,6 @@
-     code == Bytecodes::_invokeinterface;
-   ValueType* result_type = as_ValueType(target->return_type());
- 
--  // We require the debug info to be the "state before" because
--  // invokedynamics may deoptimize.
--  ValueStack* state_before = is_invokedynamic ? copy_state_before() : copy_state_exhandling();
--
-   Values* args = state()->pop_arguments(target->arg_size_no_receiver());
-   Value recv = has_receiver ? apop() : NULL;
-   int vtable_index = methodOopDesc::invalid_vtable_index;
-@@ -1859,7 +1916,7 @@
+@@ -1859,7 +1913,7 @@
        } else if (exact_target != NULL) {
          target_klass = exact_target->holder();
        }
@@ -497,7 +487,7 @@
      }
    }
  
-@@ -3101,30 +3158,61 @@
+@@ -3101,30 +3155,61 @@
  }
  
  
@@ -580,7 +570,7 @@
  }
  
  
-@@ -3308,7 +3396,7 @@
+@@ -3308,7 +3393,7 @@
            recv = args->at(0);
            null_check(recv);
          }
@@ -589,7 +579,7 @@
        }
      }
    }
-@@ -3319,13 +3407,6 @@
+@@ -3319,13 +3404,6 @@
    Value value = append_split(result);
    if (result_type != voidType) push(result_type, value);
  
@@ -603,7 +593,7 @@
    // done
    return true;
  }
-@@ -3481,7 +3562,7 @@
+@@ -3481,7 +3559,7 @@
  }
  
  
@@ -612,7 +602,7 @@
    assert(!callee->is_native(), "callee must not be native");
    if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
      INLINE_BAILOUT("inlining prohibited by policy");
-@@ -3511,10 +3592,10 @@
+@@ -3511,10 +3589,10 @@
    // now perform tests that are based on flag settings
    if (callee->force_inline() || callee->should_inline()) {
      // ignore heuristic controls on inlining
@@ -626,7 +616,7 @@
      if (callee->code_size_for_inlining() > max_inline_size()    ) INLINE_BAILOUT("callee is too large");
  
      // don't inline throwable methods unless the inlining tree is rooted in a throwable class
-@@ -3533,28 +3614,25 @@
+@@ -3533,28 +3611,25 @@
      if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
        INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
      }
@@ -661,7 +651,7 @@
      // note: null check must happen even if first instruction of callee does
      //       an implicit null check since the callee is in a different scope
      //       and we must make sure exception handling does the right thing
-@@ -3570,7 +3648,7 @@
+@@ -3570,7 +3645,7 @@
      compilation()->set_would_profile(true);
  
      if (profile_calls()) {
@@ -670,7 +660,7 @@
      }
    }
  
-@@ -3612,17 +3690,10 @@
+@@ -3612,17 +3687,10 @@
    // note: this will also ensure that all arguments are computed before being passed
    ValueStack* callee_state = state();
    ValueStack* caller_state = state()->caller_state();
@@ -692,7 +682,7 @@
    }
  
    // Remove args from stack.
-@@ -3736,26 +3807,113 @@
+@@ -3736,26 +3804,88 @@
  
  
  bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
@@ -704,121 +694,103 @@
 -    ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle();
 -
 -    // TODO new implementation goes here
-+  const bool has_member_arg = callee->has_member_arg();
 +  ValueStack* state_before = state()->copy_for_parsing();
-+  Value value = has_member_arg ? apop() : state()->stack_at(0);
-+  ValueType* type = value->type();
-+//  tty->print_cr("for_method_handle_inline:");
-+//  tty->print("callee: "); callee->print(); tty->cr();
++  vmIntrinsics::ID iid = callee->intrinsic_id();
++  switch (iid) {
++  case vmIntrinsics::_invokeBasic:
++    {
++      // get MethodHandle receiver
++      const int args_base = state()->stack_size() - callee->arg_size();
++      ValueType* type = state()->stack_at(args_base)->type();
++      if (type->is_constant()) {
++        ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget();
++        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
++        Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
++        if (try_inline(target, /*holder_known*/ true, bc)) {
++          return true;
++        }
++      } else {
++        print_inlining(callee, "receiver not constant", /*success*/ false);
++      }
++    }
++    break;
 +
-+  if (type->is_constant()) {
-+    // Decide where to get the target from:
-+    ciObject* con = type->as_ObjectType()->constant_value();
-+    ciMethod* target = has_member_arg ?
-+        con->as_member_name()->get_vmtarget() :
-+        con->as_method_handle()->get_vmtarget();
-+    if (target != NULL) {
-+//      tty->print("target: "); target->print(); tty->cr();
-+//      tty->print_cr("force_inline: %d", target->force_inline());
-+//      target->print_codes();
-+
-+      if (target->is_compiled_lambda_form()) {
-+        // lambda form exact invokers are static and take the method handle
-+        // receiver as first argument and thus have the same argument size.
-+        assert(callee->arg_size() == target->arg_size(), "must be");
-+        return try_inline(target, /*holder_known*/ true, Bytecodes::_invokestatic);
-+      } else {
-+        assert(callee->is_method_handle_intrinsic(), "must be");
-+        assert(callee->arg_size() - 1 == target->arg_size(), err_msg("must be equal: callee=%d, target=%d", callee->arg_size() - 1, target->arg_size()));
-+
-+        // If the target is another method handle invoker try recursivly to get
++  case vmIntrinsics::_linkToVirtual:
++  case vmIntrinsics::_linkToStatic:
++  case vmIntrinsics::_linkToSpecial:
++  case vmIntrinsics::_linkToInterface:
++    {
++      // pop MemberName argument
++      const int args_base = state()->stack_size() - callee->arg_size();
++      ValueType* type = apop()->type();
++      if (type->is_constant()) {
++        ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
++        // If the target is another method handle invoke try recursivly to get
 +        // a better target.
 +        if (target->is_method_handle_intrinsic()) {
 +          if (for_method_handle_inline(target)) {
 +            return true;
 +          }
-+          set_state(state_before);
-+          return false;
-+        }
-+
-+        ciSignature* signature = target->signature();
-+        const int receiver_skip = target->is_static() ? 0 : 1;
-+        // Cast receiver to its type.
-+        if (!target->is_static()) {
-+          Value obj = state()->stack_at(0);
-+          ciKlass* tk = signature->accessing_klass();
-+          if (obj->exact_type() == NULL &&
-+              obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
-+            TypeCast* c = new TypeCast(tk, obj, state_before);
-+            append(c);
-+            state()->stack_at_put(0, c);
++        } else {
++          ciSignature* signature = target->signature();
++          const int receiver_skip = target->is_static() ? 0 : 1;
++          // Cast receiver to its type.
++          if (!target->is_static()) {
++            ciKlass* tk = signature->accessing_klass();
++            Value obj = state()->stack_at(args_base);
++            if (obj->exact_type() == NULL &&
++                obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
++              TypeCast* c = new TypeCast(tk, obj, state_before);
++              append(c);
++              state()->stack_at_put(args_base, c);
++            }
++          }
++          // Cast reference arguments to their types.
++          for (int i = 0, j = 0; i < signature->count(); i++) {
++            ciType* t = signature->type_at(i);
++            if (t->is_klass()) {
++              ciKlass* tk = t->as_klass();
++              Value obj = state()->stack_at(args_base + receiver_skip + j);
++              if (obj->exact_type() == NULL &&
++                  obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
++                TypeCast* c = new TypeCast(t, obj, state_before);
++                append(c);
++                state()->stack_at_put(args_base + receiver_skip + j, c);
++              }
++            }
++            j += t->size();  // long and double take two slots
++          }
++          Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
++          if (try_inline(target, /*holder_known*/ true, bc)) {
++            return true;
 +          }
 +        }
-+        // Cast reference arguments to its type.
-+        for (int i = 0, j = 0; i < signature->count(); i++) {
-+          ciType* t = signature->type_at(i);
-+          if (t->is_klass()) {
-+            Value obj = state()->stack_at(receiver_skip + i);
-+            ciKlass* tk = t->as_klass();
-+            if (obj->exact_type() == NULL &&
-+                obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
-+              TypeCast* c = new TypeCast(t, obj, state_before);
-+              append(c);
-+              state()->stack_at_put(receiver_skip + i, c);
-+            }
-+          }
-+          j += t->size();  // long and double take two slots
-+        }
-+
-+        Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
-+        if (try_inline(target, /*holder_known*/ true, bc)) {
-+          return true;
-+        }
-+        set_state(state_before);
-+        return false;
++      } else {
++        print_inlining(callee, "MemberName not constant", /*success*/ false);
 +      }
 +    }
++    break;
++
++  default:
++    fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
++    break;
    }
-+  set_state(state_before);
-+  print_inlining(callee, "receiver not constant", /*success*/ false);
-   return false;
- }
- 
- 
- bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) {
-+  tty->print_cr("for_invokedynamic_inline:");
-+  ValueStack* state_before = state()->copy_for_parsing();
-   // Get the MethodHandle from the CallSite.
+-  return false;
+-}
+-
+-
+-bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) {
+-  // Get the MethodHandle from the CallSite.
 -  ciCallSite*     call_site     = stream()->get_call_site();
-+  Value value = apop();
-+  ciCallSite*     call_site     = value->type()->as_ObjectType()->constant_value()->as_call_site();
-   ciMethodHandle* method_handle = call_site->get_target();
+-  ciMethodHandle* method_handle = call_site->get_target();
 -
 -  // TODO new implementation goes here
 -
-+  ciMethod*       target        = method_handle->get_vmtarget();
-+  if (target != NULL) {
-+//    // Push the synthetic receiver on the stack.
-+//    Value receiver = append(new Constant(new InstanceConstant(method_handle)));
-+//    // Subtract 1 from the arg_size since the receiver (the
-+//    // synthetic MH) is not on the stack.
-+//    state()->apush_at(receiver, target->arg_size_no_receiver() - 1);
-+    if (try_inline(target, /*holder_known*/ true)) {
-+      // Add a dependence for invalidation of the optimization.
-+      if (!call_site->is_constant_call_site()) {
-+        dependency_recorder()->assert_call_site_target_value(call_site, method_handle);
-+      }
-+      return true;
-+    }
-+    set_state(state_before);
-+    return false;
-+  }
 +  set_state(state_before);
-+  print_inlining(callee, "unable to inline", /*success*/ false);
    return false;
  }
  
-@@ -3947,22 +4105,24 @@
+@@ -3947,22 +4077,24 @@
  }
  
  
@@ -861,7 +833,7 @@
    void load_indexed (BasicType type);
    void store_indexed(BasicType type);
    void stack_op(Bytecodes::Code code);
-@@ -337,11 +337,14 @@
+@@ -337,14 +337,16 @@
    void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);
  
    // inliners
@@ -877,8 +849,11 @@
 +
    // JSR 292 support
    bool for_method_handle_inline(ciMethod* callee);
-   bool for_invokedynamic_inline(ciMethod* callee);
-@@ -366,9 +369,9 @@
+-  bool for_invokedynamic_inline(ciMethod* callee);
+ 
+   // helpers
+   void inline_bailout(const char* msg);
+@@ -366,9 +368,9 @@
    bool append_unsafe_prefetch(ciMethod* callee, bool is_store, bool is_static);
    void append_unsafe_CAS(ciMethod* callee);
  
@@ -1337,6 +1312,38 @@
  };
  
  
+diff --git a/src/share/vm/ci/bcEscapeAnalyzer.cpp b/src/share/vm/ci/bcEscapeAnalyzer.cpp
+--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp
++++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp
+@@ -238,9 +238,11 @@
+ 
+   // some methods are obviously bindable without any type checks so
+   // convert them directly to an invokespecial.
+-  if (target->is_loaded() && !target->is_abstract() &&
+-      target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
+-    code = Bytecodes::_invokespecial;
++  if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
++    switch (code) {
++    case Bytecodes::_invokevirtual:  code = Bytecodes::_invokespecial;  break;
++    case Bytecodes::_invokehandle:   code = Bytecodes::_invokestatic;   break;
++    }
+   }
+ 
+   // compute size of arguments
+@@ -866,7 +868,12 @@
+         { bool will_link;
+           ciMethod* target = s.get_method(will_link);
+           ciKlass* holder = s.get_declared_method_holder();
+-          invoke(state, s.cur_bc(), target, holder);
++          // Push appendix argument, if there is one.
++          if (s.has_appendix()) {
++            state.apush(unknown_obj);
++          }
++          // Pass in raw bytecode because we need to see invokehandle instructions.
++          invoke(state, s.cur_bc_raw(), target, holder);
+           ciType* return_type = target->return_type();
+           if (!return_type->is_primitive_type()) {
+             state.apush(unknown_obj);
 diff --git a/src/share/vm/ci/ciClassList.hpp b/src/share/vm/ci/ciClassList.hpp
 --- a/src/share/vm/ci/ciClassList.hpp
 +++ b/src/share/vm/ci/ciClassList.hpp
@@ -1359,17 +1366,7 @@
 diff --git a/src/share/vm/ci/ciEnv.cpp b/src/share/vm/ci/ciEnv.cpp
 --- a/src/share/vm/ci/ciEnv.cpp
 +++ b/src/share/vm/ci/ciEnv.cpp
-@@ -737,7 +737,8 @@
- // ciEnv::get_method_by_index_impl
- ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
-                                           int index, Bytecodes::Code bc,
--                                          ciInstanceKlass* accessor) {
-+                                          ciInstanceKlass* accessor,
-+                                          ciObject** appendix_result) {
-   int holder_index = cpool->klass_ref_index_at(index);
-   bool holder_is_accessible;
-   ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
-@@ -759,13 +760,17 @@
+@@ -759,13 +759,13 @@
      case Bytecodes::_invokespecial:
      case Bytecodes::_invokestatic:
        {
@@ -1377,12 +1374,8 @@
 -        methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, &no_appendix);
 -        assert(no_appendix == NULL, "");
 +        oop appendix_oop = NULL;
-+        methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, &appendix_oop);
++        methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index);
          if (m != NULL) {
-+          if (appendix_oop != NULL) {
-+            assert(appendix_result != NULL, "must be able to return extra argument");
-+            (*appendix_result) = get_object(appendix_oop);
-+          }
            return get_object(m)->as_method();
          }
        }
@@ -1390,13 +1383,7 @@
      }
    }
  
-@@ -797,11 +802,13 @@
- // ciEnv::get_fake_invokedynamic_method_impl
- ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
-                                                     int index, Bytecodes::Code bc,
--                                                    ciInstanceKlass* accessor) {
-+                                                    ciInstanceKlass* accessor,
-+                                                    ciObject** extra_arg) {
+@@ -801,7 +801,8 @@
    // Compare the following logic with InterpreterRuntime::resolve_invokedynamic.
    assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
  
@@ -1406,7 +1393,13 @@
    // FIXME: code generation could allow for null (unlinked) call site
    // The call site could be made patchable as follows:
    // Load the appendix argument from the constant pool.
-@@ -819,9 +826,11 @@
+@@ -814,14 +815,13 @@
+   // Stop the code path here with an unlinked method.
+   if (!is_resolved) {
+     ciInstanceKlass* holder    = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
+-    ciSymbol*        name      = ciSymbol::invoke_name();
++    ciSymbol*        name      = ciSymbol::invokeBasic_name();
+     ciSymbol*        signature = get_symbol(cpool->signature_ref_at(index));
      return get_unloaded_method(holder, name, signature, accessor);
    }
  
@@ -1415,29 +1408,10 @@
 -  methodOop adapter = cpool->cache()->secondary_entry_at(index)->f2_as_vfinal_method();
 +  // Get the invoker methodOop and the extra argument from the constant pool.
 +  methodOop adapter = secondary_entry->f2_as_vfinal_method();
-+  oop       call_site         = secondary_entry->f1_as_instance();
-+  assert(extra_arg != NULL, "must be able to return extra argument");
-+  (*extra_arg) = get_object(call_site);
    return get_object(adapter)->as_method();
  }
  
-@@ -852,11 +861,12 @@
- // ciEnv::get_method_by_index
- ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool,
-                                      int index, Bytecodes::Code bc,
--                                     ciInstanceKlass* accessor) {
-+                                     ciInstanceKlass* accessor,
-+                                     ciObject** extra_arg) {
-   if (bc == Bytecodes::_invokedynamic) {
--    GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc, accessor);)
-+    GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc, accessor, extra_arg);)
-   } else {
--    GUARDED_VM_ENTRY(return get_method_by_index_impl(          cpool, index, bc, accessor);)
-+    GUARDED_VM_ENTRY(return get_method_by_index_impl(          cpool, index, bc, accessor, extra_arg);)
-   }
- }
- 
-@@ -1133,7 +1143,7 @@
+@@ -1133,7 +1133,7 @@
  // ------------------------------------------------------------------
  // ciEnv::notice_inlined_method()
  void ciEnv::notice_inlined_method(ciMethod* method) {
@@ -1446,34 +1420,6 @@
  }
  
  // ------------------------------------------------------------------
-diff --git a/src/share/vm/ci/ciEnv.hpp b/src/share/vm/ci/ciEnv.hpp
---- a/src/share/vm/ci/ciEnv.hpp
-+++ b/src/share/vm/ci/ciEnv.hpp
-@@ -133,7 +133,8 @@
-                                 int field_index);
-   ciMethod*  get_method_by_index(constantPoolHandle cpool,
-                                  int method_index, Bytecodes::Code bc,
--                                 ciInstanceKlass* loading_klass);
-+                                 ciInstanceKlass* loading_klass,
-+                                 ciObject** extra_arg = NULL);
- 
-   // Implementation methods for loading and constant pool access.
-   ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
-@@ -151,10 +152,12 @@
-                                      int field_index);
-   ciMethod*  get_method_by_index_impl(constantPoolHandle cpool,
-                                       int method_index, Bytecodes::Code bc,
--                                      ciInstanceKlass* loading_klass);
-+                                      ciInstanceKlass* loading_klass,
-+                                      ciObject** extra_arg = NULL);
-   ciMethod*  get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
-                                                 int index, Bytecodes::Code bc,
--                                                ciInstanceKlass* accessor);
-+                                                ciInstanceKlass* accessor,
-+                                                ciObject** extra_arg = NULL);
- 
-   // Helper methods
-   bool       check_klass_accessibility(ciKlass* accessing_klass,
 diff --git a/src/share/vm/ci/ciMemberName.cpp b/src/share/vm/ci/ciMemberName.cpp
 new file mode 100644
 --- /dev/null
@@ -1570,6 +1516,24 @@
 diff --git a/src/share/vm/ci/ciMethod.cpp b/src/share/vm/ci/ciMethod.cpp
 --- a/src/share/vm/ci/ciMethod.cpp
 +++ b/src/share/vm/ci/ciMethod.cpp
+@@ -791,14 +791,14 @@
+ }
+ 
+ // ------------------------------------------------------------------
+-// ciMethod::has_member_arg
++// ciMethod::has_member_appendix
+ //
+ // Return true if the method is a linker intrinsic like _linkToVirtual.
+ // These are built by the JVM.
+-bool ciMethod::has_member_arg() const {
++bool ciMethod::has_member_appendix() const {
+   vmIntrinsics::ID iid = _intrinsic_id;  // do not check if loaded
+   return (MethodHandles::is_signature_polymorphic(iid) &&
+-          MethodHandles::has_member_arg(iid));
++          MethodHandles::has_member_appendix(iid));
+ }
+ 
+ // ------------------------------------------------------------------
 @@ -1023,28 +1023,13 @@
  // ------------------------------------------------------------------
  // ciMethod::code_size_for_inlining
@@ -1633,7 +1597,35 @@
 diff --git a/src/share/vm/ci/ciMethod.hpp b/src/share/vm/ci/ciMethod.hpp
 --- a/src/share/vm/ci/ciMethod.hpp
 +++ b/src/share/vm/ci/ciMethod.hpp
-@@ -161,6 +161,7 @@
+@@ -133,16 +133,20 @@
+     return _signature->size() + (_flags.is_static() ? 0 : 1);
+   }
+   // Report the number of elements on stack when invoking this method.
+-  // This is different than the regular arg_size because invokdynamic
++  // This is different than the regular arg_size because invokedynamic
+   // has an implicit receiver.
+   int invoke_arg_size(Bytecodes::Code code) const {
+-    int arg_size = _signature->size();
+-    // Add a receiver argument, maybe:
+-    if (code != Bytecodes::_invokestatic &&
+-        code != Bytecodes::_invokedynamic) {
+-      arg_size++;
++    if (is_loaded()) {
++      return arg_size();
++    } else {
++      int arg_size = _signature->size();
++      // Add a receiver argument, maybe:
++      if (code != Bytecodes::_invokestatic &&
++          code != Bytecodes::_invokedynamic) {
++        arg_size++;
++      }
++      return arg_size;
+     }
+-    return arg_size;
+   }
+ 
+ 
+@@ -161,6 +165,7 @@
    int code_size_for_inlining();
  
    bool force_inline() { return get_methodOop()->force_inline(); }
@@ -1641,6 +1633,15 @@
  
    int comp_level();
    int highest_osr_comp_level();
+@@ -260,7 +265,7 @@
+   // JSR 292 support
+   bool is_method_handle_intrinsic()  const;
+   bool is_compiled_lambda_form() const;
+-  bool has_member_arg() const;
++  bool has_member_appendix() const;
+ 
+   // What kind of ciObject is this?
+   bool is_method()                               { return true; }
 diff --git a/src/share/vm/ci/ciMethodHandle.cpp b/src/share/vm/ci/ciMethodHandle.cpp
 --- a/src/share/vm/ci/ciMethodHandle.cpp
 +++ b/src/share/vm/ci/ciMethodHandle.cpp
@@ -1723,6 +1724,16 @@
    ciMethod*                as_method() {
      assert(is_method(), "bad cast");
      return (ciMethod*)this;
+@@ -290,7 +295,8 @@
+   }
+ 
+   // Print debugging output about this ciObject.
+-  void print(outputStream* st = tty);
++  void print(outputStream* st);
++  void print() { print(tty); }  // GDB cannot handle default arguments
+ 
+   // Print debugging output about the oop this ciObject represents.
+   void print_oop(outputStream* st = tty);
 diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
 --- a/src/share/vm/ci/ciObjectFactory.cpp
 +++ b/src/share/vm/ci/ciObjectFactory.cpp
@@ -1760,31 +1771,98 @@
 diff --git a/src/share/vm/ci/ciStreams.cpp b/src/share/vm/ci/ciStreams.cpp
 --- a/src/share/vm/ci/ciStreams.cpp
 +++ b/src/share/vm/ci/ciStreams.cpp
-@@ -355,10 +355,10 @@
- // ciBytecodeStream::get_method
+@@ -364,6 +364,29 @@
+ }
+ 
+ // ------------------------------------------------------------------
++// ciBytecodeStream::has_appendix
++//
++// Returns true if there is an appendix argument stored in the
++// constant pool cache at the current bci.
++bool ciBytecodeStream::has_appendix() {
++  VM_ENTRY_MARK;
++  constantPoolHandle cpool(_method->get_methodOop()->constants());
++  return constantPoolOopDesc::has_appendix_at_if_loaded(cpool, get_method_index());
++}
++
++// ------------------------------------------------------------------
++// ciBytecodeStream::get_appendix
++//
++// Return the appendix argument stored in the constant pool cache at
++// the current bci.
++ciObject* ciBytecodeStream::get_appendix() {
++  VM_ENTRY_MARK;
++  constantPoolHandle cpool(_method->get_methodOop()->constants());
++  oop appendix_oop = constantPoolOopDesc::appendix_at_if_loaded(cpool, get_method_index());
++  return CURRENT_ENV->get_object(appendix_oop);
++}
++
++// ------------------------------------------------------------------
+ // ciBytecodeStream::get_declared_method_holder
  //
- // If this is a method invocation bytecode, get the invoked method.
--ciMethod* ciBytecodeStream::get_method(bool& will_link) {
-+ciMethod* ciBytecodeStream::get_method(bool& will_link, ciObject** extra_arg) {
-   VM_ENTRY_MARK;
-   constantPoolHandle cpool(_method->get_methodOop()->constants());
--  ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
-+  ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder, extra_arg);
-   will_link = m->is_loaded();
-   return m;
- }
+ // Get the declared holder of the currently referenced method.
 diff --git a/src/share/vm/ci/ciStreams.hpp b/src/share/vm/ci/ciStreams.hpp
 --- a/src/share/vm/ci/ciStreams.hpp
 +++ b/src/share/vm/ci/ciStreams.hpp
-@@ -258,7 +258,7 @@
-   int      get_field_signature_index();
+@@ -259,6 +259,8 @@
  
    // If this is a method invocation bytecode, get the invoked method.
--  ciMethod* get_method(bool& will_link);
-+  ciMethod* get_method(bool& will_link, ciObject** extra_arg = NULL);
+   ciMethod* get_method(bool& will_link);
++  bool      has_appendix();
++  ciObject* get_appendix();
    ciKlass*  get_declared_method_holder();
    int       get_method_holder_index();
    int       get_method_signature_index();
+diff --git a/src/share/vm/ci/ciTypeFlow.cpp b/src/share/vm/ci/ciTypeFlow.cpp
+--- a/src/share/vm/ci/ciTypeFlow.cpp
++++ b/src/share/vm/ci/ciTypeFlow.cpp
+@@ -643,9 +643,9 @@
+ // ------------------------------------------------------------------
+ // ciTypeFlow::StateVector::do_invoke
+ void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
+-                                        bool has_receiver) {
++                                        bool has_receiver_foo) {
+   bool will_link;
+-  ciMethod* method = str->get_method(will_link);
++  ciMethod* callee = str->get_method(will_link);
+   if (!will_link) {
+     // We weren't able to find the method.
+     if (str->cur_bc() == Bytecodes::_invokedynamic) {
+@@ -654,12 +654,24 @@
+            (Deoptimization::Reason_uninitialized,
+             Deoptimization::Action_reinterpret));
+     } else {
+-      ciKlass* unloaded_holder = method->holder();
++      ciKlass* unloaded_holder = callee->holder();
+       trap(str, unloaded_holder, str->get_method_holder_index());
+     }
+   } else {
+-    ciSignature* signature = method->signature();
++    // TODO Use Bytecode_invoke after metadata changes.
++    //Bytecode_invoke inv(str->method(), str->cur_bci());
++    //const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver();
++    Bytecode inv(str);
++    Bytecodes::Code code = inv.invoke_code();
++    const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic;
++
++    ciSignature* signature = callee->signature();
+     ciSignatureStream sigstr(signature);
++    // Push appendix argument, if one.
++    if (str->has_appendix()) {
++      ciObject* appendix = str->get_appendix();
++      push_object(appendix->klass());
++    }
+     int arg_size = signature->size();
+     int stack_base = stack_size() - arg_size;
+     int i = 0;
+@@ -677,6 +689,7 @@
+     for (int j = 0; j < arg_size; j++) {
+       pop();
+     }
++    assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch");
+     if (has_receiver) {
+       // Check this?
+       pop_object();
 diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
 --- a/src/share/vm/classfile/classFileParser.cpp
 +++ b/src/share/vm/classfile/classFileParser.cpp
@@ -1831,6 +1909,63 @@
        _method_LambdaForm_Compiled,
        _annotation_LIMIT
      };
+diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
+--- a/src/share/vm/classfile/systemDictionary.cpp
++++ b/src/share/vm/classfile/systemDictionary.cpp
+@@ -194,7 +194,10 @@
+ // Forwards to resolve_instance_class_or_null
+ 
+ klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) {
+-  assert(!THREAD->is_Compiler_thread(), "Can not load classes with the Compiler thread");
++  assert(!THREAD->is_Compiler_thread(),
++         err_msg("can not load classes with compiler thread: class=%s, classloader=%s",
++                 class_name->as_C_string(),
++                 class_loader.is_null() ? "null" : class_loader->klass()->klass_part()->name()->as_C_string()));
+   if (FieldType::is_array(class_name)) {
+     return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+   } else if (FieldType::is_obj(class_name)) {
+@@ -2367,7 +2370,7 @@
+   assert(MethodHandles::is_signature_polymorphic(iid) &&
+          MethodHandles::is_signature_polymorphic_intrinsic(iid) &&
+          iid != vmIntrinsics::_invokeGeneric,
+-         err_msg("must be a known MH intrinsic; iid=%d", iid));
++         err_msg("must be a known MH intrinsic iid=%d: %s", iid, vmIntrinsics::name_at(iid)));
+ 
+   unsigned int hash  = invoke_method_table()->compute_hash(signature, iid);
+   int          index = invoke_method_table()->hash_to_index(hash);
+@@ -2486,6 +2489,7 @@
+     assert(java_lang_invoke_MethodType::is_instance(spe->property_oop()), "");
+     return Handle(THREAD, spe->property_oop());
+   } else if (THREAD->is_Compiler_thread()) {
++    tty->print_cr("SystemDictionary::find_method_handle_type called from compiler thread");
+     return Handle();  // do not attempt from within compiler, unless it was cached
+   }
+ 
+@@ -2634,13 +2638,13 @@
+   guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()),
+             "caller must supply a valid BSM");
+ 
+-  Handle caller_mname = MethodHandles::new_MemberName(CHECK_NULL);
++  Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
+   MethodHandles::init_method_MemberName(caller_mname(), caller_method(), false, NULL);
+ 
+   Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
+   Handle method_type = find_method_handle_type(type, caller_method->method_holder(), CHECK_(empty));
+ 
+-  objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_NULL);
++  objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
+   assert(appendix_box->obj_at(0) == NULL, "");
+ 
+   // call java.lang.invoke.MethodHandleNatives::makeDynamicCallSite(bsm, name, mtype, info, caller_mname, caller_pos)
+@@ -2657,7 +2661,7 @@
+                          SystemDictionary::MethodHandleNatives_klass(),
+                          vmSymbols::linkCallSite_name(),
+                          vmSymbols::linkCallSite_signature(),
+-                         &args, CHECK_NULL);
++                         &args, CHECK_(empty));
+   Handle mname(THREAD, (oop) result.get_jobject());
+   return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD);
+ }
 diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
 --- a/src/share/vm/classfile/vmSymbols.hpp
 +++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -1910,10 +2045,230 @@
  
    if (msg != NULL) {
      st->print("   %s", msg);
+diff --git a/src/share/vm/interpreter/bytecode.cpp b/src/share/vm/interpreter/bytecode.cpp
+--- a/src/share/vm/interpreter/bytecode.cpp
++++ b/src/share/vm/interpreter/bytecode.cpp
+@@ -124,8 +124,8 @@
+ }
+ 
+ 
+-Symbol* Bytecode_member_ref::signature() const {
+-  return constants()->signature_ref_at(index());
++Symbol* Bytecode_member_ref::klass() const {
++  return constants()->klass_ref_at_noresolve(index());
+ }
+ 
+ 
+@@ -134,6 +134,11 @@
+ }
+ 
+ 
++Symbol* Bytecode_member_ref::signature() const {
++  return constants()->signature_ref_at(index());
++}
++
++
+ BasicType Bytecode_member_ref::result_type() const {
+   ResultTypeFinder rts(signature());
+   rts.iterate();
+@@ -176,7 +181,7 @@
+   int index = this->index();
+   DEBUG_ONLY({
+       if (!has_index_u4(code()))
+-        index -= constantPoolOopDesc::CPCACHE_INDEX_TAG;
++        index = constantPoolOopDesc::get_cpcache_index(index);
+     });
+   return cpcache()->entry_at(index);
+ }
+diff --git a/src/share/vm/interpreter/bytecode.hpp b/src/share/vm/interpreter/bytecode.hpp
+--- a/src/share/vm/interpreter/bytecode.hpp
++++ b/src/share/vm/interpreter/bytecode.hpp
+@@ -203,6 +203,7 @@
+  public:
+   int          index() const;                    // cache index (loaded from instruction)
+   int          pool_index() const;               // constant pool index
++  Symbol*      klass() const;                    // returns the klass of the method or field
+   Symbol*      name() const;                     // returns the name of the method or field
+   Symbol*      signature() const;                // returns the signature of the method or field
+ 
+@@ -241,6 +242,8 @@
+                                                           is_invokedynamic()   ||
+                                                           is_invokehandle(); }
+ 
++  bool has_appendix()                            { return cpcache_entry()->has_appendix(); }
++
+  private:
+   // Helper to skip verification.   Used is_valid() to check if the result is really an invoke
+   inline friend Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci);
+diff --git a/src/share/vm/interpreter/interpreter.cpp b/src/share/vm/interpreter/interpreter.cpp
+--- a/src/share/vm/interpreter/interpreter.cpp
++++ b/src/share/vm/interpreter/interpreter.cpp
+@@ -37,6 +37,7 @@
+ #include "oops/oop.inline.hpp"
+ #include "prims/forte.hpp"
+ #include "prims/jvmtiExport.hpp"
++#include "prims/methodHandles.hpp"
+ #include "runtime/handles.inline.hpp"
+ #include "runtime/sharedRuntime.hpp"
+ #include "runtime/stubRoutines.hpp"
+diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp
+--- a/src/share/vm/interpreter/linkResolver.cpp
++++ b/src/share/vm/interpreter/linkResolver.cpp
+@@ -101,7 +101,7 @@
+ 
+ void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, TRAPS) {
+   if (resolved_method.is_null()) {
+-    THROW(vmSymbols::java_lang_InternalError());
++    THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null");
+   }
+   KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
+   assert(resolved_method->intrinsic_id() == vmIntrinsics::_invokeBasic ||
+@@ -338,6 +338,7 @@
+     new_flags = new_flags | JVM_ACC_PUBLIC;
+     flags.set_flags(new_flags);
+   }
++//  assert(extra_arg_result_or_null != NULL, "must be able to return extra argument");
+ 
+   if (!Reflection::verify_field_access(ref_klass->as_klassOop(),
+                                        resolved_klass->as_klassOop(),
+@@ -380,9 +381,8 @@
+   if (pool->has_preresolution()
+       || (resolved_klass() == SystemDictionary::MethodHandle_klass() &&
+           MethodHandles::is_signature_polymorphic(resolved_klass(), method_name))) {
+-    oop appendix = NULL;
+-    methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index, &appendix);
+-    if (result_oop != NULL && appendix == NULL) {
++    methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index);
++    if (result_oop != NULL) {
+       resolved_method = methodHandle(THREAD, result_oop);
+       return;
+     }
+diff --git a/src/share/vm/oops/constantPoolOop.cpp b/src/share/vm/oops/constantPoolOop.cpp
+--- a/src/share/vm/oops/constantPoolOop.cpp
++++ b/src/share/vm/oops/constantPoolOop.cpp
+@@ -267,11 +267,10 @@
+ 
+ 
+ methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool,
+-                                                   int which,
+-                                                   oop* appendix_result) {
++                                                   int which) {
+   assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here");
+   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
+-  int cache_index = which - CPCACHE_INDEX_TAG;
++  int cache_index = get_cpcache_index(which);
+   if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
+     if (PrintMiscellaneous && (Verbose||WizardMode)) {
+       tty->print_cr("bad operand %d in:", which); cpool->print();
+@@ -279,7 +278,50 @@
+     return NULL;
+   }
+   ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
+-  return e->method_if_resolved(cpool, appendix_result);
++  return e->method_if_resolved(cpool);
++}
++
++
++bool constantPoolOopDesc::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) {
++  if (cpool->cache() == NULL)  return false;  // nothing to load yet
++  // XXX Is there a simpler way to get to the secondary entry?
++  ConstantPoolCacheEntry* e;
++  if (constantPoolCacheOopDesc::is_secondary_index(which)) {
++    e = cpool->cache()->secondary_entry_at(which);
++  } else {
++    int cache_index = get_cpcache_index(which);
++    if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
++      if (PrintMiscellaneous && (Verbose||WizardMode)) {
++        tty->print_cr("bad operand %d in:", which); cpool->print();
++      }
++      return false;
++    }
++    e = cpool->cache()->entry_at(cache_index);
++  }
++  return e->has_appendix();
++}
++
++
++oop constantPoolOopDesc::appendix_at_if_loaded(constantPoolHandle cpool, int which) {
++  if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
++  // XXX Is there a simpler way to get to the secondary entry?
++  ConstantPoolCacheEntry* e;
++  if (constantPoolCacheOopDesc::is_secondary_index(which)) {
++    e = cpool->cache()->secondary_entry_at(which);
++  } else {
++    int cache_index = get_cpcache_index(which);
++    if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
++      if (PrintMiscellaneous && (Verbose||WizardMode)) {
++        tty->print_cr("bad operand %d in:", which); cpool->print();
++      }
++      return NULL;
++    }
++    e = cpool->cache()->entry_at(cache_index);
++  }
++  if (!e->has_appendix()) {
++    return NULL;
++  }
++  return e->f1_as_instance();
+ }
+ 
+ 
+diff --git a/src/share/vm/oops/constantPoolOop.hpp b/src/share/vm/oops/constantPoolOop.hpp
+--- a/src/share/vm/oops/constantPoolOop.hpp
++++ b/src/share/vm/oops/constantPoolOop.hpp
+@@ -671,12 +671,13 @@
+   friend class SystemDictionary;
+ 
+   // Used by compiler to prevent classloading.
+-  static methodOop method_at_if_loaded        (constantPoolHandle this_oop, int which,
+-                                               oop* appendix_result);
+-  static klassOop klass_at_if_loaded          (constantPoolHandle this_oop, int which);
+-  static klassOop klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
++  static methodOop       method_at_if_loaded      (constantPoolHandle this_oop, int which);
++  static bool      has_appendix_at_if_loaded      (constantPoolHandle this_oop, int which);
++  static oop           appendix_at_if_loaded      (constantPoolHandle this_oop, int which);
++  static klassOop         klass_at_if_loaded      (constantPoolHandle this_oop, int which);
++  static klassOop     klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
+   // Same as above - but does LinkResolving.
+-  static klassOop klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
++  static klassOop     klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
+ 
+   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
+   // future by other Java code. These take constant pool indices rather than
+@@ -702,6 +703,8 @@
+   enum { CPCACHE_INDEX_TAG = 0 };        // in product mode, this zero value is a no-op
+ #endif //ASSERT
+ 
++  static int get_cpcache_index(int index) { return index - CPCACHE_INDEX_TAG; }
++
+  private:
+ 
+   Symbol* impl_name_ref_at(int which, bool uncached);
 diff --git a/src/share/vm/oops/cpCacheOop.cpp b/src/share/vm/oops/cpCacheOop.cpp
 --- a/src/share/vm/oops/cpCacheOop.cpp
 +++ b/src/share/vm/oops/cpCacheOop.cpp
-@@ -374,10 +374,13 @@
+@@ -291,7 +291,7 @@
+                    (                 1      << is_vfinal_shift)     |
+                    (                 1      << is_final_shift),
+                    adapter->size_of_parameters());
+-  assert(old_flags == 0 || old_flags == _flags, "flags should be the same");
++  assert(old_flags == 0 || old_flags == _flags, err_msg("flags should be the same: old_flags=%x, _flags=%x", old_flags, _flags));
+ 
+   if (TraceInvokeDynamic) {
+     tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ",
+@@ -334,10 +334,9 @@
+   }
+ }
+ 
+-methodOop ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool,
+-                                                     oop* appendix_result) {
++methodOop ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
+   if (is_secondary_entry()) {
+-    return cpool->cache()->entry_at(main_entry_index())->method_if_resolved(cpool, appendix_result);
++    return cpool->cache()->entry_at(main_entry_index())->method_if_resolved(cpool);
+   }
+   // Decode the action of set_method and set_interface_call
+   Bytecodes::Code invoke_code = bytecode_1();
+@@ -374,10 +373,9 @@
            return instanceKlass::cast(klass)->method_at_vtable(f2_as_index());
          }
        }
@@ -1921,13 +2276,60 @@
      case Bytecodes::_invokehandle:
      case Bytecodes::_invokedynamic:
 -      if (has_appendix())
-+      if (has_appendix()) {
-+        assert(appendix_result != NULL, "must be able to return extra argument");
-         (*appendix_result) = f1_as_instance();
-+      }
+-        (*appendix_result) = f1_as_instance();
        return f2_as_vfinal_method();
      }
    }
+@@ -385,6 +383,16 @@
+ }
+ 
+ 
++oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
++  if (is_secondary_entry()) {
++    return cpool->cache()->entry_at(main_entry_index())->appendix_if_resolved(cpool);
++  }
++  if (!has_appendix()) {
++    return NULL;
++  }
++  return f1_as_instance();
++}
++
+ 
+ class LocalOopClosure: public OopClosure {
+  private:
+diff --git a/src/share/vm/oops/cpCacheOop.hpp b/src/share/vm/oops/cpCacheOop.hpp
+--- a/src/share/vm/oops/cpCacheOop.hpp
++++ b/src/share/vm/oops/cpCacheOop.hpp
+@@ -66,8 +66,8 @@
+ //
+ // The flags after TosState have the following interpretation:
+ // bit 27: 0 for fields, 1 for methods
+-// f flag  true if field is marked final
+-// v flag true if field is volatile (only for fields)
++// f  flag true if field is marked final
++// v  flag true if field is volatile (only for fields)
+ // f2 flag true if f2 contains an oop (e.g., virtual final method)
+ // fv flag true if invokeinterface used for method in class Object
+ //
+@@ -165,7 +165,7 @@
+     is_vfinal_shift            = 21,
+     is_volatile_shift          = 22,
+     is_final_shift             = 23,
+-    has_appendix_shift        = 24,
++    has_appendix_shift         = 24,
+     is_forced_virtual_shift    = 25,
+     is_field_entry_shift       = 26,
+     // low order bits give field index (for FieldInfo) or method parameter size:
+@@ -247,7 +247,8 @@
+     Handle appendix                              // appendix such as CallSite, MethodType, etc. (f1)
+   );
+ 
+-  methodOop method_if_resolved(constantPoolHandle cpool, oop* appendix_result);
++  methodOop   method_if_resolved(constantPoolHandle cpool);
++  oop       appendix_if_resolved(constantPoolHandle cpool);
+ 
+   void set_parameter_size(int value);
+ 
 diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp
 --- a/src/share/vm/oops/methodOop.cpp
 +++ b/src/share/vm/oops/methodOop.cpp
@@ -1939,7 +2341,7 @@
  #include "prims/nativeLookup.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/compilationPolicy.hpp"
-@@ -399,41 +400,43 @@
+@@ -399,41 +400,39 @@
  }
  
  
@@ -1990,7 +2392,7 @@
 +    case Bytecodes::_if_acmpne:
 +    case Bytecodes::_goto:
 +    case Bytecodes::_jsr:
-+      if (bcs.dest() < bcs.next_bci()) _access_flags.set_has_loops();
++      if (bcs.dest()   < bcs.next_bci())  _access_flags.set_has_loops();
 +      break;
  
 -      case Bytecodes::_goto_w:
@@ -1999,11 +2401,7 @@
 -        break;
 +    case Bytecodes::_goto_w:
 +    case Bytecodes::_jsr_w:
-+      if (bcs.dest_w() < bcs.next_bci()) _access_flags.set_has_loops();
-+      break;
-+
-+    case Bytecodes::_invokedynamic:
-+      _access_flags.set_has_invokedynamics();
++      if (bcs.dest_w() < bcs.next_bci())  _access_flags.set_has_loops();
 +      break;
      }
    }
@@ -2012,6 +2410,41 @@
  }
  
  
+@@ -877,10 +876,10 @@
+           MethodHandles::is_signature_polymorphic_intrinsic(iid));
+ }
+ 
+-bool methodOopDesc::has_member_arg() const {
++bool methodOopDesc::has_member_appendix() const {
+   vmIntrinsics::ID iid = intrinsic_id();
+   return (MethodHandles::is_signature_polymorphic(iid) &&
+-          MethodHandles::has_member_arg(iid));
++          MethodHandles::has_member_appendix(iid));
+ }
+ 
+ // Make an instance of a signature-polymorphic internal MH primitive.
+@@ -964,7 +963,7 @@
+ }
+ 
+ 
+-methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
++methodHandle methodOopDesc::clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
+                                                 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) {
+   // Code below does not work for native methods - they should never get rewritten anyway
+   assert(!m->is_native(), "cannot rewrite native methods");
+@@ -1123,6 +1122,12 @@
+ 
+ // These two methods are static since a GC may move the methodOopDesc
+ bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
++  if (THREAD->is_Compiler_thread()) {
++    // There is nothing useful this routine can do.
++    // Hopefully, the signature contains only well-known classes.
++    // We could scan for this and return true/false, but the caller won't care.
++    return false;
++  }
+   bool sig_is_loaded = true;
+   Handle class_loader(THREAD, instanceKlass::cast(m->method_holder())->class_loader());
+   Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain());
 diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp
 --- a/src/share/vm/oops/methodOop.hpp
 +++ b/src/share/vm/oops/methodOop.hpp
@@ -2030,11 +2463,11 @@
  
    // max stack
 -  int  max_stack() const                         { return _max_stack; }
-+  int  max_stack() const                         { return _max_stack + (has_invokedynamics() ? 1 : 0); }
++  int  max_stack() const                         { return _max_stack + extra_stack_entries(); }
    void set_max_stack(int size)                   { _max_stack = size; }
  
    // max locals
-@@ -490,19 +491,14 @@
+@@ -490,19 +491,13 @@
    // true if method needs no dynamic dispatch (final and/or no vtable entry)
    bool can_be_statically_bound() const;
  
@@ -2045,7 +2478,6 @@
 -    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
 -  };
 +  bool has_loops()          const { return access_flags().has_loops();          }
-+  bool has_invokedynamics() const { return access_flags().has_invokedynamics(); }
  
 -  bool compute_has_loops_flag();
 -
@@ -2060,7 +2492,16 @@
  
    // returns true if the method has any monitors.
    bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
-@@ -647,8 +643,10 @@
+@@ -592,7 +587,7 @@
+   // JSR 292 support
+   bool is_method_handle_intrinsic() const;          // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
+   bool is_compiled_lambda_form() const;             // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
+-  bool has_member_arg() const;                      // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
++  bool has_member_appendix() const;                 // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
+   static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
+                                                    Symbol* signature, //anything at all
+                                                    TRAPS);
+@@ -647,8 +642,10 @@
    bool jfr_towrite()                 { return _jfr_towrite; }
    void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
  
@@ -2073,7 +2514,7 @@
  
    // On-stack replacement support
    bool has_osr_nmethod(int level, bool match_level) {
-@@ -695,8 +693,8 @@
+@@ -695,8 +692,8 @@
    static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);
  
    // Printing
@@ -2096,7 +2537,42 @@
  const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
    // Allows targeted inlining
    if(callee_method->should_inline()) {
-@@ -190,7 +190,7 @@
+@@ -131,34 +131,6 @@
+   int call_site_count  = method()->scale_count(profile.count());
+   int invoke_count     = method()->interpreter_invocation_count();
+ 
+-  // Bytecoded method handle adapters do not have interpreter
+-  // profiling data but only made up MDO data.  Get the counter from
+-  // there.
+-  // %%% FIXME:  Is this still correct, now that Java code generates the LFIs?
+-  if (caller_method->is_compiled_lambda_form()) {
+-    assert(method()->method_data_or_null(), "must have an MDO");
+-    ciMethodData* mdo = method()->method_data();
+-    ciProfileData* mha_profile = mdo->bci_to_data(caller_bci);
+-    assert(mha_profile, "must exist");
+-    CounterData* cd = mha_profile->as_CounterData();
+-    invoke_count = cd->count();
+-    if (invoke_count == 0) {
+-      return "method handle not reached";
+-    }
+-
+-    if (_caller_jvms != NULL && _caller_jvms->method() != NULL &&
+-        _caller_jvms->method()->method_data() != NULL &&
+-        !_caller_jvms->method()->method_data()->is_empty()) {
+-      ciMethodData* mdo = _caller_jvms->method()->method_data();
+-      ciProfileData* mha_profile = mdo->bci_to_data(_caller_jvms->bci());
+-      assert(mha_profile, "must exist");
+-      CounterData* cd = mha_profile->as_CounterData();
+-      call_site_count = cd->count();
+-    } else {
+-      call_site_count = invoke_count;  // use the same value
+-    }
+-  }
+-
+   assert(invoke_count != 0, "require invocation count greater than zero");
+   int freq = call_site_count / invoke_count;
+ 
+@@ -190,15 +162,16 @@
  }
  
  
@@ -2105,16 +2581,78 @@
  const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const {
    // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
    if (!UseOldInlining) {
-@@ -226,7 +226,7 @@
+     const char* fail = NULL;
+-    if (callee_method->is_abstract())               fail = "abstract method";
++    if ( callee_method->is_abstract())               fail = "abstract method";
+     // note: we allow ik->is_abstract()
+-    if (!callee_method->holder()->is_initialized()) fail = "method holder not initialized";
+-    if (callee_method->is_native())                 fail = "native method";
++    if (!callee_method->holder()->is_initialized())  fail = "method holder not initialized";
++    if ( callee_method->is_native())                 fail = "native method";
++    if ( callee_method->dont_inline())               fail = "don't inline by annotation";
+ 
+     if (fail) {
+       *wci_result = *(WarmCallInfo::always_cold());
+@@ -218,7 +191,8 @@
+       }
+     }
+ 
+-    if (callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
++    if (callee_method->has_compiled_code() &&
++        callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
+       wci_result->set_profit(wci_result->profit() * 0.1);
+       // %%% adjust wci_result->size()?
+     }
+@@ -226,28 +200,25 @@
      return NULL;
    }
  
 -  // Always inline MethodHandle intrinsics and generated MethodHandle adapters.
-+  // Always inline MethodHandle intrinsics and lambda form invokers.
-   if (callee_method->is_method_handle_intrinsic())  // %%% FIXME: Can this happen, since such intrinsics are natives?
+-  if (callee_method->is_method_handle_intrinsic())  // %%% FIXME: Can this happen, since such intrinsics are natives?
+-    return NULL;
+-  if (callee_method->is_compiled_lambda_form())  // %%% FIXME: Is this correct?  Probably not needed since we have @ForceInline
+-    return NULL;
++  // First check all inlining restrictions which are required for correctness
++  if ( callee_method->is_abstract())                        return "abstract method";
++  // note: we allow ik->is_abstract()
++  if (!callee_method->holder()->is_initialized())           return "method holder not initialized";
++  if ( callee_method->is_native())                          return "native method";
++  if ( callee_method->dont_inline())                        return "don't inline by annotation";
++  if ( callee_method->has_unloaded_classes_in_signature())  return "unloaded signature classes";
+ 
+-  // First check all inlining restrictions which are required for correctness
+-  if (callee_method->is_abstract())               return "abstract method";
+-  // note: we allow ik->is_abstract()
+-  if (!callee_method->holder()->is_initialized()) return "method holder not initialized";
+-  if (callee_method->is_native())                 return "native method";
+-  if (callee_method->has_unloaded_classes_in_signature()) return "unloaded signature classes";
+-
+-  if (callee_method->should_inline()) {
++  if (callee_method->force_inline() || callee_method->should_inline()) {
+     // ignore heuristic controls on inlining
      return NULL;
-   if (callee_method->is_compiled_lambda_form())  // %%% FIXME: Is this correct?  Probably not needed since we have @ForceInline
-@@ -415,6 +415,7 @@
+   }
+ 
+   // Now perform checks which are heuristic
+ 
+-  if( callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode )
++  if (callee_method->has_compiled_code() &&
++      callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
+     return "already compiled into a big method";
++  }
+ 
+   // don't inline exception code unless the top method belongs to an
+   // exception class
+@@ -349,7 +320,7 @@
+   }
+ 
+   // detect direct and indirect recursive inlining
+-  {
++  if (!callee_method->is_compiled_lambda_form()) {
+     // count the current method and the callee
+     int inline_level = (method() == callee_method) ? 1 : 0;
+     if (inline_level > MaxRecursiveInlineLevel)
+@@ -415,6 +386,7 @@
  const char* InlineTree::check_can_parse(ciMethod* callee) {
    // Certain methods cannot be parsed at all:
    if ( callee->is_native())                     return "native method";
@@ -2122,7 +2660,7 @@
    if (!callee->can_be_compiled())               return "not compilable (disabled)";
    if (!callee->has_balanced_monitors())         return "not compilable (unbalanced monitors)";
    if ( callee->get_flow_analysis()->failing())  return "not compilable (flow analysis failed)";
-@@ -429,7 +430,7 @@
+@@ -429,7 +401,7 @@
    if (Verbose && callee_method) {
      const InlineTree *top = this;
      while( top->caller_tree() != NULL ) { top = top->caller_tree(); }
@@ -2131,7 +2669,32 @@
    }
  }
  
-@@ -544,7 +545,8 @@
+@@ -452,10 +424,7 @@
+ 
+   // Do some initial checks.
+   if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
+-    if (PrintInlining) {
+-      failure_msg = "failed_initial_checks";
+-      print_inlining(callee_method, caller_bci, failure_msg);
+-    }
++    if (PrintInlining)  print_inlining(callee_method, caller_bci, "failed initial checks");
+     return NULL;
+   }
+ 
+@@ -466,6 +435,12 @@
+     return NULL;
+   }
+ 
++  // Always inline ForceInline methods.
++  if (callee_method->force_inline()) {
++    if (PrintInlining)  print_inlining(callee_method, caller_bci, "force inline by annotation");
++    return WarmCallInfo::always_hot();
++  }
++
+   // Check if inlining policy says no.
+   WarmCallInfo wci = *(initial_wci);
+   failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
+@@ -544,7 +519,8 @@
    if (caller_jvms->method() != NULL) {
      if (caller_jvms->method()->is_compiled_lambda_form())
        max_inline_level_adjust += 1;  // don't count actions in MH or indy adapter frames
@@ -2141,6 +2704,33 @@
        max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
      }
      if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+@@ -593,7 +569,7 @@
+ // Given a jvms, which determines a call chain from the root method,
+ // find the corresponding inline tree.
+ // Note: This method will be removed or replaced as InlineTree goes away.
+-InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found) {
++InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee) {
+   InlineTree* iltp = root;
+   uint depth = jvms && jvms->has_method() ? jvms->depth() : 0;
+   for (uint d = 1; d <= depth; d++) {
+@@ -602,12 +578,12 @@
+     assert(jvmsp->method() == iltp->method(), "tree still in sync");
+     ciMethod* d_callee = (d == depth) ? callee : jvms->of_depth(d+1)->method();
+     InlineTree* sub = iltp->callee_at(jvmsp->bci(), d_callee);
+-    if (!sub) {
+-      if (create_if_not_found && d == depth) {
+-        return iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci());
++    if (sub == NULL) {
++      if (d == depth) {
++        sub = iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci());
+       }
+-      assert(sub != NULL, "should be a sub-ilt here");
+-      return NULL;
++      guarantee(sub != NULL, "should be a sub-ilt here");
++      return sub;
+     }
+     iltp = sub;
+   }
 diff --git a/src/share/vm/opto/callGenerator.cpp b/src/share/vm/opto/callGenerator.cpp
 --- a/src/share/vm/opto/callGenerator.cpp
 +++ b/src/share/vm/opto/callGenerator.cpp
@@ -2151,7 +2741,15 @@
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
-@@ -39,9 +39,6 @@
+@@ -26,6 +26,7 @@
+ #include "ci/bcEscapeAnalyzer.hpp"
+ #include "ci/ciCallSite.hpp"
+ #include "ci/ciCPCache.hpp"
++#include "ci/ciMemberName.hpp"
+ #include "ci/ciMethodHandle.hpp"
+ #include "classfile/javaClasses.hpp"
+ #include "compiler/compileLog.hpp"
+@@ -39,9 +40,6 @@
  #include "opto/runtime.hpp"
  #include "opto/subnode.hpp"
  
@@ -2161,7 +2759,7 @@
  
  // Utility function.
  const TypeFunc* CallGenerator::tf() const {
-@@ -660,35 +657,35 @@
+@@ -660,35 +658,35 @@
  // Internal class which handles all out-of-line calls checking receiver type.
  class PredictedDynamicCallGenerator : public CallGenerator {
    ciMethodHandle* _predicted_method_handle;
@@ -2212,107 +2810,119 @@
  
  
  CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
-@@ -710,6 +707,66 @@
-     ciMethodHandle* method_handle = const_oop->as_method_handle();
- 
-     // TODO new implementation goes here
-+    if (false /*AllowLambdaForms?*/) {
-+#if 0
-+//      tty->print("method handle: "); method_handle->print(); tty->cr();
-+      ciMethod* target = method_handle->get_vmtarget();
-+      if (target != NULL) {
-+//        tty->print("target: "); target->print(); tty->cr();
-+//        target->print_codes();
-+        if (callee->arg_size() == target->arg_size()) {
-+          // lambda form exact invokers take the method handle receiver as first
-+          // argument and thus have the same argument size.
-+          assert(target->is_lambda_form_exact_invoker(), "must be");
-+          // TODO remove the follow code:
-+          CallGenerator* cg = Compile::current()->call_generator(target, -1, false, jvms, true, PROB_ALWAYS);
-+          if (cg != NULL && cg->is_inline())
-+            return cg;
-+        } else {
-+          assert(callee->is_method_handle_invoke() && method_handle->is_direct(), "must be");
-+          assert(callee->arg_size() - 1 == target->arg_size(), err_msg("must be equal: callee=%d, target=%d", callee->arg_size() - 1, target->arg_size()));
-+
-+          // TODO remove receiver from stack and store number of removed receivers until call generation
-+          GraphKit kit(jvms);
-+          PhaseGVN& gvn = kit.gvn();
-+
-+          // In lamda forms we erase signature types to avoid resolving issues
-+          // involving class loaders.  When we optimize a method handle invoke
-+          // to a direct call we must cast the receiver and arguments to its
-+          // actual types.
-+          ciSignature* signature = target->signature();
-+          const int receiver_skip = target->is_static() ? 0 : 1;
-+          // Cast receiver to its type.
-+          if (!target->is_static()) {
-+            Node* arg = kit.argument(0);
+@@ -702,39 +700,84 @@
+ 
+ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
+   GraphKit kit(jvms);
+-  Node* method_handle = kit.argument(0);
++  PhaseGVN& gvn = kit.gvn();
++  Compile* C = kit.C;
++  vmIntrinsics::ID iid = callee->intrinsic_id();
++  switch (iid) {
++  case vmIntrinsics::_invokeBasic:
++    {
++      // get MethodHandle receiver
++      Node* receiver = kit.argument(0);
++      if (receiver->Opcode() == Op_ConP) {
++        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
++        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
++        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
++        const int vtable_index = methodOopDesc::invalid_vtable_index;
++        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS);
++        if (cg != NULL && cg->is_inline())
++          return cg;
++      } else {
++        if (PrintInlining)  CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
++      }
++    }
++    break;
+ 
+-  if (method_handle->Opcode() == Op_ConP) {
+-    const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr();
+-    ciObject* const_oop = oop_ptr->const_oop();
+-    ciMethodHandle* method_handle = const_oop->as_method_handle();
++  case vmIntrinsics::_linkToVirtual:
++  case vmIntrinsics::_linkToStatic:
++  case vmIntrinsics::_linkToSpecial:
++  case vmIntrinsics::_linkToInterface:
++    {
++      // pop MemberName argument
++      Node* member_name = kit.argument(callee->arg_size() - 1);
++      if (member_name->Opcode() == Op_ConP) {
++        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
++        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
+ 
+-    // TODO new implementation goes here
++        // In lambda forms we erase signature types to avoid resolving issues
++        // involving class loaders.  When we optimize a method handle invoke
++        // to a direct call we must cast the receiver and arguments to its
++        // actual types.
++        ciSignature* signature = target->signature();
++        const int receiver_skip = target->is_static() ? 0 : 1;
++        // Cast receiver to its type.
++        if (!target->is_static()) {
++          Node* arg = kit.argument(0);
++          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
++          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
++          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
++            Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type));
++            kit.set_argument(0, cast_obj);
++          }
++        }
++        // Cast reference arguments to their types.
++        for (int i = 0; i < signature->count(); i++) {
++          ciType* t = signature->type_at(i);
++          if (t->is_klass()) {
++            Node* arg = kit.argument(receiver_skip + i);
 +            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
-+            const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
-+            if (!arg_type->higher_equal(sig_type)) {
-+              Node* cast_obj = gvn.transform(new (kit.C, 2) CheckCastPPNode(kit.control(), arg, sig_type));
-+              kit.set_argument(0, cast_obj);
-+            }
-+          }
-+          // Cast reference arguments to its type.
-+          for (int i = 0; i < signature->count(); i++) {
-+            ciType* t = signature->type_at(i);
-+            if (t->is_klass()) {
-+              Node* arg = kit.argument(receiver_skip + i);
-+              const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
-+              const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
-+              if (!arg_type->higher_equal(sig_type)) {
-+                Node* cast_obj = gvn.transform(new (kit.C, 2) CheckCastPPNode(kit.control(), arg, sig_type));
-+                kit.set_argument(receiver_skip + i, cast_obj);
-+              }
++            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
++            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
++              Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type));
++              kit.set_argument(receiver_skip + i, cast_obj);
 +            }
 +          }
 +        }
 +        const int vtable_index = methodOopDesc::invalid_vtable_index;
-+        CallGenerator* cg = Compile::current()->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS);
++        const bool call_is_virtual = target->is_abstract();  // FIXME workaround
++        CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS);
 +        if (cg != NULL && cg->is_inline())
 +          return cg;
-+        }
-+#endif
 +      }
++    }
++    break;
++
++  default:
++    fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
++    break;
    }
    return NULL;
  }
-@@ -731,6 +788,32 @@
-   ciMethodHandle* method_handle = call_site->get_target();
- 
-   // TODO new implementation goes here
-+  if (false /*AllowLambdaForms?*/) {
-+#if 0
-+    ciMethod* target = method_handle->get_vmtarget();
-+    // tty->print("target: "); target->print(); tty->cr();
-+    // target->print_codes();
-+    if (target != NULL) {
-+      GraphKit kit(jvms);
-+
-+      // Pass the method handle as receiver.
-+      const TypeOopPtr* method_handle_ptr = TypeOopPtr::make_from_constant(method_handle);
-+      Node* method_handle_node = kit.makecon(method_handle_ptr);
-+      kit.insert_argument(callee, 0, method_handle_node);
-+
-+      Compile *C = Compile::current();
-+      CallGenerator* cg = C->call_generator(target, -1, false, jvms, true, PROB_ALWAYS);
-+      if (cg != NULL && cg->is_inline()) {
-+        // Add a dependence for invalidation of the optimization.
-+        if (!call_site->is_constant_call_site()) {
-+          C->dependencies()->assert_call_site_target_value(call_site, method_handle);
-+        }
-+        return cg;
-+      }
-+      kit.remove_argument(callee, 0);  // inline failed; remove method handle receiver
-+    }
-+#endif
-+  }
- 
-   return NULL;
- }
-@@ -789,17 +872,20 @@
+ 
+-CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
+-  assert(callee->is_compiled_lambda_form(), "for_invokedynamic_call mismatch");
+-  // FIXME: It should be possible to link anything at this point.
+-  CallGenerator* cg = CallGenerator::for_invokedynamic_inline(jvms, caller, callee);
+-  if (cg != NULL)
+-    return cg;
+-  return CallGenerator::for_dynamic_call(callee);
+-}
+-
+-CallGenerator* CallGenerator::for_invokedynamic_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
+-  // Get the CallSite object.
+-  ciBytecodeStream str(caller);
+-  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
+-  ciCallSite* call_site = str.get_call_site();
+-  ciMethodHandle* method_handle = call_site->get_target();
+-
+-  // TODO new implementation goes here
+-
+-  return NULL;
+-}
+-
+ 
+ JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
+   GraphKit kit(jvms);
+@@ -789,17 +832,20 @@
      bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) );
    }
    IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
@@ -2338,7 +2948,7 @@
        assert(slow_jvms != NULL, "must be");
        kit.add_exception_states_from(slow_jvms);
        kit.set_map(slow_jvms->map());
-@@ -814,12 +900,12 @@
+@@ -814,12 +860,12 @@
      return kit.transfer_exceptions_into_jvms();
    }
  
@@ -2355,7 +2965,7 @@
      new_jvms = cg->generate(kit.sync_jvms());
    }
    kit.add_exception_states_from(new_jvms);
-@@ -836,27 +922,75 @@
+@@ -836,27 +882,75 @@
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
    }
@@ -2457,7 +3067,20 @@
  
   public:
    // Accessors
-@@ -145,13 +146,21 @@
+@@ -111,11 +112,8 @@
+   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
+   static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic
+ 
+-  static CallGenerator* for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee);
+-  static CallGenerator* for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee);
+-
++  static CallGenerator* for_method_handle_call(  JVMState* jvms, ciMethod* caller, ciMethod* callee);
+   static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee);
+-  static CallGenerator* for_invokedynamic_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee);
+ 
+   // How to generate a replace a direct call with an inline version
+   static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
+@@ -145,13 +143,21 @@
    // Registry for intrinsics:
    static CallGenerator* for_intrinsic(ciMethod* m);
    static void register_intrinsic(ciMethod* m, CallGenerator* cg);
@@ -2602,21 +3225,19 @@
      if (cg != NULL)  return cg;
    }
  
-@@ -117,17 +117,17 @@
+@@ -117,17 +117,12 @@
    // NOTE: This must happen before normal inlining logic below since
    // MethodHandle.invoke* are native methods which obviously don't
    // have bytecodes and so normal inlining fails.
 -  if (call_method->is_method_handle_intrinsic() ||  // FIXME: Split these out better.
 -      call_method->is_compiled_lambda_form()) {
-+  if (callee->is_method_handle_intrinsic() ||  // FIXME: Split these out better.
-+      callee->is_compiled_lambda_form()) {
-     if (bytecode != Bytecodes::_invokedynamic) {
+-    if (bytecode != Bytecodes::_invokedynamic) {
 -      return CallGenerator::for_method_handle_call(jvms, caller, call_method);
-+      return CallGenerator::for_method_handle_call(jvms, caller, callee);
-     } else {
+-    } else {
 -      return CallGenerator::for_invokedynamic_call(jvms, caller, call_method);
-+      return CallGenerator::for_invokedynamic_call(jvms, caller, callee);
-     }
+-    }
++  if (callee->is_method_handle_intrinsic()) {
++    return CallGenerator::for_method_handle_call(jvms, caller, callee);
    }
  
    // Do not inline strict fp into non-strict code, or the reverse
@@ -2625,7 +3246,7 @@
      allow_inline = false;
    }
  
-@@ -153,26 +153,26 @@
+@@ -153,26 +148,26 @@
        }
        WarmCallInfo scratch_ci;
        if (!UseOldInlining)
@@ -2658,7 +3279,7 @@
            return CallGenerator::for_warm_call(ci, cold_cg, cg);
          }
        }
-@@ -187,7 +187,7 @@
+@@ -187,7 +182,7 @@
            (profile.morphism() == 2 && UseBimorphicInlining)) {
          // receiver_method = profile.method();
          // Profiles do not suggest methods now.  Look it up in the major receiver.
@@ -2667,7 +3288,7 @@
                                                        profile.receiver(0));
        }
        if (receiver_method != NULL) {
-@@ -199,7 +199,7 @@
+@@ -199,7 +194,7 @@
            CallGenerator* next_hit_cg = NULL;
            ciMethod* next_receiver_method = NULL;
            if (profile.morphism() == 2 && UseBimorphicInlining) {
@@ -2676,7 +3297,7 @@
                                                                 profile.receiver(1));
              if (next_receiver_method != NULL) {
                next_hit_cg = this->call_generator(next_receiver_method,
-@@ -222,12 +222,12 @@
+@@ -222,12 +217,12 @@
               ) {
              // Generate uncommon trap for class check failure path
              // in case of monomorphic or bimorphic virtual call site.
@@ -2691,7 +3312,7 @@
            }
            if (miss_cg != NULL) {
              if (next_hit_cg != NULL) {
-@@ -250,11 +250,11 @@
+@@ -250,11 +245,11 @@
    // There was no special inlining tactic, or it bailed out.
    // Use a more generic tactic, like a simple call.
    if (call_is_virtual) {
@@ -2705,7 +3326,7 @@
    }
  }
  
-@@ -353,27 +353,27 @@
+@@ -353,33 +348,40 @@
  
    // Find target being called
    bool             will_link;
@@ -2717,9 +3338,8 @@
    ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  
 -  int nargs = dest_method->arg_size();
-+  int nargs = bc_callee->arg_size();
-   if (is_invokedynamic)  nargs -= 1;
- 
+-  if (is_invokedynamic)  nargs -= 1;
+-
    // uncommon-trap when callee is unloaded, uninitialized or will not link
    // bailout when too many arguments for register representation
 -  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
@@ -2735,11 +3355,27 @@
    }
    assert(holder_klass->is_loaded(), "");
 -  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc");
-+  assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");
++  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
    // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
    // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
    assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
-@@ -390,21 +390,21 @@
+   // Note:  In the absence of miranda methods, an abstract class K can perform
+   // an invokevirtual directly on an interface method I.m if K implements I.
+ 
++  const int nargs = bc_callee->arg_size();
++
++  // Push appendix argument (MethodType, CallSite, etc.), if one.
++  if (iter().has_appendix()) {
++    ciObject* appendix_arg = iter().get_appendix();
++    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
++    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
++    push(appendix_arg_node);
++  }
++
+   // ---------------------
+   // Does Class Hierarchy Analysis reveal only a single target of a v-call?
+   // Then we may inline or make a static call, but become dependent on there being only 1 target.
+@@ -390,21 +392,21 @@
    // Choose call strategy.
    bool call_is_virtual = is_virtual_or_interface;
    int vtable_index = methodOopDesc::invalid_vtable_index;
@@ -2766,7 +3402,7 @@
      }
    }
  
-@@ -414,22 +414,24 @@
+@@ -414,22 +416,24 @@
    bool try_inline = (C->do_inlining() || InlineAccessors);
  
    // ---------------------
@@ -2795,7 +3431,7 @@
  
    // Record first part of parsing work for this call
    parse_histogram()->record_change();
-@@ -445,8 +447,8 @@
+@@ -445,8 +449,8 @@
    // because exceptions don't return to the call site.)
    profile_call(receiver);
  
@@ -2806,7 +3442,7 @@
      // When inlining attempt fails (e.g., too many arguments),
      // it may contaminate the current compile state, making it
      // impossible to pull back and try again.  Once we call
-@@ -458,7 +460,7 @@
+@@ -458,7 +462,7 @@
      // the call site, perhaps because it did not match a pattern the
      // intrinsic was expecting to optimize. Should always be possible to
      // get a normal java call that may inline in that case
@@ -2815,7 +3451,7 @@
      if ((new_jvms = cg->generate(jvms)) == NULL) {
        guarantee(failing(), "call failed to generate:  calls should work");
        return;
-@@ -467,8 +469,8 @@
+@@ -467,8 +471,8 @@
  
    if (cg->is_inline()) {
      // Accumulate has_loops estimate
@@ -2826,7 +3462,7 @@
    }
  
    // Reset parser state from [new_]jvms, which now carries results of the call.
-@@ -490,20 +492,20 @@
+@@ -490,20 +494,20 @@
      }
  
      // Round double result after a call from strict to non-strict code
@@ -2854,15 +3490,44 @@
 diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
 --- a/src/share/vm/opto/graphKit.cpp
 +++ b/src/share/vm/opto/graphKit.cpp
-@@ -963,6 +963,7 @@
+@@ -963,9 +963,10 @@
    assert(call->jvms()->debug_start() == non_debug_edges, "");
    assert(call->jvms()->debug_end()   == call->req(), "");
    assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
 +//  tty->print("debug info: "); call->dump();
  }
  
- bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
-@@ -1373,6 +1374,29 @@
+-bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
++bool GraphKit::compute_stack_effects(int& inputs, int& depth, bool for_parse) {
+   Bytecodes::Code code = java_bc();
+   if (code == Bytecodes::_wide) {
+     code = method()->java_code_at_bci(bci() + 1);
+@@ -1032,12 +1033,21 @@
+       ciBytecodeStream iter(method());
+       iter.reset_to_bci(bci());
+       iter.next();
+-      ciMethod* method = iter.get_method(ignore);
++      ciMethod* callee = iter.get_method(ignore);
+       // (Do not use ciMethod::arg_size(), because
+       // it might be an unloaded method, which doesn't
+       // know whether it is static or not.)
+-      inputs = method->invoke_arg_size(code);
+-      int size = method->return_type()->size();
++      if (for_parse) {
++        // Case 1: When called from parse we are *before* the invoke (in the
++        //         caller) and need to adjust the inputs by an appendix
++        //         argument that will be pushed implicitly.
++        inputs = callee->invoke_arg_size(code) - (iter.has_appendix() ? 1 : 0);
++      } else {
++        // Case 2: Here we are *after* the invoke (in the callee) and need to
++        //         remove any appendix arguments that were popped.
++        inputs = callee->invoke_arg_size(code) - (callee->has_member_appendix() ? 1 : 0);
++      }
++      int size = callee->return_type()->size();
+       depth = size - inputs;
+     }
+     break;
+@@ -1373,6 +1383,29 @@
  }
  
  
@@ -2903,6 +3568,15 @@
    void set_bci(int bci)               { _bci = bci; }
  
    // Make sure jvms has current bci & sp.
+@@ -285,7 +286,7 @@
+   // How many stack inputs does the current BC consume?
+   // And, how does the stack change after the bytecode?
+   // Returns false if unknown.
+-  bool compute_stack_effects(int& inputs, int& depth);
++  bool compute_stack_effects(int& inputs, int& depth, bool for_parse = false);
+ 
+   // Add a fixed offset to a pointer
+   Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
 @@ -370,9 +371,9 @@
    // Replace all occurrences of one node by another.
    void replace_in_map(Node* old, Node* neww);
@@ -2967,6 +3641,41 @@
      assert( !VerifyHashTableKeys || _hash_lock == 0,
              "remove node from hash table before modifying it");
      Node** p = &_in[i];    // cache this._in, across the del_out call
+diff --git a/src/share/vm/opto/parse.hpp b/src/share/vm/opto/parse.hpp
+--- a/src/share/vm/opto/parse.hpp
++++ b/src/share/vm/opto/parse.hpp
+@@ -84,7 +84,7 @@
+   static const char* check_can_parse(ciMethod* callee);
+ 
+   static InlineTree* build_inline_tree_root();
+-  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
++  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);
+ 
+   // For temporary (stack-allocated, stateless) ilts:
+   InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
+diff --git a/src/share/vm/opto/parse1.cpp b/src/share/vm/opto/parse1.cpp
+--- a/src/share/vm/opto/parse1.cpp
++++ b/src/share/vm/opto/parse1.cpp
+@@ -398,7 +398,7 @@
+   if (PrintCompilation || PrintOpto) {
+     // Make sure I have an inline tree, so I can print messages about it.
+     JVMState* ilt_caller = is_osr_parse() ? caller->caller() : caller;
+-    InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method, true);
++    InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method);
+   }
+   _max_switch_depth = 0;
+   _est_switch_depth = 0;
+@@ -1398,8 +1398,8 @@
+ #ifdef ASSERT
+     int pre_bc_sp = sp();
+     int inputs, depth;
+-    bool have_se = !stopped() && compute_stack_effects(inputs, depth);
+-    assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC");
++    bool have_se = !stopped() && compute_stack_effects(inputs, depth, /*for_parse*/ true);
++    assert(!have_se || pre_bc_sp >= inputs, err_msg("have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs));
+ #endif //ASSERT
+ 
+     do_one_bytecode();
 diff --git a/src/share/vm/opto/phaseX.hpp b/src/share/vm/opto/phaseX.hpp
 --- a/src/share/vm/opto/phaseX.hpp
 +++ b/src/share/vm/opto/phaseX.hpp
@@ -2978,6 +3687,30 @@
      const Type* t = _types.fast_lookup(n->_idx);
      assert(t != NULL, "must set before get");
      return t;
+diff --git a/src/share/vm/prims/methodHandles.hpp b/src/share/vm/prims/methodHandles.hpp
+--- a/src/share/vm/prims/methodHandles.hpp
++++ b/src/share/vm/prims/methodHandles.hpp
+@@ -98,11 +98,19 @@
+             iid <= vmIntrinsics::LAST_MH_SIG_POLY);
+   }
+ 
+-  static bool has_member_arg(vmIntrinsics::ID iid) {
++  static bool has_member_appendix(vmIntrinsics::ID iid) {
+     assert(is_signature_polymorphic(iid), "");
+     return (iid >= vmIntrinsics::_linkToVirtual &&
+             iid <= vmIntrinsics::_linkToInterface);
+   }
++  static bool has_member_arg(Symbol* klass, Symbol* name) {
++    if ((klass == vmSymbols::java_lang_invoke_MethodHandle()) &&
++        is_signature_polymorphic_name(name)) {
++      vmIntrinsics::ID iid = signature_polymorphic_name_id(name);
++      return has_member_appendix(iid);
++    }
++    return false;
++  }
+ 
+   static Symbol* signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid);
+   static vmIntrinsics::ID signature_polymorphic_name_id(Symbol* name);
 diff --git a/src/share/vm/runtime/fieldDescriptor.hpp b/src/share/vm/runtime/fieldDescriptor.hpp
 --- a/src/share/vm/runtime/fieldDescriptor.hpp
 +++ b/src/share/vm/runtime/fieldDescriptor.hpp
@@ -3054,6 +3787,54 @@
      }
    }
  #endif
+@@ -1152,6 +1159,7 @@
+   methodHandle callee_method = call_info.selected_method();
+ 
+   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
++         (!is_virtual && invoke_code == Bytecodes::_invokehandle) ||
+          ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
+ 
+ #ifndef PRODUCT
+@@ -2854,7 +2862,7 @@
+   assert(false, "Should have found handler");
+ }
+ 
+-void AdapterHandlerEntry::print_on(outputStream* st) {
++void AdapterHandlerEntry::print_on(outputStream* st) const {
+   st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
+                (intptr_t) this, fingerprint()->as_string(),
+                get_i2c_entry(), get_c2i_entry(), get_c2i_unverified_entry());
+diff --git a/src/share/vm/runtime/sharedRuntime.hpp b/src/share/vm/runtime/sharedRuntime.hpp
+--- a/src/share/vm/runtime/sharedRuntime.hpp
++++ b/src/share/vm/runtime/sharedRuntime.hpp
+@@ -618,14 +618,14 @@
+   AdapterHandlerEntry();
+ 
+  public:
+-  address get_i2c_entry()            { return _i2c_entry; }
+-  address get_c2i_entry()            { return _c2i_entry; }
+-  address get_c2i_unverified_entry() { return _c2i_unverified_entry; }
++  address get_i2c_entry()            const { return _i2c_entry; }
++  address get_c2i_entry()            const { return _c2i_entry; }
++  address get_c2i_unverified_entry() const { return _c2i_unverified_entry; }
+ 
+   address base_address();
+   void relocate(address new_base);
+ 
+-  AdapterFingerPrint* fingerprint()  { return _fingerprint; }
++  AdapterFingerPrint* fingerprint() const { return _fingerprint; }
+ 
+   AdapterHandlerEntry* next() {
+     return (AdapterHandlerEntry*)BasicHashtableEntry::next();
+@@ -637,7 +637,7 @@
+   bool compare_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
+ #endif
+ 
+-  void print_on(outputStream* st);
++  virtual void print_on(outputStream* st) const;
+ };
+ 
+ class AdapterHandlerLibrary: public AllStatic {
 diff --git a/src/share/vm/runtime/vframeArray.cpp b/src/share/vm/runtime/vframeArray.cpp
 --- a/src/share/vm/runtime/vframeArray.cpp
 +++ b/src/share/vm/runtime/vframeArray.cpp
@@ -3075,7 +3856,7 @@
    // Get the youngest frame we will unpack (last to be unpacked)
    frame me = unpack_frame.sender(&map);
    int index;
-@@ -520,29 +522,44 @@
+@@ -520,29 +522,37 @@
      me = me.sender(&map);
    }
  
@@ -3103,20 +3884,13 @@
 +    } else {
 +      methodHandle caller = elem->method();
 +      methodHandle callee = element(index - 1)->method();
-+      callee_parameters = callee->size_of_parameters();
++      Bytecode_invoke inv(caller, elem->bci());
++      // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
++      // NOTE:  Use machinery here that avoids resolving of any kind.
++      const bool has_member_appendix =
++          !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
++      callee_parameters = callee->size_of_parameters() + (has_member_appendix ? 1 : 0);
 +      callee_locals     = callee->max_locals();
-+      Bytecodes::Code bc = caller->java_code_at(elem->bci());
-+      if (Bytecodes::is_invoke(bc)) {
-+        Bytecode_invoke inv(caller, elem->bci());
-+        if (inv.is_invokedynamic() ||
-+            inv.is_invokehandle()) {
-+          // MH invokes might have been optimized to direct calls; get
-+          // the original callee and its data.
-+          methodHandle orig_callee = inv.static_target(CATCH);
-+          callee_parameters = orig_callee->size_of_parameters();
-+          callee_locals     = orig_callee->max_locals();
-+        }
-+      }
 +    }
 +    elem->unpack_on_stack(caller_actual_parameters,
 +                          callee_parameters,
@@ -3145,7 +3919,7 @@
    declare_constant(JVM_ACC_HAS_MONITOR_BYTECODES)                         \
    declare_constant(JVM_ACC_HAS_LOOPS)                                     \
 -  declare_constant(JVM_ACC_LOOPS_FLAG_INIT)                               \
-+  declare_constant(JVM_ACC_HAS_INVOKEDYNAMICS)                            \
++  declare_constant(JVM_ACC_UNUSED)                                        \
    declare_constant(JVM_ACC_QUEUED)                                        \
    declare_constant(JVM_ACC_NOT_OSR_COMPILABLE)                            \
    declare_constant(JVM_ACC_HAS_LINE_NUMBER_TABLE)                         \
@@ -3157,25 +3931,36 @@
    JVM_ACC_HAS_MONITOR_BYTECODES   = 0x20000000,     // Method contains monitorenter/monitorexit bytecodes
    JVM_ACC_HAS_LOOPS               = 0x40000000,     // Method has loops
 -  JVM_ACC_LOOPS_FLAG_INIT         = (int)0x80000000,// The loop flag has been initialized
-+  JVM_ACC_HAS_INVOKEDYNAMICS      = (int)0x80000000,// Method has invokedynamic bytecodes
++  JVM_ACC_UNUSED                  = (int)0x80000000,// currently unused
    JVM_ACC_QUEUED                  = 0x01000000,     // Queued for compilation
    JVM_ACC_NOT_C2_COMPILABLE       = 0x02000000,
    JVM_ACC_NOT_C1_COMPILABLE       = 0x04000000,
-@@ -118,7 +118,7 @@
+@@ -118,7 +118,6 @@
    bool is_monitor_matching     () const { return (_flags & JVM_ACC_MONITOR_MATCH          ) != 0; }
    bool has_monitor_bytecodes   () const { return (_flags & JVM_ACC_HAS_MONITOR_BYTECODES  ) != 0; }
    bool has_loops               () const { return (_flags & JVM_ACC_HAS_LOOPS              ) != 0; }
 -  bool loops_flag_init         () const { return (_flags & JVM_ACC_LOOPS_FLAG_INIT        ) != 0; }
-+  bool has_invokedynamics      () const { return (_flags & JVM_ACC_HAS_INVOKEDYNAMICS     ) != 0; }
    bool queued_for_compilation  () const { return (_flags & JVM_ACC_QUEUED                 ) != 0; }
    bool is_not_c1_compilable () const    { return (_flags & JVM_ACC_NOT_C1_COMPILABLE      ) != 0; }
    bool is_not_c2_compilable () const    { return (_flags & JVM_ACC_NOT_C2_COMPILABLE      ) != 0; }
-@@ -181,7 +181,7 @@
+@@ -181,7 +180,6 @@
    void set_monitor_matching()          { atomic_set_bits(JVM_ACC_MONITOR_MATCH);           }
    void set_has_monitor_bytecodes()     { atomic_set_bits(JVM_ACC_HAS_MONITOR_BYTECODES);   }
    void set_has_loops()                 { atomic_set_bits(JVM_ACC_HAS_LOOPS);               }
 -  void set_loops_flag_init()           { atomic_set_bits(JVM_ACC_LOOPS_FLAG_INIT);         }
-+  void set_has_invokedynamics()        { atomic_set_bits(JVM_ACC_HAS_INVOKEDYNAMICS);      }
    void set_not_c1_compilable()         { atomic_set_bits(JVM_ACC_NOT_C1_COMPILABLE);       }
    void set_not_c2_compilable()         { atomic_set_bits(JVM_ACC_NOT_C2_COMPILABLE);       }
    void set_not_osr_compilable()        { atomic_set_bits(JVM_ACC_NOT_OSR_COMPILABLE);      }
+diff --git a/src/share/vm/utilities/exceptions.hpp b/src/share/vm/utilities/exceptions.hpp
+--- a/src/share/vm/utilities/exceptions.hpp
++++ b/src/share/vm/utilities/exceptions.hpp
+@@ -241,6 +241,9 @@
+ #define THROW_ARG_(name, signature, args, result) \
+   { Exceptions::_throw_args(THREAD_AND_LOCATION, name, signature, args); return result; }
+ 
++#define THROW_MSG_CAUSE(name, message, cause)   \
++  { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return; }
++
+ #define THROW_MSG_CAUSE_(name, message, cause, result)   \
+   { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return result; }
+