changeset 53894:7e268f863ff0

8214338: Move IC stub refilling out of IC cache transitions
Reviewed-by: dlong, rbackman
author eosterlund
date Wed, 05 Dec 2018 15:57:26 +0100
parents 5f3b9b633731
children dad45affbdaa
files src/hotspot/share/code/codeBehaviours.cpp src/hotspot/share/code/compiledIC.cpp src/hotspot/share/code/compiledIC.hpp src/hotspot/share/code/compiledMethod.cpp src/hotspot/share/code/compiledMethod.hpp src/hotspot/share/code/icBuffer.cpp src/hotspot/share/code/icBuffer.hpp src/hotspot/share/code/nmethod.cpp src/hotspot/share/code/relocInfo.cpp src/hotspot/share/code/relocInfo.hpp src/hotspot/share/code/stubs.cpp src/hotspot/share/code/vtableStubs.cpp src/hotspot/share/runtime/mutexLocker.cpp src/hotspot/share/runtime/safepoint.cpp src/hotspot/share/runtime/sharedRuntime.cpp src/hotspot/share/runtime/sharedRuntime.hpp src/hotspot/share/runtime/sweeper.cpp
diffstat 17 files changed, 393 insertions(+), 282 deletions(-)
--- a/src/hotspot/share/code/codeBehaviours.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/codeBehaviours.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -30,10 +30,10 @@
 CompiledICProtectionBehaviour* CompiledICProtectionBehaviour::_current = NULL;
 
 bool DefaultICProtectionBehaviour::lock(CompiledMethod* method) {
-  if (CompiledIC_lock->owned_by_self()) {
+  if (is_safe(method)) {
     return false;
   }
-  CompiledIC_lock->lock();
+  CompiledIC_lock->lock_without_safepoint_check();
   return true;
 }
 
--- a/src/hotspot/share/code/compiledIC.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/compiledIC.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -237,7 +237,13 @@
   initialize_from_iter(iter);
 }
 
-bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
+// This function may fail for two reasons: either due to running out of vtable
+// stubs, or due to running out of IC stubs in an attempted transition to a
+// transitional state. The needs_ic_stub_refill value will be set if the failure
+// was due to running out of IC stubs, in which case the caller will refill IC
+// stubs and retry.
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
+                                    bool& needs_ic_stub_refill, TRAPS) {
   assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
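
The comment above defines a retry contract that recurs throughout this changeset: attempt the IC transition while holding the CompiledICLocker, and if the failure was IC stub exhaustion, release the locker, refill the buffer, and retry. A minimal caller-side sketch of that contract (caller_nm, ic, call_info and bc are hypothetical; the real loop is in SharedRuntime::handle_ic_miss_helper further down):

    for (;;) {
      bool needs_ic_stub_refill = false;
      {
        CompiledICLocker ml(caller_nm);  // holds IC protection; no safepoints allowed
        if (ic->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_(methodHandle()))) {
          break;                         // transition succeeded
        }
        if (!needs_ic_stub_refill) {
          break;                         // out of vtable stubs: give up, the call re-resolves later
        }
      }                                  // locker released before safepointing
      InlineCacheBuffer::refill_ic_stubs();
    }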
@@ -259,7 +265,11 @@
     CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                     call_info->resolved_klass(), false);
     holder->claim();
-    InlineCacheBuffer::create_transition_stub(this, holder, entry);
+    if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
+      delete holder;
+      needs_ic_stub_refill = true;
+      return false;
+    }
   } else {
     assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
     // Can be different than selected_method->vtable_index(), due to package-private etc.
@@ -269,7 +279,10 @@
     if (entry == NULL) {
       return false;
     }
-    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
+    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
+      needs_ic_stub_refill = true;
+      return false;
+    }
   }
 
   if (TraceICs) {
@@ -350,7 +363,7 @@
   return is_call_to_interpreted;
 }
 
-void CompiledIC::set_to_clean(bool in_use) {
+bool CompiledIC::set_to_clean(bool in_use) {
   assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
   if (TraceInlineCacheClearing || TraceICs) {
     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
@@ -373,7 +386,9 @@
     }
   } else {
     // Unsafe transition - create stub.
-    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
+    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
+      return false;
+    }
   }
   // We can't check this anymore. With lazy deopt we could have already
   // cleaned this IC entry before we even return. This is possible if
@@ -382,6 +397,7 @@
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_clean(), "sanity check");
+  return true;
 }
 
 bool CompiledIC::is_clean() const {
@@ -393,7 +409,7 @@
   return is_clean;
 }
 
-void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
+bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
   assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
   // Updating a cache to the wrong entry can cause bugs that are very hard
   // to track down - if cache entry gets invalid - we just clean it. In
@@ -430,7 +446,11 @@
       }
     } else {
       // Call via method-klass-holder
-      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
+      CompiledICHolder* holder = info.claim_cached_icholder();
+      if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
+        delete holder;
+        return false;
+      }
       if (TraceICs) {
          ResourceMark rm(thread);
          tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
@@ -450,7 +470,9 @@
                 (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
 
     if (!safe) {
-      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
+      if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
+        return false;
+      }
     } else {
       if (is_optimized()) {
         set_ic_destination(info.entry());
@@ -475,6 +497,7 @@
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
+  return true;
 }
 
 
@@ -575,7 +598,7 @@
 
 // ----------------------------------------------------------------------------
 
-void CompiledStaticCall::set_to_clean(bool in_use) {
+bool CompiledStaticCall::set_to_clean(bool in_use) {
   // in_use is unused but needed to match template function in CompiledMethod
   assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
   // Reset call site
@@ -585,6 +608,7 @@
   // Do not reset stub here:  It is too expensive to call find_stub.
   // Instead, rely on caller (nmethod::clear_inline_caches) to clear
   // both the call and its stub.
+  return true;
 }
 
 bool CompiledStaticCall::is_clean() const {
--- a/src/hotspot/share/code/compiledIC.hpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/compiledIC.hpp	Wed Dec 05 15:57:26 2018 +0100
@@ -28,6 +28,7 @@
 #include "code/nativeInst.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "runtime/safepointVerifiers.hpp"
 
 //-----------------------------------------------------------------------------
 // The CompiledIC represents a compiled inline cache.
@@ -67,6 +68,7 @@
   CompiledMethod* _method;
   CompiledICProtectionBehaviour* _behaviour;
   bool _locked;
+  NoSafepointVerifier _nsv;
 
 public:
   CompiledICLocker(CompiledMethod* method);
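
Embedding a NoSafepointVerifier in the locker enforces the new protocol structurally in debug builds: no code in the locker's scope may reach a safepoint, which is precisely why IC stub refilling (a safepointing VM operation) must run with the locker released. A usage sketch under that assumption (cm and ic are hypothetical):

    {
      CompiledICLocker ml(cm);   // locks and activates the NoSafepointVerifier
      ic->set_to_clean();        // may return false, but will never safepoint
      // calling InlineCacheBuffer::refill_ic_stubs() here would trip the verifier
    }
    InlineCacheBuffer::refill_ic_stubs();  // legal once the locker is destroyed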
@@ -272,13 +274,13 @@
   //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
   //
-  void set_to_clean(bool in_use = true);
-  void set_to_monomorphic(CompiledICInfo& info);
+  bool set_to_clean(bool in_use = true);
+  bool set_to_monomorphic(CompiledICInfo& info);
   void clear_ic_stub();
 
   // Returns true if successful and false otherwise. The call can fail if memory
-  // allocation in the code cache fails.
-  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+  // allocation in the code cache fails, or ic stub refill is required.
+  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, TRAPS);
 
   static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass,
                                         bool is_optimized, bool static_bound, bool caller_is_nmethod,
@@ -372,7 +374,7 @@
   virtual address destination() const = 0;
 
   // Clean static call (will force resolving on next use)
-  void set_to_clean(bool in_use = true);
+  bool set_to_clean(bool in_use = true);
 
   // Set state. The entry must be the same, as computed by compute_entry.
   // Computation and setting is split up, since the actions are separate during
--- a/src/hotspot/share/code/compiledMethod.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/compiledMethod.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -27,6 +27,7 @@
 #include "code/compiledMethod.inline.hpp"
 #include "code/scopeDesc.hpp"
 #include "code/codeCache.hpp"
+#include "code/icBuffer.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/gcBehaviours.hpp"
 #include "interpreter/bytecode.inline.hpp"
@@ -430,27 +431,30 @@
 #endif // ASSERT
 
 
-void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
+bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
+  if (ic->is_clean()) {
+    return true;
+  }
   if (ic->is_icholder_call()) {
     // The only exception is CompiledICHolder metadata which may
     // yet be marked below. (We check this further below).
     CompiledICHolder* cichk_metdata = ic->cached_icholder();
 
     if (cichk_metdata->is_loader_alive()) {
-      return;
+      return true;
     }
   } else {
     Metadata* ic_metdata = ic->cached_metadata();
     if (ic_metdata != NULL) {
       if (ic_metdata->is_klass()) {
         if (((Klass*)ic_metdata)->is_loader_alive()) {
-          return;
+          return true;
         }
       } else if (ic_metdata->is_method()) {
         Method* method = (Method*)ic_metdata;
         assert(!method->is_old(), "old method should have been cleaned");
         if (method->method_holder()->is_loader_alive()) {
-          return;
+          return true;
         }
       } else {
         ShouldNotReachHere();
@@ -458,7 +462,7 @@
     }
   }
 
-  ic->set_to_clean();
+  return ic->set_to_clean();
 }
 
 // static_stub_Relocations may have dangling references to
@@ -496,7 +500,7 @@
 
 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
 template <class CompiledICorStaticCall>
-static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                          bool clean_all) {
   // Ok, to lookup references to zombies here
   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
@@ -504,20 +508,23 @@
   if (nm != NULL) {
     // Clean inline caches pointing to both zombie and not_entrant methods
     if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
-      ic->set_to_clean(from->is_alive());
+      if (!ic->set_to_clean(from->is_alive())) {
+        return false;
+      }
       assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
     }
   }
+  return true;
 }
 
-static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                          bool clean_all) {
-  clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
 }
 
-static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                          bool clean_all) {
-  clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
 }
 
 // Cleans caches in nmethods that point to either classes that are unloaded
@@ -527,7 +534,7 @@
 // nmethods are unloaded.  Return postponed=true in the parallel case for
 // inline caches found that point to nmethods that are not yet visited during
 // the do_unloading walk.
-void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
+bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
   ResourceMark rm;
 
   // Exception cache only needs to be called if unloading occurred
@@ -535,18 +542,32 @@
     clean_exception_cache();
   }
 
-  cleanup_inline_caches_impl(unloading_occurred, false);
+  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
+    return false;
+  }
 
   // All static stubs need to be cleaned.
   clean_ic_stubs();
 
   // Check that the metadata embedded in the nmethod is alive
   DEBUG_ONLY(metadata_do(check_class));
+  return true;
+}
+
+void CompiledMethod::cleanup_inline_caches(bool clean_all) {
+  for (;;) {
+    { CompiledICLocker ic_locker(this);
+      if (cleanup_inline_caches_impl(false, clean_all)) {
+        return;
+      }
+    }
+    InlineCacheBuffer::refill_ic_stubs();
+  }
 }
 
 // Called to clean up after class unloading for live nmethods and from the sweeper
 // for all methods.
-void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
+bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
   ResourceMark rm;
 
@@ -561,30 +582,34 @@
       if (unloading_occurred) {
         // If class unloading occurred we first clear ICs where the cached metadata
         // is referring to an unloaded klass or method.
-        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
+        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
+          return false;
+        }
       }
 
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
+      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
+        return false;
+      }
       break;
 
     case relocInfo::opt_virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
+      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
+        return false;
+      }
       break;
 
     case relocInfo::static_call_type:
-      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all);
+      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
+        return false;
+      }
       break;
 
-    case relocInfo::oop_type:
-      break;
-
-    case relocInfo::metadata_type:
-      break; // nothing to do.
-
     default:
       break;
     }
   }
+
+  return true;
 }
 
 // Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun) was found
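
The public cleanup_inline_caches() wrapper above now owns the whole lock/refill/retry cycle, which is why the sweeper hunks at the end of this patch simply drop their explicit CompiledICLocker. A hypothetical sweeper-style call site after this change:

    // cm is a hypothetical CompiledMethod*; no CompiledICLocker at the call site.
    cm->cleanup_inline_caches(false);  // takes the locker internally and, on IC stub
                                       // exhaustion, refills the buffer and retries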
--- a/src/hotspot/share/code/compiledMethod.hpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/compiledMethod.hpp	Wed Dec 05 15:57:26 2018 +0100
@@ -352,12 +352,11 @@
 
   // Inline cache support for class unloading and nmethod unloading
  private:
-  void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
+  bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
+
  public:
-  void cleanup_inline_caches(bool clean_all) {
-    // Serial version used by sweeper and whitebox test
-    cleanup_inline_caches_impl(false, clean_all);
-  }
+  // Serial version used by sweeper and whitebox test
+  void cleanup_inline_caches(bool clean_all);
 
   virtual void clear_inline_caches();
   void clear_ic_stubs();
@@ -390,7 +389,7 @@
   address oops_reloc_begin() const;
 
  private:
-  void static clean_ic_if_metadata_is_dead(CompiledIC *ic);
+  bool static clean_ic_if_metadata_is_dead(CompiledIC *ic);
 
   void clean_ic_stubs();
 
@@ -400,7 +399,7 @@
 
   virtual bool is_unloading() = 0;
 
-  void unload_nmethod_caches(bool class_unloading_occurred);
+  bool unload_nmethod_caches(bool class_unloading_occurred);
   virtual void do_unloading(bool unloading_occurred) { }
 
 private:
--- a/src/hotspot/share/code/icBuffer.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/icBuffer.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -42,10 +42,10 @@
 DEF_STUB_INTERFACE(ICStub);
 
 StubQueue* InlineCacheBuffer::_buffer    = NULL;
-ICStub*    InlineCacheBuffer::_next_stub = NULL;
 
 CompiledICHolder* InlineCacheBuffer::_pending_released = NULL;
 int InlineCacheBuffer::_pending_count = 0;
+DEBUG_ONLY(volatile int InlineCacheBuffer::_needs_refill = 0;)
 
 void ICStub::finalize() {
   if (!is_empty()) {
@@ -103,52 +103,45 @@
 //-----------------------------------------------------------------------------------------------
 // Implementation of InlineCacheBuffer
 
-void InlineCacheBuffer::init_next_stub() {
-  ICStub* ic_stub = (ICStub*)buffer()->request_committed (ic_stub_code_size());
-  assert (ic_stub != NULL, "no room for a single stub");
-  set_next_stub(ic_stub);
-}
 
 void InlineCacheBuffer::initialize() {
   if (_buffer != NULL) return; // already initialized
   _buffer = new StubQueue(new ICStubInterface, 10*K, InlineCacheBuffer_lock, "InlineCacheBuffer");
   assert (_buffer != NULL, "cannot allocate InlineCacheBuffer");
-  init_next_stub();
 }
 
 
 ICStub* InlineCacheBuffer::new_ic_stub() {
-  while (true) {
-    ICStub* ic_stub = (ICStub*)buffer()->request_committed(ic_stub_code_size());
-    if (ic_stub != NULL) {
-      return ic_stub;
-    }
-    // we ran out of inline cache buffer space; must enter safepoint.
-    // We do this by forcing a safepoint
-    EXCEPTION_MARK;
-
-    VM_ICBufferFull ibf;
-    VMThread::execute(&ibf);
-    // We could potential get an async. exception at this point.
-    // In that case we will rethrow it to ourselvs.
-    if (HAS_PENDING_EXCEPTION) {
-      oop exception = PENDING_EXCEPTION;
-      CLEAR_PENDING_EXCEPTION;
-      Thread::send_async_exception(JavaThread::current()->threadObj(), exception);
-    }
-  }
-  ShouldNotReachHere();
-  return NULL;
+  return (ICStub*)buffer()->request_committed(ic_stub_code_size());
 }
 
 
+void InlineCacheBuffer::refill_ic_stubs() {
+  DEBUG_ONLY(Atomic::store(0, &_needs_refill));
+  // We ran out of inline cache buffer space; we must enter a safepoint to
+  // refill it, so we force one here.
+  EXCEPTION_MARK;
+
+  VM_ICBufferFull ibf;
+  VMThread::execute(&ibf);
+  // We could potentially get an async exception at this point.
+  // In that case we will rethrow it to ourselves.
+  if (HAS_PENDING_EXCEPTION) {
+    oop exception = PENDING_EXCEPTION;
+    CLEAR_PENDING_EXCEPTION;
+    Thread::send_async_exception(JavaThread::current()->threadObj(), exception);
+  }
+}
+
+
 void InlineCacheBuffer::update_inline_caches() {
-  if (buffer()->number_of_stubs() > 1) {
+  assert(_needs_refill == 0,
+         "Forgot to handle a failed IC transition requiring IC stubs");
+  if (buffer()->number_of_stubs() > 0) {
     if (TraceICBuffer) {
       tty->print_cr("[updating inline caches with %d stubs]", buffer()->number_of_stubs());
     }
     buffer()->remove_all();
-    init_next_stub();
   }
   release_pending_icholders();
 }
@@ -160,7 +153,7 @@
 
 
 bool InlineCacheBuffer::is_empty() {
-  return buffer()->number_of_stubs() == 1;    // always has sentinel
+  return buffer()->number_of_stubs() == 0;
 }
 
 
@@ -169,8 +162,7 @@
 }
 
 
-void InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_value, address entry) {
-  MutexLockerEx ml(CompiledIC_lock->owned_by_self() ? NULL : CompiledIC_lock);
+bool InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_value, address entry) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be called during a safepoint");
   assert(CompiledICLocker::is_safe(ic->instruction_address()), "mt unsafe call");
   if (TraceICBuffer) {
@@ -178,20 +170,24 @@
                   p2i(ic->instruction_address()), p2i(entry), p2i(cached_value));
   }
 
+  // allocate and initialize a new "out-of-line" inline cache
+  ICStub* ic_stub = new_ic_stub();
+  if (ic_stub == NULL) {
+    DEBUG_ONLY(Atomic::inc(&_needs_refill));
+    return false;
+  }
+
   // If a transition stub is already associated with the inline cache, then we remove the association.
   if (ic->is_in_transition_state()) {
     ICStub* old_stub = ICStub_from_destination_address(ic->stub_address());
     old_stub->clear();
   }
 
-  // allocate and initialize new "out-of-line" inline-cache
-  ICStub* ic_stub = get_next_stub();
   ic_stub->set_stub(ic, cached_value, entry);
 
   // Update inline cache in nmethod to point to new "out-of-line" allocated inline cache
   ic->set_ic_destination(ic_stub);
-
-  set_next_stub(new_ic_stub()); // can cause safepoint synchronization
+  return true;
 }
 
 
@@ -225,9 +221,7 @@
 // not safe to free them until then since they might be visible to
 // another thread.
 void InlineCacheBuffer::queue_for_release(CompiledICHolder* icholder) {
-  MutexLockerEx mex1((CompiledIC_lock->owned_by_self() ||
-                      SafepointSynchronize::is_at_safepoint()) ? NULL : CompiledIC_lock);
-  MutexLockerEx mex2(InlineCacheBuffer_lock);
+  MutexLockerEx mex(InlineCacheBuffer_lock, Mutex::_no_safepoint_check_flag);
   icholder->set_next(_pending_released);
   _pending_released = icholder;
   _pending_count++;
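
The debug-only _needs_refill counter gives the new contract teeth: every failed create_transition_stub() increments it, refill_ic_stubs() resets it, and update_inline_caches() asserts it is zero at the next safepoint. A sketch of the kind of bug that assert is designed to catch (cm and ic are hypothetical):

    {
      CompiledICLocker ml(cm);
      ic->set_to_clean();  // stubs exhausted: returns false, bumps _needs_refill
      // BUG: return value ignored; no InlineCacheBuffer::refill_ic_stubs() follows
    }
    // At the next safepoint, update_inline_caches() asserts:
    // "Forgot to handle a failed IC transition requiring IC stubs"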
--- a/src/hotspot/share/code/icBuffer.hpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/icBuffer.hpp	Wed Dec 05 15:57:26 2018 +0100
@@ -30,6 +30,7 @@
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/align.hpp"
+#include "utilities/macros.hpp"
 
 //
 // For CompiledIC's:
@@ -100,20 +101,16 @@
   static int ic_stub_code_size();
 
   static StubQueue* _buffer;
-  static ICStub*    _next_stub;
 
   static CompiledICHolder* _pending_released;
   static int _pending_count;
 
+  DEBUG_ONLY(static volatile int _needs_refill;)
+
   static StubQueue* buffer()                         { return _buffer;         }
-  static void       set_next_stub(ICStub* next_stub) { _next_stub = next_stub; }
-  static ICStub*    get_next_stub()                  { return _next_stub;      }
-
-  static void       init_next_stub();
 
   static ICStub* new_ic_stub();
 
-
   // Machine-dependent implementation of ICBuffer
   static void    assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point);
   static address ic_buffer_entry_point  (address code_begin);
@@ -129,6 +126,7 @@
 
     // removes the ICStubs after backpatching
   static void update_inline_caches();
+  static void refill_ic_stubs();
 
   // for debugging
   static bool is_empty();
@@ -138,7 +136,7 @@
   static int pending_icholder_count() { return _pending_count; }
 
   // New interface
-  static void    create_transition_stub(CompiledIC *ic, void* cached_value, address entry);
+  static bool    create_transition_stub(CompiledIC *ic, void* cached_value, address entry);
   static address ic_destination_for(CompiledIC *ic);
   static void*   cached_value_for(CompiledIC *ic);
 };
--- a/src/hotspot/share/code/nmethod.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/nmethod.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -1650,7 +1650,8 @@
     }
 #endif
 
-    unload_nmethod_caches(unloading_occurred);
+    guarantee(unload_nmethod_caches(unloading_occurred),
+              "Should not need transition stubs");
   }
 }
 
--- a/src/hotspot/share/code/relocInfo.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/relocInfo.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -644,12 +644,12 @@
   return (Method*)m;
 }
 
-void virtual_call_Relocation::clear_inline_cache() {
+bool virtual_call_Relocation::clear_inline_cache() {
   // No stubs for ICs
   // Clean IC
   ResourceMark rm;
   CompiledIC* icache = CompiledIC_at(this);
-  icache->set_to_clean();
+  return icache->set_to_clean();
 }
 
 
@@ -672,15 +672,20 @@
   return (Method*)m;
 }
 
-void opt_virtual_call_Relocation::clear_inline_cache() {
+template<typename CompiledICorStaticCall>
+static bool set_to_clean_no_ic_refill(CompiledICorStaticCall* ic) {
+  guarantee(ic->set_to_clean(), "Should not need transition stubs");
+  return true;
+}
+
+bool opt_virtual_call_Relocation::clear_inline_cache() {
   // No stubs for ICs
   // Clean IC
   ResourceMark rm;
   CompiledIC* icache = CompiledIC_at(this);
-  icache->set_to_clean();
+  return set_to_clean_no_ic_refill(icache);
 }
 
-
 address opt_virtual_call_Relocation::static_stub(bool is_aot) {
   // search for the static stub who points back to this static call
   address static_call_addr = addr();
@@ -715,10 +720,10 @@
   _method_index = unpack_1_int();
 }
 
-void static_call_Relocation::clear_inline_cache() {
+bool static_call_Relocation::clear_inline_cache() {
   // Safe call site info
   CompiledStaticCall* handler = this->code()->compiledStaticCall_at(this);
-  handler->set_to_clean();
+  return set_to_clean_no_ic_refill(handler);
 }
 
 
@@ -757,10 +762,11 @@
   return NULL;
 }
 
-void static_stub_Relocation::clear_inline_cache() {
+bool static_stub_Relocation::clear_inline_cache() {
   // Call stub is only used when calling the interpreted code.
   // It does not really need to be cleared, except that we want to clean out the methodoop.
   CompiledDirectStaticCall::set_stub_to_clean(this);
+  return true;
 }
 
 
--- a/src/hotspot/share/code/relocInfo.hpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/relocInfo.hpp	Wed Dec 05 15:57:26 2018 +0100
@@ -814,7 +814,7 @@
   // all relocations are able to reassert their values
   virtual void set_value(address x);
 
-  virtual void clear_inline_cache()              { }
+  virtual bool clear_inline_cache()              { return true; }
 
   // This method assumes that all virtual/static (inline) caches are cleared (since for static_call_type and
  // ic_call_type is not always position dependent (depending on the state of the cache)). However, this is
@@ -1052,7 +1052,7 @@
   void pack_data_to(CodeSection* dest);
   void unpack_data();
 
-  void clear_inline_cache();
+  bool clear_inline_cache();
 };
 
 
@@ -1083,7 +1083,7 @@
   void pack_data_to(CodeSection* dest);
   void unpack_data();
 
-  void clear_inline_cache();
+  bool clear_inline_cache();
 
   // find the matching static_stub
   address static_stub(bool is_aot);
@@ -1117,7 +1117,7 @@
   void pack_data_to(CodeSection* dest);
   void unpack_data();
 
-  void clear_inline_cache();
+  bool clear_inline_cache();
 
   // find the matching static_stub
   address static_stub(bool is_aot);
@@ -1146,7 +1146,7 @@
   static_stub_Relocation() { }
 
  public:
-  void clear_inline_cache();
+  bool clear_inline_cache();
 
   address static_call() { return _static_call; }
   bool is_aot() { return _is_aot; }
--- a/src/hotspot/share/code/stubs.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/stubs.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -117,7 +117,7 @@
 
 Stub* StubQueue::request(int requested_code_size) {
   assert(requested_code_size > 0, "requested_code_size must be > 0");
-  if (_mutex != NULL) _mutex->lock();
+  if (_mutex != NULL) _mutex->lock_without_safepoint_check();
   Stub* s = current_stub();
   int requested_size = align_up(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
   if (requested_size <= available_space()) {
@@ -207,7 +207,7 @@
 void StubQueue::verify() {
   // verify only if initialized
   if (_stub_buffer == NULL) return;
-  MutexLockerEx lock(_mutex);
+  MutexLockerEx lock(_mutex, Mutex::_no_safepoint_check_flag);
   // verify index boundaries
   guarantee(0 <= _buffer_size, "buffer size must be positive");
   guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
@@ -234,9 +234,8 @@
 
 
 void StubQueue::print() {
-  MutexLockerEx lock(_mutex);
+  MutexLockerEx lock(_mutex, Mutex::_no_safepoint_check_flag);
   for (Stub* s = first(); s != NULL; s = next(s)) {
     stub_print(s);
   }
 }
-
--- a/src/hotspot/share/code/vtableStubs.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/code/vtableStubs.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -124,7 +124,7 @@
 void VtableStubs::initialize() {
   VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
   {
-    MutexLocker ml(VtableStubs_lock);
+    MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
     assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
     assert(is_power_of_2(N), "N must be a power of 2");
     for (int i = 0; i < N; i++) {
@@ -247,7 +247,7 @@
 
 
 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
-  MutexLocker ml(VtableStubs_lock);
+  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
   unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
   VtableStub* s = _table[hash];
   while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
@@ -256,7 +256,7 @@
 
 
 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
-  MutexLocker ml(VtableStubs_lock);
+  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
   assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
   unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
   // enter s at the beginning of the corresponding list
@@ -266,7 +266,7 @@
 }
 
 VtableStub* VtableStubs::entry_point(address pc) {
-  MutexLocker ml(VtableStubs_lock);
+  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
   VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
   uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
   VtableStub* s;
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -251,7 +251,7 @@
   def(SystemDictionary_lock        , PaddedMonitor, leaf,        true,  Monitor::_safepoint_check_always);     // lookups done by VM thread
   def(SharedDictionary_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // lookups done by VM thread
   def(Module_lock                  , PaddedMutex  , leaf+2,      true,  Monitor::_safepoint_check_always);
-  def(InlineCacheBuffer_lock       , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);
+  def(InlineCacheBuffer_lock       , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
   def(VMStatistic_lock             , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always);
   def(ExpandHeap_lock              , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // Used during compilation by VM thread
   def(JNIHandleBlockFreeList_lock  , PaddedMutex  , leaf-1,      true,  Monitor::_safepoint_check_never);      // handles are used by VM thread
@@ -281,7 +281,7 @@
   def(VMOperationRequest_lock      , PaddedMonitor, nonleaf,     true,  Monitor::_safepoint_check_sometimes);
   def(RetData_lock                 , PaddedMutex  , nonleaf,     false, Monitor::_safepoint_check_always);
   def(Terminator_lock              , PaddedMonitor, nonleaf,     true,  Monitor::_safepoint_check_sometimes);
-  def(VtableStubs_lock             , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_always);
+  def(VtableStubs_lock             , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_never);
   def(Notify_lock                  , PaddedMonitor, nonleaf,     true,  Monitor::_safepoint_check_always);
   def(JNIGlobalAlloc_lock          , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_never);
   def(JNIGlobalActive_lock         , PaddedMutex  , nonleaf-1,   true,  Monitor::_safepoint_check_never);
@@ -294,7 +294,7 @@
   def(JfieldIdCreation_lock        , PaddedMutex  , nonleaf+1,   true,  Monitor::_safepoint_check_always);     // jfieldID, Used in VM_Operation
   def(ResolvedMethodTable_lock     , PaddedMutex  , nonleaf+1,   false, Monitor::_safepoint_check_always);     // Used to protect ResolvedMethodTable
 
-  def(CompiledIC_lock              , PaddedMutex  , nonleaf+2,   false, Monitor::_safepoint_check_always);     // locks VtableStubs_lock, InlineCacheBuffer_lock
+  def(CompiledIC_lock              , PaddedMutex  , nonleaf+2,   false, Monitor::_safepoint_check_never);      // locks VtableStubs_lock, InlineCacheBuffer_lock
   def(CompileTaskAlloc_lock        , PaddedMutex  , nonleaf+2,   true,  Monitor::_safepoint_check_always);
   def(CompileStatistics_lock       , PaddedMutex  , nonleaf+2,   false, Monitor::_safepoint_check_always);
   def(DirectivesStack_lock         , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
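
Re-ranking these three locks as _safepoint_check_never is the counterpart of the acquisition changes earlier in the patch: a never-check lock must be taken without a safepoint check, or the mutex code asserts. The pairing, in the idiom the hunks above already use:

    // Matches the new _safepoint_check_never rank of VtableStubs_lock:
    MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);  // ok
    // MutexLocker ml(VtableStubs_lock);  // would assert: performs a safepoint check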
--- a/src/hotspot/share/runtime/safepoint.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/runtime/safepoint.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -732,6 +732,7 @@
   // Finish monitor deflation.
   ObjectSynchronizer::finish_deflate_idle_monitors(&deflate_counters);
 
+  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 }
 
 
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -25,12 +25,13 @@
 #include "precompiled.hpp"
 #include "jvm.h"
 #include "aot/aotLoader.hpp"
-#include "code/compiledMethod.inline.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/compiledIC.hpp"
+#include "code/icBuffer.hpp"
+#include "code/compiledMethod.inline.hpp"
 #include "code/scopeDesc.hpp"
 #include "code/vtableStubs.hpp"
 #include "compiler/abstractCompiler.hpp"
@@ -1245,76 +1246,10 @@
   return callee_method;
 }
 
-// Resolves a call.  The compilers generate code for calls that go here
-// and are patched with the real destination of the call.
-methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
-                                           bool is_virtual,
-                                           bool is_optimized, TRAPS) {
-
-  ResourceMark rm(thread);
-  RegisterMap cbl_map(thread, false);
-  frame caller_frame = thread->last_frame().sender(&cbl_map);
-
-  CodeBlob* caller_cb = caller_frame.cb();
-  guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
-  CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
-
-  // make sure caller is not getting deoptimized
-  // and removed before we are done with it.
-  // CLEANUP - with lazy deopt shouldn't need this lock
-  nmethodLocker caller_lock(caller_nm);
-
-  // determine call info & receiver
-  // note: a) receiver is NULL for static calls
-  //       b) an exception is thrown if receiver is NULL for non-static calls
-  CallInfo call_info;
-  Bytecodes::Code invoke_code = Bytecodes::_illegal;
-  Handle receiver = find_callee_info(thread, invoke_code,
-                                     call_info, CHECK_(methodHandle()));
-  methodHandle callee_method = call_info.selected_method();
-
-  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
-         (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
-         (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
-         (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
-         ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
-
-  assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive");
-
-#ifndef PRODUCT
-  // tracing/debugging/statistics
-  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
-                (is_virtual) ? (&_resolve_virtual_ctr) :
-                               (&_resolve_static_ctr);
-  Atomic::inc(addr);
-
-  if (TraceCallFixup) {
-    ResourceMark rm(thread);
-    tty->print("resolving %s%s (%s) call to",
-      (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
-      Bytecodes::name(invoke_code));
-    callee_method->print_short_name(tty);
-    tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
-                  p2i(caller_frame.pc()), p2i(callee_method->code()));
-  }
-#endif
-
-  // JSR 292 key invariant:
-  // If the resolved method is a MethodHandle invoke target, the call
-  // site must be a MethodHandle call site, because the lambda form might tail-call
-  // leaving the stack in a state unknown to either caller or callee
-  // TODO detune for now but we might need it again
-//  assert(!callee_method->is_compiled_lambda_form() ||
-//         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
-
-  // Compute entry points. This might require generation of C2I converter
-  // frames, so we cannot be holding any locks here. Furthermore, the
-  // computation of the entry points is independent of patching the call.  We
-  // always return the entry-point, but we only patch the stub if the call has
-  // not been deoptimized.  Return values: For a virtual call this is an
-  // (cached_oop, destination address) pair. For a static call/optimized
-  // virtual this is just a destination address.
-
+// This fails if resolution required refilling of IC stubs
+bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
+                                                CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
+                                                Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
   StaticCallInfo static_call_info;
   CompiledICInfo virtual_call_info;
 
@@ -1343,7 +1278,7 @@
     Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
     CompiledIC::compute_monomorphic_entry(callee_method, klass,
                      is_optimized, static_bound, is_nmethod, virtual_call_info,
-                     CHECK_(methodHandle()));
+                     CHECK_false);
   } else {
     // static call
     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
@@ -1374,17 +1309,102 @@
       if (is_virtual) {
         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
         if (inline_cache->is_clean()) {
-          inline_cache->set_to_monomorphic(virtual_call_info);
+          if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
+            return false;
+          }
         }
       } else {
         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
         if (ssc->is_clean()) ssc->set(static_call_info);
       }
     }
-
   } // unlock CompiledICLocker
-
-  return callee_method;
+  return true;
+}
+
+// Resolves a call.  The compilers generate code for calls that go here
+// and are patched with the real destination of the call.
+methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
+                                               bool is_virtual,
+                                               bool is_optimized, TRAPS) {
+
+  ResourceMark rm(thread);
+  RegisterMap cbl_map(thread, false);
+  frame caller_frame = thread->last_frame().sender(&cbl_map);
+
+  CodeBlob* caller_cb = caller_frame.cb();
+  guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
+  CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
+
+  // make sure caller is not getting deoptimized
+  // and removed before we are done with it.
+  // CLEANUP - with lazy deopt shouldn't need this lock
+  nmethodLocker caller_lock(caller_nm);
+
+  // determine call info & receiver
+  // note: a) receiver is NULL for static calls
+  //       b) an exception is thrown if receiver is NULL for non-static calls
+  CallInfo call_info;
+  Bytecodes::Code invoke_code = Bytecodes::_illegal;
+  Handle receiver = find_callee_info(thread, invoke_code,
+                                     call_info, CHECK_(methodHandle()));
+  methodHandle callee_method = call_info.selected_method();
+
+  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
+         (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
+         (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
+         (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
+         ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
+
+  assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive");
+
+#ifndef PRODUCT
+  // tracing/debugging/statistics
+  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
+                (is_virtual) ? (&_resolve_virtual_ctr) :
+                               (&_resolve_static_ctr);
+  Atomic::inc(addr);
+
+  if (TraceCallFixup) {
+    ResourceMark rm(thread);
+    tty->print("resolving %s%s (%s) call to",
+      (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
+      Bytecodes::name(invoke_code));
+    callee_method->print_short_name(tty);
+    tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
+                  p2i(caller_frame.pc()), p2i(callee_method->code()));
+  }
+#endif
+
+  // JSR 292 key invariant:
+  // If the resolved method is a MethodHandle invoke target, the call
+  // site must be a MethodHandle call site, because the lambda form might tail-call
+  // leaving the stack in a state unknown to either caller or callee
+  // TODO detune for now but we might need it again
+//  assert(!callee_method->is_compiled_lambda_form() ||
+//         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
+
+  // Compute entry points. This might require generation of C2I converter
+  // frames, so we cannot be holding any locks here. Furthermore, the
+  // computation of the entry points is independent of patching the call.  We
+  // always return the entry-point, but we only patch the stub if the call has
+  // not been deoptimized.  Return values: For a virtual call this is an
+  // (cached_oop, destination address) pair. For a static call/optimized
+  // virtual this is just a destination address.
+
+  // Patching IC caches may fail if we run out of transition stubs.
+  // If so, we refill the IC stubs and try again.
+  for (;;) {
+    bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
+                                                  is_virtual, is_optimized, receiver,
+                                                  call_info, invoke_code, CHECK_(methodHandle()));
+    if (successful) {
+      return callee_method;
+    } else {
+      InlineCacheBuffer::refill_ic_stubs();
+    }
+  }
+
 }
 
 
@@ -1518,7 +1538,85 @@
   return callee_method->verified_code_entry();
 JRT_END
 
-
+// The handle_ic_miss_helper_internal function returns false if it failed, either
+// because it ran out of vtable stubs or because it ran out of IC stubs while
+// transitioning an IC to a transitional state. The needs_ic_stub_refill value is
+// set if the failure was due to running out of IC stubs, in which case
+// handle_ic_miss_helper refills the IC stubs and tries again.
+bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
+                                                   const frame& caller_frame, methodHandle callee_method,
+                                                   Bytecodes::Code bc, CallInfo& call_info,
+                                                   bool& needs_ic_stub_refill, TRAPS) {
+  CompiledICLocker ml(caller_nm);
+  CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
+  bool should_be_mono = false;
+  if (inline_cache->is_optimized()) {
+    if (TraceCallFixup) {
+      ResourceMark rm(THREAD);
+      tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
+      callee_method->print_short_name(tty);
+      tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
+    }
+    should_be_mono = true;
+  } else if (inline_cache->is_icholder_call()) {
+    CompiledICHolder* ic_oop = inline_cache->cached_icholder();
+    if (ic_oop != NULL) {
+      if (!ic_oop->is_loader_alive()) {
+        // Deferred IC cleaning due to concurrent class unloading
+        if (!inline_cache->set_to_clean()) {
+          needs_ic_stub_refill = true;
+          return false;
+        }
+      } else if (receiver()->klass() == ic_oop->holder_klass()) {
+        // This isn't a real miss. We must have seen that compiled code
+        // is now available and we want the call site converted to a
+        // monomorphic compiled call site.
+        // We can't assert for callee_method->code() != NULL because it
+        // could have been deoptimized in the meantime
+        if (TraceCallFixup) {
+          ResourceMark rm(THREAD);
+          tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
+          callee_method->print_short_name(tty);
+          tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
+        }
+        should_be_mono = true;
+      }
+    }
+  }
+
+  if (should_be_mono) {
+    // We have a path that was monomorphic but was going interpreted
+    // and now we have (or had) a compiled entry. We correct the IC
+    // by using a new icBuffer.
+    CompiledICInfo info;
+    Klass* receiver_klass = receiver()->klass();
+    inline_cache->compute_monomorphic_entry(callee_method,
+                                            receiver_klass,
+                                            inline_cache->is_optimized(),
+                                            false, caller_nm->is_nmethod(),
+                                            info, CHECK_false);
+    if (!inline_cache->set_to_monomorphic(info)) {
+      needs_ic_stub_refill = true;
+      return false;
+    }
+  } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
+    // Potential change to megamorphic
+
+    bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
+    if (!successful) {
+      if (!needs_ic_stub_refill) {
+        return false;
+      }
+      if (!inline_cache->set_to_clean()) {
+        needs_ic_stub_refill = true;
+        return false;
+      }
+    }
+  } else {
+    // Either clean or megamorphic
+  }
+  return true;
+}
 
 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
   ResourceMark rm(thread);
@@ -1555,8 +1653,6 @@
 
   methodHandle callee_method = call_info.selected_method();
 
-  bool should_be_mono = false;
-
 #ifndef PRODUCT
   Atomic::inc(&_ic_miss_ctr);
 
@@ -1585,75 +1681,41 @@
   JvmtiDynamicCodeEventCollector event_collector;
 
   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
-  {
-    RegisterMap reg_map(thread, false);
-    frame caller_frame = thread->last_frame().sender(&reg_map);
-    CodeBlob* cb = caller_frame.cb();
-    CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
-    CompiledICLocker ml(caller_nm);
-
-    if (cb->is_compiled()) {
-      CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
-      bool should_be_mono = false;
-      if (inline_cache->is_optimized()) {
-        if (TraceCallFixup) {
-          ResourceMark rm(thread);
-          tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
-          callee_method->print_short_name(tty);
-          tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
-        }
-        should_be_mono = true;
-      } else if (inline_cache->is_icholder_call()) {
-        CompiledICHolder* ic_oop = inline_cache->cached_icholder();
-        if (ic_oop != NULL) {
-          if (!ic_oop->is_loader_alive()) {
-            // Deferred IC cleaning due to concurrent class unloading
-            inline_cache->set_to_clean();
-          } else if (receiver()->klass() == ic_oop->holder_klass()) {
-            // This isn't a real miss. We must have seen that compiled code
-            // is now available and we want the call site converted to a
-            // monomorphic compiled call site.
-            // We can't assert for callee_method->code() != NULL because it
-            // could have been deoptimized in the meantime
-            if (TraceCallFixup) {
-              ResourceMark rm(thread);
-              tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
-              callee_method->print_short_name(tty);
-              tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
-            }
-            should_be_mono = true;
-          }
-        }
-      }
-
-      if (should_be_mono) {
-
-        // We have a path that was monomorphic but was going interpreted
-        // and now we have (or had) a compiled entry. We correct the IC
-        // by using a new icBuffer.
-        CompiledICInfo info;
-        Klass* receiver_klass = receiver()->klass();
-        inline_cache->compute_monomorphic_entry(callee_method,
-                                                receiver_klass,
-                                                inline_cache->is_optimized(),
-                                                false, caller_nm->is_nmethod(),
-                                                info, CHECK_(methodHandle()));
-        inline_cache->set_to_monomorphic(info);
-      } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
-        // Potential change to megamorphic
-        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
-        if (!successful) {
-          inline_cache->set_to_clean();
-        }
-      } else {
-        // Either clean or megamorphic
-      }
+  // Transitioning IC caches may require transition stubs. If we run out
+  // of transition stubs, we have to drop locks and perform a safepoint
+  // that refills them.
+  RegisterMap reg_map(thread, false);
+  frame caller_frame = thread->last_frame().sender(&reg_map);
+  CodeBlob* cb = caller_frame.cb();
+  CompiledMethod* caller_nm = cb->as_compiled_method();
+
+  for (;;) {
+    bool needs_ic_stub_refill = false;
+    bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
+                                                     bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
+    if (successful || !needs_ic_stub_refill) {
+      return callee_method;
     } else {
-      fatal("Unimplemented");
+      InlineCacheBuffer::refill_ic_stubs();
     }
-  } // Release CompiledICLocker
-
-  return callee_method;
+  }
+}
+
+static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
+  CompiledICLocker ml(caller_nm);
+  if (is_static_call) {
+    CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
+    if (!ssc->is_clean()) {
+      return ssc->set_to_clean();
+    }
+  } else {
+    // compiled, dispatched call (which used to call an interpreted method)
+    CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
+    if (!inline_cache->is_clean()) {
+      return inline_cache->set_to_clean();
+    }
+  }
+  return true;
 }
 
 //
@@ -1735,14 +1797,12 @@
       // to a wrong method). It should not be performance critical, since the
       // resolve is only done once.
 
-      CompiledICLocker ml(caller_nm);
-      if (is_static_call) {
-        CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
-        ssc->set_to_clean();
-      } else {
-        // compiled, dispatched call (which used to call an interpreted method)
-        CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
-        inline_cache->set_to_clean();
+      for (;;) {
+        if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
+          InlineCacheBuffer::refill_ic_stubs();
+        } else {
+          break;
+        }
       }
     }
   }
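
One subtlety worth spelling out: these helpers now have two distinct failure channels. Java exceptions still propagate through TRAPS (the CHECK_false and CHECK_(methodHandle()) macros), while stub exhaustion propagates through the bool return value, with needs_ic_stub_refill separating IC stub exhaustion (refill and retry) from vtable stub exhaustion (give up and let the call re-resolve). A hedged sketch of one retry iteration, matching handle_ic_miss_helper above:

    bool needs_ic_stub_refill = false;
    bool ok = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame,
                                             callee_method, bc, call_info,
                                             needs_ic_stub_refill,
                                             CHECK_(methodHandle()));  // exception channel
    if (ok) {
      return callee_method;                  // transition installed
    } else if (needs_ic_stub_refill) {
      InlineCacheBuffer::refill_ic_stubs();  // IC stubs exhausted: refill, then retry
    } else {
      return callee_method;                  // vtable stubs exhausted: give up for now
    }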
--- a/src/hotspot/share/runtime/sharedRuntime.hpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/runtime/sharedRuntime.hpp	Wed Dec 05 15:57:26 2018 +0100
@@ -48,6 +48,9 @@
   friend class VMStructs;
 
  private:
+  static bool resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
+                                          CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
+                                          Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS);
   static methodHandle resolve_sub_helper(JavaThread *thread,
                                          bool is_virtual,
                                          bool is_optimized, TRAPS);
@@ -324,6 +327,10 @@
   // deopt blob
   static void generate_deopt_blob(void);
 
+  static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame,
+                                             methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info,
+                                             bool& needs_ic_stub_refill, TRAPS);
+
  public:
   static DeoptimizationBlob* deopt_blob(void)      { return _deopt_blob; }
 
--- a/src/hotspot/share/runtime/sweeper.cpp	Wed Dec 05 17:33:01 2018 +0000
+++ b/src/hotspot/share/runtime/sweeper.cpp	Wed Dec 05 15:57:26 2018 +0100
@@ -699,7 +699,6 @@
     // But still remember to clean-up inline caches for alive nmethods
     if (cm->is_alive() && !cm->is_unloading()) {
       // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
-      CompiledICLocker ml(cm);
       cm->cleanup_inline_caches(false);
       SWEEP(cm);
     }
@@ -745,19 +744,16 @@
       }
     } else {
       // Still alive, clean up its inline caches
-      CompiledICLocker ml(cm);
       cm->cleanup_inline_caches(false);
       SWEEP(cm);
     }
   } else if (cm->is_unloaded()) {
     // Code is unloaded, so there are no activations on the stack.
     // Convert the nmethod to zombie or flush it directly in the OSR case.
-    {
-      // Clean ICs of unloaded nmethods as well because they may reference other
-      // unloaded nmethods that may be flushed earlier in the sweeper cycle.
-      CompiledICLocker ml(cm);
-      cm->cleanup_inline_caches(false);
-    }
+
+    // Clean ICs of unloaded nmethods as well because they may reference other
+    // unloaded nmethods that may be flushed earlier in the sweeper cycle.
+    cm->cleanup_inline_caches(false);
     if (cm->is_osr_method()) {
       SWEEP(cm);
       // No inline caches will ever point to osr methods, so we can just remove it
@@ -776,7 +772,6 @@
       possibly_flush((nmethod*)cm);
     }
     // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
-    CompiledICLocker ml(cm);
     cm->cleanup_inline_caches(false);
     SWEEP(cm);
   }