changeset 55988:1fbfb9c358bf fibers

Merge
author rbackman
date Thu, 18 Jul 2019 11:31:49 +0200
parents 40cdbdac00bc c4fd70d6ee4a
children 998f76b51216
files src/hotspot/share/runtime/continuation.hpp
diffstat 18 files changed, 398 insertions(+), 77 deletions(-)
--- a/src/hotspot/cpu/x86/continuation_x86.inline.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/cpu/x86/continuation_x86.inline.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -798,4 +798,4 @@
   }
 }
 
-#endif // CPU_X86_CONTINUATION_X86_INLINE_HPP
\ No newline at end of file
+#endif // CPU_X86_CONTINUATION_X86_INLINE_HPP
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -59,6 +59,7 @@
   virtual bool is_range_check_stub() const       { return false; }
   virtual bool is_divbyzero_stub() const         { return false; }
   virtual bool is_simple_exception_stub() const  { return false; }
+  virtual int nr_immediate_oops_patched() const  { return 0; }
 #ifndef PRODUCT
   virtual void print_name(outputStream* out) const = 0;
 #endif
@@ -401,6 +402,13 @@
     masm->bind(_patch_site_entry);
   }
 
+  virtual int nr_immediate_oops_patched() const  {
+    if (_id == load_mirror_id || _id == load_appendix_id) {
+      return 1;
+    }
+    return 0;
+  }
+
   void install(MacroAssembler* masm, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
     _info = info;
     _obj = obj;
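
The two hunks above add a counting hook to C1's code-stub hierarchy: every stub reports how many immediate oops patching it will embed in the generated code (zero by default, one for mirror and appendix loads). As a minimal sketch of the pattern, with simplified stand-in types rather than the real HotSpot classes:

    #include <vector>

    struct Stub {                                      // stand-in for CodeStub
      virtual ~Stub() {}
      virtual int nr_immediate_oops_patched() const { return 0; }
    };

    struct PatchingStubSketch : Stub {                 // stand-in for PatchingStub
      enum Id { access_field_id, load_klass_id, load_mirror_id, load_appendix_id };
      Id _id;
      explicit PatchingStubSketch(Id id) : _id(id) {}
      int nr_immediate_oops_patched() const override {
        // Only mirror/appendix patches embed an oop directly in the code.
        return (_id == load_mirror_id || _id == load_appendix_id) ? 1 : 0;
      }
    };

    // The assembler-side accumulation (see append_code_stub() below) reduces to:
    int total_immediate_oops(const std::vector<Stub*>& stubs) {
      int n = 0;
      for (Stub* s : stubs) n += s->nr_immediate_oops_patched();
      return n;
    }

The total flows from LIR_Assembler through Compilation and ciEnv into the nmethod, where it widens nr_oops() so the keepalive machinery below can size its shadow array for oops that have not been patched in yet.
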
--- a/src/hotspot/share/c1/c1_Compilation.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/c1/c1_Compilation.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -363,6 +363,7 @@
   }
 #endif /* PRODUCT */
 
+  _immediate_oops_patched = lir_asm.nr_immediate_oops_patched();
   return frame_map()->framesize();
 }
 
@@ -426,7 +427,8 @@
     compiler(),
     has_unsafe_access(),
     SharedRuntime::is_wide_vector(max_vector_size()),
-    has_monitors()
+    has_monitors(),
+    _immediate_oops_patched
   );
 }
 
@@ -570,6 +572,7 @@
 , _code(buffer_blob)
 , _has_access_indexed(false)
 , _interpreter_frame_size(0)
+, _immediate_oops_patched(0)
 , _current_instruction(NULL)
 #ifndef PRODUCT
 , _last_instruction_printed(NULL)
--- a/src/hotspot/share/c1/c1_Compilation.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/c1/c1_Compilation.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -91,6 +91,7 @@
   CodeBuffer         _code;
   bool               _has_access_indexed;
   int                _interpreter_frame_size; // Stack space needed in case of a deoptimization
+  int                _immediate_oops_patched;
 
   // compilation helpers
   void initialize();
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -106,6 +106,7 @@
  , _current_block(NULL)
  , _pending_non_safepoint(NULL)
  , _pending_non_safepoint_offset(0)
+ , _immediate_oops_patched(0)
 {
   _slow_case_stubs = new CodeStubList();
 }
@@ -127,6 +128,7 @@
 
 
 void LIR_Assembler::append_code_stub(CodeStub* stub) {
+  _immediate_oops_patched += stub->nr_immediate_oops_patched();
   _slow_case_stubs->append(stub);
 }
 
--- a/src/hotspot/share/c1/c1_LIRAssembler.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -46,6 +46,7 @@
 
   Instruction*       _pending_non_safepoint;
   int                _pending_non_safepoint_offset;
+  int                _immediate_oops_patched;
 
   Label              _unwind_handler_entry;
 
@@ -267,6 +268,7 @@
 #include CPU_HEADER(c1_LIRAssembler)
 
  public:
+  int nr_immediate_oops_patched() const { return _immediate_oops_patched; }
 
   static int call_stub_size() {
     if (UseAOT) {
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -56,6 +56,7 @@
 #include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
+#include "runtime/continuation.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1276,6 +1277,7 @@
 
     // Since we've patched some oops in the nmethod,
     // (re)register it with the heap.
+    Continuation::nmethod_patched(nm);
     Universe::heap()->register_nmethod(nm);
   }
 JRT_END
--- a/src/hotspot/share/ci/ciEnv.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/ci/ciEnv.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -967,6 +967,7 @@
                             bool has_unsafe_access,
                             bool has_wide_vectors,
                             bool has_monitors,
+                            int immediate_oops_patched,
                             RTMState  rtm_state) {
   VM_ENTRY_MARK;
   nmethod* nm = NULL;
@@ -1056,6 +1057,7 @@
       nm->set_has_unsafe_access(has_unsafe_access);
       nm->set_has_wide_vectors(has_wide_vectors);
       nm->set_has_monitors(has_monitors);
+      nm->set_immediate_oops_patched(immediate_oops_patched);
       assert (!method->is_synchronized() || nm->has_monitors(), "");
 #if INCLUDE_RTM_OPT
       nm->set_rtm_state(rtm_state);
--- a/src/hotspot/share/ci/ciEnv.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/ci/ciEnv.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -380,6 +380,7 @@
                        bool                      has_unsafe_access,
                        bool                      has_wide_vectors,
                        bool                      has_monitors,
+                       int                       immediate_oops_patched,
                        RTMState                  rtm_state = NoRTM);
 
 
--- a/src/hotspot/share/code/compiledMethod.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -39,6 +39,7 @@
 #include "oops/method.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/deoptimization.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -49,7 +50,8 @@
   : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
     _mark_for_deoptimization_status(not_marked),
     _method(method),
-    _gc_data(NULL)
+    _gc_data(NULL),
+    _shadow(NULL)
 {
   init_defaults();
 }
@@ -61,7 +63,8 @@
              frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
     _mark_for_deoptimization_status(not_marked),
     _method(method),
-    _gc_data(NULL)
+    _gc_data(NULL),
+    _shadow(NULL)
 {
   init_defaults();
 }
@@ -72,7 +75,6 @@
   _lazy_critical_native       = 0;
   _has_wide_vectors           = 0;
   _has_monitors               = 0;
-  _on_continuation_stack      = 0;
 }
 
 bool CompiledMethod::is_method_handle_return(address return_pc) {
@@ -714,3 +716,8 @@
   }
   return check_evol.has_evol_dependency();
 }
+
+bool CompiledMethod::is_on_continuation_stack() {
+  return JNIHandles::resolve(_shadow) != NULL;
+}
+
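
The explicit on-continuation-stack counter gives way to a liveness test: the method counts as being on a continuation stack exactly while the GC still considers the weakly-referenced shadow array reachable. A rough analogy using std::weak_ptr (the real field is a jweak resolved through JNIHandles, and the referent is the keepalive objArrayOop):

    #include <memory>

    struct Shadow { /* copies of the nmethod's oops */ };

    struct CompiledMethodModel {
      std::weak_ptr<Shadow> shadow;   // analogous to the jweak _shadow field

      bool is_on_continuation_stack() const {
        // ~ JNIHandles::resolve(_shadow) != NULL: true while some frozen
        // continuation still keeps the shadow array alive.
        return !shadow.expired();
      }
    };
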
--- a/src/hotspot/share/code/compiledMethod.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -176,6 +176,8 @@
   void* _gc_data;
 
   virtual void flush() = 0;
+
+  jweak _shadow;
 protected:
   CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
   CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
@@ -417,13 +419,13 @@
   bool unload_nmethod_caches(bool class_unloading_occurred);
   virtual void do_unloading(bool unloading_occurred) = 0;
 
-  void inc_on_continuation_stack();
-  void dec_on_continuation_stack();
-  bool is_on_continuation_stack() const { return _on_continuation_stack > 0; }
+  bool is_on_continuation_stack();
 
+  jweak get_shadow();
+  jweak set_shadow(jweak shadow);
+  bool clear_shadow(jweak old);
 
 private:
-  volatile int _on_continuation_stack; // Counter that tells on how many unmounted continuation stacks this method are
   PcDesc* find_pc_desc(address pc, bool approximate) {
     return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
   }
--- a/src/hotspot/share/code/compiledMethod.inline.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.inline.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -59,13 +59,14 @@
   return NULL;
 }
 
-inline void CompiledMethod::dec_on_continuation_stack() {
-  Atomic::dec(&_on_continuation_stack);
-  assert (_on_continuation_stack >= 0, "");
+inline jweak CompiledMethod::get_shadow() { return _shadow; }
+
+inline jweak CompiledMethod::set_shadow(jweak obj) {
+  return Atomic::cmpxchg(obj, &_shadow, (jweak) NULL);
 }
 
-inline void CompiledMethod::inc_on_continuation_stack() {
-  Atomic::inc(&_on_continuation_stack);
+inline bool CompiledMethod::clear_shadow(jweak old) {
+  return Atomic::cmpxchg((jweak) NULL, &_shadow, (jweak) old) == old;
 }
 
 // class ExceptionCache methods
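
set_shadow() and clear_shadow() implement a one-shot publish/retire protocol on the _shadow slot: installation succeeds only while the slot is still empty, and clearing succeeds only while the slot still holds the expected stale handle. A self-contained sketch of the same protocol, using std::atomic in place of Atomic::cmpxchg:

    #include <atomic>

    typedef void* jweak_t;                       // stand-in for jweak

    struct ShadowSlot {
      std::atomic<jweak_t> _shadow{nullptr};

      // Returns the previous value; nullptr means we won the race and our
      // handle is now published (mirrors Atomic::cmpxchg's return value).
      jweak_t set_shadow(jweak_t obj) {
        jweak_t expected = nullptr;
        _shadow.compare_exchange_strong(expected, obj);
        return expected;
      }

      // Retires a known-stale handle; fails if someone already replaced it.
      bool clear_shadow(jweak_t old) {
        jweak_t expected = old;
        return _shadow.compare_exchange_strong(expected, nullptr);
      }
    };
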
--- a/src/hotspot/share/code/nmethod.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -576,6 +576,18 @@
   return nm;
 }
 
+class CountOops : public OopClosure {
+private:
+  int _nr_oops;
+public:
+  CountOops() : _nr_oops(0) {}
+  int nr_oops() const { return _nr_oops; }
+
+  virtual void do_oop(oop* o) { _nr_oops++; }
+  virtual void do_oop(narrowOop* o) { _nr_oops++; }
+};
+
 // For native wrappers
 nmethod::nmethod(
   Method* method,
@@ -833,9 +845,21 @@
     assert(compiler->is_c2() || compiler->is_jvmci() ||
            _method->is_static() == (entry_point() == _verified_entry_point),
            " entry points must be same for static methods and vice versa");
+
+    {
+      CountOops count;
+      this->oops_do(&count, false, true);
+      _nr_oops = count.nr_oops();
+    }
   }
 }
 
+int nmethod::count_oops() {
+  CountOops count;
+  this->oops_do(&count, false, true);
+  return count.nr_oops();
+}
+
 // Print a short set of xml attributes to identify this nmethod.  The
 // output should be embedded in some other element.
 void nmethod::log_identity(xmlStream* log) const {
@@ -1773,7 +1797,7 @@
   }
 }
 
-void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
+void nmethod::oops_do(OopClosure* f, bool allow_zombie, bool allow_null) {
   // make sure the oops ready to receive visitors
   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
@@ -1790,7 +1814,7 @@
         assert(1 == (r->oop_is_immediate()) +
                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
                "oop must be found in exactly one place");
-        if (r->oop_is_immediate() && r->oop_value() != NULL) {
+        if (r->oop_is_immediate() && (r->oop_value() != NULL || allow_null)) {
           f->do_oop(r->oop_addr());
         }
       }
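
The new allow_null flag matters for the bookkeeping above: _nr_oops is computed at install time, before any PatchingStub has materialized mirror/appendix oops, so the count must include immediate relocation slots that still hold NULL. Otherwise the shadow array sized from nr_oops() would disagree with the slots later rewritten by Continuation::nmethod_patched(). Reduced to its essence:

    // Counting slots vs. counting non-NULL values: with allow_null the
    // not-yet-patched (NULL) immediate slots are visited too.
    struct Slot { void* value; };

    int count_visited(const Slot* slots, int n, bool allow_null) {
      int cnt = 0;
      for (int i = 0; i < n; i++) {
        if (slots[i].value != nullptr || allow_null) cnt++;
      }
      return cnt;
    }
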
--- a/src/hotspot/share/code/nmethod.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/code/nmethod.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -112,6 +112,12 @@
 
   int _compile_id;                           // which compilation made this nmethod
   int _comp_level;                           // compilation level
+  int _nr_oops;
+ public:
+  int nr_oops() const { return _nr_oops; }
+  void verify_nr_oops();
+  int count_oops();
+ private:
 
   // protected by CodeCache_lock
   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
@@ -432,6 +438,8 @@
   nmethod* osr_link() const                       { return _osr_link; }
   void     set_osr_link(nmethod *n)               { _osr_link = n; }
 
+  void set_immediate_oops_patched(int nr)         { _nr_oops += nr; }
+
   // Verify calls to dead methods have been cleaned.
   void verify_clean_inline_caches();
 
@@ -472,8 +480,8 @@
 #endif
 
  public:
-  void oops_do(OopClosure* f) { oops_do(f, false); }
-  void oops_do(OopClosure* f, bool allow_zombie);
+  void oops_do(OopClosure* f) { oops_do(f, false, false); }
+  void oops_do(OopClosure* f, bool allow_zombie, bool allow_null = false);
 
   bool test_set_oops_do_mark();
   static void oops_do_marking_prologue();
--- a/src/hotspot/share/gc/shared/gcBehaviours.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcBehaviours.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "code/compiledMethod.hpp"
+#include "code/compiledMethod.inline.hpp"
 #include "code/nmethod.hpp"
 #include "gc/shared/gcBehaviours.hpp"
 
--- a/src/hotspot/share/opto/compile.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/opto/compile.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -950,6 +950,7 @@
                            has_unsafe_access(),
                            SharedRuntime::is_wide_vector(max_vector_size()),
                            has_monitors(),
+                           0,
                            rtm_state()
                            );
 
--- a/src/hotspot/share/runtime/continuation.cpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/runtime/continuation.cpp	Thu Jul 18 11:31:49 2019 +0200
@@ -184,6 +184,9 @@
 class ContMirror;
 class hframe;
 
+template <typename ConfigT>
+class CompiledMethodKeepalive;
+
 class Frame {
 public:
   template<typename RegisterMapT> static inline intptr_t** map_link_address(const RegisterMapT* map);
@@ -211,6 +214,8 @@
   DEBUG_ONLY(static const char* name;)
   static const bool interpreted = true;
   static const bool stub = false;
+  static const int extra_oops = 0;
+  static const char type = 'i';
 
 public:
   static inline address* return_pc_address(const frame& f);
@@ -225,6 +230,8 @@
   static int size(const frame&f, InterpreterOopMap* mask);
   static inline int expression_stack_size(const frame &f, InterpreterOopMap* mask);
   static bool is_owning_locks(const frame& f);
+
+  typedef InterpreterOopMap* ExtraT;
 };
 
 DEBUG_ONLY(const char* Interpreted::name = "Interpreted";)
@@ -254,11 +261,18 @@
 
 DEBUG_ONLY(const char* NonInterpretedUnknown::name = "NonInterpretedUnknown";)
 
+class FpOopInfo;
+typedef int (*FreezeFnT)(address, address, address, address, int, FpOopInfo*);
+
 class Compiled : public NonInterpreted<Compiled>  {
 public:
   DEBUG_ONLY(static const char* name;)
   static const bool interpreted = false;
   static const bool stub = false;
+  static const int extra_oops = 1;
+  static const char type = 'c';
+
+  typedef FreezeFnT ExtraT;
 };
 
 DEBUG_ONLY(const char* Compiled::name = "Compiled";)
@@ -268,6 +282,8 @@
   DEBUG_ONLY(static const char* name;)
   static const bool interpreted = false;
   static const bool stub = true;
+  static const int extra_oops = 0;
+  static const char type = 's';
 };
 
 DEBUG_ONLY(const char* StubF::name = "Stub";)
@@ -382,7 +398,7 @@
     assert (mode != mode_fast || !FKind::interpreted, "");
     return self().template sender<FKind, mode>(cont, num_oops);
   }
-  template<typename FKind, op_mode mode> SelfPD sender(const ContMirror& cont, const InterpreterOopMap* mask) const;
+  template<typename FKind, op_mode mode> SelfPD sender(const ContMirror& cont, const InterpreterOopMap* mask, int extra_oops = 0) const;
   template<op_mode mode /* = mode_slow*/> SelfPD sender(const ContMirror& cont) const;
 
   template<typename FKind> bool is_bottom(const ContMirror& cont) const;
@@ -459,6 +475,7 @@
   static void copy_primitive_array(typeArrayOop old_array, int old_start, typeArrayOop new_array, int new_start, int count);
   template <typename ConfigT> bool allocate_ref_stack(int nr_oops);
   template <typename ConfigT> objArrayOop  allocate_refstack_array(size_t nr_oops);
+  template <typename ConfigT> objArrayOop  allocate_shadow_array(size_t nr_oops);
   template <typename ConfigT> bool grow_ref_stack(int nr_oops);
   template <typename ConfigT> void copy_ref_array(objArrayOop old_array, int old_start, objArrayOop new_array, int new_start, int count);
   template <typename ConfigT> void zero_ref_array(objArrayOop new_array, int new_length, int min_length);
@@ -513,6 +530,10 @@
   JavaThread* thread() const { return _thread; }
 
   template <typename ConfigT> inline void allocate_stacks(int size, int oops, int frames);
+
+  template <typename ConfigT>
+  void make_keepalive(CompiledMethodKeepalive<ConfigT>* keepalive);
+
   inline bool in_hstack(void *p) { return (_hstack != NULL && p >= _hstack && p < (_hstack + _stack_length)); }
 
   bool valid_stack_index(int idx) const { return idx >= 0 && idx < _stack_length; }
@@ -668,9 +689,9 @@
 
 template<typename SelfPD>
 template<typename FKind, op_mode mode>
-SelfPD HFrameBase<SelfPD>::sender(const ContMirror& cont, const InterpreterOopMap* mask) const {
+SelfPD HFrameBase<SelfPD>::sender(const ContMirror& cont, const InterpreterOopMap* mask, int extra_oops) const {
   assert (mode != mode_fast || !FKind::interpreted, "");
-  return sender<FKind, mode>(cont, FKind::interpreted ? interpreted_frame_num_oops(*mask) : compiled_frame_num_oops());
+  return sender<FKind, mode>(cont, extra_oops + (FKind::interpreted ? interpreted_frame_num_oops(*mask) : compiled_frame_num_oops()));
 }
 
 template<typename SelfPD>
@@ -805,15 +826,19 @@
 
 void ContMirror::cleanup() {
   // cleanup nmethods
+  /*
   for (hframe hf = last_frame<mode_slow>(); !hf.is_empty(); hf = hf.sender<mode_slow>(*this)) {
-    if (!hf.is_interpreted_frame())
+    if (!hf.is_interpreted_frame()) {
       hf.cb()->as_compiled_method()->dec_on_continuation_stack();
+    }
   }
+  */
 }
 
 void ContMirror::null_ref_stack(int start, int num) {
   if (java_lang_Continuation::is_reset(_cont)) return;
 
+  //log_develop_info(jvmcont)("clearing %d at %d", num, start);
   for (int i = 0; i < num; i++)
     _ref_stack->obj_at_put(start + i, NULL);
 }
@@ -1077,7 +1102,7 @@
 inline int NonInterpreted<Self>::num_oops(const frame&f) {
   assert (!f.is_interpreted_frame() && Self::is_instance(f), "");
   assert (f.oop_map() != NULL, "");
-  return f.oop_map()->num_oops();
+  return f.oop_map()->num_oops() + Self::extra_oops;
 }
 
 template<typename Self>
@@ -1254,6 +1279,9 @@
 
 typedef freeze_result (*FreezeContFnT)(JavaThread*, ContMirror&, FrameInfo*);
 
+static void freeze_compiled_frame_bp() {} // empty hook: a convenient debugger breakpoint
+static void thaw_compiled_frame_bp() {}   // empty hook: a convenient debugger breakpoint
+
 static FreezeContFnT cont_freeze_fast = NULL;
 static FreezeContFnT cont_freeze_slow = NULL;
 static FreezeContFnT cont_freeze_preempt = NULL;
@@ -1270,6 +1298,18 @@
   }
 }
 
+class CountOops : public OopClosure {
+private:
+  int _nr_oops;
+public:
+  CountOops() : _nr_oops(0) {}
+  int nr_oops() const { return _nr_oops; }
+
+  virtual void do_oop(oop* o) { _nr_oops++; }
+  virtual void do_oop(narrowOop* o) { _nr_oops++; }
+};
+
 struct FpOopInfo {
   bool _has_fp_oop; // is fp used to store a derived pointer
   int _fp_index;    // see FreezeOopFn::do_derived_oop
@@ -1286,9 +1326,128 @@
   }
 };
 
+template <typename OopT>
+class PersistOops : public OopClosure {
+private:
+  int _limit;
+  int _current;
+  objArrayOop _array;
+public:
+  PersistOops(int limit, objArrayOop array) : _limit(limit), _current(0), _array(array) {}
+
+  virtual void do_oop(oop* o) { write_oop(o); }
+  virtual void do_oop(narrowOop* o) { write_oop(o); }
+
+private:
+  template <typename T>
+  void write_oop(T* p) {
+    assert(_current < _limit, "");
+    oop obj = NativeAccess<>::oop_load(p);
+    OopT* addr = _array->obj_at_address<OopT>(_current++); // depends on UseCompressedOops
+    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(addr, obj);
+  }
+};
+
+template <typename ConfigT>
+class CompiledMethodKeepalive {
+private:
+  typedef typename ConfigT::OopT OopT;
+  typedef CompiledMethodKeepalive<ConfigT> SelfT;
+
+  Handle _shadowHolder;
+  CompiledMethod* _method;
+  SelfT* _parent;
+  int _nr_oops;
+  bool _required;
+
+public:
+  CompiledMethodKeepalive(CompiledMethod* cm, SelfT* parent, JavaThread* thread) : _method(cm), _parent(NULL), _nr_oops(0), _required(false) {
+    jweak shadow = cm->get_shadow();
+    oop resolved = JNIHandles::resolve(shadow);
+    if (resolved != NULL) {
+      _shadowHolder = Handle(thread, resolved);
+      return;
+    }
+
+    if (shadow != NULL) {
+      if (cm->clear_shadow(shadow)) {
+        // TODO: We are currently leaking handles here. We can't just delete it straight away because someone else might be looking at it.
+        // The right thing to do would be to put them on a list and clear them at a safepoint or similar.
+      }
+    }
+
+    nmethod* nm = cm->as_nmethod_or_null();
+    if (nm != NULL) {
+      _nr_oops = nm->nr_oops();
+      //log_info(jvmcont)("need shadow for %d oops", _nr_oops());
+      _required = true;
+      _parent = parent;
+    }
+  }
+
+  void write_at(ContMirror& mirror, int index) {
+    //log_develop_info(jvmcont)("writing mirror at %d\n", index);
+    mirror.add_oop<ConfigT>(_shadowHolder(), index);
+    //*(hsp + index)
+  }
+
+  void persist_oops() {
+    if (!_required) {
+      // Even though the constructor may have decided a keepalive is required, another thread might have installed its entry before we wrote our own.
+      return;
+    }
+
+    nmethod* nm = _method->as_nmethod_or_null();
+    if (nm != NULL) {
+      PersistOops<OopT> persist(_nr_oops, (objArrayOop) _shadowHolder());
+      nm->oops_do(&persist);
+      //log_info(jvmcont)("oops persisted");
+    }
+  }
+
+  void set_handle(Handle shadow) {
+    _shadowHolder = shadow;
+    jobject obj = JNIHandles::make_weak_global(shadow);
+    jobject result = _method->set_shadow(obj);
+    if (result != NULL) {
+      // someone else managed to do it before us, destroy the weak
+      _required = false;
+      JNIHandles::destroy_weak_global(obj);
+    }
+  }
+
+  SelfT* parent() { return _parent; }
+  bool required() const { return _required; }
+  int nr_oops() const { return _nr_oops; }
+};
+
+template <typename FKind>
+class FreezeFrame {
+};
+
+template <>
+class FreezeFrame<Interpreted> {
+  public:
+  template <bool top, bool bottom, bool IsKeepalive, typename FreezeT>
+  static hframe dispatch(FreezeT& self, const frame& f, const hframe& caller, int fsize, int argsize, int oops, InterpreterOopMap* mask, typename FreezeT::CompiledMethodKeepaliveT* ignore) {
+    return self.template freeze_interpreted_frame<top, bottom>(f, caller, fsize, oops, mask);
+  }
+};
+
+template <>
+class FreezeFrame<Compiled> {
+  public:
+  template <bool top, bool bottom, bool IsKeepalive, typename FreezeT>
+  static hframe dispatch(FreezeT& self, const frame& f, const hframe& caller, int fsize, int argsize, int oops, FreezeFnT f_fn, typename FreezeT::CompiledMethodKeepaliveT* kd) {
+    return self.template freeze_compiled_frame<Compiled, top, bottom, IsKeepalive>(f, caller, fsize, argsize, oops, f_fn, kd);
+  }
+};
+
 template <typename ConfigT, op_mode mode>
 class Freeze {
   typedef typename Conditional<mode == mode_preempt, RegisterMap, SmallRegisterMap>::type RegisterMapT;
+  typedef Freeze<ConfigT, mode> SelfT;
+  typedef CompiledMethodKeepalive<ConfigT> CompiledMethodKeepaliveT;
 
 private:
   JavaThread* _thread;
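
During freeze, each compiled frame constructs a CompiledMethodKeepalive: if the method already has a live shadow array the handle is simply reused, otherwise the keepalive marks itself required and links onto the chain of earlier required keepalives. Once the h-stack allocation succeeds, allocate_keepalive() (further down) walks that chain and has the mirror allocate one shadow array per entry. The chain itself is just an intrusive singly-linked list; a sketch with illustrative stand-in types:

    struct KeepaliveNode {                 // stand-in for CompiledMethodKeepalive
      KeepaliveNode* parent;               // previous required keepalive, or null
      int nr_oops;                         // the nmethod's oop count
    };

    // Total slots the mirror will allocate for a freeze: make_keepalive()
    // allocates at least one slot even for an oop-free nmethod.
    int total_shadow_slots(const KeepaliveNode* head) {
      int total = 0;
      for (const KeepaliveNode* k = head; k != nullptr; k = k->parent)
        total += (k->nr_oops == 0 ? 1 : k->nr_oops);
      return total;
    }
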
@@ -1308,6 +1467,7 @@
   frame _safepoint_stub;
   hframe _safepoint_stub_h;
   bool  _safepoint_stub_caller;
+  CompiledMethodKeepaliveT* _keepalive;
 #ifndef PRODUCT
   intptr_t* _safepoint_stub_hsp;
 #endif
@@ -1320,15 +1480,13 @@
   template<typename FKind> hframe new_callee_hframe(const frame& f, intptr_t* vsp, const hframe& caller, int fsize, int num_oops);
   template<bool cont_empty> hframe new_bottom_hframe(int sp, int ref_sp, address pc, bool interpreted);
 
-  typedef int (*FreezeFnT)(address, address, address, address, int, FpOopInfo*);
-
 public:
 
   Freeze(JavaThread* thread, ContMirror& mirror) :
     _thread(thread), _cont(mirror), _bottom_address(mirror.entrySP()),
     _oops(0), _size(0), _frames(0), _cgrind_interpreted_frames(0),
     _fp_oop_info(), _map(thread, false, false, false),
-    _safepoint_stub_caller(false) {
+    _safepoint_stub_caller(false), _keepalive(NULL) {
 
     _map.set_include_argument_oops(false);
   }
@@ -1340,6 +1498,8 @@
   freeze_result freeze(FrameInfo* fi) {
     _fi = fi;
 
+    HandleMark hm(_thread);
+
     // assert (map.update_map(), "RegisterMap not set to update");
     assert (!_map.include_argument_oops(), "should be");
     frame f = freeze_start_frame(_map);
@@ -1357,7 +1517,7 @@
 
     // Note: if the doYield stub does not have its own frame, we may need to consider deopt here, especially if yield is inlinable
     frame f = ContinuationHelper::last_frame(_thread); // thread->last_frame();
-    assert (StubRoutines::cont_doYield_stub()->contains(f.pc()), "must be");
+    assert(StubRoutines::cont_doYield_stub()->contains(f.pc()), "must be");
     ContinuationHelper::update_register_map<StubF>(&_map, f);
     f = sender<StubF>(f);  // this is the yield frame
 
@@ -1402,7 +1562,7 @@
 
   template<bool top>
   NOINLINE freeze_result freeze(const frame& f, hframe& caller, hframe::callee_info callee_info, int callee_argsize) {
-    assert (f.unextended_sp() < _bottom_address - SP_WIGGLE, ""); // see recurse_java_frame
+    assert (f.unextended_sp() < _bottom_address - SP_WIGGLE, ""); // see recurse_freeze_java_frame
     assert (f.is_interpreted_frame() || ((top && mode == mode_preempt) == is_stub(f.cb())), "");
     assert (mode != mode_fast || (f.is_compiled_frame() && f.oop_map() != NULL), "");
     assert (mode != mode_fast || !f.is_deoptimized_frame(), "");
@@ -1413,23 +1573,29 @@
       if (Compiled::is_owning_locks(_cont.thread(), &_map, f)) return freeze_pinned_monitor;
 
       assert (f.oop_map() != NULL, "");
-
-      return recurse_compiled_frame<top>(f, caller, callee_info);
+      // Keepalive info here...
+      CompiledMethodKeepaliveT kd(f.cb()->as_compiled_method(), _keepalive, _thread);
+      if (kd.required()) {
+        _keepalive = &kd;
+        return recurse_freeze_compiled_frame<top, true>(f, caller, callee_info, &kd);
+      }
+
+      return recurse_freeze_compiled_frame<top, false>(f, caller, callee_info, &kd);
     } else if (f.is_interpreted_frame()) {
       if (Interpreted::is_owning_locks(f)) return freeze_pinned_monitor;
 
-      return recurse_interpreted_frame<top>(f, caller, callee_info, callee_argsize);
+      return recurse_freeze_interpreted_frame<top>(f, caller, callee_info, callee_argsize);
     } else if (mode == mode_preempt && top && is_stub(f.cb())) {
-      return recurse_stub_frame(f, caller);
+      return recurse_freeze_stub_frame(f, caller);
     } else {
       return freeze_pinned_native;
     }
   }
 
-  template<typename FKind, bool top>
-  inline freeze_result recurse_java_frame(const frame& f, hframe& caller, hframe::callee_info callee_info, int fsize, int argsize, int oops, void* extra) {
+  template<typename FKind, bool top, bool IsKeepalive>
+  inline freeze_result recurse_freeze_java_frame(const frame& f, hframe& caller, hframe::callee_info callee_info, int fsize, int argsize, int oops, typename FKind::ExtraT extra, CompiledMethodKeepaliveT* kd) {
     assert (FKind::is_instance(f), "");
-    log_develop_trace(jvmcont)("recurse_java_frame fsize: %d oops: %d", fsize, oops);
+    log_develop_trace(jvmcont)("recurse_freeze_java_frame fsize: %d oops: %d", fsize, oops);
 
     hframe::callee_info my_info;
     frame senderf = sender<FKind>(f, &my_info);
@@ -1442,7 +1608,7 @@
         return result;
 
       ContinuationHelper::update_register_map(&_map, callee_info); // restore saved link
-      freeze_java_frame<FKind, top, true>(f, caller, fsize, argsize, oops, extra);
+      freeze_java_frame<FKind, top, true, IsKeepalive>(f, caller, fsize, argsize, oops, extra, kd);
 
       if (log_develop_is_enabled(Trace, jvmcont)) {
         log_develop_trace(jvmcont)("bottom h-frame:");
@@ -1462,7 +1628,7 @@
       if (mode == mode_preempt) _safepoint_stub_caller = safepoint_stub_caller; // restore _stub_caller
       ContinuationHelper::update_register_map(&_map, callee_info);  // restore saved link
 
-      freeze_java_frame<FKind, top, false>(f, caller, fsize, argsize, oops, extra);
+      freeze_java_frame<FKind, top, false, IsKeepalive>(f, caller, fsize, argsize, oops, extra, kd);
     }
 
     if (top) {
@@ -1471,6 +1637,18 @@
     return freeze_ok;
   }
 
+  void allocate_keepalive() {
+    if (_keepalive == NULL) {
+      return;
+    }
+
+    CompiledMethodKeepaliveT* current = _keepalive;
+    while (current != NULL) {
+      _cont.make_keepalive<ConfigT>(current);
+      current = current->parent();
+    }
+  }
+
   template<typename FKind> // the callee's type
   NOINLINE freeze_result finalize(const frame& f, const frame& callee, int argsize, hframe& caller) {
   #ifdef CALLGRIND_START_INSTRUMENTATION
@@ -1504,8 +1682,11 @@
     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 15) return freeze_ok;)
 
     _cont.allocate_stacks<ConfigT>(_size, _oops, _frames);
-    if (_thread->has_pending_exception())
+    if (_thread->has_pending_exception()) {
       return freeze_exception;
+    }
+
+    allocate_keepalive();
 
     if (_cont.is_empty()) {
       assert (argsize == 0, ""); // the entry frame has an argsize of 0
@@ -1548,8 +1729,11 @@
   #endif
   }
 
-  template<typename FKind, bool top, bool bottom>
-  void freeze_java_frame(const frame& f, hframe& caller, int fsize, int argsize, int oops, void* extra) {
+  template <typename T>
+  friend class FreezeFrame;
+
+  template<typename FKind, bool top, bool bottom, bool IsKeepalive>
+  void freeze_java_frame(const frame& f, hframe& caller, int fsize, int argsize, int oops, typename FKind::ExtraT extra, CompiledMethodKeepaliveT* kd) {
     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 15) return;)
 
     log_develop_trace(jvmcont)("============================= FREEZING FRAME interpreted: %d top: %d bottom: %d", FKind::interpreted, top, bottom);
@@ -1557,15 +1741,13 @@
     if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);
     assert ((mode == mode_fast && !bottom) || caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
 
-    caller.copy_partial<mode>(
-      FKind::interpreted
-        ? freeze_interpreted_frame       <top, bottom>(f, caller, fsize,          oops, (InterpreterOopMap*)extra)
-        : freeze_compiled_frame<Compiled, top, bottom>(f, caller, fsize, argsize, oops, (FreezeFnT)extra));
+    caller.copy_partial<mode>(FreezeFrame<FKind>::template dispatch<top, bottom, IsKeepalive, SelfT>(*this, f, caller, fsize, argsize, oops, extra, kd));
   }
 
   template <typename FKind>
   void freeze_oops(const frame& f, intptr_t* vsp, intptr_t *hsp, int index, int num_oops, void* extra) {
     PERFTEST_ONLY(if (PERFTEST_LEVEL < 30) return;)
+    //log_develop_info(jvmcont)("writing %d oops from %d (%c)", num_oops, index, FKind::type);
 
     log_develop_trace(jvmcont)("Walking oops (freeze)");
 
@@ -1618,20 +1800,20 @@
   }
 
   template<bool top>
-  NOINLINE freeze_result recurse_interpreted_frame(const frame& f, hframe& caller, hframe::callee_info callee_info, int callee_argsize) {
+  NOINLINE freeze_result recurse_freeze_interpreted_frame(const frame& f, hframe& caller, hframe::callee_info callee_info, int callee_argsize) {
     // ResourceMark rm(_thread);
     InterpreterOopMap mask;
     Interpreted::oop_map(f, &mask);
     int fsize = Interpreted::size(f, &mask);
     int oops  = Interpreted::num_oops(f, &mask);
-    
+
     log_develop_trace(jvmcont)("recurse_interpreted_frame _size: %d add fsize: %d callee_argsize: %d -- %d", _size, fsize, callee_argsize, fsize + callee_argsize);
     _size += fsize + callee_argsize;
     _oops += oops;
     _frames++;
     _cgrind_interpreted_frames++;
 
-    return recurse_java_frame<Interpreted, top>(f, caller, callee_info, fsize, 0, oops, (void*)&mask);
+    return recurse_freeze_java_frame<Interpreted, top, false>(f, caller, callee_info, fsize, 0, oops, &mask, NULL);
   }
 
   template <bool top, bool bottom>
@@ -1661,27 +1843,25 @@
     return oopFn.count();
   }
 
-  template<bool top>
-  inline freeze_result recurse_compiled_frame(const frame& f, hframe& caller, hframe::callee_info callee_info) {
+  template<bool top, bool IsKeepalive>
+  freeze_result recurse_freeze_compiled_frame(const frame& f, hframe& caller, hframe::callee_info callee_info, CompiledMethodKeepaliveT* kd) {
     int fsize = Compiled::size(f);
     int oops  = Compiled::num_oops(f);
     int argsize = Compiled::stack_argsize(f);
     FreezeFnT f_fn = get_oopmap_stub(f); // try to do this early, so we wouldn't need to look at the oopMap again.
 
-    log_develop_trace(jvmcont)("recurse_compiled_frame _size: %d add fsize: %d", _size, fsize);
+    log_develop_trace(jvmcont)("recurse_freeze_compiled_frame _size: %d add fsize: %d", _size, fsize);
     _size += fsize;
     _oops += oops;
     _frames++;
 
     // TODO: consider recalculating fsize, argsize and oops in freeze_compiled_frame instead of passing them, as we now do in thaw
-    return recurse_java_frame<Compiled, top>(f, caller, callee_info, fsize, argsize, oops, (void*)f_fn);
+    return recurse_freeze_java_frame<Compiled, top, IsKeepalive>(f, caller, callee_info, fsize, argsize, oops, f_fn, kd);
   }
 
-  template <typename FKind, bool top, bool bottom>
-  hframe freeze_compiled_frame(const frame& f, const hframe& caller, int fsize, int argsize, int oops, FreezeFnT f_fn) {
-    if (!FKind::stub) {
-      f.cb()->as_compiled_method()->inc_on_continuation_stack();
-    }
+  template <typename FKind, bool top, bool bottom, bool IsKeepalive>
+  hframe freeze_compiled_frame(const frame& f, const hframe& caller, int fsize, int argsize, int oops, FreezeFnT f_fn, CompiledMethodKeepaliveT* kd) {
+    freeze_compiled_frame_bp();
 
     intptr_t* vsp = FKind::frame_top(f);
 
@@ -1709,18 +1889,25 @@
         _safepoint_stub_h = freeze_safepoint_stub(hf);
       }
 
-      freeze_oops<Compiled>(f, vsp, hsp, hf.ref_sp(), oops, (void*)f_fn);
+      // e.g. ref_sp: 3, oops: 4 -> [ 3: oop, 4: oop, 5: oop, 6: keepalive array ]
+      kd->write_at(_cont, hf.ref_sp() + oops - 1);
+      //freeze_oops<Compiled>(f, vsp, hsp, hf.ref_sp() + 1, oops - 1, (void*)f_fn);
+      freeze_oops<Compiled>(f, vsp, hsp, hf.ref_sp(), oops - 1, (void*)f_fn);
 
       if (mode == mode_preempt && _safepoint_stub_caller) {
         assert (!_fp_oop_info._has_fp_oop, "must be");
         _safepoint_stub = frame();
       }
+
+      if (IsKeepalive) {
+        kd->persist_oops();
+      }
     } else { // stub frame has no oops
       _fp_oop_info._has_fp_oop = false;
     }
 
     patch<FKind, top, bottom>(f, hf, caller);
-    
+
     log_develop_trace(jvmcont)("freeze_compiled_frame real_pc: %p address: %p sp: %p", Frame::real_pc(f), &(((address*) f.sp())[-1]), f.sp());
 
     assert(bottom || mode == mode_fast || Interpreter::contains(hf.return_pc<FKind>()) == caller.is_interpreted_frame(), "");
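
The index arithmetic here is the heart of the change: a compiled frame's region on the ref stack now contains Compiled::extra_oops == 1 extra slot, the keepalive goes into the last slot of the region, and only the real oop-map oops are handed to freeze_oops(). Matching the example comment above (ref_sp 3, four slots: three oops plus the keepalive at index 6):

    struct RefRegion {
      int ref_sp;     // first ref-stack index of this frame's region
      int num_oops;   // oop-map oops + 1 keepalive slot (Compiled::extra_oops)
    };

    int keepalive_index(const RefRegion& r) { return r.ref_sp + r.num_oops - 1; }
    int real_oop_count(const RefRegion& r)  { return r.num_oops - 1; }

Thaw performs the inverse: it subtracts FKind::extra_oops from the count before thawing and nulls the keepalive slot together with the thawed ones, as the thaw_oops() hunk further down shows.
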
@@ -1794,7 +1981,7 @@
       "flag: %d is_interpreted: %d", _cont.is_flag(FLAG_LAST_FRAME_INTERPRETED), _cont.last_frame<mode_slow>().is_interpreted_frame());
   }
 
-  NOINLINE freeze_result recurse_stub_frame(const frame& f, hframe& caller) {
+  NOINLINE freeze_result recurse_freeze_stub_frame(const frame& f, hframe& caller) {
     int fsize = StubF::size(f);
 
     log_develop_trace(jvmcont)("recurse_stub_frame _size: %d add fsize: %d", _size, fsize);
@@ -1813,7 +2000,7 @@
 
     // we can have stub_caller as a value template argument, but that's unnecessary
     _safepoint_stub_caller = true;
-    freeze_result result = recurse_compiled_frame<false>(senderf, caller, my_info);
+    freeze_result result = recurse_freeze_compiled_frame<false, false>(senderf, caller, my_info, NULL);
     if (result == freeze_ok) {
       finish(f, _safepoint_stub_h);
     }
@@ -1828,7 +2015,7 @@
 
     int fsize = StubF::size(_safepoint_stub);
 
-    hframe hf = freeze_compiled_frame<StubF, true, false>(_safepoint_stub, caller, fsize, 0, 0, NULL);
+    hframe hf = freeze_compiled_frame<StubF, true, false, false>(_safepoint_stub, caller, fsize, 0, 0, NULL, NULL);
 
 #ifndef PRODUCT
     _safepoint_stub_hsp = _cont.stack_address(hf.sp());
@@ -1866,6 +2053,7 @@
   #endif
 
     int add_oop(oop obj, int index) {
+      //log_develop_info(jvmcont)("writing oop at %d", index);
       return this->_cont->template add_oop<ConfigT>(obj, index);
     }
 
@@ -1968,6 +2156,7 @@
 
   static void obj_at_put(objArrayOop array, int index, oop obj) {
     OopT* addr = array->obj_at_addr<OopT>(index); // depends on UseCompressedOops
+    //assert(*addr == (OopT) NULL, "");
     RawAccess<IS_DEST_UNINITIALIZED>::oop_store(addr, obj);
   }
 
@@ -2351,10 +2540,12 @@
   }
 
   template<typename FKind, bool top>
-  void recurse_java_frame(const hframe& hf, frame& caller, int num_frames, void* extra) {
+  void recurse_thaw_java_frame(const hframe& hf, frame& caller, int num_frames, void* extra) {
     assert (num_frames > 0, "");
 
-    hframe hsender = hf.sender<FKind, mode>(_cont, FKind::interpreted ? (InterpreterOopMap*)extra : NULL); // TODO PERF maybe we can reuse fsize?
+    hframe hsender = hf.sender<FKind, mode>(_cont, FKind::interpreted ? (InterpreterOopMap*)extra : NULL, FKind::extra_oops); // TODO PERF maybe we can reuse fsize?
 
     bool is_empty = hsender.is_empty();
     if (num_frames == 1 || is_empty) {
@@ -2450,10 +2641,17 @@
     int thawed;
     if (!FKind::interpreted && extra != NULL) {
       thawed = thaw_compiled_oops_stub(f, (ThawFnT)extra, vsp, oop_index);
+      //log_develop_info(jvmcont)("thawing %d oops from %d (stub)", thawed, oop_index);
     } else {
       int num_oops = FKind::interpreted ? Interpreted::num_oops(f, (InterpreterOopMap*)extra) : NonInterpreted<FKind>::num_oops(f);
-      if (num_oops == 0)
+      num_oops -= FKind::extra_oops;
+      //log_develop_info(jvmcont)("thawing %d oops from %d", num_oops, oop_index);
+      if (num_oops == 0) {
+        if (FKind::extra_oops > 0) {
+          _cont.null_ref_stack(oop_index, FKind::extra_oops);
+        }
         return;
+      }
 
       thawed = FKind::interpreted ? thaw_interpreted_oops(f, vsp, oop_index, (InterpreterOopMap*)extra)
                                   : thaw_compiled_oops   (f, vsp, oop_index);
@@ -2462,10 +2660,10 @@
     log_develop_trace(jvmcont)("count: %d", thawed);
 #ifdef ASSERT
     int num_oops = FKind::interpreted ? Interpreted::num_oops(f, (InterpreterOopMap*)extra) : NonInterpreted<FKind>::num_oops(f);
-    assert(thawed == num_oops, "closure oop count different.");
+    assert(thawed == num_oops - FKind::extra_oops, "closure oop count different.");
 #endif
 
-    _cont.null_ref_stack(oop_index, thawed);
+    _cont.null_ref_stack(oop_index, thawed + FKind::extra_oops);
     _cont.e_add_refs(thawed);
 
     log_develop_trace(jvmcont)("Done walking oops");
@@ -2498,8 +2696,8 @@
     hf.interpreted_frame_oop_map(&mask);
     int fsize = hf.interpreted_frame_size();
     int oops  = hf.interpreted_frame_num_oops(mask);
-    
-    return recurse_java_frame<Interpreted, top>(hf, caller, num_frames, (void*)&mask);
+
+    return recurse_thaw_java_frame<Interpreted, top>(hf, caller, num_frames, (void*)&mask);
   }
 
   template<bool top, bool bottom>
@@ -2545,11 +2743,12 @@
   void recurse_compiled_frame(const hframe& hf, frame& caller, int num_frames) {
     ThawFnT t_fn = get_oopmap_stub(hf); // try to do this early, so we wouldn't need to look at the oopMap again.
 
-    return recurse_java_frame<Compiled, top>(hf, caller, num_frames, (void*)t_fn);
+    return recurse_thaw_java_frame<Compiled, top>(hf, caller, num_frames, (void*)t_fn);
   }
 
   template<typename FKind, bool top, bool bottom>
   frame thaw_compiled_frame(const hframe& hf, const frame& caller, ThawFnT t_fn) {
+    thaw_compiled_frame_bp();
     assert(FKind::stub == is_stub(hf.cb()), "");
 
     int fsize = hf.compiled_frame_size();
@@ -2569,7 +2768,7 @@
     _cont.sub_size(fsize);
 
     intptr_t* hsp = _cont.stack_address(hf.sp());
-    
+
     log_develop_trace(jvmcont)("hsp: %d ", _cont.stack_index(hsp));
 
     frame f = new_frame<FKind>(hf, vsp);
@@ -2577,8 +2776,6 @@
     thaw_raw_frame(hsp, vsp, fsize);
 
     if (!FKind::stub) {
-      hf.cb()->as_compiled_method()->dec_on_continuation_stack();
-
       if (mode == mode_preempt && _safepoint_stub_caller) {
         _safepoint_stub_f = thaw_safepoint_stub(f);
       }
@@ -2623,7 +2820,8 @@
 
     DEBUG_ONLY(if (tmp_fp != f.fp()) log_develop_trace(jvmcont)("WHOA link has changed (thaw) f.fp: " INTPTR_FORMAT " link: " INTPTR_FORMAT, p2i(f.fp()), p2i(tmp_fp));) // TODO PD
 
-    return oopFn.count();
+    int cnt = oopFn.count();
+    return cnt;
   }
 
   int thaw_compiled_oops_stub(frame& f, ThawFnT t_fn, intptr_t* vsp, int starting_index) {
@@ -3409,6 +3607,24 @@
 ///// Allocation
 
 template <typename ConfigT>
+void ContMirror::make_keepalive(CompiledMethodKeepalive<ConfigT>* keepalive) {
+  Handle conth(_thread, _cont);
+  int oops = keepalive->nr_oops();
+  if (oops == 0) {
+    oops = 1;
+  }
+  Handle shadow = Handle(_thread, allocate_shadow_array<ConfigT>(oops));
+
+  uint64_t counter = SafepointSynchronize::safepoint_counter();
+  // check gc cycle
+  keepalive->set_handle(shadow);
+  // check gc cycle and maybe reload
+  //if (!SafepointSynchronize::is_same_safepoint(counter)) {
+    post_safepoint(conth);
+  //}
+}
+
+template <typename ConfigT>
 inline void ContMirror::allocate_stacks(int size, int oops, int frames) {
   bool needs_stack_allocation    = (_stack == NULL || to_index(size) > (_sp >= 0 ? _sp : _stack_length));
   bool needs_refStack_allocation = (_ref_stack == NULL || oops > _ref_sp);
@@ -3639,6 +3855,18 @@
 }
 
 template <typename ConfigT>
+objArrayOop ContMirror::allocate_shadow_array(size_t nr_oops) {
+  //assert(nr_oops > 0, "");
+  bool zero = true; // !BarrierSet::barrier_set()->is_a(BarrierSet::ModRef);
+  log_develop_trace(jvmcont)("allocate_shadow_array nr_oops: %lu zero: %d", nr_oops, zero);
+
+  ArrayKlass* klass = ArrayKlass::cast(Universe::objectArrayKlassObj());
+  size_t size_in_words = objArrayOopDesc::object_size((int)nr_oops);
+  return objArrayOop(raw_allocate(klass, size_in_words, nr_oops, zero));
+}
+
+template <typename ConfigT>
 void ContMirror::zero_ref_array(objArrayOop new_array, int new_length, int min_length) {
   assert (new_length == new_array->length(), "");
   int extra_oops = new_length - min_length;
@@ -3684,10 +3912,13 @@
   if (start != NULL) {
     return allocator.initialize(start);
   } else {
-    HandleMark hm(_thread);
+    //HandleMark hm(_thread);
     Handle conth(_thread, _cont);
+    uint64_t counter = SafepointSynchronize::safepoint_counter();
     oop result = allocator.allocate(/* use_tlab */ false);
-    post_safepoint(conth);
+    //if (!SafepointSynchronize::is_same_safepoint(counter)) {
+      post_safepoint(conth);
+    //}
     return result;
   }
 }
@@ -3696,7 +3927,7 @@
   guarantee (false, "unreachable");
   int old_stack_length = _stack_length;
 
-  HandleMark hm(_thread);
+  //HandleMark hm(_thread);
   Handle conth(_thread, _cont);
   JavaCallArguments args;
   args.push_oop(conth);
@@ -3898,6 +4129,30 @@
 }
 #endif
 
+void Continuation::nmethod_patched(nmethod* nm) {
+  log_info(jvmcont)("nmethod patched %p", nm);
+  jweak shadow = nm->get_shadow();
+  oop resolved = JNIHandles::resolve(shadow);
+
+#ifndef PRODUCT
+  CountOops count;
+  nm->oops_do(&count, false, true);
+  assert(nm->nr_oops() >= count.nr_oops(), "should be");
+#endif
+
+  if (resolved == NULL) {
+    return;
+  }
+
+  if (UseCompressedOops) {
+    PersistOops<narrowOop> persist(nm->nr_oops(), (objArrayOop) resolved);
+    nm->oops_do(&persist);
+  } else {
+    PersistOops<oop> persist(nm->nr_oops(), (objArrayOop) resolved);
+    nm->oops_do(&persist);
+  }
+}
+
 static void print_oop(void *p, oop obj, outputStream* st) {
   if (!log_develop_is_enabled(Trace, jvmcont) && st != NULL) return;
 
--- a/src/hotspot/share/runtime/continuation.hpp	Tue Jul 02 09:43:31 2019 +0100
+++ b/src/hotspot/share/runtime/continuation.hpp	Thu Jul 18 11:31:49 2019 +0200
@@ -112,6 +112,8 @@
   static void describe(FrameValues &values);
 #endif
 
+  static void nmethod_patched(nmethod* nm);
+
 private:
   // declared here as it's used in friend declarations
   static address oop_address(objArrayOop ref_stack, int ref_sp, int index);