changeset 12234:80445c3c0f9f

8168909: aarch3264: arm32/64 port contribution cleanups
Reviewed-by: enevill
author bobv
date Wed, 02 Nov 2016 10:25:21 -0400
parents 1b1a49b438a7
children 436a6eb5a7a3
files
  make/gensrc/GensrcAdlc.gmk
  make/lib/CompileJvm.gmk
  make/lib/JvmFeatures.gmk
  src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
  src/cpu/arm/vm/assembler_arm_64.cpp
  src/cpu/arm/vm/c1_LIRAssembler_arm.cpp
  src/cpu/arm/vm/c1_LIRGenerator_arm.cpp
  src/cpu/arm/vm/frame_arm.cpp
  src/cpu/arm/vm/icache_arm.cpp
  src/cpu/arm/vm/jniFastGetField_arm.cpp
  src/cpu/arm/vm/macroAssembler_arm.hpp
  src/cpu/arm/vm/nativeInst_arm.hpp
  src/cpu/arm/vm/nativeInst_arm_32.cpp
  src/cpu/arm/vm/nativeInst_arm_32.hpp
  src/cpu/arm/vm/nativeInst_arm_64.cpp
  src/cpu/arm/vm/nativeInst_arm_64.hpp
  src/cpu/arm/vm/sharedRuntime_arm.cpp
  src/cpu/arm/vm/stubGenerator_arm.cpp
  src/cpu/arm/vm/vm_version_arm_32.cpp
  src/cpu/arm/vm/vm_version_arm_64.cpp
  src/cpu/arm/vm/vm_version_ext_arm.hpp
  src/share/vm/c1/c1_Runtime1.cpp
  src/share/vm/code/codeBlob.cpp
  src/share/vm/code/codeBlob.hpp
  src/share/vm/code/codeCacheExtensions.hpp
  src/share/vm/code/codeCacheExtensions_ext.hpp
  src/share/vm/code/stubs.cpp
  src/share/vm/code/stubs.hpp
  src/share/vm/interpreter/interpreterRuntime.cpp
  src/share/vm/interpreter/templateInterpreter.cpp
  src/share/vm/interpreter/templateInterpreterGenerator.cpp
  src/share/vm/memory/virtualspace.cpp
  src/share/vm/precompiled/precompiled.hpp
  src/share/vm/prims/methodHandles.cpp
  src/share/vm/runtime/arguments.cpp
  src/share/vm/runtime/init.cpp
  src/share/vm/runtime/sharedRuntime.cpp
  src/share/vm/runtime/stubCodeGenerator.cpp
  src/share/vm/runtime/stubRoutines.cpp
  src/share/vm/runtime/thread.cpp
  src/share/vm/runtime/vm_operations.cpp
  src/share/vm/runtime/vm_version.cpp
diffstat 42 files changed, 271 insertions(+), 562 deletions(-)
--- a/make/gensrc/GensrcAdlc.gmk	Fri Oct 28 11:10:38 2016 -0400
+++ b/make/gensrc/GensrcAdlc.gmk	Wed Nov 02 10:25:21 2016 -0400
@@ -114,6 +114,10 @@
     ADLCFLAGS += -U_LP64
   endif
 
+  ifeq ($(HOTSPOT_TARGET_CPU_ARCH), arm)
+    ADLCFLAGS += -DARM=1
+  endif
+
   ##############################################################################
   # Concatenate all ad source files into a single file, which will be fed to
   # adlc. Also include a #line directive at the start of every included file
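
The -DARM=1 define added above is visible to adlc when it preprocesses the concatenated .ad sources, so architecture-specific sections can be guarded with the ordinary preprocessor idiom. A minimal sketch of the consuming side, assuming a guard of this shape (the helper below is hypothetical; only the #ifdef pattern follows from the build change):

#ifdef ARM
// Seen by adlc and the C++ compiler only when the build defines ARM=1,
// i.e. when HOTSPOT_TARGET_CPU_ARCH is arm.
static bool built_for_arm() { return true; }
#else
static bool built_for_arm() { return false; }
#endif
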
--- a/make/lib/CompileJvm.gmk	Fri Oct 28 11:10:38 2016 -0400
+++ b/make/lib/CompileJvm.gmk	Wed Nov 02 10:25:21 2016 -0400
@@ -28,6 +28,7 @@
 include lib/JvmFeatures.gmk
 include lib/JvmOverrideFiles.gmk
 
+$(eval $(call IncludeCustomExtension, hotspot, lib/CompileJvm.gmk))
 
 ################################################################################
 # Setup compilation of the main Hotspot native library (libjvm).
@@ -139,7 +140,6 @@
 # Platform specific setup
 
 # ARM source selection
-# TODO - Need a better way of selecting open versus closed aarch64 sources
 
 ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), linux-arm)
   JVM_EXCLUDE_PATTERNS += arm_64
--- a/make/lib/JvmFeatures.gmk	Fri Oct 28 11:10:38 2016 -0400
+++ b/make/lib/JvmFeatures.gmk	Wed Nov 02 10:25:21 2016 -0400
@@ -252,4 +252,3 @@
     BUILD_LIBJVM_systemDictionary.cpp_CXXFLAGS := -fno-optimize-sibling-calls
   endif
 endif
-
--- a/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -476,6 +476,7 @@
     }
 #endif
   }
+#endif
   // handle exceptions
   {
     Label L;
--- a/src/cpu/arm/vm/assembler_arm_64.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/assembler_arm_64.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -26,7 +26,6 @@
 #include "asm/assembler.hpp"
 #include "asm/assembler.inline.hpp"
 #include "ci/ciEnv.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/cpu/arm/vm/c1_LIRAssembler_arm.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/c1_LIRAssembler_arm.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -2830,6 +2830,27 @@
     __ cbz(dst, *stub->entry());
   }
 
+  // If the compiler was not able to prove that exact type of the source or the destination
+  // of the arraycopy is an array type, check at runtime if the source or the destination is
+  // an instance type.
+  if (flags & LIR_OpArrayCopy::type_check) {
+    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
+      __ load_klass(tmp, dst);
+      __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+      __ mov_slow(tmp, Klass::_lh_neutral_value);
+      __ cmp_32(tmp2, tmp);
+      __ b(*stub->entry(), ge);
+    }
+
+    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
+      __ load_klass(tmp, src);
+      __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+      __ mov_slow(tmp, Klass::_lh_neutral_value);
+      __ cmp_32(tmp2, tmp);
+      __ b(*stub->entry(), ge);
+    }
+  }
+
   // Check if negative
   const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check |
                                   LIR_OpArrayCopy::dst_pos_positive_check |
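
The runtime filter added above keys off the Klass::layout_helper() encoding: array klasses store a negative layout helper, while instance klasses store a value at or above Klass::_lh_neutral_value, so the 'ge' compare against the neutral value routes anything not provably an array to the slow-path stub. A C++ restatement of the predicate the generated code computes (illustrative only; the helper name is made up, not part of this change):

#include "oops/klass.hpp"   // Klass::layout_helper(), Klass::_lh_neutral_value

// True when the klass might not be an array, i.e. when the arraycopy
// fast path must defer to the runtime stub, mirroring the 'ge' branch above.
static bool must_take_arraycopy_slow_path(Klass* k) {
  jint lh = k->layout_helper();
  return lh >= Klass::_lh_neutral_value;   // arrays encode negative layout helpers
}
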
--- a/src/cpu/arm/vm/c1_LIRGenerator_arm.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/c1_LIRGenerator_arm.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -51,7 +51,9 @@
 void LIRItem::load_nonconstant() {
   LIR_Opr r = value()->operand();
   if (_gen->can_inline_as_constant(value())) {
-    assert(r->is_constant(), "should be");
+    if (!r->is_constant()) {
+      r = LIR_OprFact::value_type(value()->type());
+    }
     _result = r;
   } else {
     load_item();
--- a/src/cpu/arm/vm/frame_arm.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/frame_arm.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -122,6 +122,10 @@
       // fp does not have to be safe (although it could be check for c1?)
 
       sender_sp = _unextended_sp + _cb->frame_size();
+      // Is sender_sp safe?
+      if ((address)sender_sp >= thread->stack_base()) {
+        return false;
+      }
       // With our calling conventions, the return_address should
       // end up being the word on the stack
       sender_pc = (address) *(sender_sp - sender_sp_offset + return_addr_offset);
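
The new guard rejects a candidate sender_sp that has already walked past the thread's stack base before the return address is read through it. A standalone sketch of that kind of plausibility check, assuming the usual downward-growing stack (illustrative only, not the VM's helper):

// A frame walker typically validates a candidate stack pointer against the
// thread's stack bounds before dereferencing it.
static bool sp_within_stack(const void* sp,
                            const void* stack_base,   // one past the highest address
                            const void* stack_end) {  // lowest usable address
  return sp >= stack_end && sp < stack_base;
}
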
--- a/src/cpu/arm/vm/icache_arm.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/icache_arm.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "assembler_arm.inline.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "runtime/icache.hpp"
 
 #define __ _masm->
--- a/src/cpu/arm/vm/jniFastGetField_arm.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/jniFastGetField_arm.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "assembler_arm.inline.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm_misc.hpp"
--- a/src/cpu/arm/vm/macroAssembler_arm.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/macroAssembler_arm.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #ifndef CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
 #define CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
 
-#include "code/codeCacheExtensions.hpp"
 #include "code/relocInfo.hpp"
 #include "code/relocInfo_ext.hpp"
 
--- a/src/cpu/arm/vm/nativeInst_arm.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/nativeInst_arm.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -26,7 +26,6 @@
 #define CPU_ARM_VM_NATIVEINST_ARM_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
--- a/src/cpu/arm/vm/nativeInst_arm_32.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/nativeInst_arm_32.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "assembler_arm.inline.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_arm.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/cpu/arm/vm/nativeInst_arm_32.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/nativeInst_arm_32.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
--- a/src/cpu/arm/vm/nativeInst_arm_64.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/nativeInst_arm_64.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "assembler_arm.inline.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_arm.hpp"
 #include "oops/klass.inline.hpp"
--- a/src/cpu/arm/vm/nativeInst_arm_64.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/nativeInst_arm_64.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
--- a/src/cpu/arm/vm/sharedRuntime_arm.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/sharedRuntime_arm.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -26,7 +26,6 @@
 #include "asm/assembler.hpp"
 #include "assembler_arm.inline.hpp"
 #include "code/debugInfoRec.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/cpu/arm/vm/stubGenerator_arm.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/stubGenerator_arm.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
 #include "assembler_arm.inline.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_arm.hpp"
 #include "oops/instanceOop.hpp"
--- a/src/cpu/arm/vm/vm_version_arm_32.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/vm_version_arm_32.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.inline.hpp"
--- a/src/cpu/arm/vm/vm_version_arm_64.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/vm_version_arm_64.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.inline.hpp"
--- a/src/cpu/arm/vm/vm_version_ext_arm.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/cpu/arm/vm/vm_version_ext_arm.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -22,8 +22,8 @@
  *
  */
 
-/* ARM sources Merged up to hotspot/src/closed  changeset 2356:bc66520ce0ca */
-/*                          hotspot/make/closed changeset 531:595af6fa421f */
+/* ARM sources Merged up to hotspot/src/closed  changeset 2374:a7e1339e4594 */
+/*                          hotspot/make/closed changeset 539:11ed5b132ece */
 
 #ifndef CPU_ARM_VM_VM_VERSION_EXT_ARM_HPP
 #define CPU_ARM_VM_VM_VERSION_EXT_ARM_HPP
--- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -33,7 +33,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeBlob.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "code/compiledIC.hpp"
 #include "code/pcDesc.hpp"
 #include "code/scopeDesc.hpp"
@@ -189,52 +188,44 @@
   int frame_size;
   bool must_gc_arguments;
 
-  if (!CodeCacheExtensions::skip_compiler_support()) {
-    // bypass useless code generation
-    Compilation::setup_code_buffer(&code, 0);
+  Compilation::setup_code_buffer(&code, 0);
 
-    // create assembler for code generation
-    StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
-    // generate code for runtime stub
-    oop_maps = generate_code_for(id, sasm);
-    assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
-           "if stub has an oop map it must have a valid frame size");
+  // create assembler for code generation
+  StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
+  // generate code for runtime stub
+  oop_maps = generate_code_for(id, sasm);
+  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
+         "if stub has an oop map it must have a valid frame size");
 
 #ifdef ASSERT
-    // Make sure that stubs that need oopmaps have them
-    switch (id) {
-      // These stubs don't need to have an oopmap
-    case dtrace_object_alloc_id:
-    case g1_pre_barrier_slow_id:
-    case g1_post_barrier_slow_id:
-    case slow_subtype_check_id:
-    case fpu2long_stub_id:
-    case unwind_exception_id:
-    case counter_overflow_id:
+  // Make sure that stubs that need oopmaps have them
+  switch (id) {
+    // These stubs don't need to have an oopmap
+  case dtrace_object_alloc_id:
+  case g1_pre_barrier_slow_id:
+  case g1_post_barrier_slow_id:
+  case slow_subtype_check_id:
+  case fpu2long_stub_id:
+  case unwind_exception_id:
+  case counter_overflow_id:
 #if defined(SPARC) || defined(PPC32)
-    case handle_exception_nofpu_id:  // Unused on sparc
+  case handle_exception_nofpu_id:  // Unused on sparc
 #endif
-      break;
+    break;
 
-      // All other stubs should have oopmaps
-    default:
-      assert(oop_maps != NULL, "must have an oopmap");
-    }
+    // All other stubs should have oopmaps
+  default:
+    assert(oop_maps != NULL, "must have an oopmap");
+  }
 #endif
 
-    // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
-    sasm->align(BytesPerWord);
-    // make sure all code is in code buffer
-    sasm->flush();
+  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
+  sasm->align(BytesPerWord);
+  // make sure all code is in code buffer
+  sasm->flush();
 
-    frame_size = sasm->frame_size();
-    must_gc_arguments = sasm->must_gc_arguments();
-  } else {
-    /* ignored values */
-    oop_maps = NULL;
-    frame_size = 0;
-    must_gc_arguments = false;
-  }
+  frame_size = sasm->frame_size();
+  must_gc_arguments = sasm->must_gc_arguments();
   // create blob - distinguish a few special cases
   CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                  &code,
--- a/src/share/vm/code/codeBlob.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/code/codeBlob.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "code/relocInfo.hpp"
 #include "compiler/disassembler.hpp"
 #include "interpreter/bytecode.hpp"
@@ -228,7 +227,6 @@
 
   BufferBlob* blob = NULL;
   unsigned int size = sizeof(BufferBlob);
-  CodeCacheExtensions::size_blob(name, &buffer_size);
   // align the size to CodeEntryAlignment
   size = CodeBlob::align_code_offset(size);
   size += round_to(buffer_size, oopSize);
@@ -312,7 +310,6 @@
 
   MethodHandlesAdapterBlob* blob = NULL;
   unsigned int size = sizeof(MethodHandlesAdapterBlob);
-  CodeCacheExtensions::size_blob("MethodHandles adapters", &buffer_size);
   // align the size to CodeEntryAlignment
   size = CodeBlob::align_code_offset(size);
   size += round_to(buffer_size, oopSize);
@@ -354,13 +351,11 @@
 {
   RuntimeStub* stub = NULL;
   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-  if (!CodeCacheExtensions::skip_code_generation()) {
-    // bypass useless code generation
+  {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
     stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
   }
-  stub = (RuntimeStub*) CodeCacheExtensions::handle_generated_blob(stub, stub_name);
 
   trace_new_stub(stub, "RuntimeStub - ", stub_name);
 
--- a/src/share/vm/code/codeBlob.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/code/codeBlob.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -39,8 +39,7 @@
     MethodProfiled      = 1,    // Execution level 2 and 3 (profiled) nmethods
     NonNMethod          = 2,    // Non-nmethods like Buffers, Adapters and Runtime Stubs
     All                 = 3,    // All types (No code cache segmentation)
-    Pregenerated        = 4,    // Special blobs, managed by CodeCacheExtensions
-    NumTypes            = 5     // Number of CodeBlobTypes
+    NumTypes            = 4     // Number of CodeBlobTypes
   };
 };
 
--- a/src/share/vm/code/codeCacheExtensions.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_HPP
-#define SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_HPP
-
-#include "memory/allocation.hpp"
-
-class CodeCacheExtensionsSteps: AllStatic {
-public:
-  enum Step {
-    // Support for optional fine grain initialization hooks
-    // Note: these hooks must support refining the granularity
-    // (e.g. adding intermediate steps in the ordered enum
-    // if needed for future features)
-    Start,
-    VMVersion,
-    StubRoutines1,
-    Universe,
-    TemplateInterpreter,
-    Interpreter,
-    StubRoutines2,
-    InitGlobals,
-    CreateVM,
-    LastStep
-  };
-};
-
-#include "code/codeCacheExtensions_ext.hpp"
-
-#endif // SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_HPP
--- a/src/share/vm/code/codeCacheExtensions_ext.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_EXT_HPP
-#define SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_EXT_HPP
-
-#include "utilities/macros.hpp"
-#include "memory/allocation.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "interpreter/bytecodes.hpp"
-
-class AdapterHandlerEntry;
-class CodeBlob;
-class CodeBuffer;
-class InterpreterMacroAssembler;
-class Template;
-
-// All the methods defined here are placeholders for possible extensions.
-
-class CodeCacheExtensions: AllStatic {
-  friend class CodeCacheDumper;
-
-public:
-  // init both code saving and loading
-  // Must be called very early, before any code is generated.
-  static void initialize() {}
-
-  // Check whether the generated interpreter will be saved.
-  static bool saving_generated_interpreter() { return false; }
-
-  // Check whether a pregenerated interpreter is used.
-  static bool use_pregenerated_interpreter() { return false; }
-
-  // Placeholder for additional VM initialization code
-  static void complete_step(CodeCacheExtensionsSteps::Step phase) {}
-
-  // Return false for newly generated code, on systems where it is not
-  // executable.
-  static bool is_executable(void *pc) { return true; }
-
-  // Return whether dynamically generated code can be executable
-  static bool support_dynamic_code() { return true; }
-
-  // Skip new code generation when known to be useless.
-  static bool skip_code_generation() { return false; }
-
-  // Skip stubs used only for compiled code support.
-  static bool skip_compiler_support() { return false; }
-
-  // Ignore UseFastSignatureHandlers when returning false
-  static bool support_fast_signature_handlers() { return true; }
-
-  /////////////////////////
-  // Handle generated code:
-  // - allow newly generated code to be shared
-  // - allow pregenerated code to be used in place of the newly generated one
-  //   (modifying pc).
-  // - support remapping when doing both save and load
-  // 'remap' can be set to false if the addresses handled are not referenced
-  // from code generated later.
-
-  // Associate a name to a generated codelet and possibly modify the pc
-  // Note: use instead the specialized versions when they exist:
-  // - handle_generated_blob for CodeBlob
-  // - handle_generated_handler for SignatureHandlers
-  // See also the optimized calls below that handle several PCs at once.
-  static void handle_generated_pc(address &pc, const char *name) {}
-
-  // Adds a safe definition of the codelet, for codelets used right after
-  // generation (else we would need to immediately stop the JVM and convert
-  // the generated code to executable format before being able to go further).
-  static void handle_generated_pc(address &pc, const char *name, address default_entry) {}
-
-  // Special cases
-
-  // Special case for CodeBlobs, which may require blob specific actions.
-  static CodeBlob* handle_generated_blob(CodeBlob* blob, const char *name = NULL) { return blob; }
-
-  // Special case for Signature Handlers.
-  static void handle_generated_handler(address &handler_start, const char *name, address handler_end) {}
-
-  // Support for generating different variants of the interpreter
-  // that can be dynamically selected after reload.
-  //
-  // - init_interpreter_assembler allows to configure the assembler for
-  //   the current variant
-  //
-  // - needs_other_interpreter_variant returns true as long as other
-  //   variants are needed.
-  //
-  // - skip_template_interpreter_entries returns true if new entries
-  //   need not be generated for this masm setup and this bytecode
-  //
-  // - completed_template_interpreter_entries is called after new
-  //   entries have been generated and installed, for any non skipped
-  //   bytecode.
-  static void init_interpreter_assembler(InterpreterMacroAssembler* masm, CodeBuffer* code) {}
-  static bool needs_other_interpreter_variant() { return false; }
-  static bool skip_template_interpreter_entries(Bytecodes::Code code) { return false; }
-  static void completed_template_interpreter_entries(InterpreterMacroAssembler* masm, Bytecodes::Code code) {}
-
-  // Code size optimization. May optimize the requested size.
-  static void size_blob(const char* name, int *updatable_size) {}
-
-  // ergonomics
-  static void set_ergonomics_flags() {}
-};
-
-#endif // SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_EXT_HPP
--- a/src/share/vm/code/stubs.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/code/stubs.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -262,16 +262,3 @@
   }
 }
 
-// Fixup for pregenerated code
-void StubQueue::fix_buffer(address buffer, address queue_end, address buffer_end, int number_of_stubs) {
-  const int extra_bytes = CodeEntryAlignment;
-  _stub_buffer = buffer;
-  _queue_begin = 0;
-  _queue_end = queue_end - buffer;
-  _number_of_stubs = number_of_stubs;
-  int size = buffer_end - buffer;
-  // Note: _buffer_limit must differ from _queue_end in the iteration loops
-  // => add extra space at the end (preserving alignment for asserts) if needed
-  if (buffer_end == queue_end) size += extra_bytes;
-  _buffer_limit = _buffer_size = size;
-}
--- a/src/share/vm/code/stubs.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/code/stubs.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -217,8 +217,6 @@
   void  verify();                                // verifies the stub queue
   void  print();                                 // prints information about the stub queue
 
-  // Fixup for pregenerated code
-  void fix_buffer(address buffer, address queue_end, address buffer_end, int number_of_stubs);
 };
 
 #endif // SHARE_VM_CODE_STUBS_HPP
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/disassembler.hpp"
 #include "gc/shared/collectedHeap.hpp"
@@ -1199,7 +1198,6 @@
     ICache::invalidate_range(handler, insts_size);
     _handler = handler + insts_size;
   }
-  CodeCacheExtensions::handle_generated_handler(handler, buffer->name(), _handler);
   return handler;
 }
 
@@ -1208,7 +1206,7 @@
     // use slow signature handler if we can't do better
     int handler_index = -1;
     // check if we can use customized (fast) signature handler
-    if (UseFastSignatureHandlers && CodeCacheExtensions::support_fast_signature_handlers() && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
+    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
       // use customized signature handler
       MutexLocker mu(SignatureHandlerLibrary_lock);
       // make sure data structure is initialized
@@ -1225,15 +1223,6 @@
           round_to((intptr_t)_buffer, CodeEntryAlignment) - (address)_buffer;
         CodeBuffer buffer((address)(_buffer + align_offset),
                           SignatureHandlerLibrary::buffer_size - align_offset);
-        if (!CodeCacheExtensions::support_dynamic_code()) {
-          // we need a name for the signature (for lookups or saving)
-          const int SYMBOL_SIZE = 50;
-          char *symbolName = NEW_RESOURCE_ARRAY(char, SYMBOL_SIZE);
-          // support for named signatures
-          jio_snprintf(symbolName, SYMBOL_SIZE,
-                       "native_" UINT64_FORMAT, fingerprint);
-          buffer.set_name(symbolName);
-        }
         InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
         // copy into code heap
         address handler = set_handler(&buffer);
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
@@ -52,29 +51,10 @@
     TraceTime timer("Interpreter generation", TRACETIME_LOG(Info, startuptime));
     int code_size = InterpreterCodeSize;
     NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
-#if INCLUDE_JVMTI
-    if (CodeCacheExtensions::saving_generated_interpreter()) {
-      // May requires several versions of the codelets.
-      // Final size will automatically be optimized.
-      code_size *= 2;
-    }
-#endif
     _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
                           "Interpreter");
     TemplateInterpreterGenerator g(_code);
   }
-  if (PrintInterpreter) {
-    if (CodeCacheExtensions::saving_generated_interpreter() &&
-        CodeCacheExtensions::use_pregenerated_interpreter()) {
-      ResourceMark rm;
-      tty->print("Printing the newly generated interpreter first");
-      print();
-      tty->print("Printing the pregenerated interpreter next");
-    }
-  }
-
-  // Install the pregenerated interpreter code before printing it
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::TemplateInterpreter);
 
   if (PrintInterpreter) {
     ResourceMark rm;
--- a/src/share/vm/interpreter/templateInterpreterGenerator.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/interpreter/templateInterpreterGenerator.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
@@ -55,226 +54,220 @@
 };
 
 void TemplateInterpreterGenerator::generate_all() {
-  // Loop, in case we need several variants of the interpreter entries
-  do {
-    if (!CodeCacheExtensions::skip_code_generation()) {
-      // bypass code generation when useless
-      { CodeletMark cm(_masm, "slow signature handler");
-        AbstractInterpreter::_slow_signature_handler = generate_slow_signature_handler();
-      }
+  { CodeletMark cm(_masm, "slow signature handler");
+    AbstractInterpreter::_slow_signature_handler = generate_slow_signature_handler();
+  }
 
-      { CodeletMark cm(_masm, "error exits");
-        _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
-        _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
-      }
+  { CodeletMark cm(_masm, "error exits");
+    _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
+    _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
+  }
 
 #ifndef PRODUCT
-      if (TraceBytecodes) {
-        CodeletMark cm(_masm, "bytecode tracing support");
-        Interpreter::_trace_code =
-          EntryPoint(
-                     generate_trace_code(btos),
-                     generate_trace_code(ztos),
-                     generate_trace_code(ctos),
-                     generate_trace_code(stos),
-                     generate_trace_code(atos),
-                     generate_trace_code(itos),
-                     generate_trace_code(ltos),
-                     generate_trace_code(ftos),
-                     generate_trace_code(dtos),
-                     generate_trace_code(vtos)
-                     );
-      }
+  if (TraceBytecodes) {
+    CodeletMark cm(_masm, "bytecode tracing support");
+    Interpreter::_trace_code =
+      EntryPoint(
+                 generate_trace_code(btos),
+                 generate_trace_code(ztos),
+                 generate_trace_code(ctos),
+                 generate_trace_code(stos),
+                 generate_trace_code(atos),
+                 generate_trace_code(itos),
+                 generate_trace_code(ltos),
+                 generate_trace_code(ftos),
+                 generate_trace_code(dtos),
+                 generate_trace_code(vtos)
+                 );
+  }
 #endif // !PRODUCT
 
-      { CodeletMark cm(_masm, "return entry points");
-        const int index_size = sizeof(u2);
-        for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
-          Interpreter::_return_entry[i] =
-            EntryPoint(
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(atos, i, index_size),
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(ltos, i, index_size),
-                       generate_return_entry_for(ftos, i, index_size),
-                       generate_return_entry_for(dtos, i, index_size),
-                       generate_return_entry_for(vtos, i, index_size)
-                       );
-        }
+  { CodeletMark cm(_masm, "return entry points");
+    const int index_size = sizeof(u2);
+    for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
+      Interpreter::_return_entry[i] =
+        EntryPoint(
+                   generate_return_entry_for(itos, i, index_size),
+                   generate_return_entry_for(itos, i, index_size),
+                   generate_return_entry_for(itos, i, index_size),
+                   generate_return_entry_for(itos, i, index_size),
+                   generate_return_entry_for(atos, i, index_size),
+                   generate_return_entry_for(itos, i, index_size),
+                   generate_return_entry_for(ltos, i, index_size),
+                   generate_return_entry_for(ftos, i, index_size),
+                   generate_return_entry_for(dtos, i, index_size),
+                   generate_return_entry_for(vtos, i, index_size)
+                   );
+    }
+  }
+
+  { CodeletMark cm(_masm, "invoke return entry points");
+    // These states are in order specified in TosState, except btos/ztos/ctos/stos are
+    // really the same as itos since there is no top of stack optimization for these types
+    const TosState states[] = {itos, itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos, ilgl};
+    const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic);
+    const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface);
+    const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic);
+
+    for (int i = 0; i < Interpreter::number_of_return_addrs; i++) {
+      TosState state = states[i];
+      assert(state != ilgl, "states array is wrong above");
+      Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2));
+      Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2));
+      Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4));
+    }
+  }
+
+  { CodeletMark cm(_masm, "earlyret entry points");
+    Interpreter::_earlyret_entry =
+      EntryPoint(
+                 generate_earlyret_entry_for(btos),
+                 generate_earlyret_entry_for(ztos),
+                 generate_earlyret_entry_for(ctos),
+                 generate_earlyret_entry_for(stos),
+                 generate_earlyret_entry_for(atos),
+                 generate_earlyret_entry_for(itos),
+                 generate_earlyret_entry_for(ltos),
+                 generate_earlyret_entry_for(ftos),
+                 generate_earlyret_entry_for(dtos),
+                 generate_earlyret_entry_for(vtos)
+                 );
+  }
+
+  { CodeletMark cm(_masm, "deoptimization entry points");
+    for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
+      Interpreter::_deopt_entry[i] =
+        EntryPoint(
+                   generate_deopt_entry_for(itos, i),
+                   generate_deopt_entry_for(itos, i),
+                   generate_deopt_entry_for(itos, i),
+                   generate_deopt_entry_for(itos, i),
+                   generate_deopt_entry_for(atos, i),
+                   generate_deopt_entry_for(itos, i),
+                   generate_deopt_entry_for(ltos, i),
+                   generate_deopt_entry_for(ftos, i),
+                   generate_deopt_entry_for(dtos, i),
+                   generate_deopt_entry_for(vtos, i)
+                   );
+    }
+  }
+
+  { CodeletMark cm(_masm, "result handlers for native calls");
+    // The various result converter stublets.
+    int is_generated[Interpreter::number_of_result_handlers];
+    memset(is_generated, 0, sizeof(is_generated));
+
+    for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
+      BasicType type = types[i];
+      if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
+        Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
       }
+    }
+  }
 
-      { CodeletMark cm(_masm, "invoke return entry points");
-        // These states are in order specified in TosState, except btos/ztos/ctos/stos are
-        // really the same as itos since there is no top of stack optimization for these types
-        const TosState states[] = {itos, itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos, ilgl};
-        const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic);
-        const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface);
-        const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic);
+  { CodeletMark cm(_masm, "continuation entry points");
+    Interpreter::_continuation_entry =
+      EntryPoint(
+                 generate_continuation_for(btos),
+                 generate_continuation_for(ztos),
+                 generate_continuation_for(ctos),
+                 generate_continuation_for(stos),
+                 generate_continuation_for(atos),
+                 generate_continuation_for(itos),
+                 generate_continuation_for(ltos),
+                 generate_continuation_for(ftos),
+                 generate_continuation_for(dtos),
+                 generate_continuation_for(vtos)
+                 );
+  }
 
-        for (int i = 0; i < Interpreter::number_of_return_addrs; i++) {
-          TosState state = states[i];
-          assert(state != ilgl, "states array is wrong above");
-          Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2));
-          Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2));
-          Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4));
-        }
-      }
+  { CodeletMark cm(_masm, "safepoint entry points");
+    Interpreter::_safept_entry =
+      EntryPoint(
+                 generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(ztos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                 generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
+                 );
+  }
 
-      { CodeletMark cm(_masm, "earlyret entry points");
-        Interpreter::_earlyret_entry =
-          EntryPoint(
-                     generate_earlyret_entry_for(btos),
-                     generate_earlyret_entry_for(ztos),
-                     generate_earlyret_entry_for(ctos),
-                     generate_earlyret_entry_for(stos),
-                     generate_earlyret_entry_for(atos),
-                     generate_earlyret_entry_for(itos),
-                     generate_earlyret_entry_for(ltos),
-                     generate_earlyret_entry_for(ftos),
-                     generate_earlyret_entry_for(dtos),
-                     generate_earlyret_entry_for(vtos)
-                     );
-      }
+  { CodeletMark cm(_masm, "exception handling");
+    // (Note: this is not safepoint safe because thread may return to compiled code)
+    generate_throw_exception();
+  }
 
-      { CodeletMark cm(_masm, "deoptimization entry points");
-        for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
-          Interpreter::_deopt_entry[i] =
-            EntryPoint(
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(atos, i),
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(ltos, i),
-                       generate_deopt_entry_for(ftos, i),
-                       generate_deopt_entry_for(dtos, i),
-                       generate_deopt_entry_for(vtos, i)
-                       );
-        }
-      }
-
-      { CodeletMark cm(_masm, "result handlers for native calls");
-        // The various result converter stublets.
-        int is_generated[Interpreter::number_of_result_handlers];
-        memset(is_generated, 0, sizeof(is_generated));
-
-        for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
-          BasicType type = types[i];
-          if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
-            Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
-          }
-        }
-      }
-
-      { CodeletMark cm(_masm, "continuation entry points");
-        Interpreter::_continuation_entry =
-          EntryPoint(
-                     generate_continuation_for(btos),
-                     generate_continuation_for(ztos),
-                     generate_continuation_for(ctos),
-                     generate_continuation_for(stos),
-                     generate_continuation_for(atos),
-                     generate_continuation_for(itos),
-                     generate_continuation_for(ltos),
-                     generate_continuation_for(ftos),
-                     generate_continuation_for(dtos),
-                     generate_continuation_for(vtos)
-                     );
-      }
-
-      { CodeletMark cm(_masm, "safepoint entry points");
-        Interpreter::_safept_entry =
-          EntryPoint(
-                     generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(ztos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
-                     );
-      }
-
-      { CodeletMark cm(_masm, "exception handling");
-        // (Note: this is not safepoint safe because thread may return to compiled code)
-        generate_throw_exception();
-      }
-
-      { CodeletMark cm(_masm, "throw exception entrypoints");
-        Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
-        Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
-        Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
-        Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
-        Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
-        Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
-      }
+  { CodeletMark cm(_masm, "throw exception entrypoints");
+    Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
+    Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
+    Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
+    Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
+    Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
+    Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
+  }
 
 
 
 #define method_entry(kind)                                              \
-      { CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
-        Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
-        Interpreter::update_cds_entry_table(Interpreter::kind); \
-      }
+  { CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
+    Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
+    Interpreter::update_cds_entry_table(Interpreter::kind); \
+  }
 
-      // all non-native method kinds
-      method_entry(zerolocals)
-      method_entry(zerolocals_synchronized)
-      method_entry(empty)
-      method_entry(accessor)
-      method_entry(abstract)
-      method_entry(java_lang_math_sin  )
-      method_entry(java_lang_math_cos  )
-      method_entry(java_lang_math_tan  )
-      method_entry(java_lang_math_abs  )
-      method_entry(java_lang_math_sqrt )
-      method_entry(java_lang_math_log  )
-      method_entry(java_lang_math_log10)
-      method_entry(java_lang_math_exp  )
-      method_entry(java_lang_math_pow  )
-      if (UseFMA) {
-        method_entry(java_lang_math_fmaF)
-        method_entry(java_lang_math_fmaD)
-      }
-      method_entry(java_lang_ref_reference_get)
+  // all non-native method kinds
+  method_entry(zerolocals)
+  method_entry(zerolocals_synchronized)
+  method_entry(empty)
+  method_entry(accessor)
+  method_entry(abstract)
+  method_entry(java_lang_math_sin  )
+  method_entry(java_lang_math_cos  )
+  method_entry(java_lang_math_tan  )
+  method_entry(java_lang_math_abs  )
+  method_entry(java_lang_math_sqrt )
+  method_entry(java_lang_math_log  )
+  method_entry(java_lang_math_log10)
+  method_entry(java_lang_math_exp  )
+  method_entry(java_lang_math_pow  )
+  if (UseFMA) {
+    method_entry(java_lang_math_fmaF)
+    method_entry(java_lang_math_fmaD)
+  }
+  method_entry(java_lang_ref_reference_get)
 
-      AbstractInterpreter::initialize_method_handle_entries();
+  AbstractInterpreter::initialize_method_handle_entries();
 
-      // all native method kinds (must be one contiguous block)
-      Interpreter::_native_entry_begin = Interpreter::code()->code_end();
-      method_entry(native)
-      method_entry(native_synchronized)
-      Interpreter::_native_entry_end = Interpreter::code()->code_end();
+  // all native method kinds (must be one contiguous block)
+  Interpreter::_native_entry_begin = Interpreter::code()->code_end();
+  method_entry(native)
+  method_entry(native_synchronized)
+  Interpreter::_native_entry_end = Interpreter::code()->code_end();
 
-      if (UseCRC32Intrinsics) {
-        method_entry(java_util_zip_CRC32_update)
-        method_entry(java_util_zip_CRC32_updateBytes)
-        method_entry(java_util_zip_CRC32_updateByteBuffer)
-      }
+  if (UseCRC32Intrinsics) {
+    method_entry(java_util_zip_CRC32_update)
+    method_entry(java_util_zip_CRC32_updateBytes)
+    method_entry(java_util_zip_CRC32_updateByteBuffer)
+  }
 
-      if (UseCRC32CIntrinsics) {
-        method_entry(java_util_zip_CRC32C_updateBytes)
-        method_entry(java_util_zip_CRC32C_updateDirectByteBuffer)
-      }
+  if (UseCRC32CIntrinsics) {
+    method_entry(java_util_zip_CRC32C_updateBytes)
+    method_entry(java_util_zip_CRC32C_updateDirectByteBuffer)
+  }
 
-      method_entry(java_lang_Float_intBitsToFloat);
-      method_entry(java_lang_Float_floatToRawIntBits);
-      method_entry(java_lang_Double_longBitsToDouble);
-      method_entry(java_lang_Double_doubleToRawLongBits);
+  method_entry(java_lang_Float_intBitsToFloat);
+  method_entry(java_lang_Float_floatToRawIntBits);
+  method_entry(java_lang_Double_longBitsToDouble);
+  method_entry(java_lang_Double_doubleToRawLongBits);
 
 #undef method_entry
 
-      // Bytecodes
-      set_entry_points_for_all_bytes();
-    }
-  } while (CodeCacheExtensions::needs_other_interpreter_variant());
+  // Bytecodes
+  set_entry_points_for_all_bytes();
 
   // installation of code in other places in the runtime
   // (ExcutableCodeManager calls not needed to copy the entries)
@@ -321,9 +314,6 @@
 
 
 void TemplateInterpreterGenerator::set_entry_points(Bytecodes::Code code) {
-  if (CodeCacheExtensions::skip_template_interpreter_entries(code)) {
-    return;
-  }
   CodeletMark cm(_masm, Bytecodes::name(code), code);
   // initialize entry points
   assert(_unimplemented_bytecode    != NULL, "should have been generated before");
@@ -354,7 +344,6 @@
   EntryPoint entry(bep, zep, cep, sep, aep, iep, lep, fep, dep, vep);
   Interpreter::_normal_table.set_entry(code, entry);
   Interpreter::_wentry_point[code] = wep;
-  CodeCacheExtensions::completed_template_interpreter_entries(_masm, code);
 }
 
 
--- a/src/share/vm/memory/virtualspace.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/memory/virtualspace.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/virtualspace.hpp"
@@ -592,7 +591,7 @@
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                      size_t rs_align,
                                      bool large) :
-  ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
+  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 }
 
--- a/src/share/vm/precompiled/precompiled.hpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/precompiled/precompiled.hpp	Wed Nov 02 10:25:21 2016 -0400
@@ -66,7 +66,6 @@
 # include "classfile/vmSymbols.hpp"
 # include "code/codeBlob.hpp"
 # include "code/codeCache.hpp"
-# include "code/codeCacheExtensions.hpp"
 # include "code/compressedStream.hpp"
 # include "code/debugInfo.hpp"
 # include "code/debugInfoRec.hpp"
--- a/src/share/vm/prims/methodHandles.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/prims/methodHandles.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -26,7 +26,6 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/stringTable.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "code/dependencyContext.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
@@ -94,7 +93,6 @@
     StubCodeMark mark(this, "MethodHandle::interpreter_entry", vmIntrinsics::name_at(iid));
     address entry = MethodHandles::generate_method_handle_interpreter_entry(_masm, iid);
     if (entry != NULL) {
-      CodeCacheExtensions::handle_generated_pc(entry, vmIntrinsics::name_at(iid));
       Interpreter::set_entry_for_kind(mk, entry);
     }
     // If the entry is not set, it will throw AbstractMethodError.
--- a/src/share/vm/runtime/arguments.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/arguments.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -27,7 +27,6 @@
 #include "classfile/javaAssertions.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/referenceProcessor.hpp"
@@ -1868,7 +1867,6 @@
 #endif // _LP64
 #endif // !ZERO
 
-  CodeCacheExtensions::set_ergonomics_flags();
 }
 
 void Arguments::set_parallel_gc_flags() {
--- a/src/share/vm/runtime/init.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/init.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "interpreter/bytecodes.hpp"
@@ -105,20 +104,15 @@
   classLoader_init1();
   compilationPolicy_init();
   codeCache_init();
-  CodeCacheExtensions::initialize();
   VM_Version_init();
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::VMVersion);
   os_init_globals();
   stubRoutines_init1();
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::StubRoutines1);
   jint status = universe_init();  // dependent on codeCache_init and
                                   // stubRoutines_init1 and metaspace_init.
   if (status != JNI_OK)
     return status;
 
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::Universe);
   interpreter_init();  // before any methods loaded
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::Interpreter);
   invocationCounter_init();  // before any methods loaded
   marksweep_init();
   accessFlags_init();
@@ -148,7 +142,6 @@
   javaClasses_init();   // must happen after vtable initialization
   stubRoutines_init2(); // note: StubRoutines need 2-phase init
   MethodHandles::generate_adapters();
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::StubRoutines2);
 
 #if INCLUDE_NMT
   // Solaris stack is walkable only after stubRoutines are set up.
@@ -162,7 +155,6 @@
     CommandLineFlags::printFlags(tty, false, PrintFlagsRanges);
   }
 
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::InitGlobals);
   return JNI_OK;
 }
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -28,7 +28,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/compiledIC.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "code/scopeDesc.hpp"
 #include "code/vtableStubs.hpp"
 #include "compiler/abstractCompiler.hpp"
@@ -2505,27 +2504,15 @@
   if (_adapters != NULL) return;
   _adapters = new AdapterHandlerTable();
 
-  if (!CodeCacheExtensions::skip_compiler_support()) {
-    // Create a special handler for abstract methods.  Abstract methods
-    // are never compiled so an i2c entry is somewhat meaningless, but
-    // throw AbstractMethodError just in case.
-    // Pass wrong_method_abstract for the c2i transitions to return
-    // AbstractMethodError for invalid invocations.
-    address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
-    _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
-                                                                StubRoutines::throw_AbstractMethodError_entry(),
-                                                                wrong_method_abstract, wrong_method_abstract);
-  } else {
-    // Adapters are not supposed to be used.
-    // Generate a special one to cause an error if used (and store this
-    // singleton in place of the useless _abstract_method_error adapter).
-    address entry = (address) &unexpected_adapter_call;
-    _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
-                                                                entry,
-                                                                entry,
-                                                                entry);
-
-  }
+  // Create a special handler for abstract methods.  Abstract methods
+  // are never compiled so an i2c entry is somewhat meaningless, but
+  // throw AbstractMethodError just in case.
+  // Pass wrong_method_abstract for the c2i transitions to return
+  // AbstractMethodError for invalid invocations.
+  address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
+  _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
+                                                              StubRoutines::throw_AbstractMethodError_entry(),
+                                                              wrong_method_abstract, wrong_method_abstract);
 }
 
 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
@@ -2574,17 +2561,6 @@
     // make sure data structure is initialized
     initialize();
 
-    // during dump time, always generate adapters, even if the
-    // compiler has been turned off.
-    if (!DumpSharedSpaces && CodeCacheExtensions::skip_compiler_support()) {
-      // adapters are useless and should not be used, including the
-      // abstract_method_handler. However, some callers check that
-      // an adapter was installed.
-      // Return the singleton adapter, stored into _abstract_method_handler
-      // and modified to cause an error if we ever call it.
-      return _abstract_method_handler;
-    }
-
     if (method->is_abstract()) {
       return _abstract_method_handler;
     }
--- a/src/share/vm/runtime/stubCodeGenerator.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -26,7 +26,6 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "compiler/disassembler.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/forte.hpp"
--- a/src/share/vm/runtime/stubRoutines.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/stubRoutines.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "asm/codeBuffer.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -204,12 +203,6 @@
 
 // simple tests of generated arraycopy functions
 static void test_arraycopy_func(address func, int alignment) {
-  if (CodeCacheExtensions::use_pregenerated_interpreter() || !CodeCacheExtensions::is_executable(func)) {
-    // Exit safely if stubs were generated but cannot be used.
-    // Also excluding pregenerated interpreter since the code may depend on
-    // some registers being properly initialized (for instance Rthread)
-    return;
-  }
   int v = 0xcc;
   int v2 = 0x11;
   jlong lbuffer[8];
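
With the pregenerated-code escape hatch removed, test_arraycopy_func always performs its sentinel round trip: fill the source with one byte pattern and the destination with another, call the generated stub, then verify both the copied region and the untouched tail. A self-contained sketch of that test shape (illustrative; test_copy_roundtrip and its signature are hypothetical, not the VM's code):

#include <cassert>
#include <cstring>

static void test_copy_roundtrip(void (*copy_func)(const void* src, void* dst, int count)) {
  unsigned char src[64], dst[64];
  memset(src, 0xcc, sizeof(src));   // pattern expected in the copied prefix
  memset(dst, 0x11, sizeof(dst));   // pattern expected to survive past it
  copy_func(src, dst, 32);
  assert(memcmp(dst, src, 32) == 0);   // copied bytes match the source
  for (int i = 32; i < 64; i++) {
    assert(dst[i] == 0x11);            // bytes past the copy are untouched
  }
}
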
--- a/src/share/vm/runtime/thread.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/thread.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -29,7 +29,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileTask.hpp"
@@ -3842,8 +3841,6 @@
     }
   }
 
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::CreateVM);
-
   create_vm_timer.end();
 #ifdef ASSERT
   _vm_complete = true;
--- a/src/share/vm/runtime/vm_operations.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/vm_operations.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -26,7 +26,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "compiler/compileBroker.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "logging/log.hpp"
@@ -390,7 +389,6 @@
 Thread * VM_Exit::_shutdown_thread = NULL;
 
 int VM_Exit::set_vm_exited() {
-  CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::LastStep);
 
   Thread * thr_cur = Thread::current();
 
--- a/src/share/vm/runtime/vm_version.cpp	Fri Oct 28 11:10:38 2016 -0400
+++ b/src/share/vm/runtime/vm_version.cpp	Wed Nov 02 10:25:21 2016 -0400
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "code/codeCacheExtensions.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
@@ -127,9 +126,6 @@
 
 
 const char* Abstract_VM_Version::vm_info_string() {
-  if (CodeCacheExtensions::use_pregenerated_interpreter()) {
-    return "interpreted mode, pregenerated";
-  }
   switch (Arguments::mode()) {
     case Arguments::_int:
       return UseSharedSpaces ? "interpreted mode, sharing" : "interpreted mode";