changeset 48914:9095559d55e1 lvti

Automatic merge with default
author mcimadamore
date Thu, 18 Jan 2018 22:05:24 +0100
parents 554ae8fe9a3f 1dab70e20292
children 1b4d864fa681
files src/java.compiler/share/classes/javax/lang/model/overview.html src/java.compiler/share/classes/javax/tools/FileManagerUtils.java src/java.compiler/share/classes/javax/tools/overview.html src/java.xml/share/classes/com/sun/org/apache/xalan/internal/utils/FactoryImpl.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Resolve.java src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties src/jdk.jdeps/share/classes/com/sun/tools/javap/overview.html src/jdk.unsupported/share/classes/sun/reflect/Reflection.java test/jdk/jdk/internal/reflect/Reflection/GetCallerClassWithDepth.java test/jdk/sun/reflect/Reflection/GetCallerClassWithDepth.java test/langtools/tools/javac/T8192885/AddGotoAfterForLoopToLNTTest.java test/langtools/tools/javac/lvti/ParserTest.java test/langtools/tools/javac/lvti/ParserTest.out
diffstat 880 files changed, 11819 insertions(+), 5426 deletions(-)
--- a/.hgtags	Thu Jan 11 22:05:09 2018 +0100
+++ b/.hgtags	Thu Jan 18 22:05:24 2018 +0100
@@ -464,3 +464,5 @@
 959f2f7cbaa6d2ee45d50029744efb219721576c jdk-10+36
 4f830b447edf04fb4a52151a5ad44d9bb60723cd jdk-10+37
 e569e83139fdfbecfeb3cd9014d560917787f158 jdk-10+38
+5b834ec962366e00d4445352a999a3ac14e26f64 jdk-10+39
+860326263d1f6a83996d7da0f4c66806ae4aa1eb jdk-10+40
--- a/make/autoconf/generated-configure.sh	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/autoconf/generated-configure.sh	Thu Jan 18 22:05:24 2018 +0100
@@ -902,6 +902,9 @@
 VERSION_UPDATE
 VERSION_INTERIM
 VERSION_FEATURE
+VENDOR_URL_VM_BUG
+VENDOR_URL_BUG
+VENDOR_URL
 COMPANY_NAME
 MACOSX_BUNDLE_ID_BASE
 MACOSX_BUNDLE_NAME_BASE
@@ -1150,6 +1153,9 @@
 with_version_minor
 with_version_security
 with_vendor_name
+with_vendor_url
+with_vendor_bug_url
+with_vendor_vm_bug_url
 with_version_string
 with_version_pre
 with_version_opt
@@ -2074,7 +2080,16 @@
                           compatibility and is ignored
   --with-version-security Deprecated. Option is kept for backwards
                           compatibility and is ignored
-  --with-vendor-name      Set vendor name [not specified]
+  --with-vendor-name      Set vendor name. Among others, used to set the
+                          'java.vendor' and 'java.vm.vendor' system
+                          properties. [not specified]
+  --with-vendor-url       Set the 'java.vendor.url' system property [not
+                          specified]
+  --with-vendor-bug-url   Set the 'java.vendor.url.bug' system property [not
+                          specified]
+  --with-vendor-vm-bug-url
+                          Sets the bug URL which will be displayed when the VM
+                          crashes [not specified]
   --with-version-string   Set version string [calculated]
   --with-version-pre      Set the base part of the version 'PRE' field
                           (pre-release identifier) ['internal']
@@ -5176,7 +5191,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1513805283
+DATE_WHEN_GENERATED=1516225089
 
 ###############################################################################
 #
@@ -25072,12 +25087,62 @@
   if test "x$with_vendor_name" = xyes; then
     as_fn_error $? "--with-vendor-name must have a value" "$LINENO" 5
   elif  ! [[ $with_vendor_name =~ ^[[:print:]]*$ ]] ; then
-    as_fn_error $? "--with--vendor-name contains non-printing characters: $with_vendor_name" "$LINENO" 5
-  else
+    as_fn_error $? "--with-vendor-name contains non-printing characters: $with_vendor_name" "$LINENO" 5
+  elif test "x$with_vendor_name" != x; then
+    # Only set COMPANY_NAME if '--with-vendor-name' was used and is not empty.
+    # Otherwise we will use the value from "version-numbers" included above.
     COMPANY_NAME="$with_vendor_name"
   fi
 
 
+  # The vendor URL, if any
+
+# Check whether --with-vendor-url was given.
+if test "${with_vendor_url+set}" = set; then :
+  withval=$with_vendor_url;
+fi
+
+  if test "x$with_vendor_url" = xyes; then
+    as_fn_error $? "--with-vendor-url must have a value" "$LINENO" 5
+  elif  ! [[ $with_vendor_url =~ ^[[:print:]]*$ ]] ; then
+    as_fn_error $? "--with-vendor-url contains non-printing characters: $with_vendor_url" "$LINENO" 5
+  else
+    VENDOR_URL="$with_vendor_url"
+  fi
+
+
+  # The vendor bug URL, if any
+
+# Check whether --with-vendor-bug-url was given.
+if test "${with_vendor_bug_url+set}" = set; then :
+  withval=$with_vendor_bug_url;
+fi
+
+  if test "x$with_vendor_bug_url" = xyes; then
+    as_fn_error $? "--with-vendor-bug-url must have a value" "$LINENO" 5
+  elif  ! [[ $with_vendor_bug_url =~ ^[[:print:]]*$ ]] ; then
+    as_fn_error $? "--with-vendor-bug-url contains non-printing characters: $with_vendor_bug_url" "$LINENO" 5
+  else
+    VENDOR_URL_BUG="$with_vendor_bug_url"
+  fi
+
+
+  # The vendor VM bug URL, if any
+
+# Check whether --with-vendor-vm-bug-url was given.
+if test "${with_vendor_vm_bug_url+set}" = set; then :
+  withval=$with_vendor_vm_bug_url;
+fi
+
+  if test "x$with_vendor_vm_bug_url" = xyes; then
+    as_fn_error $? "--with-vendor-vm-bug-url must have a value" "$LINENO" 5
+  elif  ! [[ $with_vendor_vm_bug_url =~ ^[[:print:]]*$ ]] ; then
+    as_fn_error $? "--with-vendor-vm-bug-url contains non-printing characters: $with_vendor_vm_bug_url" "$LINENO" 5
+  else
+    VENDOR_URL_VM_BUG="$with_vendor_vm_bug_url"
+  fi
+
+
   # Override version from arguments
 
   # If --with-version-string is set, process it first. It is possible to
--- a/make/autoconf/jdk-version.m4	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/autoconf/jdk-version.m4	Thu Jan 18 22:05:24 2018 +0100
@@ -80,16 +80,55 @@
 
   # The vendor name, if any
   AC_ARG_WITH(vendor-name, [AS_HELP_STRING([--with-vendor-name],
-      [Set vendor name @<:@not specified@:>@])])
+      [Set vendor name. Among others, used to set the 'java.vendor'
+       and 'java.vm.vendor' system properties. @<:@not specified@:>@])])
   if test "x$with_vendor_name" = xyes; then
     AC_MSG_ERROR([--with-vendor-name must have a value])
   elif [ ! [[ $with_vendor_name =~ ^[[:print:]]*$ ]] ]; then
-    AC_MSG_ERROR([--with--vendor-name contains non-printing characters: $with_vendor_name])
-  else
+    AC_MSG_ERROR([--with-vendor-name contains non-printing characters: $with_vendor_name])
+  elif test "x$with_vendor_name" != x; then
+    # Only set COMPANY_NAME if '--with-vendor-name' was used and is not empty.
+    # Otherwise we will use the value from "version-numbers" included above.
     COMPANY_NAME="$with_vendor_name"
   fi
   AC_SUBST(COMPANY_NAME)
 
+  # The vendor URL, if any
+  AC_ARG_WITH(vendor-url, [AS_HELP_STRING([--with-vendor-url],
+      [Set the 'java.vendor.url' system property @<:@not specified@:>@])])
+  if test "x$with_vendor_url" = xyes; then
+    AC_MSG_ERROR([--with-vendor-url must have a value])
+  elif [ ! [[ $with_vendor_url =~ ^[[:print:]]*$ ]] ]; then
+    AC_MSG_ERROR([--with-vendor-url contains non-printing characters: $with_vendor_url])
+  else
+    VENDOR_URL="$with_vendor_url"
+  fi
+  AC_SUBST(VENDOR_URL)
+
+  # The vendor bug URL, if any
+  AC_ARG_WITH(vendor-bug-url, [AS_HELP_STRING([--with-vendor-bug-url],
+      [Set the 'java.vendor.url.bug' system property @<:@not specified@:>@])])
+  if test "x$with_vendor_bug_url" = xyes; then
+    AC_MSG_ERROR([--with-vendor-bug-url must have a value])
+  elif [ ! [[ $with_vendor_bug_url =~ ^[[:print:]]*$ ]] ]; then
+    AC_MSG_ERROR([--with-vendor-bug-url contains non-printing characters: $with_vendor_bug_url])
+  else
+    VENDOR_URL_BUG="$with_vendor_bug_url"
+  fi
+  AC_SUBST(VENDOR_URL_BUG)
+
+  # The vendor VM bug URL, if any
+  AC_ARG_WITH(vendor-vm-bug-url, [AS_HELP_STRING([--with-vendor-vm-bug-url],
+      [Sets the bug URL which will be displayed when the VM crashes @<:@not specified@:>@])])
+  if test "x$with_vendor_vm_bug_url" = xyes; then
+    AC_MSG_ERROR([--with-vendor-vm-bug-url must have a value])
+  elif [ ! [[ $with_vendor_vm_bug_url =~ ^[[:print:]]*$ ]] ]; then
+    AC_MSG_ERROR([--with-vendor-vm-bug-url contains non-printing characters: $with_vendor_vm_bug_url])
+  else
+    VENDOR_URL_VM_BUG="$with_vendor_vm_bug_url"
+  fi
+  AC_SUBST(VENDOR_URL_VM_BUG)
+
   # Override version from arguments
 
   # If --with-version-string is set, process it first. It is possible to
--- a/make/autoconf/spec.gmk.in	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/autoconf/spec.gmk.in	Thu Jan 18 22:05:24 2018 +0100
@@ -142,6 +142,20 @@
 
 COPYRIGHT_YEAR:=@COPYRIGHT_YEAR@
 
+# Platform naming variables
+LAUNCHER_NAME:=@LAUNCHER_NAME@
+PRODUCT_NAME:=@PRODUCT_NAME@
+PRODUCT_SUFFIX:=@PRODUCT_SUFFIX@
+JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
+COMPANY_NAME:=@COMPANY_NAME@
+HOTSPOT_VM_DISTRO:=@HOTSPOT_VM_DISTRO@
+MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
+MACOSX_BUNDLE_ID_BASE=@MACOSX_BUNDLE_ID_BASE@
+USERNAME:=@USERNAME@
+VENDOR_URL:=@VENDOR_URL@
+VENDOR_URL_BUG:=@VENDOR_URL_BUG@
+VENDOR_URL_VM_BUG:=@VENDOR_URL_VM_BUG@
+
 # New (JEP-223) version information
 
 ## Building blocks of the version string
@@ -201,16 +215,30 @@
     -DVERSION_CLASSFILE_MINOR=$(VERSION_CLASSFILE_MINOR) \
     #
 
-# Platform naming variables
-LAUNCHER_NAME:=@LAUNCHER_NAME@
-PRODUCT_NAME:=@PRODUCT_NAME@
-PRODUCT_SUFFIX:=@PRODUCT_SUFFIX@
-JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
-COMPANY_NAME:=@COMPANY_NAME@
-HOTSPOT_VM_DISTRO:=@HOTSPOT_VM_DISTRO@
-MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
-MACOSX_BUNDLE_ID_BASE=@MACOSX_BUNDLE_ID_BASE@
-USERNAME:=@USERNAME@
+ifneq ($(COMPANY_NAME),)
+  # COMPANY_NAME is set to "N/A" in $AUTOCONF_DIR/version-numbers by default,
+  # but can be customized with the '--with-vendor-name' configure option.
+  # Only export "VENDOR" to the build if COMPANY_NAME contains a real value.
+  # Otherwise the default value for VENDOR, which is used to set the "java.vendor"
+  # and "java.vm.vendor" properties is hard-coded into the source code (i.e. in
+  # System.c in the jdk for "vm.vendor" and vm_version.cpp in the VM for "java.vm.vendor") 
+  ifneq ($(COMPANY_NAME), N/A)
+    VERSION_CFLAGS += -DVENDOR='"$(COMPANY_NAME)"' 
+  endif
+endif
+
+# Only export VENDOR_URL, VENDOR_URL_BUG and VENDOR_VM_URL_BUG to the build if
+# they are not empty. Otherwise, default values which are defined in the sources
+# will be used.
+ifneq ($(VENDOR_URL),)
+  VERSION_CFLAGS += -DVENDOR_URL='"$(VENDOR_URL)"'
+endif
+ifneq ($(VENDOR_URL_BUG),)
+  VERSION_CFLAGS += -DVENDOR_URL_BUG='"$(VENDOR_URL_BUG)"'
+endif
+ifneq ($(VENDOR_URL_VM_BUG),)
+  VERSION_CFLAGS += -DVENDOR_URL_VM_BUG='"$(VENDOR_URL_VM_BUG)"'
+endif
 
 # Different naming strings generated from the above information.
 RUNTIME_NAME=$(PRODUCT_NAME) $(PRODUCT_SUFFIX)
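
The jdk-version.m4 and spec.gmk.in hunks above wire the new configure options into the native build: --with-vendor-name feeds COMPANY_NAME (exported as -DVENDOR only when it is not the "N/A" placeholder), and --with-vendor-url, --with-vendor-bug-url and --with-vendor-vm-bug-url are passed through as -DVENDOR_URL, -DVENDOR_URL_BUG and -DVENDOR_URL_VM_BUG only when non-empty, so the defaults stay hard-coded in the sources. A minimal sketch, with a hypothetical consumer and a placeholder default rather than the actual System.c/vm_version.cpp code, of how such an optional define is typically picked up:

    #include <cstdio>

    // Hypothetical consumer: if the build passed -DVENDOR_URL='"..."', that
    // value wins; otherwise the placeholder default below is compiled in,
    // mirroring the "only export if non-empty" logic in spec.gmk.in.
    #ifndef VENDOR_URL
    #define VENDOR_URL "https://vendor.example/"   // placeholder, not the real default
    #endif

    int main() {
      std::printf("java.vendor.url = %s\n", VENDOR_URL);
      return 0;
    }
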
--- a/make/data/currency/CurrencyData.properties	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/data/currency/CurrencyData.properties	Thu Jan 18 22:05:24 2018 +0100
@@ -32,7 +32,7 @@
 # Version of the currency code information in this class.
 # It is a serial number that accompanies with each amendment.
 
-dataVersion=162
+dataVersion=164
 
 # List of all valid ISO 4217 currency codes.
 # To ensure compatibility, do not remove codes.
@@ -52,7 +52,7 @@
     NIO558-NLG528-NOK578-NPR524-NZD554-OMR512-PAB590-PEN604-PGK598-PHP608-\
     PKR586-PLN985-PTE620-PYG600-QAR634-ROL946-RON946-RSD941-RUB643-RUR810-RWF646-SAR682-\
     SBD090-SCR690-SDD736-SDG938-SEK752-SGD702-SHP654-SIT705-SKK703-SLL694-SOS706-\
-    SRD968-SRG740-SSP728-STD678-SVC222-SYP760-SZL748-THB764-TJS972-TMM795-TMT934-TND788-TOP776-\
+    SRD968-SRG740-SSP728-STD678-STN930-SVC222-SYP760-SZL748-THB764-TJS972-TMM795-TMT934-TND788-TOP776-\
     TPE626-TRL792-TRY949-TTD780-TWD901-TZS834-UAH980-UGX800-USD840-USN997-USS998-UYI940-\
     UYU858-UZS860-VEB862-VEF937-VND704-VUV548-WST882-XAF950-XAG961-XAU959-XBA955-\
     XBB956-XBC957-XBD958-XCD951-XDR960-XFO000-XFU000-XOF952-XPD964-XPF953-\
@@ -196,7 +196,7 @@
 CW=ANG
 # CYPRUS
 CY=EUR
-# CZECH REPUBLIC (THE)
+# CZECHIA
 CZ=CZK
 # DENMARK
 DK=DKK
@@ -470,7 +470,7 @@
 # SOUTH SUDAN
 SS=SSP
 # SAO TOME AND PRINCIPE
-ST=STD
+ST=STN
 # SAUDI ARABIA
 SA=SAR
 # SENEGAL
--- a/make/gensrc/Gensrc-java.desktop.gmk	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/gensrc/Gensrc-java.desktop.gmk	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,6 @@
     $(TOPDIR)/src/java.desktop/share/classes/sun/awt/resources \
     $(TOPDIR)/src/java.desktop/share/classes/com/sun/accessibility/internal/resources \
     $(TOPDIR)/src/java.desktop/share/classes/com/sun/java/swing/plaf/motif/resources \
-    $(TOPDIR)/src/java.desktop/share/classes/com/sun/java/swing/plaf/windows/resources \
     $(TOPDIR)/src/java.desktop/share/classes/com/sun/swing/internal/plaf/basic/resources \
     $(TOPDIR)/src/java.desktop/share/classes/com/sun/swing/internal/plaf/metal/resources \
     $(TOPDIR)/src/java.desktop/share/classes/com/sun/swing/internal/plaf/synth/resources \
@@ -61,7 +60,10 @@
 endif
 
 ifeq ($(OPENJDK_TARGET_OS), windows)
-  PROP_SRC_DIRS += $(TOPDIR)/src/java.desktop/windows/classes/sun/awt/windows
+  PROP_SRC_DIRS += \
+      $(TOPDIR)/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/resources \
+      $(TOPDIR)/src/java.desktop/windows/classes/sun/awt/windows \
+      #
 endif
 
 ifeq ($(filter $(OPENJDK_TARGET_OS), windows macosx), )
--- a/make/hotspot/lib/CompileJvm.gmk	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/hotspot/lib/CompileJvm.gmk	Thu Jan 18 22:05:24 2018 +0100
@@ -222,6 +222,7 @@
     CFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
     CXXFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
     vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
+    arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     DISABLED_WARNINGS_clang := tautological-compare, \
     DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
         1540-1088 1500-010, \
--- a/make/mapfiles/libjava/mapfile-vers	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/mapfiles/libjava/mapfile-vers	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -74,6 +74,7 @@
 		JNU_ThrowStringIndexOutOfBoundsException;
 		JNU_ToString;
 
+		Java_java_io_FileDescriptor_cleanupClose0;
 		Java_java_io_FileDescriptor_close0;
 		Java_java_io_FileDescriptor_initIDs;
 		Java_java_io_FileDescriptor_sync;
@@ -257,7 +258,6 @@
 		Java_jdk_internal_reflect_NativeConstructorAccessorImpl_newInstance0;
 		Java_jdk_internal_reflect_NativeMethodAccessorImpl_invoke0;
 		Java_jdk_internal_reflect_Reflection_getCallerClass__;
-		Java_jdk_internal_reflect_Reflection_getCallerClass__I;
 		Java_jdk_internal_reflect_Reflection_getClassAccessFlags;
 		Java_jdk_internal_misc_VM_latestUserDefinedLoader0;
                 Java_jdk_internal_misc_VM_getuid;
--- a/make/mapfiles/libjava/reorder-sparc	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/mapfiles/libjava/reorder-sparc	Thu Jan 18 22:05:24 2018 +0100
@@ -27,7 +27,6 @@
 text: .text%Java_java_io_FileOutputStream_initIDs;
 text: .text%Java_java_lang_System_setIn0;
 text: .text%Java_sun_reflect_Reflection_getCallerClass__;
-text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
 text: .text%Java_java_lang_Class_forName0;
 text: .text%Java_java_lang_Object_getClass;
 text: .text%Java_sun_reflect_Reflection_getClassAccessFlags;
--- a/make/mapfiles/libjava/reorder-sparcv9	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/mapfiles/libjava/reorder-sparcv9	Thu Jan 18 22:05:24 2018 +0100
@@ -26,7 +26,6 @@
 text: .text%Java_java_io_FileOutputStream_initIDs;
 text: .text%Java_java_lang_System_setIn0;
 text: .text%Java_sun_reflect_Reflection_getCallerClass__;
-text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
 text: .text%Java_java_lang_Class_forName0;
 text: .text%Java_java_lang_String_intern;
 text: .text%Java_java_lang_StringUTF16_isBigEndian;
--- a/make/mapfiles/libjava/reorder-x86	Thu Jan 11 22:05:09 2018 +0100
+++ b/make/mapfiles/libjava/reorder-x86	Thu Jan 18 22:05:24 2018 +0100
@@ -27,7 +27,6 @@
 text: .text%Java_java_io_FileOutputStream_initIDs;
 text: .text%Java_java_lang_System_setIn0;
 text: .text%Java_sun_reflect_Reflection_getCallerClass__;
-text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
 text: .text%Java_java_lang_Class_forName0;
 text: .text%Java_java_lang_String_intern;
 text: .text%Java_java_lang_StringUTF16_isBigEndian;
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu Jan 18 22:05:24 2018 +0100
@@ -5844,8 +5844,8 @@
 operand immByteMapBase()
 %{
   // Get base of card map
-  predicate((jbyte*)n->get_ptr() ==
-        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
+  predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
+    (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
   match(ConP);
 
   op_cost(0);
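
The immByteMapBase change above adds a dynamic-type check before the cast, so the predicate is simply false when the heap's barrier set is not a card-table barrier, instead of reading byte_map_base through a bogus downcast. A standalone sketch of that check-before-downcast pattern, using hypothetical stand-in types rather than the HotSpot classes:

    // Hypothetical stand-ins for BarrierSet / CardTableModRefBS.
    struct BarrierSet {
      enum Name { CardTableModRef, Other };
      Name _kind;
      bool is_a(Name n) const { return _kind == n; }
    };
    struct CardTableModRefBS : BarrierSet {
      signed char* byte_map_base;
    };

    // Downcast only after the kind check, mirroring the new predicate.
    signed char* card_table_base(BarrierSet* bs) {
      if (!bs->is_a(BarrierSet::CardTableModRef)) {
        return nullptr;   // no card table in use
      }
      return static_cast<CardTableModRefBS*>(bs)->byte_map_base;
    }
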
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -848,7 +848,7 @@
   // architecture.  In debug mode we shrink it in order to test
   // trampolines, but not so small that branches in the interpreter
   // are out of range.
-  static const unsigned long branch_range = INCLUDE_JVMCI ? 128 * M : NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
+  static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
 
   static bool reachable_from_branch_at(address branch, address target) {
     return uabs(target - branch) < branch_range;
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -71,6 +71,13 @@
   return 7 * NativeInstruction::instruction_size;
 }
 
+int CompiledStaticCall::to_trampoline_stub_size() {
+  // Somewhat pessimistically, we count 3 instructions here (although
+  // there are only two) because we sometimes emit an alignment nop.
+  // Trampoline stubs are always word aligned.
+  return 3 * NativeInstruction::instruction_size + wordSize;
+}
+
 // Relocation entries for call stub, compiled java to interpreter.
 int CompiledStaticCall::reloc_to_interp_stub() {
   return 4; // 3 in emit_to_interp_stub + 1 in emit_call
--- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -109,7 +109,7 @@
   TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
 }
 
-void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
+void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle hotspot_method, jint pc_offset, TRAPS) {
 #ifdef ASSERT
   Method* method = NULL;
   // we need to check, this might also be an unresolved method
@@ -124,22 +124,22 @@
     case INVOKEINTERFACE: {
       assert(method == NULL || !method->is_static(), "cannot call static method with invokeinterface");
       NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
-      call->set_destination(SharedRuntime::get_resolve_virtual_call_stub());
       _instructions->relocate(call->instruction_address(), virtual_call_Relocation::spec(_invoke_mark_pc));
+      call->trampoline_jump(cbuf, SharedRuntime::get_resolve_virtual_call_stub());
       break;
     }
     case INVOKESTATIC: {
       assert(method == NULL || method->is_static(), "cannot call non-static method with invokestatic");
       NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
-      call->set_destination(SharedRuntime::get_resolve_static_call_stub());
       _instructions->relocate(call->instruction_address(), relocInfo::static_call_type);
+      call->trampoline_jump(cbuf, SharedRuntime::get_resolve_static_call_stub());
       break;
     }
     case INVOKESPECIAL: {
       assert(method == NULL || !method->is_static(), "cannot call static method with invokespecial");
       NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
-      call->set_destination(SharedRuntime::get_resolve_opt_virtual_call_stub());
       _instructions->relocate(call->instruction_address(), relocInfo::opt_virtual_call_type);
+      call->trampoline_jump(cbuf, SharedRuntime::get_resolve_opt_virtual_call_stub());
       break;
     }
     default:
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -801,7 +801,7 @@
   assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
 
   end_a_stub();
-  return stub;
+  return stub_start_addr;
 }
 
 address MacroAssembler::ic_call(address entry, jint method_index) {
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -367,3 +367,24 @@
   set_ptr_at(data_offset, new_destination);
   OrderAccess::release();
 }
+
+// Generate a trampoline for a branch to dest.  If there's no need for a
+// trampoline, simply patch the call directly to dest.
+address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
+  MacroAssembler a(&cbuf);
+  address stub = NULL;
+
+  if (a.far_branches()
+      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
+    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
+  }
+
+  if (stub == NULL) {
+    // If we generated no stub, patch this call directly to dest.
+    // This will happen if we don't need far branches or if there
+    // already was a trampoline.
+    set_destination(dest);
+  }
+
+  return stub;
+}
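
The new NativeCall::trampoline_jump() above, which the jvmciCodeInstaller_aarch64.cpp hunks now call instead of set_destination(), only emits a trampoline stub when far branches are in use and no stub already sits at the call's current displacement; otherwise the call is patched directly. The underlying limit is the branch_range from assembler_aarch64.hpp (128 MB in product builds, 2 MB in debug builds now that the JVMCI special case is gone). A minimal sketch of that reachability test, using plain integers in place of the assembler's address type:

    #include <cstdint>

    static const uint64_t branch_range = 128 * 1024 * 1024;   // product-build value

    // A call can be patched directly when its target is within direct-branch
    // range (or a trampoline already exists); otherwise a trampoline stub is
    // emitted and the call is redirected to the stub.
    bool reachable_from_branch_at(uint64_t branch, uint64_t target) {
      uint64_t distance = (branch > target) ? (branch - target) : (target - branch);
      return distance < branch_range;
    }
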
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -61,7 +61,7 @@
     return uint_at(0);
   }
 
-  bool is_blr()                      const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
+  bool is_blr()                      const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register)
   bool is_adr_aligned()              const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
 
   inline bool is_nop();
@@ -143,8 +143,9 @@
 }
 
 inline NativeCall* nativeCall_at(address address);
-// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
-// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
+// The NativeCall is an abstraction for accessing/manipulating native
+// call instructions (used to manipulate inline caches, primitive &
+// DSO calls, etc.).
 
 class NativeCall: public NativeInstruction {
  public:
@@ -155,7 +156,6 @@
     return_address_offset       =    4
   };
 
-  enum { cache_line_size = BytesPerWord };  // conservative estimate!
   address instruction_address() const       { return addr_at(instruction_offset); }
   address next_instruction_address() const  { return addr_at(return_address_offset); }
   int   displacement() const                { return (int_at(displacement_offset) << 6) >> 4; }
@@ -206,6 +206,7 @@
   void set_destination_mt_safe(address dest, bool assert_lock = true);
 
   address get_trampoline();
+  address trampoline_jump(CodeBuffer &cbuf, address dest);
 };
 
 inline NativeCall* nativeCall_at(address address) {
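
The widened is_blr() mask above accepts both register-indirect branch forms. A small self-contained check, assuming the architectural encodings br Xn = 0xd61f0000 | (n << 5) and blr Xn = 0xd63f0000 | (n << 5), showing that masking out the register field and the variant bits lets one comparison cover both:

    #include <cassert>
    #include <cstdint>

    static bool is_br_or_blr(uint32_t encoding) {
      return (encoding & 0xff9ffc1f) == 0xd61f0000;   // same test as is_blr() above
    }

    int main() {
      uint32_t br_x16 = 0xd61f0000u | (16u << 5);   // br  x16
      uint32_t blr_x9 = 0xd63f0000u | (9u << 5);    // blr x9
      assert(is_br_or_blr(br_x16));
      assert(is_br_or_blr(blr_x9));
      assert(!is_br_or_blr(0xd4200000u));           // brk #0, for contrast
      return 0;
    }
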
--- a/src/hotspot/cpu/aarch64/relocInfo_aarch64.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/relocInfo_aarch64.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -35,4 +35,10 @@
     format_width       =  0
   };
 
+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }
+
 #endif // CPU_AARCH64_VM_RELOCINFO_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -774,7 +774,7 @@
     __ load_klass(rscratch1, receiver);
     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
     __ cmp(rscratch1, tmp);
-    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_method_offset()));
+    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
     __ br(Assembler::EQ, ok);
     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -3404,7 +3404,6 @@
   Label done;
   Label initialize_header;
   Label initialize_object; // including clearing the fields
-  Label allocate_shared;
 
   __ get_cpool_and_tags(r4, r0);
   // Make sure the class we're about to instantiate has been resolved.
@@ -3433,18 +3432,24 @@
   // test to see if it has a finalizer or is malformed in some way
   __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
 
-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  // (creates a new TLAB, etc.)
-
+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If fails due to heap end, go to slow path.
+  //
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
 
   if (UseTLAB) {
-    __ tlab_allocate(r0, r3, 0, noreg, r1,
-                     allow_shared_alloc ? allocate_shared : slow_case);
+    __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);
 
     if (ZeroTLAB) {
       // the fields have been already cleared
@@ -3453,19 +3458,19 @@
       // initialize both the header and fields
       __ b(initialize_object);
     }
+  } else {
+    // Allocation in the shared Eden, if allowed.
+    //
+    // r3: instance size in bytes
+    if (allow_shared_alloc) {
+      __ eden_allocate(r0, r3, 0, r10, slow_case);
+      __ incr_allocated_bytes(rthread, r3, 0, rscratch1);
+    }
   }
 
-  // Allocation in the shared Eden, if allowed.
-  //
-  // r3: instance size in bytes
-  if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
-    __ eden_allocate(r0, r3, 0, r10, slow_case);
-    __ incr_allocated_bytes(rthread, r3, 0, rscratch1);
-  }
-
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc are true, the object is created above and
+  // there is an initialize need. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // The object is initialized before the header.  If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
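
The rewritten comments in TemplateTable::_new above describe the simplified allocation flow: with UseTLAB, allocate in the TLAB and go straight to the slow path on failure (no more retry in shared eden); without TLAB, try an inline contiguous eden allocation when the heap supports it; initialization only happens on the fast paths. A minimal C++ sketch of that control flow, with hypothetical allocator stubs standing in for the emitted AArch64 code:

    #include <cstddef>
    #include <cstdlib>

    // Hypothetical stand-ins; the real template emits assembly and branches
    // to slow_case rather than calling functions.
    static void* tlab_allocate(std::size_t size) { return std::malloc(size); }
    static void* eden_allocate(std::size_t size) { return std::malloc(size); }
    static void* slow_case(std::size_t size)     { return std::malloc(size); }
    static void  initialize_object(void*, std::size_t) { /* header + fields */ }

    void* allocate_instance(std::size_t size, bool use_tlab, bool allow_shared_alloc) {
      void* obj = nullptr;
      if (use_tlab) {
        obj = tlab_allocate(size);      // on failure: straight to the slow path
      } else if (allow_shared_alloc) {
        obj = eden_allocate(size);      // inline contiguous allocation in eden
      }
      if (obj == nullptr) {
        return slow_case(size);         // new TLAB, GC, or neither fast path applies
      }
      initialize_object(obj, size);
      return obj;
    }
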
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -2475,49 +2475,65 @@
 // On success, the result will be in method_result, and execution falls through.
 // On failure, execution transfers to the given label.
 void MacroAssembler::lookup_interface_method(Register Rklass,
-                                             Register Rinterf,
-                                             Register Rindex,
+                                             Register Rintf,
+                                             RegisterOrConstant itable_index,
                                              Register method_result,
-                                             Register temp_reg1,
-                                             Register temp_reg2,
+                                             Register Rscan,
+                                             Register Rtmp,
                                              Label& L_no_such_interface) {
 
-  assert_different_registers(Rklass, Rinterf, temp_reg1, temp_reg2, Rindex);
-
-  Register Ritable = temp_reg1;
+  assert_different_registers(Rklass, Rintf, Rscan, Rtmp);
+
+  const int entry_size = itableOffsetEntry::size() * HeapWordSize;
+  assert(itableOffsetEntry::interface_offset_in_bytes() == 0, "not added for convenience");
 
   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
   const int base = in_bytes(Klass::vtable_start_offset());
   const int scale = exact_log2(vtableEntry::size_in_bytes());
-  ldr_s32(temp_reg2, Address(Rklass, Klass::vtable_length_offset())); // Get length of vtable
-  add(Ritable, Rklass, base);
-  add(Ritable, Ritable, AsmOperand(temp_reg2, lsl, scale));
-
-  Label entry, search;
-
-  b(entry);
-
-  bind(search);
-  add(Ritable, Ritable, itableOffsetEntry::size() * HeapWordSize);
-
-  bind(entry);
-
-  // Check that the entry is non-null.  A null entry means that the receiver
-  // class doesn't implement the interface, and wasn't the same as the
-  // receiver class checked when the interface was resolved.
-
-  ldr(temp_reg2, Address(Ritable, itableOffsetEntry::interface_offset_in_bytes()));
-  cbz(temp_reg2, L_no_such_interface);
-
-  cmp(Rinterf, temp_reg2);
-  b(search, ne);
-
-  ldr_s32(temp_reg2, Address(Ritable, itableOffsetEntry::offset_offset_in_bytes()));
-  add(temp_reg2, temp_reg2, Rklass); // Add offset to Klass*
-  assert(itableMethodEntry::size() * HeapWordSize == wordSize, "adjust the scaling in the code below");
-  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust the offset in the code below");
-
-  ldr(method_result, Address::indexed_ptr(temp_reg2, Rindex));
+  ldr_s32(Rtmp, Address(Rklass, Klass::vtable_length_offset())); // Get length of vtable
+  add(Rscan, Rklass, base);
+  add(Rscan, Rscan, AsmOperand(Rtmp, lsl, scale));
+
+  // Search through the itable for an interface equal to incoming Rintf
+  // itable looks like [intface][offset][intface][offset][intface][offset]
+
+  Label loop;
+  bind(loop);
+  ldr(Rtmp, Address(Rscan, entry_size, post_indexed));
+#ifdef AARCH64
+  Label found;
+  cmp(Rtmp, Rintf);
+  b(found, eq);
+  cbnz(Rtmp, loop);
+#else
+  cmp(Rtmp, Rintf);  // set ZF and CF if interface is found
+  cmn(Rtmp, 0, ne);  // check if tmp == 0 and clear CF if it is
+  b(loop, ne);
+#endif // AARCH64
+
+#ifdef AARCH64
+  b(L_no_such_interface);
+  bind(found);
+#else
+  // CF == 0 means we reached the end of itable without finding icklass
+  b(L_no_such_interface, cc);
+#endif // !AARCH64
+
+  if (method_result != noreg) {
+    // Interface found at previous position of Rscan, now load the method
+    ldr_s32(Rtmp, Address(Rscan, itableOffsetEntry::offset_offset_in_bytes() - entry_size));
+    if (itable_index.is_register()) {
+      add(Rtmp, Rtmp, Rklass); // Add offset to Klass*
+      assert(itableMethodEntry::size() * HeapWordSize == wordSize, "adjust the scaling in the code below");
+      assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust the offset in the code below");
+      ldr(method_result, Address::indexed_ptr(Rtmp, itable_index.as_register()));
+    } else {
+      int method_offset = itableMethodEntry::size() * HeapWordSize * itable_index.as_constant() +
+                          itableMethodEntry::method_offset_in_bytes();
+      add_slow(method_result, Rklass, method_offset);
+      ldr(method_result, Address(method_result, Rtmp));
+    }
+  }
 }
 
 #ifdef COMPILER2
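
The rewritten MacroAssembler::lookup_interface_method() above scans the itable, laid out as repeated [interface, offset] pairs terminated by a null interface, and loads the Method* only when a result register is supplied (the receiver-subtype check against the REFC passes noreg). A hypothetical plain-C++ model of that scan, not the emitted ARM code:

    struct ItableOffsetEntry {
      const void* interface;   // a null interface marks the end of the table
      int         offset;      // where this interface's methods live in the klass
    };

    // Returns the offset for 'wanted', or -1 when the receiver class does not
    // implement it (the generated code branches to L_no_such_interface instead).
    int find_itable_offset(const ItableOffsetEntry* scan, const void* wanted) {
      for (; scan->interface != nullptr; ++scan) {
        if (scan->interface == wanted) {
          return scan->offset;
        }
      }
      return -1;
    }
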
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1316,7 +1316,7 @@
 
   void lookup_interface_method(Register recv_klass,
                                Register intf_klass,
-                               Register itable_index,
+                               RegisterOrConstant itable_index,
                                Register method_result,
                                Register temp_reg1,
                                Register temp_reg2,
--- a/src/hotspot/cpu/arm/relocInfo_arm.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/arm/relocInfo_arm.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,4 +32,10 @@
     format_width = 0
   };
 
+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }
+
 #endif // CPU_ARM_VM_RELOCINFO_ARM_HPP
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -984,7 +984,7 @@
 
   __ load_klass(receiver_klass, receiver);
   __ ldr(holder_klass, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
-  __ ldr(Rmethod, Address(Ricklass, CompiledICHolder::holder_method_offset()));
+  __ ldr(Rmethod, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
   __ cmp(receiver_klass, holder_klass);
 
 #ifdef AARCH64
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -4198,7 +4198,7 @@
   const Register Rflags  = R3_tmp;
   const Register Rklass  = R3_tmp;
 
-  prepare_invoke(byte_no, Rinterf, Rindex, Rrecv, Rflags);
+  prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
 
   // Special case of invokeinterface called for virtual method of
   // java.lang.Object.  See cpCacheOop.cpp for details.
@@ -4207,56 +4207,39 @@
   Label notMethod;
   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
 
-  __ mov(Rmethod, Rindex);
   invokevirtual_helper(Rmethod, Rrecv, Rflags);
   __ bind(notMethod);
 
   // Get receiver klass into Rklass - also a null check
   __ load_klass(Rklass, Rrecv);
 
+  Label no_such_interface;
+
+  // Receiver subtype check against REFC.
+  __ lookup_interface_method(// inputs: rec. class, interface
+                             Rklass, Rinterf, noreg,
+                             // outputs:  scan temp. reg1, scan temp. reg2
+                             noreg, Ritable, Rtemp,
+                             no_such_interface);
+
   // profile this call
   __ profile_virtual_call(R0_tmp, Rklass);
 
-  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
-  const int base = in_bytes(Klass::vtable_start_offset());
-  assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
-  __ ldr_s32(Rtemp, Address(Rklass, Klass::vtable_length_offset())); // Get length of vtable
-  __ add(Ritable, Rklass, base);
-  __ add(Ritable, Ritable, AsmOperand(Rtemp, lsl, LogBytesPerWord));
-
-  Label entry, search, interface_ok;
-
-  __ b(entry);
-
-  __ bind(search);
-  __ add(Ritable, Ritable, itableOffsetEntry::size() * HeapWordSize);
-
-  __ bind(entry);
-
-  // Check that the entry is non-null.  A null entry means that the receiver
-  // class doesn't implement the interface, and wasn't the same as the
-  // receiver class checked when the interface was resolved.
-
-  __ ldr(Rtemp, Address(Ritable, itableOffsetEntry::interface_offset_in_bytes()));
-  __ cbnz(Rtemp, interface_ok);
-
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
-                   InterpreterRuntime::throw_IncompatibleClassChangeError));
-
-  // the call_VM checks for exception, so we should never return here.
-  __ should_not_reach_here();
-
-  __ bind(interface_ok);
-
-  __ cmp(Rinterf, Rtemp);
-  __ b(search, ne);
-
-  __ ldr_s32(Rtemp, Address(Ritable, itableOffsetEntry::offset_offset_in_bytes()));
-  __ add(Rtemp, Rtemp, Rklass); // Add offset to Klass*
-  assert(itableMethodEntry::size() == 1, "adjust the scaling in the code below");
-
-  __ ldr(Rmethod, Address::indexed_ptr(Rtemp, Rindex));
+  // Get declaring interface class from method
+  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
+  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
+  __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
+
+  // Get itable index from method
+  __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
+  __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
+  __ neg(Rindex, Rtemp);
+
+  __ lookup_interface_method(// inputs: rec. class, interface
+                             Rklass, Rinterf, Rindex,
+                             // outputs:  scan temp. reg1, scan temp. reg2
+                             Rmethod, Ritable, Rtemp,
+                             no_such_interface);
 
   // Rmethod: Method* to call
 
@@ -4278,6 +4261,13 @@
 
   // do the call
   __ jump_from_interpreted(Rmethod);
+
+  // throw exception
+  __ bind(no_such_interface);
+  __ restore_method();
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
+  // the call_VM checks for exception, so we should never return here.
+  __ should_not_reach_here();
 }
 
 void TemplateTable::invokehandle(int byte_no) {
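
In the new invokeinterface path above, the resolved Method* itself supplies both the declaring interface (through its constant pool holder) and an itable index stored relative to Method::itable_index_max; the arm32 code decodes it with an add of the negated constant followed by neg because the constant does not fit in an immediate. A small hypothetical model of just that decoding step:

    // Same arithmetic as: add(Rtemp, Rtemp, -Method::itable_index_max);
    //                     neg(Rindex, Rtemp);
    // i.e. index = itable_index_max - stored_value.
    int decode_itable_index(int stored_value, int itable_index_max) {
      int tmp = stored_value + (-itable_index_max);
      return -tmp;
    }
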
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -28,6 +28,7 @@
 #include "code/vtableStubs.hpp"
 #include "interp_masm_arm.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -118,67 +119,48 @@
 
   // R0-R3 / R0-R7 registers hold the arguments and cannot be spoiled
   const Register Rclass  = AARCH64_ONLY(R9)  NOT_AARCH64(R4);
-  const Register Rlength = AARCH64_ONLY(R10)  NOT_AARCH64(R5);
+  const Register Rintf   = AARCH64_ONLY(R10) NOT_AARCH64(R5);
   const Register Rscan   = AARCH64_ONLY(R11) NOT_AARCH64(R6);
-  const Register tmp     = Rtemp;
 
-  assert_different_registers(Ricklass, Rclass, Rlength, Rscan, tmp);
+  assert_different_registers(Ricklass, Rclass, Rintf, Rscan, Rtemp);
 
   // Calculate the start of itable (itable goes after vtable)
   const int scale = exact_log2(vtableEntry::size_in_bytes());
   address npe_addr = __ pc();
   __ load_klass(Rclass, R0);
-  __ ldr_s32(Rlength, Address(Rclass, Klass::vtable_length_offset()));
 
-  __ add(Rscan, Rclass, in_bytes(Klass::vtable_start_offset()));
-  __ add(Rscan, Rscan, AsmOperand(Rlength, lsl, scale));
+  Label L_no_such_interface;
 
-  // Search through the itable for an interface equal to incoming Ricklass
-  // itable looks like [intface][offset][intface][offset][intface][offset]
-  const int entry_size = itableOffsetEntry::size() * HeapWordSize;
-  assert(itableOffsetEntry::interface_offset_in_bytes() == 0, "not added for convenience");
+  // Receiver subtype check against REFC.
+  __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
+  __ lookup_interface_method(// inputs: rec. class, interface, itable index
+                             Rclass, Rintf, noreg,
+                             // outputs: temp reg1, temp reg2
+                             noreg, Rscan, Rtemp,
+                             L_no_such_interface);
 
-  Label loop;
-  __ bind(loop);
-  __ ldr(tmp, Address(Rscan, entry_size, post_indexed));
-#ifdef AARCH64
-  Label found;
-  __ cmp(tmp, Ricklass);
-  __ b(found, eq);
-  __ cbnz(tmp, loop);
-#else
-  __ cmp(tmp, Ricklass);  // set ZF and CF if interface is found
-  __ cmn(tmp, 0, ne);     // check if tmp == 0 and clear CF if it is
-  __ b(loop, ne);
-#endif // AARCH64
-
-  assert(StubRoutines::throw_IncompatibleClassChangeError_entry() != NULL, "Check initialization order");
-#ifdef AARCH64
-  __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, tmp);
-  __ bind(found);
-#else
-  // CF == 0 means we reached the end of itable without finding icklass
-  __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, noreg, cc);
-#endif // !AARCH64
-
-  // Interface found at previous position of Rscan, now load the method oop
-  __ ldr_s32(tmp, Address(Rscan, itableOffsetEntry::offset_offset_in_bytes() - entry_size));
-  {
-    const int method_offset = itableMethodEntry::size() * HeapWordSize * itable_index +
-      itableMethodEntry::method_offset_in_bytes();
-    __ add_slow(Rmethod, Rclass, method_offset);
-  }
-  __ ldr(Rmethod, Address(Rmethod, tmp));
+  // Get Method* and entry point for compiler
+  __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
+  __ lookup_interface_method(// inputs: rec. class, interface, itable index
+                             Rclass, Rintf, itable_index,
+                             // outputs: temp reg1, temp reg2, temp reg3
+                             Rmethod, Rscan, Rtemp,
+                             L_no_such_interface);
 
   address ame_addr = __ pc();
 
 #ifdef AARCH64
-  __ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
-  __ br(tmp);
+  __ ldr(Rtemp, Address(Rmethod, Method::from_compiled_offset()));
+  __ br(Rtemp);
 #else
   __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
 #endif // AARCH64
 
+  __ bind(L_no_such_interface);
+
+  assert(StubRoutines::throw_IncompatibleClassChangeError_entry() != NULL, "check initialization order");
+  __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, Rtemp);
+
   masm->flush();
 
   if (PrintMiscellaneous && (WizardMode || Verbose)) {
@@ -205,7 +187,7 @@
     instr_count = NOT_AARCH64(4) AARCH64_ONLY(5);
   } else {
     // itable stub size
-    instr_count = NOT_AARCH64(20) AARCH64_ONLY(20);
+    instr_count = NOT_AARCH64(31) AARCH64_ONLY(31);
   }
 
 #ifdef AARCH64
--- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -55,5 +55,9 @@
 #define SUPPORT_RESERVED_STACK_AREA
 
 #define THREAD_LOCAL_POLL
+// If UseSIGTRAP is active, we only use the poll bit and no polling page.
+// Otherwise, we fall back to usage of the polling page in nmethods.
+// Define the condition to use this -XX flag.
+#define USE_POLL_BIT_ONLY UseSIGTRAP
 
 #endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1788,11 +1788,10 @@
                                              RegisterOrConstant itable_index,
                                              Register method_result,
                                              Register scan_temp,
-                                             Register sethi_temp,
-                                             Label& L_no_such_interface) {
+                                             Register temp2,
+                                             Label& L_no_such_interface,
+                                             bool return_method) {
   assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
-  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
-         "caller must use same register for non-constant itable index as for method");
 
   // Compute start of first itableOffsetEntry (which is at the end of the vtable).
   int vtable_base = in_bytes(Klass::vtable_start_offset());
@@ -1810,15 +1809,17 @@
   add(scan_temp, recv_klass, scan_temp);
 
   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
-  if (itable_index.is_register()) {
-    Register itable_offset = itable_index.as_register();
-    sldi(itable_offset, itable_offset, logMEsize);
-    if (itentry_off) addi(itable_offset, itable_offset, itentry_off);
-    add(recv_klass, itable_offset, recv_klass);
-  } else {
-    long itable_offset = (long)itable_index.as_constant();
-    load_const_optimized(sethi_temp, (itable_offset<<logMEsize)+itentry_off); // static address, no relocation
-    add(recv_klass, sethi_temp, recv_klass);
+  if (return_method) {
+    if (itable_index.is_register()) {
+      Register itable_offset = itable_index.as_register();
+      sldi(method_result, itable_offset, logMEsize);
+      if (itentry_off) { addi(method_result, method_result, itentry_off); }
+      add(method_result, method_result, recv_klass);
+    } else {
+      long itable_offset = (long)itable_index.as_constant();
+      // static address, no relocation
+      add_const_optimized(method_result, recv_klass, (itable_offset << logMEsize) + itentry_off, temp2);
+    }
   }
 
   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
@@ -1831,12 +1832,12 @@
   for (int peel = 1; peel >= 0; peel--) {
     // %%%% Could load both offset and interface in one ldx, if they were
     // in the opposite order. This would save a load.
-    ld(method_result, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
+    ld(temp2, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
 
     // Check that this entry is non-null. A null entry means that
     // the receiver class doesn't implement the interface, and wasn't the
     // same as when the caller was compiled.
-    cmpd(CCR0, method_result, intf_klass);
+    cmpd(CCR0, temp2, intf_klass);
 
     if (peel) {
       beq(CCR0, found_method);
@@ -1849,7 +1850,7 @@
 
     bind(search);
 
-    cmpdi(CCR0, method_result, 0);
+    cmpdi(CCR0, temp2, 0);
     beq(CCR0, L_no_such_interface);
     addi(scan_temp, scan_temp, scan_step);
   }
@@ -1857,9 +1858,11 @@
   bind(found_method);
 
   // Got a hit.
-  int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
-  lwz(scan_temp, ito_offset, scan_temp);
-  ldx(method_result, scan_temp, recv_klass);
+  if (return_method) {
+    int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
+    lwz(scan_temp, ito_offset, scan_temp);
+    ldx(method_result, scan_temp, method_result);
+  }
 }
 
 // virtual method calling
@@ -5601,12 +5604,17 @@
 
 #endif // !PRODUCT
 
-SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
+void SkipIfEqualZero::skip_to_label_if_equal_zero(MacroAssembler* masm, Register temp,
+                                                  const bool* flag_addr, Label& label) {
   int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
   assert(sizeof(bool) == 1, "PowerPC ABI");
   masm->lbz(temp, simm16_offset, temp);
   masm->cmpwi(CCR0, temp, 0);
-  masm->beq(CCR0, _label);
+  masm->beq(CCR0, label);
+}
+
+SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
+  skip_to_label_if_equal_zero(masm, temp, flag_addr, _label);
 }
 
 SkipIfEqualZero::~SkipIfEqualZero() {
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -519,7 +519,8 @@
                                RegisterOrConstant itable_index,
                                Register method_result,
                                Register temp_reg, Register temp2_reg,
-                               Label& no_such_interface);
+                               Label& no_such_interface,
+                               bool return_method = true);
 
   // virtual method calling
   void lookup_virtual_method(Register recv_klass,
@@ -979,6 +980,8 @@
  public:
    // 'Temp' is a temp register that this object can use (and trash).
    explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
+   static void skip_to_label_if_equal_zero(MacroAssembler*, Register temp,
+                                           const bool* flag_addr, Label& label);
    ~SkipIfEqualZero();
 };
 
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -30,6 +30,7 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/codeBuffer.hpp"
 #include "code/codeCache.hpp"
+#include "runtime/safepointMechanism.hpp"
 
 inline bool MacroAssembler::is_ld_largeoffset(address a) {
   const int inst1 = *(int *)a;
@@ -261,7 +262,12 @@
 
 // Read from the polling page, its address is already in a register.
 inline void MacroAssembler::load_from_polling_page(Register polling_page_address, int offset) {
-  ld(R0, offset, polling_page_address);
+  if (SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) {
+    int encoding = SafepointMechanism::poll_bit();
+    tdi(traptoGreaterThanUnsigned | traptoEqual, polling_page_address, encoding);
+  } else {
+    ld(R0, offset, polling_page_address);
+  }
 }
 
 // Trap-instruction-based checks.
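
With USE_POLL_BIT_ONLY (defined as UseSIGTRAP in globalDefinitions_ppc.hpp above), the safepoint check becomes a tdi trap on the thread-local poll word instead of a load from the polling page, and nativeInst_ppc.hpp recognizes that tdi as a safepoint poll. The trap condition traptoGreaterThanUnsigned | traptoEqual fires when the poll word compares unsigned greater-than-or-equal to the poll-bit encoding. A minimal sketch of that condition with plain integers:

    #include <cstdint>

    // Models the tdi emitted above: trap (and enter the safepoint handler)
    // when the thread-local poll word is unsigned >= the poll-bit encoding.
    bool poll_would_trap(uint64_t poll_word, uint64_t poll_bit_encoding) {
      return poll_word >= poll_bit_encoding;
    }
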
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -31,6 +31,7 @@
 #include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointMechanism.hpp"
 
 // We have interfaces for the following instructions:
 //
@@ -93,6 +94,11 @@
   bool is_safepoint_poll() {
     // Is the current instruction a POTENTIAL read access to the polling page?
     // The current arguments of the instruction are not checked!
+    if (SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) {
+      int encoding = SafepointMechanism::poll_bit();
+      return MacroAssembler::is_tdi(long_at(0), Assembler::traptoGreaterThanUnsigned | Assembler::traptoEqual,
+                                    -1, encoding);
+    }
     return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
   }
 
--- a/src/hotspot/cpu/ppc/relocInfo_ppc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/relocInfo_ppc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,4 +43,10 @@
 #endif
   };
 
+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }
+
 #endif // CPU_PPC_VM_RELOCINFO_PPC_HPP
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1188,7 +1188,7 @@
   // Argument is valid and klass is as expected, continue.
 
   // Extract method from inline cache, verified entry point needs it.
-  __ ld(R19_method, CompiledICHolder::holder_method_offset(), ic);
+  __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
   assert(R19_method == ic, "the inline cache register is dead here");
 
   __ ld(code, method_(code));
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -3486,11 +3486,11 @@
 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                   Register Rret,
                                                   Register Rflags,
-                                                  Register Rindex,
+                                                  Register Rmethod,
                                                   Register Rtemp1,
                                                   Register Rtemp2) {
 
-  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
+  assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
   Label LnotFinal;
 
   // Check for vfinal.
@@ -3502,14 +3502,14 @@
   // Final call case.
   __ profile_final_call(Rtemp1, Rscratch);
   // Argument and return type profiling.
-  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
+  __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
   // Do the final call - the index (f2) contains the method.
-  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
+  __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);
 
   // Non-final callc case.
   __ bind(LnotFinal);
   __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
-  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
+  generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
 }
 
 void TemplateTable::invokeinterface(int byte_no) {
@@ -3518,58 +3518,61 @@
 
   const Register Rscratch1        = R11_scratch1,
                  Rscratch2        = R12_scratch2,
-                 Rscratch3        = R9_ARG7,
-                 Rscratch4        = R10_ARG8,
-                 Rtable_addr      = Rscratch2,
+                 Rmethod          = R6_ARG4,
+                 Rmethod2         = R9_ARG7,
                  Rinterface_klass = R5_ARG3,
-                 Rret_type        = R8_ARG6,
-                 Rret_addr        = Rret_type,
-                 Rindex           = R6_ARG4,
-                 Rreceiver        = R4_ARG2,
-                 Rrecv_klass      = Rreceiver,
+                 Rret_addr        = R8_ARG6,
+                 Rindex           = R10_ARG8,
+                 Rreceiver        = R3_ARG1,
+                 Rrecv_klass      = R4_ARG2,
                  Rflags           = R7_ARG5;
 
-  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);
+  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);
 
   // Get receiver klass.
-  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
+  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
   __ load_klass(Rrecv_klass, Rreceiver);
 
   // Check corner case object method.
-  Label LobjectMethod;
-
+  Label LobjectMethod, L_no_such_interface, Lthrow_ame;
   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
   __ btrue(CCR0, LobjectMethod);
 
-  // Fallthrough: The normal invokeinterface case.
+  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
+                             L_no_such_interface, /*return_method=*/false);
+
   __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
 
   // Find entry point to call.
-  Label Lthrow_icc, Lthrow_ame;
-  // Result will be returned in Rindex.
-  __ mr(Rscratch4, Rrecv_klass);
-  __ mr(Rscratch3, Rindex);
-  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);
-
-  __ cmpdi(CCR0, Rindex, 0);
+
+  // Get declaring interface class from method
+  __ ld(Rinterface_klass, in_bytes(Method::const_offset()), Rmethod);
+  __ ld(Rinterface_klass, in_bytes(ConstMethod::constants_offset()), Rinterface_klass);
+  __ ld(Rinterface_klass, ConstantPool::pool_holder_offset_in_bytes(), Rinterface_klass);
+
+  // Get itable index from method
+  __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
+  __ subfic(Rindex, Rindex, Method::itable_index_max);
+
+  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
+                             L_no_such_interface);
+
+  __ cmpdi(CCR0, Rmethod2, 0);
   __ beq(CCR0, Lthrow_ame);
   // Found entry. Jump off!
   // Argument and return type profiling.
-  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
-  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
+  __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
+  //__ profile_called_method(Rindex, Rscratch1);
+  __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);
 
   // Vtable entry was NULL => Throw abstract method error.
   __ bind(Lthrow_ame);
-  __ mr(Rrecv_klass, Rscratch4);
-  __ mr(Rindex, Rscratch3);
   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
 
   // Interface was not found => Throw incompatible class change error.
-  __ bind(Lthrow_icc);
-  __ mr(Rrecv_klass, Rscratch4);
+  __ bind(L_no_such_interface);
   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
-
-  __ should_not_reach_here();
+  DEBUG_ONLY( __ should_not_reach_here(); )
 
   // Special case of invokeinterface called for virtual method of
   // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
@@ -3577,7 +3580,7 @@
   // to handle this corner case. This code isn't produced by javac, but could
   // be produced by another compliant java compiler.
   __ bind(LobjectMethod);
-  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
+  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
 }
 
 void TemplateTable::invokedynamic(int byte_no) {
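To make the reworked dispatch above concrete: f2 now resolves directly to the target Method* (Rmethod), and the interpreter re-derives the declaring interface from the method's constant-pool holder and the itable slot from the method's index field, un-biasing it against Method::itable_index_max (the subfic in the hunk above). The stand-alone sketch below only demonstrates that bias arithmetic; the constant's value is invented for illustration and the helper names are not HotSpot APIs.

#include <cassert>

// Invented stand-in for Method::itable_index_max (the real constant differs).
static const int kItableIndexMax = 1 << 16;

// Assuming the index field stores the slot biased by the same constant, as the
// recovery code above implies (subfic Rindex, Rindex, Method::itable_index_max):
static int store_biased(int slot)   { return kItableIndexMax - slot; }
static int recover_slot(int biased) { return kItableIndexMax - biased; }

int main() {
  for (int slot = 0; slot < 16; slot++) {
    // The mapping is its own inverse, so one subtraction recovers the slot.
    assert(recover_slot(store_biased(slot)) == slot);
  }
  return 0;
}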
@@ -3634,10 +3637,7 @@
   transition(vtos, atos);
 
   Label Lslow_case,
-        Ldone,
-        Linitialize_header,
-        Lallocate_shared,
-        Linitialize_object;  // Including clearing the fields.
+        Ldone;
 
   const Register RallocatedObject = R17_tos,
                  RinstanceKlass   = R9_ARG7,
@@ -3648,8 +3648,6 @@
                  Rtags            = R3_ARG1,
                  Rindex           = R5_ARG3;
 
-  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
-
   // --------------------------------------------------------------------------
   // Check if fast case is possible.
 
@@ -3658,6 +3656,8 @@
   // Load index of constant pool entry.
   __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
 
+  // Note: compared to other architectures, PPC's implementation always goes
+  // to the slow path if TLAB is used and allocation in it fails.
   if (UseTLAB) {
     // Make sure the class we're about to instantiate has been resolved
     // This is done before loading instanceKlass to be consistent with the order
@@ -3687,8 +3687,7 @@
     // Fast case:
     // Allocate the instance.
     // 1) Try to allocate in the TLAB.
-    // 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden.
-    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
+    // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
 
     Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
     Register RnewTopValue = R6_ARG4;
@@ -3702,53 +3701,13 @@
 
     // If there is enough space, we do not CAS and do not clear.
     __ cmpld(CCR0, RnewTopValue, RendValue);
-    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
+    __ bgt(CCR0, Lslow_case);
 
     __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
 
-    if (ZeroTLAB) {
-      // The fields have already been cleared.
-      __ b(Linitialize_header);
-    } else {
-      // Initialize both the header and fields.
-      __ b(Linitialize_object);
-    }
-
-    // Fall through: TLAB was too small.
-    if (allow_shared_alloc) {
-      Register RtlabWasteLimitValue = R10_ARG8;
-      Register RfreeValue = RnewTopValue;
-
-      __ bind(Lallocate_shared);
-      // Check if tlab should be discarded (refill_waste_limit >= free).
-      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
-      __ subf(RfreeValue, RoldTopValue, RendValue);
-      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
-      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
-      __ bge(CCR0, Lslow_case);
-
-      // Increment waste limit to prevent getting stuck on this slow path.
-      __ add_const_optimized(RtlabWasteLimitValue, RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment());
-      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
-    }
-    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
-  }
-  // else: Always go the slow path.
-
-  // --------------------------------------------------------------------------
-  // slow case
-  __ bind(Lslow_case);
-  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
-
-  if (UseTLAB) {
-    __ b(Ldone);
-    // --------------------------------------------------------------------------
-    // Init1: Zero out newly allocated memory.
-
-    if (!ZeroTLAB || allow_shared_alloc) {
-      // Clear object fields.
-      __ bind(Linitialize_object);
-
+    if (!ZeroTLAB) {
+      // --------------------------------------------------------------------------
+      // Init1: Zero out newly allocated memory.
       // Initialize remaining object fields.
       Register Rbase = Rtags;
       __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
@@ -3757,13 +3716,10 @@
 
       // Clear out object skipping header. Takes also care of the zero length case.
       __ clear_memory_doubleword(Rbase, Rinstance_size);
-      // fallthru: __ b(Linitialize_header);
     }
 
     // --------------------------------------------------------------------------
     // Init2: Initialize the header: mark, klass
-    __ bind(Linitialize_header);
-
     // Init mark.
     if (UseBiasedLocking) {
       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
@@ -3777,14 +3733,19 @@
     __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
 
     // Check and trigger dtrace event.
-    {
-      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
-      __ push(atos);
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
-      __ pop(atos);
-    }
+    SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
+    __ push(atos);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
+    __ pop(atos);
+
+    __ b(Ldone);
   }
 
+  // --------------------------------------------------------------------------
+  // slow case
+  __ bind(Lslow_case);
+  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
+
   // continue
   __ bind(Ldone);
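The net effect of this hunk is that the interpreter's allocation fast path on PPC becomes TLAB-only: if the object fits under the TLAB end, the top pointer is bumped and the fields cleared (unless ZeroTLAB), otherwise control branches straight to InterpreterRuntime::_new without trying the shared eden. A small stand-alone model of that policy, using invented toy types rather than the real VM structures:

#include <cstddef>
#include <cstring>

// Toy TLAB: a [top, end) byte range owned by a single thread (illustrative only).
struct ToyTlab {
  char* top;
  char* end;
};

// Bump the TLAB top or give up; there is no shared-eden fallback any more.
// Returning nullptr means "take the slow path" (the runtime _new call).
void* tlab_allocate_or_bail(ToyTlab& tlab, size_t size, bool zero_tlab) {
  char* old_top = tlab.top;
  char* new_top = old_top + size;
  if (new_top > tlab.end) {
    return nullptr;                  // does not fit -> slow path
  }
  tlab.top = new_top;                // thread-local bump pointer, no CAS needed
  if (!zero_tlab) {
    std::memset(old_top, 0, size);   // clear the fields; header init follows
  }
  return old_top;
}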
 
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "code/vtableStubs.hpp"
 #include "interp_masm_ppc.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -55,17 +56,22 @@
   // PPC port: use fixed size.
   const int code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new (code_length) VtableStub(true, vtable_index);
+
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
-  address start_pc;
 
 #ifndef PRODUCT
   if (CountCompiledCalls) {
-    __ load_const(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr());
-    __ lwz(R12_scratch2, 0, R11_scratch1);
+    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
+    __ lwz(R12_scratch2, offs, R11_scratch1);
     __ addi(R12_scratch2, R12_scratch2, 1);
-    __ stw(R12_scratch2, 0, R11_scratch1);
+    __ stw(R12_scratch2, offs, R11_scratch1);
   }
 #endif
 
@@ -116,6 +122,7 @@
   __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
   __ mtctr(R12_scratch2);
   __ bctr();
+
   masm->flush();
 
   guarantee(__ pc() <= s->code_end(), "overflowed buffer");
@@ -125,10 +132,16 @@
   return s;
 }
 
-VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
+VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   // PPC port: use fixed size.
   const int code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new (code_length) VtableStub(false, vtable_index);
+  VtableStub* s = new (code_length) VtableStub(false, itable_index);
+
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -136,10 +149,10 @@
 
 #ifndef PRODUCT
   if (CountCompiledCalls) {
-    __ load_const(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr());
-    __ lwz(R12_scratch2, 0, R11_scratch1);
+    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
+    __ lwz(R12_scratch2, offs, R11_scratch1);
     __ addi(R12_scratch2, R12_scratch2, 1);
-    __ stw(R12_scratch2, 0, R11_scratch1);
+    __ stw(R12_scratch2, offs, R11_scratch1);
   }
 #endif
 
@@ -148,62 +161,28 @@
   // Entry arguments:
   //  R19_method: Interface
   //  R3_ARG1:    Receiver
-  //
 
-  const Register rcvr_klass = R11_scratch1;
-  const Register vtable_len = R12_scratch2;
-  const Register itable_entry_addr = R21_tmp1;
-  const Register itable_interface = R22_tmp2;
+  Label L_no_such_interface;
+  const Register rcvr_klass = R11_scratch1,
+                 interface  = R12_scratch2,
+                 tmp1       = R21_tmp1,
+                 tmp2       = R22_tmp2;
 
-  // Get receiver klass.
-
-  // We might implicit NULL fault here.
   address npe_addr = __ pc(); // npe = null pointer exception
   __ null_check(R3_ARG1, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
   __ load_klass(rcvr_klass, R3_ARG1);
 
-  BLOCK_COMMENT("Load start of itable entries into itable_entry.");
-  __ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
-  __ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
-  __ add(itable_entry_addr, vtable_len, rcvr_klass);
+  // Receiver subtype check against REFC.
+  __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
+  __ lookup_interface_method(rcvr_klass, interface, noreg,
+                             R0, tmp1, tmp2,
+                             L_no_such_interface, /*return_method=*/ false);
 
-  // Loop over all itable entries until desired interfaceOop(Rinterface) found.
-  BLOCK_COMMENT("Increment itable_entry_addr in loop.");
-  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
-  __ addi(itable_entry_addr, itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes());
-
-  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
-  Label search;
-  __ bind(search);
-  __ ld(itable_interface, 0, itable_entry_addr);
-
-  // Handle IncompatibleClassChangeError in itable stubs.
-  // If the entry is NULL then we've reached the end of the table
-  // without finding the expected interface, so throw an exception.
-  BLOCK_COMMENT("Handle IncompatibleClassChangeError in itable stubs.");
-  Label throw_icce;
-  __ cmpdi(CCR1, itable_interface, 0);
-  __ cmpd(CCR0, itable_interface, R19_method);
-  __ addi(itable_entry_addr, itable_entry_addr, itable_offset_search_inc);
-  __ beq(CCR1, throw_icce);
-  __ bne(CCR0, search);
-
-  // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
-
-  const Register vtable_offset = R12_scratch2;
-  const Register itable_method = R11_scratch1;
-
-  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
-                                    itableOffsetEntry::interface_offset_in_bytes()) -
-                                   itable_offset_search_inc;
-  __ lwz(vtable_offset, vtable_offset_offset, itable_entry_addr);
-
-  // Compute itableMethodEntry and get method and entry point for compiler.
-  const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
-    itableMethodEntry::method_offset_in_bytes();
-
-  __ add(itable_method, rcvr_klass, vtable_offset);
-  __ ld(R19_method, method_offset, itable_method);
+  // Get Method* and entrypoint for compiler
+  __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
+  __ lookup_interface_method(rcvr_klass, interface, itable_index,
+                             R19_method, tmp1, tmp2,
+                             L_no_such_interface, /*return_method=*/ true);
 
 #ifndef PRODUCT
   if (DebugVtables) {
@@ -219,7 +198,7 @@
   address ame_addr = __ pc(); // ame = abstract method error
 
   // Must do an explicit check if implicit checks are disabled.
-  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), &throw_icce);
+  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), &L_no_such_interface);
   __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
   __ mtctr(R12_scratch2);
   __ bctr();
@@ -229,8 +208,8 @@
   // We force resolving of the call site by jumping to the "handle
   // wrong method" stub, and so let the interpreter runtime do all the
   // dirty work.
-  __ bind(throw_icce);
-  __ load_const(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub());
+  __ bind(L_no_such_interface);
+  __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
   __ mtctr(R11_scratch1);
   __ bctr();
 
@@ -245,14 +224,15 @@
 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
   if (DebugVtables || CountCompiledCalls || VerifyOops) {
     return 1000;
-  } else {
-    int decode_klass_size = MacroAssembler::instr_size_for_decode_klass_not_null();
-    if (is_vtable_stub) {
-      return 20 + decode_klass_size +  8 + 8;   // Plain + cOops + Traps + safety
-    } else {
-      return 96 + decode_klass_size + 12 + 8;   // Plain + cOops + Traps + safety
-    }
   }
+  int size = is_vtable_stub ? 20 + 8 : 164 + 20; // Plain + safety
+  if (UseCompressedClassPointers) {
+    size += MacroAssembler::instr_size_for_decode_klass_not_null();
+  }
+  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
+    size += is_vtable_stub ? 8 : 12;
+  }
+  return size;
 }
 
 int VtableStub::pd_code_alignment() {
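Together with the templateTable change earlier in this changeset, the itable stub above now treats the incoming metadata register as a CompiledICHolder and resolves the call in two passes: a receiver subtype check against the reference class (holder_klass, return_method=false) followed by the Method* fetch for the declaring interface (holder_metadata). The sketch below models only that two-pass shape; the struct and the lookup helper are invented for illustration (the helper is left as a prototype) and are not HotSpot APIs.

struct Klass;                        // opaque stand-ins, not the real VM types
struct Method;

// Invented mirror of the two CompiledICHolder fields read in the hunk above.
struct ToyICHolder {
  Klass* holder_klass;               // reference class named at the call site (REFC)
  Klass* holder_metadata;            // declaring interface of the selected method
};

// Invented prototype: walks the receiver's itable for 'interface'; returns false
// when the interface is absent; fills *method_out only when it is non-null.
bool toy_lookup_interface_method(Klass* recv_klass, Klass* interface,
                                 int itable_slot, Method** method_out);

Method* toy_itable_dispatch(Klass* recv_klass, const ToyICHolder* holder, int itable_slot) {
  // Pass 1: type check only (return_method=false above); failure means the
  // receiver does not implement the reference class -> the ICCE path.
  if (!toy_lookup_interface_method(recv_klass, holder->holder_klass, 0, nullptr)) {
    return nullptr;                  // corresponds to L_no_such_interface
  }
  // Pass 2: fetch the Method* from the itable of the declaring interface.
  Method* m = nullptr;
  if (!toy_lookup_interface_method(recv_klass, holder->holder_metadata, itable_slot, &m)) {
    return nullptr;
  }
  return m;                          // a NULL method maps to AbstractMethodError
}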
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -2806,8 +2806,8 @@
                                              RegisterOrConstant itable_index,
                                              Register           method_result,
                                              Register           temp1_reg,
-                                             Register           temp2_reg,
-                                             Label&             no_such_interface) {
+                                             Label&             no_such_interface,
+                                             bool               return_method) {
 
   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
   const Register itable_entry_addr = Z_R1_scratch;
@@ -2842,38 +2842,36 @@
   z_brne(search);
 
   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
-
-  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
-                                    itableOffsetEntry::interface_offset_in_bytes()) -
-                                   itable_offset_search_inc;
-
-  // Compute itableMethodEntry and get method and entry point
-  // we use addressing with index and displacement, since the formula
-  // for computing the entry's offset has a fixed and a dynamic part,
-  // the latter depending on the matched interface entry and on the case,
-  // that the itable index has been passed as a register, not a constant value.
-  int method_offset = itableMethodEntry::method_offset_in_bytes();
-                           // Fixed part (displacement), common operand.
-  Register itable_offset;  // Dynamic part (index register).
-
-  if (itable_index.is_register()) {
-     // Compute the method's offset in that register, for the formula, see the
-     // else-clause below.
-     itable_offset = itable_index.as_register();
-
-     z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
-     z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
-  } else {
-    itable_offset = Z_R1_scratch;
-    // Displacement increases.
-    method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
-
-    // Load index from itable.
-    z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
-  }
-
-  // Finally load the method's oop.
-  z_lg(method_result, method_offset, itable_offset, recv_klass);
+  if (return_method) {
+    const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
+                                      itableOffsetEntry::interface_offset_in_bytes()) -
+                                     itable_offset_search_inc;
+
+    // Compute itableMethodEntry and get method and entry point
+    // we use addressing with index and displacement, since the formula
+    // for computing the entry's offset has a fixed and a dynamic part,
+    // the latter depending on the matched interface entry and on whether
+    // the itable index has been passed as a register or as a constant value.
+    int method_offset = itableMethodEntry::method_offset_in_bytes();
+                             // Fixed part (displacement), common operand.
+    Register itable_offset = method_result;  // Dynamic part (index register).
+
+    if (itable_index.is_register()) {
+       // Compute the method's offset in that register, for the formula, see the
+       // else-clause below.
+       z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
+       z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
+    } else {
+      // Displacement increases.
+      method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
+
+      // Load index from itable.
+      z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
+    }
+
+    // Finally load the method's oop.
+    z_lg(method_result, method_offset, itable_offset, recv_klass);
+  }
   BLOCK_COMMENT("} lookup_interface_method");
 }
 
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -671,8 +671,8 @@
                                RegisterOrConstant itable_index,
                                Register           method_result,
                                Register           temp1_reg,
-                               Register           temp2_reg,
-                               Label&             no_such_interface);
+                               Label&             no_such_interface,
+                               bool               return_method = true);
 
   // virtual method calling
   void lookup_virtual_method(Register             recv_klass,
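The new return_method flag appears intended to let callers run the itable walk purely as a subtype check without producing a Method*, while the default of true keeps untouched call sites (such as the methodHandles_s390.cpp call just below) source-compatible. The two usage shapes, as they appear in the vtableStubs_s390 hunk later in this changeset, are roughly:

// Receiver subtype check against REFC only - no Method* is produced:
__ lookup_interface_method(rcvr_klass, interface, noreg,
                           noreg, Z_R1, no_such_interface, /*return_method=*/ false);

// Full lookup - Method* is returned (the flag may be omitted, it defaults to true):
__ lookup_interface_method(rcvr_klass, interface, itable_index,
                           Z_method, Z_R1, no_such_interface, /*return_method=*/ true);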
--- a/src/hotspot/cpu/s390/methodHandles_s390.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/s390/methodHandles_s390.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -498,7 +498,7 @@
       Label L_no_such_interface;
       __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                  // Note: next two args must be the same:
-                                 Z_index, Z_method, temp2, noreg,
+                                 Z_index, Z_method, temp2,
                                  L_no_such_interface);
       jump_from_method_handle(_masm, Z_method, temp2, Z_R0, for_compiler_entry);
 
--- a/src/hotspot/cpu/s390/relocInfo_s390.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/s390/relocInfo_s390.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,6 +110,10 @@
     pcrel_data_format   = 3   // Relocation is for the target data of a pc-relative instruction.
   };
 
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }
+
   // Store the new target address into an oop_Relocation cell, if any.
   // Return indication if update happened.
   static bool update_oop_pool(address begin, address end, address newTarget, CodeBlob* cb);
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -2660,9 +2660,9 @@
   Label skip_fixup;
   {
     Label ic_miss;
-    const int klass_offset         = oopDesc::klass_offset_in_bytes();
-    const int holder_klass_offset  = CompiledICHolder::holder_klass_offset();
-    const int holder_method_offset = CompiledICHolder::holder_method_offset();
+    const int klass_offset           = oopDesc::klass_offset_in_bytes();
+    const int holder_klass_offset    = CompiledICHolder::holder_klass_offset();
+    const int holder_metadata_offset = CompiledICHolder::holder_metadata_offset();
 
     // Out-of-line call to ic_miss handler.
     __ call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);
@@ -2691,7 +2691,7 @@
     // This def MUST MATCH code in gen_c2i_adapter!
     const Register code = Z_R11;
 
-    __ z_lg(Z_method, holder_method_offset, Z_method);
+    __ z_lg(Z_method, holder_metadata_offset, Z_method);
     __ load_and_test_long(Z_R0, method_(code));
     __ z_brne(ic_miss);  // Cache miss: call runtime to handle this.
 
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -3557,66 +3557,67 @@
   transition(vtos, vtos);
 
   assert(byte_no == f1_byte, "use this argument");
-  Register interface = Z_tos;
-  Register index = Z_ARG3;
-  Register receiver = Z_tmp_1;
-  Register flags = Z_ARG5;
+  Register klass     = Z_ARG2,
+           method    = Z_ARG3,
+           interface = Z_ARG4,
+           flags     = Z_ARG5,
+           receiver  = Z_tmp_1;
 
   BLOCK_COMMENT("invokeinterface {");
 
-  // Destroys Z_ARG1 and Z_ARG2, thus use Z_ARG4 and copy afterwards.
-  prepare_invoke(byte_no, Z_ARG4, index,  // Get f1 klassOop, f2 itable index.
+  prepare_invoke(byte_no, interface, method,  // Get f1 klassOop, f2 itable index.
                  receiver, flags);
 
   // Z_R14 (== Z_bytecode) : return entry
 
-  __ z_lgr(interface, Z_ARG4);
-
   // Special case of invokeinterface called for virtual method of
   // java.lang.Object. See cpCacheOop.cpp for details.
   // This code isn't produced by javac, but could be produced by
   // another compliant java compiler.
-  Label notMethod;
+  NearLabel notMethod, no_such_interface, no_such_method;
   __ testbit(flags, ConstantPoolCacheEntry::is_forced_virtual_shift);
   __ z_brz(notMethod);
-  invokevirtual_helper(index, receiver, flags);
+  invokevirtual_helper(method, receiver, flags);
   __ bind(notMethod);
 
   // Get receiver klass into klass - also a null check.
-  Register klass = flags;
-
   __ restore_locals();
   __ load_klass(klass, receiver);
 
+  __ lookup_interface_method(klass, interface, noreg, noreg, /*temp*/Z_ARG1,
+                             no_such_interface, /*return_method=*/false);
+
   // Profile this call.
-  __ profile_virtual_call(klass, Z_ARG2/*mdp*/, Z_ARG4/*scratch*/);
-
-  NearLabel  no_such_interface, no_such_method;
-  Register   method = Z_tmp_2;
-
-  // TK 2010-08-24: save the index to Z_ARG4. needed in case of an error
-  //                in throw_AbstractMethodErrorByTemplateTable
-  __ z_lgr(Z_ARG4, index);
-  // TK 2011-03-24: copy also klass because it could be changed in
-  //                lookup_interface_method
-  __ z_lgr(Z_ARG2, klass);
-  __ lookup_interface_method(// inputs: rec. class, interface, itable index
-                              klass, interface, index,
-                              // outputs: method, scan temp. reg
-                              method, Z_tmp_2, Z_R1_scratch,
-                              no_such_interface);
+  __ profile_virtual_call(klass, Z_ARG1/*mdp*/, flags/*scratch*/);
+
+  // Find entry point to call.
+
+  // Get declaring interface class from method
+  __ z_lg(interface, Address(method, Method::const_offset()));
+  __ z_lg(interface, Address(interface, ConstMethod::constants_offset()));
+  __ z_lg(interface, Address(interface, ConstantPool::pool_holder_offset_in_bytes()));
+
+  // Get itable index from method
+  Register index   = receiver,
+           method2 = flags;
+  __ z_lgf(index, Address(method, Method::itable_index_offset()));
+  __ z_aghi(index, -Method::itable_index_max);
+  __ z_lcgr(index, index);
+
+  __ lookup_interface_method(klass, interface, index, method2, Z_tmp_2,
+                             no_such_interface);
 
   // Check for abstract method error.
   // Note: This should be done more efficiently via a throw_abstract_method_error
   // interpreter entry point and a conditional jump to it in case of a null
   // method.
-  __ compareU64_and_branch(method, (intptr_t) 0,
+  __ compareU64_and_branch(method2, (intptr_t) 0,
                             Assembler::bcondZero, no_such_method);
 
-  __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);
+  __ profile_arguments_type(Z_tmp_1, method2, Z_tmp_2, true);
 
   // Do the call.
-  __ jump_from_interpreted(method, Z_ARG5);
+  __ jump_from_interpreted(method2, Z_tmp_2);
   __ should_not_reach_here();
 
   // exception handling code follows...
@@ -3628,12 +3629,8 @@
   // Throw exception.
   __ restore_bcp();      // Bcp must be correct for exception handler   (was destroyed).
   __ restore_locals();   // Make sure locals pointer is correct as well (was destroyed).
-  // TK 2010-08-24: Call throw_AbstractMethodErrorByTemplateTable now with the
-  //                relevant information for generating a better error message
   __ call_VM(noreg,
-              CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::throw_AbstractMethodError),
-              Z_ARG2, interface, Z_ARG4);
+             CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
   // The call_VM checks for exception, so we should never return here.
   __ should_not_reach_here();
 
@@ -3642,12 +3639,8 @@
   // Throw exception.
   __ restore_bcp();      // Bcp must be correct for exception handler   (was destroyed).
   __ restore_locals();   // Make sure locals pointer is correct as well (was destroyed).
-  // TK 2010-08-24: Call throw_IncompatibleClassChangeErrorByTemplateTable now with the
-  //                relevant information for generating a better error message
   __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_IncompatibleClassChangeError),
-             Z_ARG2, interface);
+             CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
   // The call_VM checks for exception, so we should never return here.
   __ should_not_reach_here();
 
--- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "code/vtableStubs.hpp"
 #include "interp_masm_s390.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -57,7 +58,6 @@
   ResourceMark    rm;
   CodeBuffer      cb(s->entry_point(), code_length);
   MacroAssembler *masm = new MacroAssembler(&cb);
-  address start_pc;
   int     padding_bytes = 0;
 
 #if (!defined(PRODUCT) && defined(COMPILER2))
@@ -144,9 +144,9 @@
   return s;
 }
 
-VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
+VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   const int   code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub *s = new(code_length) VtableStub(false, vtable_index);
+  VtableStub *s = new(code_length) VtableStub(false, itable_index);
   if (s == NULL) { // Indicates OOM in the code cache.
     return NULL;
   }
@@ -154,7 +154,6 @@
   ResourceMark    rm;
   CodeBuffer      cb(s->entry_point(), code_length);
   MacroAssembler *masm = new MacroAssembler(&cb);
-  address start_pc;
   int     padding_bytes = 0;
 
 #if (!defined(PRODUCT) && defined(COMPILER2))
@@ -174,11 +173,9 @@
   // Entry arguments:
   //  Z_method: Interface
   //  Z_ARG1:   Receiver
-  const Register rcvr_klass = Z_tmp_1;    // Used to compute itable_entry_addr.
-                                          // Use extra reg to avoid re-load.
-  const Register vtable_len = Z_tmp_2;    // Used to compute itable_entry_addr.
-  const Register itable_entry_addr = Z_R1_scratch;
-  const Register itable_interface  = Z_R0_scratch;
+  NearLabel no_such_interface;
+  const Register rcvr_klass = Z_tmp_1,
+                 interface  = Z_tmp_2;
 
   // Get receiver klass.
   // Must do an explicit check if implicit checks are disabled.
@@ -186,50 +183,15 @@
   __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
   __ load_klass(rcvr_klass, Z_ARG1);
 
-  // Load start of itable entries into itable_entry.
-  __ z_llgf(vtable_len, Address(rcvr_klass, Klass::vtable_length_offset()));
-  __ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
+  // Receiver subtype check against REFC.
+  __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_klass_offset()));
+  __ lookup_interface_method(rcvr_klass, interface, noreg,
+                             noreg, Z_R1, no_such_interface, /*return_method=*/ false);
 
-  // Loop over all itable entries until desired interfaceOop(Rinterface) found.
-  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
-  // Count unused bytes.
-  start_pc = __ pc();
-  __ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len);
-  padding_bytes += 20 - (__ pc() - start_pc);
-
-  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
-  Label search;
-  __ bind(search);
-
-  // Handle IncompatibleClassChangeError in itable stubs.
-  // If the entry is NULL then we've reached the end of the table
-  // without finding the expected interface, so throw an exception.
-  NearLabel   throw_icce;
-  __ load_and_test_long(itable_interface, Address(itable_entry_addr));
-  __ z_bre(throw_icce); // Throw the exception out-of-line.
-  // Count unused bytes.
-  start_pc = __ pc();
-  __ add2reg(itable_entry_addr, itable_offset_search_inc);
-  padding_bytes += 20 - (__ pc() - start_pc);
-  __ z_cgr(itable_interface, Z_method);
-  __ z_brne(search);
-
-  // Entry found. Itable_entry_addr points to the subsequent entry (itable_offset_search_inc too far).
-  // Get offset of vtable for interface.
-
-  const Register vtable_offset = Z_R1_scratch;
-  const Register itable_method = rcvr_klass;   // Calculated before.
-
-  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
-                                    itableOffsetEntry::interface_offset_in_bytes()) -
-                                   itable_offset_search_inc;
-  __ z_llgf(vtable_offset, vtable_offset_offset, itable_entry_addr);
-
-  // Compute itableMethodEntry and get method and entry point for compiler.
-  const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
-                            itableMethodEntry::method_offset_in_bytes();
-
-  __ z_lg(Z_method, method_offset, vtable_offset, itable_method);
+  // Get Method* and entrypoint for compiler
+  __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_metadata_offset()));
+  __ lookup_interface_method(rcvr_klass, interface, itable_index,
+                             Z_method, Z_R1, no_such_interface, /*return_method=*/ true);
 
 #ifndef PRODUCT
   if (DebugVtables) {
@@ -244,13 +206,13 @@
   address ame_addr = __ pc();
   // Must do an explicit check if implicit checks are disabled.
   if (!ImplicitNullChecks) {
-    __ compare64_and_branch(Z_method, (intptr_t) 0, Assembler::bcondEqual, throw_icce);
+    __ compare64_and_branch(Z_method, (intptr_t) 0, Assembler::bcondEqual, no_such_interface);
   }
   __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
   __ z_br(Z_R1_scratch);
 
   // Handle IncompatibleClassChangeError in itable stubs.
-  __ bind(throw_icce);
+  __ bind(no_such_interface);
   // Count unused bytes
   //                  worst case          actual size
   // We force resolving of the call site by jumping to
@@ -273,13 +235,12 @@
   if (CountCompiledCalls) {
     size += 6 * 4;
   }
-  if (is_vtable_stub) {
-    size += 52;
-  } else {
-    size += 104;
+  size += is_vtable_stub ? 36 : 140;
+  if (UseCompressedClassPointers) {
+    size += MacroAssembler::instr_size_for_decode_klass_not_null();
   }
-  if (Universe::narrow_klass_base() != NULL) {
-    size += 16; // A guess.
+  if (!ImplicitNullChecks) {
+    size += 36;
   }
   return size;
 }
--- a/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -73,6 +73,11 @@
 }
 #undef __
 
+int CompiledStaticCall::to_trampoline_stub_size() {
+  // SPARC doesn't use trampolines.
+  return 0;
+}
+
 int CompiledStaticCall::to_interp_stub_size() {
   // This doesn't need to be accurate but it must be larger or equal to
   // the real size of the stub.
--- a/src/hotspot/cpu/sparc/jvmciCodeInstaller_sparc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/jvmciCodeInstaller_sparc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -115,7 +115,7 @@
   TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
 }
 
-void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
+void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
 #ifdef ASSERT
   Method* method = NULL;
   // we need to check, this might also be an unresolved method
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -2058,9 +2058,10 @@
                                              Register method_result,
                                              Register scan_temp,
                                              Register sethi_temp,
-                                             Label& L_no_such_interface) {
+                                             Label& L_no_such_interface,
+                                             bool return_method) {
   assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
-  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
+  assert(!return_method || itable_index.is_constant() || itable_index.as_register() == method_result,
          "caller must use same register for non-constant itable index as for method");
 
   Label L_no_such_interface_restore;
@@ -2092,11 +2093,13 @@
   add(scan_temp, itb_offset, scan_temp);
   add(recv_klass, scan_temp, scan_temp);
 
-  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
-  RegisterOrConstant itable_offset = itable_index;
-  itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
-  itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
-  add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
+  if (return_method) {
+    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
+    RegisterOrConstant itable_offset = itable_index;
+    itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
+    itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
+    add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
+  }
 
   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
   //   if (scan->interface() == intf) {
@@ -2131,12 +2134,14 @@
 
   bind(L_found_method);
 
-  // Got a hit.
-  int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
-  // scan_temp[-scan_step] points to the vtable offset we need
-  ito_offset -= scan_step;
-  lduw(scan_temp, ito_offset, scan_temp);
-  ld_ptr(recv_klass, scan_temp, method_result);
+  if (return_method) {
+    // Got a hit.
+    int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
+    // scan_temp[-scan_step] points to the vtable offset we need
+    ito_offset -= scan_step;
+    lduw(scan_temp, ito_offset, scan_temp);
+    ld_ptr(recv_klass, scan_temp, method_result);
+  }
 
   if (did_save) {
     Label L_done;
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1277,7 +1277,8 @@
                                RegisterOrConstant itable_index,
                                Register method_result,
                                Register temp_reg, Register temp2_reg,
-                               Label& no_such_interface);
+                               Label& no_such_interface,
+                               bool return_method = true);
 
   // virtual method calling
   void lookup_virtual_method(Register recv_klass,
--- a/src/hotspot/cpu/sparc/relocInfo_sparc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/relocInfo_sparc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,11 @@
     format_width       =  1
   };
 
+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }
 
 //Reconciliation History
 // 1.3 97/10/15 15:38:36 relocInfo_i486.hpp
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -904,7 +904,7 @@
 
     Label ok, ok2;
     __ brx(Assembler::equal, false, Assembler::pt, ok);
-    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
+    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_metadata_offset(), G5_method);
     __ jump_to(ic_miss, G3_scratch);
     __ delayed()->nop();
 
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -3081,15 +3081,15 @@
   assert(byte_no == f1_byte, "use this argument");
 
   const Register Rinterface  = G1_scratch;
+  const Register Rmethod     = Lscratch;
   const Register Rret        = G3_scratch;
-  const Register Rindex      = Lscratch;
   const Register O0_recv     = O0;
   const Register O1_flags    = O1;
   const Register O2_Klass    = O2;
   const Register Rscratch    = G4_scratch;
   assert_different_registers(Rscratch, G5_method);
 
-  prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
+  prepare_invoke(byte_no, Rinterface, Rret, Rmethod, O0_recv, O1_flags);
 
   // get receiver klass
   __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
@@ -3109,55 +3109,40 @@
 
   __ bind(notMethod);
 
+  Register Rtemp = O1_flags;
+
+  Label L_no_such_interface;
+
+  // Receiver subtype check against REFC.
+  __ lookup_interface_method(// inputs: rec. class, interface, itable index
+                             O2_Klass, Rinterface, noreg,
+                             // outputs: temp reg1, temp reg2, temp reg3
+                             G5_method, Rscratch, Rtemp,
+                             L_no_such_interface,
+                             /*return_method=*/false);
+
   __ profile_virtual_call(O2_Klass, O4);
 
   //
   // find entry point to call
   //
 
-  // compute start of first itableOffsetEntry (which is at end of vtable)
-  const int base = in_bytes(Klass::vtable_start_offset());
-  Label search;
-  Register Rtemp = O1_flags;
-
-  __ ld(O2_Klass, in_bytes(Klass::vtable_length_offset()), Rtemp);
-  __ sll(Rtemp, LogBytesPerWord, Rtemp);   // Rscratch *= 4;
-  if (Assembler::is_simm13(base)) {
-    __ add(Rtemp, base, Rtemp);
-  } else {
-    __ set(base, Rscratch);
-    __ add(Rscratch, Rtemp, Rtemp);
-  }
-  __ add(O2_Klass, Rtemp, Rscratch);
-
-  __ bind(search);
-
-  __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
-  {
-    Label ok;
-
-    // Check that entry is non-null.  Null entries are probably a bytecode
-    // problem.  If the interface isn't implemented by the receiver class,
-    // the VM should throw IncompatibleClassChangeError.  linkResolver checks
-    // this too but that's only if the entry isn't already resolved, so we
-    // need to check again.
-    __ br_notnull_short( Rtemp, Assembler::pt, ok);
-    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
-    __ should_not_reach_here();
-    __ bind(ok);
-  }
-
-  __ cmp(Rinterface, Rtemp);
-  __ brx(Assembler::notEqual, true, Assembler::pn, search);
-  __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
-
-  // entry found and Rscratch points to it
-  __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
-
-  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
-  __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);       // Rindex *= 8;
-  __ add(Rscratch, Rindex, Rscratch);
-  __ ld_ptr(O2_Klass, Rscratch, G5_method);
+  // Get declaring interface class from method
+  __ ld_ptr(Rmethod, Method::const_offset(), Rinterface);
+  __ ld_ptr(Rinterface, ConstMethod::constants_offset(), Rinterface);
+  __ ld_ptr(Rinterface, ConstantPool::pool_holder_offset_in_bytes(), Rinterface);
+
+  // Get itable index from method
+  const Register Rindex = G5_method;
+  __ ld(Rmethod, Method::itable_index_offset(), Rindex);
+  __ sub(Rindex, Method::itable_index_max, Rindex);
+  __ neg(Rindex);
+
+  __ lookup_interface_method(// inputs: rec. class, interface, itable index
+                             O2_Klass, Rinterface, Rindex,
+                             // outputs: method, scan temp reg, temp reg
+                             G5_method, Rscratch, Rtemp,
+                             L_no_such_interface);
 
   // Check for abstract method error.
   {
@@ -3174,6 +3159,10 @@
   __ profile_arguments_type(G5_method, Rcall, Gargs, true);
   __ profile_called_method(G5_method, Rscratch);
   __ call_from_interpreter(Rcall, Gargs, Rret);
+
+  __ bind(L_no_such_interface);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
+  __ should_not_reach_here();
 }
 
 void TemplateTable::invokehandle(int byte_no) {
@@ -3270,11 +3259,19 @@
   __ br(Assembler::notZero, false, Assembler::pn, slow_case);
   __ delayed()->nop();
 
-  // allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  // (creates a new TLAB, etc.)
+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If that fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If that fails due to heap end, go to the slow path.
+  //
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.
 
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
@@ -3302,61 +3299,43 @@
     }
     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
 
+    // Allocation does not fit in the TLAB.
+    __ ba_short(slow_case);
+  } else {
+    // Allocation in the shared Eden
     if (allow_shared_alloc) {
-      // Check if tlab should be discarded (refill_waste_limit >= free)
-      __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
-      __ sub(RendValue, RoldTopValue, RfreeValue);
-      __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
-      __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
-
-      // increment waste limit to prevent getting stuck on this slow path
-      if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
-        __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
-      } else {
-        // set64 does not use the temp register if the given constant is 32 bit. So
-        // we can just use any register; using G0 results in ignoring of the upper 32 bit
-        // of that value.
-        __ set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), G4_scratch, G0);
-        __ add(RtlabWasteLimitValue, G4_scratch, RtlabWasteLimitValue);
-      }
-      __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
-    } else {
-      // No allocation in the shared eden.
-      __ ba_short(slow_case);
+      Register RoldTopValue = G1_scratch;
+      Register RtopAddr = G3_scratch;
+      Register RnewTopValue = RallocatedObject;
+      Register RendValue = Rscratch;
+
+      __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
+
+      Label retry;
+      __ bind(retry);
+      __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
+      __ ld_ptr(RendValue, 0, RendValue);
+      __ ld_ptr(RtopAddr, 0, RoldTopValue);
+      __ add(RoldTopValue, Roffset, RnewTopValue);
+
+      // RnewTopValue contains the top address after the new object
+      // has been allocated.
+      __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
+
+      __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
+
+      // if someone beat us on the allocation, try again, otherwise continue
+      __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
+
+      // bump total bytes allocated by this thread
+      // RoldTopValue and RtopAddr are dead, so can use G1 and G3
+      __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
     }
   }
 
-  // Allocation in the shared Eden
-  if (allow_shared_alloc) {
-    Register RoldTopValue = G1_scratch;
-    Register RtopAddr = G3_scratch;
-    Register RnewTopValue = RallocatedObject;
-    Register RendValue = Rscratch;
-
-    __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
-
-    Label retry;
-    __ bind(retry);
-    __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
-    __ ld_ptr(RendValue, 0, RendValue);
-    __ ld_ptr(RtopAddr, 0, RoldTopValue);
-    __ add(RoldTopValue, Roffset, RnewTopValue);
-
-    // RnewTopValue contains the top address after the new object
-    // has been allocated.
-    __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
-
-    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
-
-    // if someone beat us on the allocation, try again, otherwise continue
-    __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
-
-    // bump total bytes allocated by this thread
-    // RoldTopValue and RtopAddr are dead, so can use G1 and G3
-    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
-  }
-
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc is true, the object has been created above
+  // and needs to be initialized. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // clear object fields
     __ bind(initialize_object);
     __ deccc(Roffset, sizeof(oopDesc));
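For orientation, the restructured fast path follows the decision tree spelled out in the comment block above: with UseTLAB it is bump-or-bail (the TLAB refill/waste handling is gone from this path), otherwise an optional CAS-retry allocation in the shared eden, and everything else falls through to the slow case. A compact stand-alone model of that structure, with invented toy types in place of the VM's:

#include <atomic>
#include <cstddef>

struct ToyEden {                       // invented stand-in for the shared eden
  std::atomic<char*> top;
  char*              end;
};

// nullptr means "take the slow path"; at most one of the two strategies runs.
void* allocate_fast(bool use_tlab, bool allow_shared_alloc,
                    char*& tlab_top, char* tlab_end,
                    ToyEden& eden, size_t size) {
  if (use_tlab) {
    char* old_top = tlab_top;
    if (old_top + size > tlab_end) return nullptr;   // no fit -> slow case
    tlab_top = old_top + size;                       // thread-local, no CAS
    return old_top;
  }
  if (allow_shared_alloc) {
    char* old_top = eden.top.load();
    for (;;) {                                       // retry when another thread wins
      char* new_top = old_top + size;
      if (new_top > eden.end) return nullptr;        // heap end reached -> slow case
      if (eden.top.compare_exchange_weak(old_top, new_top)) return old_top;
      // on failure compare_exchange_weak reloads old_top with the current value
    }
  }
  return nullptr;                                    // neither strategy applies
}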
--- a/src/hotspot/cpu/sparc/vtableStubs_sparc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/sparc/vtableStubs_sparc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -27,6 +27,7 @@
 #include "code/vtableStubs.hpp"
 #include "interp_masm_sparc.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -140,7 +141,8 @@
   MacroAssembler* masm = new MacroAssembler(&cb);
 
   Register G3_Klass = G3_scratch;
-  Register G5_interface = G5;  // Passed in as an argument
+  Register G5_icholder = G5;  // Passed in as an argument
+  Register G4_interface = G4_scratch;
   Label search;
 
   // Entry arguments:
@@ -164,14 +166,26 @@
   }
 #endif /* PRODUCT */
 
-  Label throw_icce;
+  Label L_no_such_interface;
 
   Register L5_method = L5;
+
+  // Receiver subtype check against REFC.
+  __ ld_ptr(G5_icholder, CompiledICHolder::holder_klass_offset(), G4_interface);
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
-                             G3_Klass, G5_interface, itable_index,
+                             G3_Klass, G4_interface, itable_index,
+                             // outputs: scan temp. reg1, scan temp. reg2
+                             L5_method, L2, L3,
+                             L_no_such_interface,
+                             /*return_method=*/ false);
+
+  // Get Method* and entrypoint for compiler
+  __ ld_ptr(G5_icholder, CompiledICHolder::holder_metadata_offset(), G4_interface);
+  __ lookup_interface_method(// inputs: rec. class, interface, itable index
+                             G3_Klass, G4_interface, itable_index,
                              // outputs: method, scan temp. reg
                              L5_method, L2, L3,
-                             throw_icce);
+                             L_no_such_interface);
 
 #ifndef PRODUCT
   if (DebugVtables) {
@@ -197,7 +211,7 @@
   __ JMP(G3_scratch, 0);
   __ delayed()->nop();
 
-  __ bind(throw_icce);
+  __ bind(L_no_such_interface);
   AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
   __ jump_to(icce, G3_scratch);
   __ delayed()->restore();
@@ -232,7 +246,7 @@
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return basic + slop;
     } else {
-      const int basic = 34 * BytesPerInstWord +
+      const int basic = 54 * BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
                         (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3167,6 +3167,89 @@
     return;
   }
 
+  if (UseAddressNop && VM_Version::is_zx()) {
+    //
+    // Using multi-bytes nops "0x0F 0x1F [address]" for ZX
+    //  1: 0x90
+    //  2: 0x66 0x90
+    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
+    //  4: 0x0F 0x1F 0x40 0x00
+    //  5: 0x0F 0x1F 0x44 0x00 0x00
+    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
+    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+
+    // The rest of the coding is ZX specific - don't use consecutive address nops
+
+    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+
+    while (i >= 15) {
+      // For ZX don't generate consecutive address nops (mix with regular nops)
+      i -= 15;
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      addr_nop_8();
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      emit_int8((unsigned char)0x90);
+                         // nop
+    }
+    switch (i) {
+      case 14:
+        emit_int8(0x66); // size prefix
+      case 13:
+        emit_int8(0x66); // size prefix
+      case 12:
+        addr_nop_8();
+        emit_int8(0x66); // size prefix
+        emit_int8(0x66); // size prefix
+        emit_int8(0x66); // size prefix
+        emit_int8((unsigned char)0x90);
+                         // nop
+        break;
+      case 11:
+        emit_int8(0x66); // size prefix
+      case 10:
+        emit_int8(0x66); // size prefix
+      case 9:
+        emit_int8(0x66); // size prefix
+      case 8:
+        addr_nop_8();
+        break;
+      case 7:
+        addr_nop_7();
+        break;
+      case 6:
+        emit_int8(0x66); // size prefix
+      case 5:
+        addr_nop_5();
+        break;
+      case 4:
+        addr_nop_4();
+        break;
+      case 3:
+        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
+        emit_int8(0x66); // size prefix
+      case 2:
+        emit_int8(0x66); // size prefix
+      case 1:
+        emit_int8((unsigned char)0x90);
+                         // nop
+        break;
+      default:
+        assert(i == 0, " ");
+    }
+    return;
+  }
+
   // Using nops with size prefixes "0x66 0x90".
   // From AMD Optimization Guide:
   //  1: 0x90
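
A hedged aside on the ZX branch above, not part of the changeset: the stand-alone sketch below models how a padding request decomposes into the 15-byte chunks emitted by the while loop (three 0x66 prefixes, an 8-byte address NOP, three more 0x66 prefixes and a plain 0x90), followed by one entry from the 1..14-byte table. The function name and output format are made up for illustration.

    #include <cstdio>

    // Illustrative only: prints the chunking Assembler::nop(i) would use on ZX.
    static void plan_zx_padding(int bytes) {
      while (bytes >= 15) {
        std::printf("15: 66 66 66 <addr_nop_8> 66 66 66 90\n");
        bytes -= 15;
      }
      if (bytes > 0) {
        std::printf("%2d: one NOP from the 1..14-byte table\n", bytes);
      }
    }

    int main() {
      plan_zx_padding(37);  // e.g. 37 = 15 + 15 + 7 (addr_nop_7)
      return 0;
    }
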
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -73,6 +73,11 @@
          LP64_ONLY(15);  // movq (1+1+8); jmp (1+4)
 }
 
+int CompiledStaticCall::to_trampoline_stub_size() {
+  // x86 doesn't use trampolines.
+  return 0;
+}
+
 // Relocation entries for call stub, compiled java to interpreter.
 int CompiledStaticCall::reloc_to_interp_stub() {
   return 4; // 3 in emit_to_interp_stub + 1 in emit_call
--- a/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -144,7 +144,7 @@
   TRACE_jvmci_3("relocating (foreign call)  at " PTR_FORMAT, p2i(inst));
 }
 
-void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
+void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
 #ifdef ASSERT
   Method* method = NULL;
   // we need to check, this might also be an unresolved method
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -5809,8 +5809,13 @@
                                              RegisterOrConstant itable_index,
                                              Register method_result,
                                              Register scan_temp,
-                                             Label& L_no_such_interface) {
-  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
+                                             Label& L_no_such_interface,
+                                             bool return_method) {
+  assert_different_registers(recv_klass, intf_klass, scan_temp);
+  assert_different_registers(method_result, intf_klass, scan_temp);
+  assert(recv_klass != method_result || !return_method,
+         "recv_klass can be destroyed when method isn't needed");
+
   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
          "caller must use same register for non-constant itable index as for method");
 
@@ -5827,9 +5832,11 @@
   // %%% Could store the aligned, prescaled offset in the klassoop.
   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
 
-  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
-  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
-  lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
+  if (return_method) {
+    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
+    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
+    lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
+  }
 
   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
   //   if (scan->interface() == intf) {
@@ -5863,9 +5870,11 @@
 
   bind(found_method);
 
-  // Got a hit.
-  movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
-  movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
+  if (return_method) {
+    // Got a hit.
+    movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
+    movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
+  }
 }
 
 
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -544,7 +544,8 @@
                                RegisterOrConstant itable_index,
                                Register method_result,
                                Register scan_temp,
-                               Label& no_such_interface);
+                               Label& no_such_interface,
+                               bool return_method = true);
 
   // virtual method calling
   void lookup_virtual_method(Register recv_klass,
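
A hedged, stand-alone sketch of what the new return_method flag distinguishes (illustrative types and names, not HotSpot code): callers such as the new receiver subtype check only need to know whether the receiver class implements the interface and pass return_method=false, so the Method* resolution is skipped.

    #include <cstddef>

    struct ItableEntry { const void* interface_klass; size_t method_table_offset; };

    // Scans a hypothetical flattened itable terminated by a null interface entry.
    static const void* lookup(const ItableEntry* scan, const void* intf,
                              const char* klass_base, size_t itable_index,
                              bool return_method) {
      for (; scan->interface_klass != nullptr; ++scan) {
        if (scan->interface_klass == intf) {
          if (!return_method) {
            return intf;  // subtype check only (the REFC check above passes noreg)
          }
          const void* const* methods = reinterpret_cast<const void* const*>(
              klass_base + scan->method_table_offset);
          return methods[itable_index];  // the selected Method*
        }
      }
      return nullptr;  // corresponds to branching to L_no_such_interface
    }
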
--- a/src/hotspot/cpu/x86/relocInfo_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/relocInfo_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,4 +40,10 @@
 #endif
   };
 
+ public:
+
+  // Instruct loadConP of x86_64.ad places oops in code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return true; }
+
 #endif // CPU_X86_VM_RELOCINFO_X86_HPP
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -957,7 +957,7 @@
     Label missed;
     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
-    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
+    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
     __ jcc(Assembler::notEqual, missed);
     // Method might have been compiled since the call site was patched to
     // interpreted if that is the case treat it as a miss so we can get
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -949,7 +949,7 @@
   {
     __ load_klass(temp, receiver);
     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
-    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
+    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
     __ jcc(Assembler::equal, ok);
     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -433,7 +433,7 @@
 
 
   //----------------------------------------------------------------------------------------------------
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
+  // Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
   //
   // xchg exists as far back as 8086, lock needed for MP only
   // Stack layout immediately after call:
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -611,8 +611,8 @@
     return start;
   }
 
-  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
-  //                                          jbyte compare_value)
+  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
+  //                                           int8_t compare_value)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -637,9 +637,9 @@
     return start;
   }
 
-  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
-  //                                          volatile jlong* dest,
-  //                                          jlong compare_value)
+  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
+  //                                            volatile int64_t* dest,
+  //                                            int64_t compare_value)
   // Arguments :
   //    c_rarg0: exchange_value
   //    c_rarg1: dest
@@ -694,8 +694,8 @@
   // Result:
   //    *dest += add_value
   //    return *dest;
-  address generate_atomic_add_ptr() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
+  address generate_atomic_add_long() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
     address start = __ pc();
 
     __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@@ -5015,14 +5015,14 @@
     StubRoutines::_catch_exception_entry = generate_catch_exception();
 
     // atomic calls
-    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
-    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
-    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
-    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
-    StubRoutines::_atomic_add_entry          = generate_atomic_add();
-    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
-    StubRoutines::_fence_entry               = generate_orderaccess_fence();
+    StubRoutines::_atomic_xchg_entry          = generate_atomic_xchg();
+    StubRoutines::_atomic_xchg_long_entry     = generate_atomic_xchg_long();
+    StubRoutines::_atomic_cmpxchg_entry       = generate_atomic_cmpxchg();
+    StubRoutines::_atomic_cmpxchg_byte_entry  = generate_atomic_cmpxchg_byte();
+    StubRoutines::_atomic_cmpxchg_long_entry  = generate_atomic_cmpxchg_long();
+    StubRoutines::_atomic_add_entry           = generate_atomic_add();
+    StubRoutines::_atomic_add_long_entry      = generate_atomic_add_long();
+    StubRoutines::_fence_entry                = generate_orderaccess_fence();
 
     // platform dependent
     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -3712,11 +3712,11 @@
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 itable index
+  prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
                  rcx, rdx); // recv, flags
 
-  // rax: interface klass (from f1)
-  // rbx: itable index (from f2)
+  // rax: reference klass (from f1)
+  // rbx: method (from f2)
   // rcx: receiver
   // rdx: flags
 
@@ -3738,10 +3738,28 @@
   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
   __ load_klass(rdx, rcx);
 
+  Label no_such_interface, no_such_method;
+
+  // Receiver subtype check against REFC.
+  // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
+  __ lookup_interface_method(// inputs: rec. class, interface, itable index
+                             rdx, rax, noreg,
+                             // outputs: scan temp. reg, scan temp. reg
+                             rbcp, rlocals,
+                             no_such_interface,
+                             /*return_method=*/false);
+
   // profile this call
+  __ restore_bcp(); // rbcp was destroyed by receiver type check
   __ profile_virtual_call(rdx, rbcp, rlocals);
 
-  Label no_such_interface, no_such_method;
+  // Get declaring interface class from method, and itable index
+  __ movptr(rax, Address(rbx, Method::const_offset()));
+  __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
+  __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
+  __ movl(rbx, Address(rbx, Method::itable_index_offset()));
+  __ subl(rbx, Method::itable_index_max);
+  __ negl(rbx);
 
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
                              rdx, rax, rbx,
@@ -3851,7 +3869,6 @@
   Label done;
   Label initialize_header;
   Label initialize_object;  // including clearing the fields
-  Label allocate_shared;
 
   __ get_cpool_and_tags(rcx, rax);
 
@@ -3877,12 +3894,19 @@
   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
   __ jcc(Assembler::notZero, slow_case);
 
+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If fails due to heap end, go to slow path.
   //
-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  // (creates a new TLAB, etc.)
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.
 
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
@@ -3898,7 +3922,7 @@
     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
     __ lea(rbx, Address(rax, rdx, Address::times_1));
     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
-    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
+    __ jcc(Assembler::above, slow_case);
     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
     if (ZeroTLAB) {
       // the fields have been already cleared
@@ -3907,40 +3931,40 @@
       // initialize both the header and fields
       __ jmp(initialize_object);
     }
+  } else {
+    // Allocation in the shared Eden, if allowed.
+    //
+    // rdx: instance size in bytes
+    if (allow_shared_alloc) {
+      ExternalAddress heap_top((address)Universe::heap()->top_addr());
+      ExternalAddress heap_end((address)Universe::heap()->end_addr());
+
+      Label retry;
+      __ bind(retry);
+      __ movptr(rax, heap_top);
+      __ lea(rbx, Address(rax, rdx, Address::times_1));
+      __ cmpptr(rbx, heap_end);
+      __ jcc(Assembler::above, slow_case);
+
+      // Compare rax, with the top addr, and if still equal, store the new
+      // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
+      // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
+      //
+      // rax,: object begin
+      // rbx,: object end
+      // rdx: instance size in bytes
+      __ locked_cmpxchgptr(rbx, heap_top);
+
+      // if someone beat us on the allocation, try again, otherwise continue
+      __ jcc(Assembler::notEqual, retry);
+
+      __ incr_allocated_bytes(thread, rdx, 0);
+    }
   }
 
-  // Allocation in the shared Eden, if allowed.
-  //
-  // rdx: instance size in bytes
-  if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
-    ExternalAddress heap_top((address)Universe::heap()->top_addr());
-    ExternalAddress heap_end((address)Universe::heap()->end_addr());
-
-    Label retry;
-    __ bind(retry);
-    __ movptr(rax, heap_top);
-    __ lea(rbx, Address(rax, rdx, Address::times_1));
-    __ cmpptr(rbx, heap_end);
-    __ jcc(Assembler::above, slow_case);
-
-    // Compare rax, with the top addr, and if still equal, store the new
-    // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
-    // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
-    //
-    // rax,: object begin
-    // rbx,: object end
-    // rdx: instance size in bytes
-    __ locked_cmpxchgptr(rbx, heap_top);
-
-    // if someone beat us on the allocation, try again, otherwise continue
-    __ jcc(Assembler::notEqual, retry);
-
-    __ incr_allocated_bytes(thread, rdx, 0);
-  }
-
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc is true, the object was allocated above
+  // and still needs to be initialized. Otherwise, skip ahead to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // The object is initialized before the header.  If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
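
A hedged sketch of the allocation policy described in the rewritten comment block above (plain C++, not the generated assembly; FakeHeap and the parameters are illustrative): the TLAB bump-pointer path and the lock-cmpxchg eden path are now alternatives, each falling back to the slow path.

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct FakeHeap { std::atomic<uintptr_t> top; uintptr_t end; };

    // Returns the object start, or 0 to signal "take the slow path".
    static uintptr_t allocate(FakeHeap& heap, uintptr_t& tlab_top, uintptr_t tlab_end,
                              bool use_tlab, bool inline_contig_alloc, size_t size) {
      if (use_tlab) {
        uintptr_t obj = tlab_top;
        if (obj + size > tlab_end) return 0;      // TLAB full: slow path refills it
        tlab_top = obj + size;
        return obj;                               // then initialize header and fields
      }
      if (inline_contig_alloc) {
        uintptr_t obj = heap.top.load();
        for (;;) {                                // the "retry" loop above
          if (obj + size > heap.end) return 0;    // heap end reached: slow path
          if (heap.top.compare_exchange_weak(obj, obj + size)) return obj;
        }
      }
      return 0;                                   // neither fast path applies
    }
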
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -628,6 +628,11 @@
   if (UseSSE < 1)
     _features &= ~CPU_SSE;
 
+  // Since AVX instructions are slower than SSE on some ZX cpus, force UseAVX = 0.
+  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
+    UseAVX = 0;
+  }
+
   // first try initial setting and detect what we can support
   int use_avx_limit = 0;
   if (UseAVX > 0) {
@@ -1078,6 +1083,66 @@
   // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
   // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).
 
+
+  if (is_zx()) { // ZX cpus specific settings
+    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
+      UseStoreImmI16 = false; // don't use it on ZX cpus
+    }
+    if ((cpu_family() == 6) || (cpu_family() == 7)) {
+      if (FLAG_IS_DEFAULT(UseAddressNop)) {
+        // Use it on all ZX cpus
+        UseAddressNop = true;
+      }
+    }
+    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
+      UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
+    }
+    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
+      if (supports_sse3()) {
+        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
+      } else {
+        UseXmmRegToRegMoveAll = false;
+      }
+    }
+    if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
+#ifdef COMPILER2
+      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
+        // For new ZX cpus, apply the following optimization:
+        // don't align the beginning of a loop if there are enough instructions
+        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
+        // in current fetch line (OptoLoopAlignment) or the padding
+        // is big (> MaxLoopPad).
+        // Set MaxLoopPad to 11 for new ZX cpus to reduce number of
+        // generated NOP instructions. 11 is the largest size of one
+        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
+        MaxLoopPad = 11;
+      }
+#endif // COMPILER2
+      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
+        UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
+      }
+      if (supports_sse4_2()) { // new ZX cpus
+        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
+          UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
+        }
+      }
+      if (supports_sse4_2()) {
+        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
+          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
+        }
+      } else {
+        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
+        }
+        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
+      }
+    }
+
+    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
+      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
+    }
+  }
+
   if( is_amd() ) { // AMD cpus specific settings
     if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
       // Use it on new AMD cpus starting from Opteron.
@@ -1374,6 +1439,14 @@
 #endif
   }
 
+  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
+#ifdef COMPILER2
+    if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
+    }
+#endif
+  }
+
 #ifdef _LP64
   // Prefetch settings
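
A hedged model of the FLAG_IS_DEFAULT / FLAG_SET_DEFAULT pattern used throughout the ZX tuning block above (simplified stand-in types, not the real flag machinery): an ergonomic default is applied only when the user has not set the flag explicitly, so command-line -XX settings always win.

    #include <map>
    #include <string>

    struct Flags {
      std::map<std::string, bool> explicitly_set;   // filled while parsing -XX options
      bool use_address_nop = false;

      bool is_default(const std::string& name) const {
        return explicitly_set.count(name) == 0;
      }
      void apply_zx_defaults() {
        if (is_default("UseAddressNop")) {
          use_address_nop = true;                   // ergonomic default, not an override
        }
      }
    };
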
 
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -305,6 +305,9 @@
   enum Extended_Family {
     // AMD
     CPU_FAMILY_AMD_11H       = 0x11,
+    // ZX
+    CPU_FAMILY_ZX_CORE_F6    = 6,
+    CPU_FAMILY_ZX_CORE_F7    = 7,
     // Intel
     CPU_FAMILY_INTEL_CORE    = 6,
     CPU_MODEL_NEHALEM        = 0x1e,
@@ -549,6 +552,16 @@
       }
     }
 
+    // ZX features.
+    if (is_zx()) {
+      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
+        result |= CPU_LZCNT;
+      // For ZX, the ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
+      if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
+        result |= CPU_3DNOW_PREFETCH;
+      }
+    }
+
     return result;
   }
 
@@ -657,6 +670,7 @@
   static bool is_P6()             { return cpu_family() >= 6; }
   static bool is_amd()            { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
   static bool is_intel()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
+  static bool is_zx()             { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS  '
   static bool is_atom_family()    { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
   static bool is_knights_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi
 
@@ -680,6 +694,15 @@
       }
     } else if (is_amd()) {
       result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
+    } else if (is_zx()) {
+      bool supports_topology = supports_processor_topology();
+      if (supports_topology) {
+        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
+                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+      }
+      if (!supports_topology || result == 0) {
+        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+      }
     }
     return result;
   }
@@ -688,6 +711,8 @@
     uint result = 1;
     if (is_intel() && supports_processor_topology()) {
       result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+    } else if (is_zx() && supports_processor_topology()) {
+      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
     } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
       if (cpu_family() >= 0x17) {
         result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
@@ -705,6 +730,8 @@
       result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
     } else if (is_amd()) {
       result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
+    } else if (is_zx()) {
+      result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
     }
     if (result < 32) // not defined ?
       result = 32;   // 32 bytes by default on x86 and other x64
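
A hedged illustration of the is_zx() test added above: std_vendor_name_0 holds the first four bytes of the CPUID vendor string as a little-endian dword, so 0x746e6543 corresponds to "Cent..." and 0x68532020 to "  Sh..." (hence the 'tneC' || 'hS  ' comment). The exact vendor strings are the usual Centaur/Zhaoxin identifiers and should be treated as an assumption here.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Loads four ASCII bytes the way the cpuid code stores them (little-endian).
    static uint32_t vendor_dword(const char* s) {
      uint32_t v;
      std::memcpy(&v, s, 4);
      return v;
    }

    int main() {
      std::printf("0x%08x 0x%08x\n",
                  (unsigned) vendor_dword("Cent"), (unsigned) vendor_dword("  Sh"));
      // On a little-endian machine this prints 0x746e6543 0x68532020.
      return 0;
    }
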
--- a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -27,6 +27,7 @@
 #include "code/vtableStubs.hpp"
 #include "interp_masm_x86.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -147,7 +148,7 @@
   MacroAssembler* masm = new MacroAssembler(&cb);
 
   // Entry arguments:
-  //  rax,: Interface
+  //  rax: CompiledICHolder
   //  rcx: Receiver
 
 #ifndef PRODUCT
@@ -155,25 +156,42 @@
     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
   }
 #endif /* PRODUCT */
-  // get receiver (need to skip return address on top of stack)
 
-  assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");
+  // Most registers are in use; we'll use rax, rbx, rsi, rdi
+  // (If we need to make rsi, rdi callee-save, do a push/pop here.)
+  const Register recv_klass_reg     = rsi;
+  const Register holder_klass_reg   = rax; // declaring interface klass (DECC)
+  const Register resolved_klass_reg = rbx; // resolved interface klass (REFC)
+  const Register temp_reg           = rdi;
+
+  const Register icholder_reg = rax;
+  __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
+  __ movptr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+
+  Label L_no_such_interface;
 
   // get receiver klass (also an implicit null-check)
   address npe_addr = __ pc();
-  __ movptr(rsi, Address(rcx, oopDesc::klass_offset_in_bytes()));
+  assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");
+  __ load_klass(recv_klass_reg, rcx);
 
-  // Most registers are in use; we'll use rax, rbx, rsi, rdi
-  // (If we need to make rsi, rdi callee-save, do a push/pop here.)
+  // Receiver subtype check against REFC.
+  // Destroys recv_klass_reg value.
+  __ lookup_interface_method(// inputs: rec. class, interface
+                             recv_klass_reg, resolved_klass_reg, noreg,
+                             // outputs:  scan temp. reg1, scan temp. reg2
+                             recv_klass_reg, temp_reg,
+                             L_no_such_interface,
+                             /*return_method=*/false);
+
+  // Get selected method from declaring class and itable index
   const Register method = rbx;
-  Label throw_icce;
-
-  // Get Method* and entrypoint for compiler
+  __ load_klass(recv_klass_reg, rcx); // restore recv_klass_reg
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
-                             rsi, rax, itable_index,
+                             recv_klass_reg, holder_klass_reg, itable_index,
                              // outputs: method, scan temp. reg
-                             method, rdi,
-                             throw_icce);
+                             method, temp_reg,
+                             L_no_such_interface);
 
   // method (rbx): Method*
   // rcx: receiver
@@ -193,9 +211,10 @@
   address ame_addr = __ pc();
   __ jmp(Address(method, Method::from_compiled_offset()));
 
-  __ bind(throw_icce);
+  __ bind(L_no_such_interface);
   __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
-  masm->flush();
+
+  __ flush();
 
   if (PrintMiscellaneous && (WizardMode || Verbose)) {
     tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
@@ -220,7 +239,7 @@
     return (DebugVtables ? 210 : 16) + (CountCompiledCalls ? 6 : 0);
   } else {
     // Itable stub size
-    return (DebugVtables ? 256 : 66) + (CountCompiledCalls ? 6 : 0);
+    return (DebugVtables ? 256 : 110) + (CountCompiledCalls ? 6 : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
--- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -27,6 +27,7 @@
 #include "code/vtableStubs.hpp"
 #include "interp_masm_x86.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -147,36 +148,50 @@
 #endif
 
   // Entry arguments:
-  //  rax: Interface
+  //  rax: CompiledICHolder
   //  j_rarg0: Receiver
 
-  // Free registers (non-args) are rax (interface), rbx
-
-  // get receiver (need to skip return address on top of stack)
-
-  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
-  // get receiver klass (also an implicit null-check)
-  address npe_addr = __ pc();
-
   // Most registers are in use; we'll use rax, rbx, r10, r11
   // (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
-  __ load_klass(r10, j_rarg0);
+  const Register recv_klass_reg     = r10;
+  const Register holder_klass_reg   = rax; // declaring interface klass (DECC)
+  const Register resolved_klass_reg = rbx; // resolved interface klass (REFC)
+  const Register temp_reg           = r11;
+
+  Label L_no_such_interface;
+
+  const Register icholder_reg = rax;
+  __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
+  __ movptr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+
+  // get receiver klass (also an implicit null-check)
+  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
+  address npe_addr = __ pc();
+  __ load_klass(recv_klass_reg, j_rarg0);
+
+  // Receiver subtype check against REFC.
+  // Destroys recv_klass_reg value.
+  __ lookup_interface_method(// inputs: rec. class, interface
+                             recv_klass_reg, resolved_klass_reg, noreg,
+                             // outputs:  scan temp. reg1, scan temp. reg2
+                             recv_klass_reg, temp_reg,
+                             L_no_such_interface,
+                             /*return_method=*/false);
+
+  // Get selected method from declaring class and itable index
+  const Register method = rbx;
+  __ load_klass(recv_klass_reg, j_rarg0);   // restore recv_klass_reg
+  __ lookup_interface_method(// inputs: rec. class, interface, itable index
+                       recv_klass_reg, holder_klass_reg, itable_index,
+                       // outputs: method, scan temp. reg
+                       method, temp_reg,
+                       L_no_such_interface);
 
   // If we take a trap while this arg is on the stack we will not
   // be able to walk the stack properly. This is not an issue except
   // when there are mistakes in this assembly code that could generate
   // a spurious fault. Ask me how I know...
 
-  const Register method = rbx;
-  Label throw_icce;
-
-  // Get Method* and entrypoint for compiler
-  __ lookup_interface_method(// inputs: rec. class, interface, itable index
-                             r10, rax, itable_index,
-                             // outputs: method, scan temp. reg
-                             method, r11,
-                             throw_icce);
-
   // method (rbx): Method*
   // j_rarg0: receiver
 
@@ -197,7 +212,7 @@
   address ame_addr = __ pc();
   __ jmp(Address(method, Method::from_compiled_offset()));
 
-  __ bind(throw_icce);
+  __ bind(L_no_such_interface);
   __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
 
   __ flush();
@@ -224,8 +239,8 @@
            (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
-    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+    return (DebugVtables ? 512 : 140) + (CountCompiledCalls ? 13 : 0) +
+           (UseCompressedClassPointers ? 2 * MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
--- a/src/hotspot/cpu/zero/relocInfo_zero.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/zero/relocInfo_zero.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -34,4 +34,8 @@
     format_width =  1
   };
 
+ public:
+
+  static bool mustIterateImmediateOopsInCode() { return true; }
+
 #endif // CPU_ZERO_VM_RELOCINFO_ZERO_HPP
--- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -258,7 +258,7 @@
     StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
-    StubRoutines::_atomic_add_ptr_entry      = ShouldNotCallThisStub();
+    StubRoutines::_atomic_add_long_entry     = ShouldNotCallThisStub();
     StubRoutines::_fence_entry               = ShouldNotCallThisStub();
   }
 
--- a/src/hotspot/os/aix/safepointMechanism_aix.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os/aix/safepointMechanism_aix.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -30,8 +30,18 @@
 #include <sys/mman.h>
 
 void SafepointMechanism::pd_initialize() {
+  // No special code needed if we can use SIGTRAP
+  if (ThreadLocalHandshakes && USE_POLL_BIT_ONLY) {
+    default_initialize();
+    return;
+  }
+
+  // Allocate one protected page
   char* map_address = (char*)MAP_FAILED;
   const size_t page_size = os::vm_page_size();
+  const int prot  = PROT_READ;
+  const int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+
   // Use optimized addresses for the polling page,
   // e.g. map it to a special 32-bit address.
   if (OptimizePollingPageLocation) {
@@ -57,14 +67,14 @@
       // Try to map with current address wish.
       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
       // fail if the address is already mapped.
-      map_address = (char*) ::mmap(address_wishes[i] - (ssize_t)page_size,
-                                   page_size, PROT_READ,
-                                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+      map_address = (char*) ::mmap(address_wishes[i],
+                                   page_size, prot,
+                                   flags | MAP_FIXED,
                                    -1, 0);
-      log_debug(os)("SafePoint Polling  Page address: %p (wish) => %p",
-                    address_wishes[i], map_address + (ssize_t)page_size);
+      log_debug(os)("SafePoint Polling Page address: %p (wish) => %p",
+                    address_wishes[i], map_address);
 
-      if (map_address + (ssize_t)page_size == address_wishes[i]) {
+      if (map_address == address_wishes[i]) {
         // Map succeeded and map_address is at wished address, exit loop.
         break;
       }
@@ -78,8 +88,17 @@
     }
   }
   if (map_address == (char*)MAP_FAILED) {
-    map_address = os::reserve_memory(page_size, NULL, page_size);
+    map_address = (char*) ::mmap(NULL, page_size, prot, flags, -1, 0);
   }
   guarantee(map_address != (char*)MAP_FAILED, "SafepointMechanism::pd_initialize: failed to allocate polling page");
+  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(map_address));
   os::set_polling_page((address)(map_address));
+
+  // Use same page for ThreadLocalHandshakes without SIGTRAP
+  if (ThreadLocalHandshakes) {
+    set_uses_thread_local_poll();
+    intptr_t bad_page_val = reinterpret_cast<intptr_t>(map_address);
+    _poll_armed_value    = reinterpret_cast<void*>(bad_page_val | poll_bit());
+    _poll_disarmed_value = NULL; // Readable on AIX
+  }
 }
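
A hedged sketch of the two poll values set up above (illustrative helper names, not the HotSpot API): the armed value points into the protected polling page with the poll bit set, so an armed poll load faults; the disarmed value is NULL, which the comment above notes is readable on AIX, so a disarmed poll load succeeds.

    #include <cstdint>

    static void* armed_poll_value(void* protected_page, uintptr_t poll_bit) {
      return reinterpret_cast<void*>(
          reinterpret_cast<uintptr_t>(protected_page) | poll_bit);   // load faults
    }

    static void* disarmed_poll_value() {
      return nullptr;   // readable on AIX per the comment above, so the load succeeds
    }
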
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -47,6 +47,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
@@ -374,9 +375,12 @@
         goto run_stub;
       }
 
-      else if (sig == SIGSEGV && os::is_poll_address(addr)) {
+      else if ((SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY)
+               ? (sig == SIGTRAP && ((NativeInstruction*)pc)->is_safepoint_poll())
+               : (sig == SIGSEGV && os::is_poll_address(addr))) {
         if (TraceTraps) {
-          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
+          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (%s)", p2i(pc),
+                        (SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) ? "SIGTRAP" : "SIGSEGV");
         }
         stub = SharedRuntime::get_poll_stub(pc);
         goto run_stub;
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -132,8 +132,8 @@
 
 extern "C" {
   // defined in bsd_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
+  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
+  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
 }
 
 template<>
@@ -143,15 +143,15 @@
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -160,7 +160,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // AMD64
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Thu Jan 18 22:05:24 2018 +0100
@@ -633,10 +633,10 @@
         ret
 
 
-        # Support for jlong Atomic::cmpxchg(jlong exchange_value,
-        #                                   volatile jlong* dest,
-        #                                   jlong compare_value,
-        #                                   bool is_MP)
+        # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
+        #                                     volatile int64_t* dest,
+        #                                     int64_t compare_value,
+        #                                     bool is_MP)
         #
         .p2align 4,,15
         ELF_TYPE(_Atomic_cmpxchg_long,@function)
@@ -658,8 +658,8 @@
         ret
 
 
-        # Support for jlong Atomic::load and Atomic::store.
-        # void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
+        # Support for int64_t Atomic::load and Atomic::store.
+        # void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst)
         .p2align 4,,15
         ELF_TYPE(_Atomic_move_long,@function)
 SYMBOL(_Atomic_move_long):
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -265,8 +265,8 @@
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -275,7 +275,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -50,7 +50,7 @@
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    (*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
+    (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
 }
 
 template<>
@@ -59,7 +59,7 @@
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
-    PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
+    PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 #endif
 
@@ -103,7 +103,7 @@
     : "memory");
   return val;
 #else
-  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
 #endif
 }
 
@@ -146,7 +146,7 @@
     : "memory");
   return old_val;
 #else
-  return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
+  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
 #endif
 }
 
@@ -178,17 +178,17 @@
 
 #ifndef AARCH64
 
-inline jint reorder_cmpxchg_func(jint exchange_value,
-                                 jint volatile* dest,
-                                 jint compare_value) {
+inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
+                                    int32_t volatile* dest,
+                                    int32_t compare_value) {
   // Warning:  Arguments are swapped to avoid moving them for kernel call
   return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
 }
 
-inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
-                                       jlong volatile* dest,
-                                       jlong compare_value) {
-  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
+inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
+                                         int64_t volatile* dest,
+                                         int64_t compare_value) {
+  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
   // Warning:  Arguments are swapped to avoid moving them for kernel call
   return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
 }
@@ -221,7 +221,7 @@
     : "memory");
   return rv;
 #else
-  return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
 #endif
 }
 
@@ -251,7 +251,7 @@
     : "memory");
   return rv;
 #else
-  return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
 #endif
 }
 
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -598,11 +598,11 @@
 
 #ifndef AARCH64
 
-typedef jlong cmpxchg_long_func_t(jlong, jlong, volatile jlong*);
+typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);
 
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 
-jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_value, volatile jlong* dest) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
 
@@ -612,16 +612,16 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
-typedef jlong load_long_func_t(const volatile jlong*);
+typedef int64_t load_long_func_t(const volatile int64_t*);
 
 load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;
 
-jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
+int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
   // try to use the stub:
   load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());
 
@@ -631,15 +631,15 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *src;
+  int64_t old_value = *src;
   return old_value;
 }
 
-typedef void store_long_func_t(jlong, volatile jlong*);
+typedef void store_long_func_t(int64_t, volatile int64_t*);
 
 store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;
 
-void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
+void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
   // try to use the stub:
   store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());
 
@@ -652,11 +652,11 @@
   *dest = val;
 }
 
-typedef jint  atomic_add_func_t(jint add_value, volatile jint *dest);
+typedef int32_t  atomic_add_func_t(int32_t add_value, volatile int32_t *dest);
 
 atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;
 
-jint  os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
+int32_t  os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
   atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
                                             StubRoutines::atomic_add_entry());
   if (func != NULL) {
@@ -664,16 +664,16 @@
     return (*func)(add_value, dest);
   }
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = old_value + add_value;
   return (old_value + add_value);
 }
 
-typedef jint  atomic_xchg_func_t(jint exchange_value, volatile jint *dest);
+typedef int32_t  atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);
 
 atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;
 
-jint  os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
+int32_t  os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
   atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
                                             StubRoutines::atomic_xchg_entry());
   if (func != NULL) {
@@ -681,16 +681,16 @@
     return (*func)(exchange_value, dest);
   }
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return (old_value);
 }
 
-typedef jint cmpxchg_func_t(jint, jint, volatile jint*);
+typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);
 
 cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
 
-jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 
@@ -700,7 +700,7 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -45,35 +45,35 @@
   static bool register_code_area(char *low, char *high) { return true; }
 
 #ifndef AARCH64
-  static jlong (*atomic_cmpxchg_long_func)(jlong compare_value,
-                                           jlong exchange_value,
-                                           volatile jlong *dest);
+  static int64_t (*atomic_cmpxchg_long_func)(int64_t compare_value,
+                                             int64_t exchange_value,
+                                             volatile int64_t *dest);
 
-  static jlong (*atomic_load_long_func)(const volatile jlong*);
+  static int64_t (*atomic_load_long_func)(const volatile int64_t*);
 
-  static void (*atomic_store_long_func)(jlong, volatile jlong*);
+  static void (*atomic_store_long_func)(int64_t, volatile int64_t*);
 
-  static jint  (*atomic_add_func)(jint add_value, volatile jint *dest);
+  static int32_t  (*atomic_add_func)(int32_t add_value, volatile int32_t *dest);
 
-  static jint  (*atomic_xchg_func)(jint exchange_value, volatile jint *dest);
+  static int32_t  (*atomic_xchg_func)(int32_t exchange_value, volatile int32_t *dest);
 
-  static jint  (*atomic_cmpxchg_func)(jint compare_value,
-                                      jint exchange_value,
-                                      volatile jint *dest);
+  static int32_t  (*atomic_cmpxchg_func)(int32_t compare_value,
+                                         int32_t exchange_value,
+                                         volatile int32_t *dest);
 
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, jlong, volatile jlong*);
+  static int64_t atomic_cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
 
-  static jlong atomic_load_long_bootstrap(const volatile jlong*);
+  static int64_t atomic_load_long_bootstrap(const volatile int64_t*);
 
-  static void atomic_store_long_bootstrap(jlong, volatile jlong*);
+  static void atomic_store_long_bootstrap(int64_t, volatile int64_t*);
 
-  static jint  atomic_add_bootstrap(jint add_value, volatile jint *dest);
+  static int32_t  atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest);
 
-  static jint  atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest);
+  static int32_t  atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
 
-  static jint  atomic_cmpxchg_bootstrap(jint compare_value,
-                                        jint exchange_value,
-                                        volatile jint *dest);
+  static int32_t  atomic_cmpxchg_bootstrap(int32_t compare_value,
+                                           int32_t exchange_value,
+                                           volatile int32_t *dest);
 #endif // !AARCH64
 
 #endif // OS_CPU_LINUX_ARM_VM_OS_LINUX_ARM_HPP
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -46,6 +46,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
@@ -382,7 +383,7 @@
         stub = SharedRuntime::get_handle_wrong_method_stub();
       }
 
-      else if (sig == SIGSEGV &&
+      else if (sig == ((SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) ? SIGTRAP : SIGSEGV) &&
                // A linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults
                // in 64bit mode (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6),
                // especially when we try to read from the safepoint polling page. So the check
@@ -393,7 +394,8 @@
                ((cb = CodeCache::find_blob(pc)) != NULL) &&
                cb->is_compiled()) {
         if (TraceTraps) {
-          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
+          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (%s)", p2i(pc),
+                        (SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) ? "SIGTRAP" : "SIGSEGV");
         }
         stub = SharedRuntime::get_poll_stub(pc);
       }
--- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,15 +28,15 @@
   //
   // NOTE: we are back in class os here, not Linux
   //
-  static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-  static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-  static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-  static jint  (*atomic_add_func)         (jint,  volatile jint*);
+  static int32_t  (*atomic_xchg_func)        (int32_t,  volatile int32_t*);
+  static int32_t  (*atomic_cmpxchg_func)     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  (*atomic_cmpxchg_long_func)(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  (*atomic_add_func)         (int32_t,  volatile int32_t*);
 
-  static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-  static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-  static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+  static int32_t  atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int32_t  atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  atomic_cmpxchg_long_bootstrap(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
 
   static void setup_fpu() {}
 
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -133,8 +133,8 @@
 
 extern "C" {
   // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
-  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
+  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
+  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
 }
 
 template<>
@@ -144,15 +144,15 @@
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -161,7 +161,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // AMD64
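
A hedged sketch of what the cmpxchg_using_helper<int64_t>(...) calls above reduce to (simplified; the real template lives in atomic.hpp and uses PrimitiveConversions rather than plain casts, and the function name here is illustrative): the 8-byte operands are viewed as int64_t, the assembly helper is called, and the result is converted back.

    #include <cstdint>

    typedef int64_t cmpxchg64_fn(int64_t exchange_value, volatile int64_t* dest,
                                 int64_t compare_value);

    // Assumes an integral 8-byte T for simplicity.
    template <typename T>
    T cmpxchg_via_helper(cmpxchg64_fn* fn, T exchange_value, T volatile* dest,
                         T compare_value) {
      static_assert(sizeof(T) == 8, "helper handles 8-byte values only");
      return static_cast<T>((*fn)(static_cast<int64_t>(exchange_value),
                                  reinterpret_cast<volatile int64_t*>(dest),
                                  static_cast<int64_t>(compare_value)));
    }
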
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -30,67 +30,6 @@
 
 // Implementation of class atomic
 
-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(int newval,
-                                       volatile int *ptr,
-                                       int oldval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        return prev;
-    }
-}
-#endif // ARM
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -105,11 +44,7 @@
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
-#ifdef ARM
-  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
-#else
   return __sync_add_and_fetch(dest, add_value);
-#endif // ARM
 }
 
 template<>
@@ -117,7 +52,6 @@
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
-
   return __sync_add_and_fetch(dest, add_value);
 }
 
@@ -126,9 +60,6 @@
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(4 == sizeof(T));
-#ifdef ARM
-  return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
-#else
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
   // operation.  Note that some platforms only support this with the
   // limitation that the only valid value to store is the immediate
@@ -140,7 +71,6 @@
   // barrier.
   __sync_synchronize();
   return result;
-#endif // ARM
 }
 
 template<>
@@ -164,11 +94,7 @@
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
-#ifdef ARM
-  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
-#else
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // ARM
 }
 
 template<>
@@ -185,8 +111,8 @@
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -195,7 +121,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
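
With this change the Zero port drops its hand-rolled ARM __kernel_cmpxchg wrappers and every path uses the GCC __sync builtins that the other cases already relied on. A compact, runnable illustration of the three builtins involved (not taken from the patch):

    #include <cstdio>

    int main() {
      volatile int counter = 0;

      // Atomic add, returning the updated value.
      int after = __sync_add_and_fetch(&counter, 5);            // counter == 5

      // Atomic exchange. Despite the name, __sync_lock_test_and_set stores the
      // new value and returns the old one; it is only an acquire barrier, which
      // is why the code above issues __sync_synchronize() afterwards.
      int prev = __sync_lock_test_and_set(&counter, 7);         // prev == 5
      __sync_synchronize();

      // Atomic compare-and-swap, returning the value seen before the attempt.
      int seen = __sync_val_compare_and_swap(&counter, 7, 9);   // seen == 7

      std::printf("%d %d %d %d\n", after, prev, seen, counter); // 5 5 7 9
      return 0;
    }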
--- a/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,15 +28,15 @@
   //
   // NOTE: we are back in class os here, not Solaris
   //
-  static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-  static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-  static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-  static jint  (*atomic_add_func)         (jint,  volatile jint*);
+  static int32_t  (*atomic_xchg_func)        (int32_t,  volatile int32_t*);
+  static int32_t  (*atomic_cmpxchg_func)     (int32_t,  volatile int32_t*,  int32_t);
+  static int64_t  (*atomic_cmpxchg_long_func)(int64_t,  volatile int64_t*,  int64_t);
+  static int32_t  (*atomic_add_func)         (int32_t,  volatile int32_t*);
 
-  static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-  static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-  static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+  static int32_t  atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int32_t  atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*,  int32_t);
+  static int64_t  atomic_cmpxchg_long_bootstrap(int64_t,  volatile int64_t*,  int64_t);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
 
   static void setup_fpu() {}
 
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -28,16 +28,16 @@
 // For Sun Studio - implementation is in solaris_x86_64.il.
 
 extern "C" {
-  jint _Atomic_add(jint add_value, volatile jint* dest);
-  jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+  int32_t _Atomic_add(int32_t add_value, volatile int32_t* dest);
+  int64_t _Atomic_add_long(int64_t add_value, volatile int64_t* dest);
 
-  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
-  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
-                             jbyte compare_value);
-  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-                       jint compare_value);
-  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
-                             jlong compare_value);
+  int32_t _Atomic_xchg(int32_t exchange_value, volatile int32_t* dest);
+  int8_t  _Atomic_cmpxchg_byte(int8_t exchange_value, volatile int8_t* dest,
+                               int8_t compare_value);
+  int32_t _Atomic_cmpxchg(int32_t exchange_value, volatile int32_t* dest,
+                          int32_t compare_value);
+  int64_t _Atomic_cmpxchg_long(int64_t exchange_value, volatile int64_t* dest,
+                               int64_t compare_value);
 }
 
 template<size_t byte_size>
@@ -55,8 +55,8 @@
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   return PrimitiveConversions::cast<D>(
-    _Atomic_add(PrimitiveConversions::cast<jint>(add_value),
-                reinterpret_cast<jint volatile*>(dest)));
+    _Atomic_add(PrimitiveConversions::cast<int32_t>(add_value),
+                reinterpret_cast<int32_t volatile*>(dest)));
 }
 
 // Not using add_using_helper; see comment for cmpxchg.
@@ -66,8 +66,8 @@
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   return PrimitiveConversions::cast<D>(
-    _Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
-                     reinterpret_cast<jlong volatile*>(dest)));
+    _Atomic_add_long(PrimitiveConversions::cast<int64_t>(add_value),
+                     reinterpret_cast<int64_t volatile*>(dest)));
 }
 
 template<>
@@ -76,11 +76,11 @@
                                              T volatile* dest) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
-                 reinterpret_cast<jint volatile*>(dest)));
+    _Atomic_xchg(PrimitiveConversions::cast<int32_t>(exchange_value),
+                 reinterpret_cast<int32_t volatile*>(dest)));
 }
 
-extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
+extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* dest);
 
 template<>
 template<typename T>
@@ -88,8 +88,8 @@
                                              T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
-                      reinterpret_cast<jlong volatile*>(dest)));
+    _Atomic_xchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
+                      reinterpret_cast<int64_t volatile*>(dest)));
 }
 
 // Not using cmpxchg_using_helper here, because some configurations of
@@ -106,9 +106,9 @@
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg_byte(PrimitiveConversions::cast<jbyte>(exchange_value),
-                         reinterpret_cast<jbyte volatile*>(dest),
-                         PrimitiveConversions::cast<jbyte>(compare_value)));
+    _Atomic_cmpxchg_byte(PrimitiveConversions::cast<int8_t>(exchange_value),
+                         reinterpret_cast<int8_t volatile*>(dest),
+                         PrimitiveConversions::cast<int8_t>(compare_value)));
 }
 
 template<>
@@ -119,9 +119,9 @@
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg(PrimitiveConversions::cast<jint>(exchange_value),
-                    reinterpret_cast<jint volatile*>(dest),
-                    PrimitiveConversions::cast<jint>(compare_value)));
+    _Atomic_cmpxchg(PrimitiveConversions::cast<int32_t>(exchange_value),
+                    reinterpret_cast<int32_t volatile*>(dest),
+                    PrimitiveConversions::cast<int32_t>(compare_value)));
 }
 
 template<>
@@ -132,9 +132,9 @@
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
-                         reinterpret_cast<jlong volatile*>(dest),
-                         PrimitiveConversions::cast<jlong>(compare_value)));
+    _Atomic_cmpxchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
+                         reinterpret_cast<int64_t volatile*>(dest),
+                         PrimitiveConversions::cast<int64_t>(compare_value)));
 }
 
 #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
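
The Solaris hunks keep routing every operand through PrimitiveConversions::cast so that any 4- or 8-byte type can be handed to the int32_t/int64_t entry points. The essence of such a value-preserving cast, shown as a simplified stand-in rather than the HotSpot implementation:

    #include <cstdint>
    #include <cstring>

    // Reinterpret the bits of From as To; the two types must have the same size.
    // C++20 spells this std::bit_cast; memcpy is the portable pre-C++20 form and
    // avoids the aliasing problems of pointer casts.
    template <typename To, typename From>
    To bit_preserving_cast(From from) {
      static_assert(sizeof(To) == sizeof(From), "size mismatch");
      To to;
      std::memcpy(&to, &from, sizeof(To));
      return to;
    }

    // Usage sketch: hand a float to an int32_t-typed atomic entry point.
    //   int32_t bits = bit_preserving_cast<int32_t>(3.5f);
    //   float back   = bit_preserving_cast<float>(bits);   // back == 3.5f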
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -904,12 +904,12 @@
 // until initialization is complete.
 // TODO - replace with .il implementation when compiler supports it.
 
-typedef jint  xchg_func_t        (jint,  volatile jint*);
-typedef jint  cmpxchg_func_t     (jint,  volatile jint*,  jint);
-typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
-typedef jint  add_func_t         (jint,  volatile jint*);
+typedef int32_t  xchg_func_t        (int32_t,  volatile int32_t*);
+typedef int32_t  cmpxchg_func_t     (int32_t,  volatile int32_t*,  int32_t);
+typedef int64_t  cmpxchg_long_func_t(int64_t,  volatile int64_t*,  int64_t);
+typedef int32_t  add_func_t         (int32_t,  volatile int32_t*);
 
-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
 
@@ -919,12 +919,12 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }
 
-jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 
@@ -934,13 +934,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
 
-jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
 
@@ -950,13 +950,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
 
-jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
+int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
   // try to use the stub:
   add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
 
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,15 +31,15 @@
 #ifdef AMD64
   static void setup_fpu() {}
 #else
-  static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-  static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-  static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-  static jint  (*atomic_add_func)         (jint,  volatile jint*);
+  static int32_t  (*atomic_xchg_func)        (int32_t,  volatile int32_t*);
+  static int32_t  (*atomic_cmpxchg_func)     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  (*atomic_cmpxchg_long_func)(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  (*atomic_add_func)         (int32_t,  volatile int32_t*);
 
-  static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-  static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-  static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+  static int32_t  atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int32_t  atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  atomic_cmpxchg_long_bootstrap(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
 
   static void setup_fpu();
 #endif // AMD64
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -54,13 +54,13 @@
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
 }
 
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
+  return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
 }
 
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
@@ -72,8 +72,8 @@
     return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
   }
 
-DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
-DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
+DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
+DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
 
 #undef DEFINE_STUB_XCHG
 
@@ -88,9 +88,9 @@
     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
   }
 
-DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
-DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
-DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
+DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
+DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
+DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
 
 #undef DEFINE_STUB_CMPXCHG
 
@@ -162,10 +162,10 @@
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  jint ex_lo  = (jint)exchange_value;
-  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
-  jint cmp_lo = (jint)compare_value;
-  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
+  int32_t ex_lo  = (int32_t)exchange_value;
+  int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
+  int32_t cmp_lo = (int32_t)compare_value;
+  int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
   __asm {
     push ebx
     push edi
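
In the 8-byte cmpxchg specialization above, the compare and exchange values are split into 32-bit halves because cmpxchg8b takes the compare value in edx:eax and the new value in ecx:ebx. The splitting step, written portably as a sketch (the real code keeps the pointer-arithmetic form shown in the hunk):

    #include <cstdint>

    struct Halves { int32_t lo; int32_t hi; };

    // Split a 64-bit value into the low/high 32-bit words cmpxchg8b expects.
    inline Halves split64(int64_t v) {
      Halves h;
      h.lo = (int32_t)(uint32_t)v;
      h.hi = (int32_t)(uint32_t)((uint64_t)v >> 32);
      return h;
    }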
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -218,17 +218,17 @@
 
 // Atomics and Stub Functions
 
-typedef jint      xchg_func_t            (jint,     volatile jint*);
-typedef intptr_t  xchg_long_func_t       (jlong,    volatile jlong*);
-typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
-typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
-typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
-typedef jint      add_func_t             (jint,     volatile jint*);
-typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
+typedef int32_t   xchg_func_t            (int32_t,  volatile int32_t*);
+typedef int64_t   xchg_long_func_t       (int64_t,  volatile int64_t*);
+typedef int32_t   cmpxchg_func_t         (int32_t,  volatile int32_t*, int32_t);
+typedef int8_t    cmpxchg_byte_func_t    (int8_t,   volatile int8_t*,  int8_t);
+typedef int64_t   cmpxchg_long_func_t    (int64_t,  volatile int64_t*, int64_t);
+typedef int32_t   add_func_t             (int32_t,  volatile int32_t*);
+typedef int64_t   add_long_func_t        (int64_t,  volatile int64_t*);
 
 #ifdef AMD64
 
-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
 
@@ -238,12 +238,12 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }
 
-intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
+int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
   // try to use the stub:
   xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
 
@@ -253,13 +253,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  intptr_t old_value = *dest;
+  int64_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }
 
 
-jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 
@@ -269,13 +269,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
 
-jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
   // try to use the stub:
   cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
 
@@ -285,7 +285,7 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jbyte old_value = *dest;
+  int8_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
@@ -293,7 +293,7 @@
 
 #endif // AMD64
 
-jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
 
@@ -303,7 +303,7 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
@@ -311,7 +311,7 @@
 
 #ifdef AMD64
 
-jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
+int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
   // try to use the stub:
   add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
 
@@ -324,12 +324,12 @@
   return (*dest) += add_value;
 }
 
-intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
+int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
   // try to use the stub:
-  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
+  add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());
 
   if (func != NULL) {
-    os::atomic_add_ptr_func = func;
+    os::atomic_add_long_func = func;
     return (*func)(add_value, dest);
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -342,7 +342,7 @@
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
+add_long_func_t*     os::atomic_add_long_func     = os::atomic_add_long_bootstrap;
 
 #endif // AMD64
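
The os_windows_x86.cpp hunks above (and the Solaris ones earlier) all follow the same bootstrap shape: each atomic_*_bootstrap function tries to fetch the generated stub, repoints the corresponding function pointer at it so later calls bypass the bootstrap, and otherwise falls back to a plain memory update that is only safe while the VM is still single-threaded. A stripped-down, self-contained sketch of that pattern, with names and helpers invented for illustration:

    #include <cstdint>
    #include <cassert>

    typedef int32_t add_func_t(int32_t, volatile int32_t*);

    // Stand-ins for the StubRoutines lookup and the single-thread check.
    static add_func_t* generated_add_stub()    { return nullptr; }
    static bool        still_single_threaded() { return true; }

    static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest);
    static add_func_t* add_func = add_bootstrap;   // starts at the bootstrap version

    static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest) {
      if (add_func_t* stub = generated_add_stub()) {
        add_func = stub;                 // future callers go straight to the stub
        return (*stub)(add_value, dest);
      }
      assert(still_single_threaded() && "non-atomic fallback only valid at bootstrap");
      return (*dest) += add_value;       // plain, non-atomic update
    }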
 
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -29,32 +29,32 @@
   // NOTE: we are back in class os here, not win32
   //
 #ifdef AMD64
-  static jint      (*atomic_xchg_func)          (jint,      volatile jint*);
-  static intptr_t  (*atomic_xchg_long_func)     (jlong,     volatile jlong*);
+  static int32_t   (*atomic_xchg_func)          (int32_t, volatile int32_t*);
+  static int64_t   (*atomic_xchg_long_func)     (int64_t, volatile int64_t*);
 
-  static jint      (*atomic_cmpxchg_func)       (jint,      volatile jint*,  jint);
-  static jbyte     (*atomic_cmpxchg_byte_func)  (jbyte,     volatile jbyte*, jbyte);
-  static jlong     (*atomic_cmpxchg_long_func)  (jlong,     volatile jlong*, jlong);
+  static int32_t   (*atomic_cmpxchg_func)       (int32_t,  volatile int32_t*, int32_t);
+  static int8_t    (*atomic_cmpxchg_byte_func)  (int8_t,   volatile int8_t*,  int8_t);
+  static int64_t   (*atomic_cmpxchg_long_func)  (int64_t,  volatile int64_t*, int64_t);
 
-  static jint      (*atomic_add_func)           (jint,      volatile jint*);
-  static intptr_t  (*atomic_add_ptr_func)       (intptr_t,  volatile intptr_t*);
+  static int32_t   (*atomic_add_func)           (int32_t,  volatile int32_t*);
+  static int64_t   (*atomic_add_long_func)      (int64_t,  volatile int64_t*);
 
-  static jint      atomic_xchg_bootstrap        (jint,      volatile jint*);
-  static intptr_t  atomic_xchg_long_bootstrap   (jlong,     volatile jlong*);
+  static int32_t   atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int64_t   atomic_xchg_long_bootstrap   (int64_t,  volatile int64_t*);
 
-  static jint      atomic_cmpxchg_bootstrap     (jint,      volatile jint*,  jint);
-  static jbyte     atomic_cmpxchg_byte_bootstrap(jbyte,     volatile jbyte*, jbyte);
+  static int32_t   atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*, int32_t);
+  static int8_t    atomic_cmpxchg_byte_bootstrap(int8_t,   volatile int8_t*,  int8_t);
 #else
 
-  static jlong (*atomic_cmpxchg_long_func)  (jlong, volatile jlong*, jlong);
+  static int64_t (*atomic_cmpxchg_long_func)  (int64_t, volatile int64_t*, int64_t);
 
 #endif // AMD64
 
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
+  static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
 
 #ifdef AMD64
-  static jint      atomic_add_bootstrap         (jint,      volatile jint*);
-  static intptr_t  atomic_add_ptr_bootstrap     (intptr_t,  volatile intptr_t*);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
+  static int64_t  atomic_add_long_bootstrap    (int64_t,  volatile int64_t*);
 #endif // AMD64
 
   static void setup_fpu();
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -270,7 +270,7 @@
         CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           CompiledICHolder* cichk = ic->cached_icholder();
-          f(cichk->holder_method());
+          f(cichk->holder_metadata());
           f(cichk->holder_klass());
         } else {
           // Get Klass* or NULL (if value is -1) from GOT cell of virtual call PLT stub.
--- a/src/hotspot/share/ci/ciEnv.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/ci/ciEnv.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1164,28 +1164,30 @@
 
 void ciEnv::dump_compile_data(outputStream* out) {
   CompileTask* task = this->task();
-  Method* method = task->method();
-  int entry_bci = task->osr_bci();
-  int comp_level = task->comp_level();
-  out->print("compile %s %s %s %d %d",
-                method->klass_name()->as_quoted_ascii(),
-                method->name()->as_quoted_ascii(),
-                method->signature()->as_quoted_ascii(),
-                entry_bci, comp_level);
-  if (compiler_data() != NULL) {
-    if (is_c2_compile(comp_level)) {
+  if (task) {
+    Method* method = task->method();
+    int entry_bci = task->osr_bci();
+    int comp_level = task->comp_level();
+    out->print("compile %s %s %s %d %d",
+               method->klass_name()->as_quoted_ascii(),
+               method->name()->as_quoted_ascii(),
+               method->signature()->as_quoted_ascii(),
+               entry_bci, comp_level);
+    if (compiler_data() != NULL) {
+      if (is_c2_compile(comp_level)) {
 #ifdef COMPILER2
-      // Dump C2 inlining data.
-      ((Compile*)compiler_data())->dump_inline_data(out);
+        // Dump C2 inlining data.
+        ((Compile*)compiler_data())->dump_inline_data(out);
 #endif
-    } else if (is_c1_compile(comp_level)) {
+      } else if (is_c1_compile(comp_level)) {
 #ifdef COMPILER1
-      // Dump C1 inlining data.
-      ((Compilation*)compiler_data())->dump_inline_data(out);
+        // Dump C1 inlining data.
+        ((Compilation*)compiler_data())->dump_inline_data(out);
 #endif
+      }
     }
+    out->cr();
   }
-  out->cr();
 }
 
 void ciEnv::dump_replay_data_unsafe(outputStream* out) {
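
The ciEnv::dump_compile_data change above is a straightforward hardening: this->task() can be NULL, so the whole dump, including the trailing out->cr(), now sits inside a presence check instead of dereferencing the task unconditionally. The shape of the fix in isolation (illustrative types only):

    #include <cstdio>

    struct CompileTask { const char* method_name; int comp_level; };

    void dump_compile_data(const CompileTask* task) {
      if (task != nullptr) {   // guard added; previously task was dereferenced blindly
        std::printf("compile %s %d", task->method_name, task->comp_level);
        std::printf("\n");     // the newline also moved inside the guard
      }
    }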
--- a/src/hotspot/share/classfile/classLoader.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/classfile/classLoader.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -802,6 +802,14 @@
   int end = 0;
   bool set_base_piece = true;
 
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    if (!Arguments::has_jimage()) {
+      vm_exit_during_initialization("CDS is not supported in exploded JDK build", NULL);
+    }
+  }
+#endif
+
   // Iterate over class path entries
   for (int start = 0; start < len; start = end) {
     while (class_path[end] && class_path[end] != os::path_separator()[0]) {
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -279,11 +279,6 @@
   ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();
 
-  // GC interface.
-  void clear_claimed()          { _claimed = 0; }
-  bool claimed() const          { return _claimed == 1; }
-  bool claim();
-
   // The CLD are not placed in the Heap, so the Card Table or
   // the Mod Union Table can't be used to mark when CLD have modified oops.
   // The CT and MUT bits saves this information for the whole class loader data.
@@ -315,6 +310,10 @@
 
   Dictionary* create_dictionary();
  public:
+  // GC interface.
+  void clear_claimed() { _claimed = 0; }
+  bool claimed() const { return _claimed == 1; }
+  bool claim();
 
   bool is_alive(BoolObjectClosure* is_alive_closure) const;
 
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -26,6 +26,7 @@
 #include "classfile/bytecodeAssembler.hpp"
 #include "classfile/defaultMethods.hpp"
 #include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -683,10 +684,11 @@
   Symbol* _method_name;
   Symbol* _method_signature;
   StatefulMethodFamily*  _family;
+  bool _cur_class_is_interface;
 
  public:
-  FindMethodsByErasedSig(Symbol* name, Symbol* signature) :
-      _method_name(name), _method_signature(signature),
+  FindMethodsByErasedSig(Symbol* name, Symbol* signature, bool is_interf) :
+      _method_name(name), _method_signature(signature), _cur_class_is_interface(is_interf),
       _family(NULL) {}
 
   void get_discovered_family(MethodFamily** family) {
@@ -709,14 +711,17 @@
     InstanceKlass* iklass = current_class();
 
     Method* m = iklass->find_method(_method_name, _method_signature);
-    // private interface methods are not candidates for default methods
-    // invokespecial to private interface methods doesn't use default method logic
-    // private class methods are not candidates for default methods,
-    // private methods do not override default methods, so need to perform
-    // default method inheritance without including private methods
-    // The overpasses are your supertypes' errors, we do not include them
-    // future: take access controls into account for superclass methods
-    if (m != NULL && !m->is_static() && !m->is_overpass() && !m->is_private()) {
+    // Private interface methods are not candidates for default methods.
+    // invokespecial to private interface methods doesn't use default method logic.
+    // Private class methods are not candidates for default methods.
+    // Private methods do not override default methods, so need to perform
+    // default method inheritance without including private methods.
+    // The overpasses are your supertypes' errors, we do not include them.
+    // Non-public methods in java.lang.Object are not candidates for default
+    // methods.
+    // Future: take access controls into account for superclass methods
+    if (m != NULL && !m->is_static() && !m->is_overpass() && !m->is_private() &&
+     (!_cur_class_is_interface || !SystemDictionary::is_nonpublic_Object_method(m))) {
       if (_family == NULL) {
         _family = new StatefulMethodFamily();
       }
@@ -726,8 +731,8 @@
         scope->add_mark(restorer);
       } else {
         // This is the rule that methods in classes "win" (bad word) over
-        // methods in interfaces. This works because of single inheritance
-        // private methods in classes do not "win", they will be found
+        // methods in interfaces. This works because of single inheritance.
+        // Private methods in classes do not "win", they will be found
         // first on searching, but overriding for invokevirtual needs
         // to find default method candidates for the same signature
         _family->set_target_if_empty(m);
@@ -745,10 +750,10 @@
 
 static void generate_erased_defaults(
      InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
-     EmptyVtableSlot* slot, TRAPS) {
+     EmptyVtableSlot* slot, bool is_intf, TRAPS) {
 
   // sets up a set of methods with the same exact erased signature
-  FindMethodsByErasedSig visitor(slot->name(), slot->signature());
+  FindMethodsByErasedSig visitor(slot->name(), slot->signature(), is_intf);
   visitor.run(klass);
 
   MethodFamily* family;
@@ -817,7 +822,7 @@
       slot->print_on(&ls);
       ls.cr();
     }
-    generate_erased_defaults(klass, empty_slots, slot, CHECK);
+    generate_erased_defaults(klass, empty_slots, slot, klass->is_interface(), CHECK);
   }
   log_debug(defaultmethods)("Creating defaults and overpasses...");
   create_defaults_and_exceptions(empty_slots, klass, CHECK);
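
The defaultMethods.cpp change threads an "is this class an interface" flag down to the erased-signature walk so that non-public java.lang.Object methods (clone, finalize, and friends) never become default-method candidates for an interface. The candidate test, reduced to a plain predicate (a simplified sketch of the condition in the hunk, not the real Method API):

    struct MethodInfo {
      bool is_static;
      bool is_overpass;
      bool is_private;
      bool is_public;
      bool holder_is_Object;   // method declared in java.lang.Object
    };

    bool is_default_candidate(const MethodInfo& m, bool cur_class_is_interface) {
      if (m.is_static || m.is_overpass || m.is_private) {
        return false;                                    // never candidates
      }
      if (cur_class_is_interface && !m.is_public && m.holder_is_Object) {
        return false;                                    // exclusion added by this change
      }
      return true;
    }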
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -2734,43 +2734,68 @@
   return method_type;
 }
 
+Handle SystemDictionary::find_field_handle_type(Symbol* signature,
+                                                Klass* accessing_klass,
+                                                TRAPS) {
+  Handle empty;
+  ResourceMark rm(THREAD);
+  SignatureStream ss(signature, /*is_method=*/ false);
+  if (!ss.is_done()) {
+    Handle class_loader, protection_domain;
+    if (accessing_klass != NULL) {
+      class_loader      = Handle(THREAD, accessing_klass->class_loader());
+      protection_domain = Handle(THREAD, accessing_klass->protection_domain());
+    }
+    oop mirror = ss.as_java_mirror(class_loader, protection_domain, SignatureStream::NCDFError, CHECK_(empty));
+    ss.next();
+    if (ss.is_done()) {
+      return Handle(THREAD, mirror);
+    }
+  }
+  return empty;
+}
+
 // Ask Java code to find or construct a method handle constant.
 Handle SystemDictionary::link_method_handle_constant(Klass* caller,
                                                      int ref_kind, //e.g., JVM_REF_invokeVirtual
                                                      Klass* callee,
-                                                     Symbol* name_sym,
+                                                     Symbol* name,
                                                      Symbol* signature,
                                                      TRAPS) {
   Handle empty;
-  Handle name = java_lang_String::create_from_symbol(name_sym, CHECK_(empty));
-  Handle type;
-  if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
-    type = find_method_handle_type(signature, caller, CHECK_(empty));
-  } else if (caller == NULL) {
-    // This should not happen.  JDK code should take care of that.
+  if (caller == NULL) {
     THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad MH constant", empty);
+  }
+  Handle name_str      = java_lang_String::create_from_symbol(name,      CHECK_(empty));
+  Handle signature_str = java_lang_String::create_from_symbol(signature, CHECK_(empty));
+
+  // Put symbolic info from the MH constant into freshly created MemberName and resolve it.
+  Handle mname = MemberName_klass()->allocate_instance_handle(CHECK_(empty));
+  java_lang_invoke_MemberName::set_clazz(mname(), callee->java_mirror());
+  java_lang_invoke_MemberName::set_name (mname(), name_str());
+  java_lang_invoke_MemberName::set_type (mname(), signature_str());
+  java_lang_invoke_MemberName::set_flags(mname(), MethodHandles::ref_kind_to_flags(ref_kind));
+
+  if (ref_kind == JVM_REF_invokeVirtual &&
+      callee->name() == vmSymbols::java_lang_invoke_MethodHandle() &&
+      (name == vmSymbols::invoke_name() || name == vmSymbols::invokeExact_name())) {
+    // Skip resolution for j.l.i.MethodHandle.invoke()/invokeExact().
+    // They are public signature polymorphic methods, but require appendix argument
+    // which MemberName resolution doesn't handle. There's special logic on JDK side to handle them
+    // (see MethodHandles.linkMethodHandleConstant() and MethodHandles.findVirtualForMH()).
   } else {
-    ResourceMark rm(THREAD);
-    SignatureStream ss(signature, false);
-    if (!ss.is_done()) {
-      oop mirror = ss.as_java_mirror(Handle(THREAD, caller->class_loader()),
-                                     Handle(THREAD, caller->protection_domain()),
-                                     SignatureStream::NCDFError, CHECK_(empty));
-      type = Handle(THREAD, mirror);
-      ss.next();
-      if (!ss.is_done())  type = Handle();  // error!
-    }
+    MethodHandles::resolve_MemberName(mname, caller, CHECK_(empty));
   }
-  if (type.is_null()) {
-    THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty);
-  }
+
+  // After method/field resolution succeeded, it's safe to resolve MH signature as well.
+  Handle type = MethodHandles::resolve_MemberName_type(mname, caller, CHECK_(empty));
 
   // call java.lang.invoke.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle
   JavaCallArguments args;
   args.push_oop(Handle(THREAD, caller->java_mirror()));  // the referring class
   args.push_int(ref_kind);
   args.push_oop(Handle(THREAD, callee->java_mirror()));  // the target class
-  args.push_oop(name);
+  args.push_oop(name_str);
   args.push_oop(type);
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -532,6 +532,11 @@
                                            Klass* accessing_klass,
                                            TRAPS);
 
+  // find a java.lang.Class object for a given signature
+  static Handle    find_field_handle_type(Symbol* signature,
+                                          Klass* accessing_klass,
+                                          TRAPS);
+
   // ask Java to compute a java.lang.invoke.MethodHandle object for a given CP entry
   static Handle    link_method_handle_constant(Klass* caller,
                                                int ref_kind, //e.g., JVM_REF_invokeVirtual
@@ -649,6 +654,12 @@
   static bool is_platform_class_loader(oop class_loader);
   static void clear_invoke_method_table();
 
+  // Returns TRUE if the method is a non-public member of class java.lang.Object.
+  static bool is_nonpublic_Object_method(Method* m) {
+    assert(m != NULL, "Unexpected NULL Method*");
+    return !m->is_public() && m->method_holder() == SystemDictionary::Object_klass();
+  }
+
 protected:
   static InstanceKlass* find_shared_class(Symbol* class_name);
 
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -304,6 +304,7 @@
   /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */         \
   template(findMethodHandleType_name,                 "findMethodHandleType")                     \
   template(findMethodHandleType_signature,       "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
+  template(invokeExact_name,                          "invokeExact")                              \
   template(linkMethodHandleConstant_name,             "linkMethodHandleConstant")                 \
   template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
   template(linkMethod_name,                           "linkMethod")                               \
--- a/src/hotspot/share/code/compiledIC.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/code/compiledIC.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -230,10 +230,13 @@
 #ifdef ASSERT
     int index = call_info->resolved_method()->itable_index();
     assert(index == itable_index, "CallInfo pre-computes this");
-#endif //ASSERT
     InstanceKlass* k = call_info->resolved_method()->method_holder();
     assert(k->verify_itable_index(itable_index), "sanity check");
-    InlineCacheBuffer::create_transition_stub(this, k, entry);
+#endif //ASSERT
+    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
+                                                    call_info->resolved_klass());
+    holder->claim();
+    InlineCacheBuffer::create_transition_stub(this, holder, entry);
   } else {
     assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
     // Can be different than selected_method->vtable_index(), due to package-private etc.
@@ -517,7 +520,14 @@
 
 bool CompiledIC::is_icholder_entry(address entry) {
   CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
-  return (cb != NULL && cb->is_adapter_blob());
+  if (cb != NULL && cb->is_adapter_blob()) {
+    return true;
+  }
+  // itable stubs also use CompiledICHolder
+  if (VtableStubs::is_entry_point(entry) && VtableStubs::stub_containing(entry)->is_itable_stub()) {
+    return true;
+  }
+  return false;
 }
 
 bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
--- a/src/hotspot/share/code/compiledIC.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/code/compiledIC.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -45,11 +45,11 @@
 //          \                        /   \     /
 //       [4] \                      / [4] \->-/
 //            \->-  Megamorphic -<-/
-//                  (Method*)
+//              (CompiledICHolder*)
 //
-// The text in paranteses () refere to the value of the inline cache receiver (mov instruction)
+// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
 //
-// The numbers in square brackets refere to the kind of transition:
+// The numbers in square brackets refer to the kind of transition:
 // [1]: Initial fixup. Receiver is found from debug information
 // [2]: Compilation of a method
 // [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
@@ -344,6 +344,7 @@
   // Code
   static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
   static int to_interp_stub_size();
+  static int to_trampoline_stub_size();
   static int reloc_to_interp_stub();
   static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
   static int to_aot_stub_size();
--- a/src/hotspot/share/code/compiledMethod.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -404,8 +404,7 @@
     // yet be marked below. (We check this further below).
     CompiledICHolder* cichk_oop = ic->cached_icholder();
 
-    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
+    if (cichk_oop->is_loader_alive(is_alive)) {
       return;
     }
   } else {
--- a/src/hotspot/share/code/dependencies.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/code/dependencies.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -241,8 +241,18 @@
     bool is_object() const            { assert(is_valid(), "oops"); return _id < 0; }
 
     Metadata*  as_metadata(OopRecorder* rec) const    { assert(is_metadata(), "oops"); return rec->metadata_at(index()); }
-    Klass*     as_klass(OopRecorder* rec) const       { assert(as_metadata(rec)->is_klass(), "oops"); return (Klass*) as_metadata(rec); }
-    Method*    as_method(OopRecorder* rec) const      { assert(as_metadata(rec)->is_method(), "oops"); return (Method*) as_metadata(rec); }
+    Klass*     as_klass(OopRecorder* rec) const {
+      Metadata* m = as_metadata(rec);
+      assert(m != NULL, "as_metadata returned NULL");
+      assert(m->is_klass(), "oops");
+      return (Klass*) m;
+    }
+    Method*    as_method(OopRecorder* rec) const {
+      Metadata* m = as_metadata(rec);
+      assert(m != NULL, "as_metadata returned NULL");
+      assert(m->is_method(), "oops");
+      return (Method*) m;
+    }
     jobject    as_object(OopRecorder* rec) const      { assert(is_object(), "oops"); return rec->oop_at(index()); }
   };
 #endif // INCLUDE_JVMCI
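
The dependencies.hpp hunk expands the one-line as_klass/as_method accessors so the Metadata* returned by the recorder is checked against NULL before its kind is queried; a failed lookup now trips a clear assert instead of a virtual call through a null pointer. The same "assert, then cast" shape in isolation (toy class hierarchy, not HotSpot's):

    #include <cassert>

    struct Metadata {
      virtual ~Metadata() {}
      virtual bool is_klass() const { return false; }
    };
    struct Klass : Metadata {
      bool is_klass() const override { return true; }
    };

    // Debug-checked downcast: fail loudly on NULL or on the wrong kind.
    Klass* as_klass_checked(Metadata* m) {
      assert(m != nullptr && "metadata lookup returned NULL");
      assert(m->is_klass() && "metadata is not a Klass");
      return static_cast<Klass*>(m);
    }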
--- a/src/hotspot/share/code/nmethod.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1485,16 +1485,18 @@
 
 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
   // Compiled code
-  {
-  RelocIterator iter(this, low_boundary);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
-        return true;
+
+  // Prevent extra code cache walk for platforms that don't have immediate oops.
+  if (relocInfo::mustIterateImmediateOopsInCode()) {
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type) {
+        if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
+          return true;
+        }
       }
     }
   }
-  }
 
   return do_unloading_scopes(is_alive, unloading_occurred);
 }
@@ -1545,7 +1547,7 @@
         CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           CompiledICHolder* cichk = ic->cached_icholder();
-          f(cichk->holder_method());
+          f(cichk->holder_metadata());
           f(cichk->holder_klass());
         } else {
           Metadata* ic_oop = ic->cached_metadata();
@@ -1584,18 +1586,21 @@
     // (See comment above.)
   }
 
-  RelocIterator iter(this, low_boundary);
+  // Prevent extra code cache walk for platforms that don't have immediate oops.
+  if (relocInfo::mustIterateImmediateOopsInCode()) {
+    RelocIterator iter(this, low_boundary);
 
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type ) {
-      oop_Relocation* r = iter.oop_reloc();
-      // In this loop, we must only follow those oops directly embedded in
-      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) +
-                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-             "oop must be found in exactly one place");
-      if (r->oop_is_immediate() && r->oop_value() != NULL) {
-        f->do_oop(r->oop_addr());
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type ) {
+        oop_Relocation* r = iter.oop_reloc();
+        // In this loop, we must only follow those oops directly embedded in
+        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
+        assert(1 == (r->oop_is_immediate()) +
+               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+               "oop must be found in exactly one place");
+        if (r->oop_is_immediate() && r->oop_value() != NULL) {
+          f->do_oop(r->oop_addr());
+        }
       }
     }
   }
@@ -1620,7 +1625,7 @@
   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
   if (_oops_do_mark_link == NULL) {
     // Claim this nmethod for this thread to mark.
-    if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
+    if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) {
       // Atomically append this nmethod (now claimed) to the head of the list:
       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
       for (;;) {
--- a/src/hotspot/share/code/relocInfo.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/code/relocInfo.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -910,6 +910,10 @@
   }
   // an oop in the instruction stream
   static RelocationHolder spec_for_immediate() {
+    // If no immediate oops are generated, we can skip some walks over nmethods.
+    // Assert that they don't get generated accidentally!
+    assert(relocInfo::mustIterateImmediateOopsInCode(),
+           "Must return true so we will search for oops as roots etc. in the code.");
     const int oop_index = 0;
     const int offset    = 0;    // if you want an offset, use the oop pool
     RelocationHolder rh = newHolder();
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4786,7 +4786,7 @@
       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
     }
     if (has_non_young_time) {
-      timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, young_time);
+      timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
     }
   }
 };
--- a/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -77,7 +77,7 @@
   if (_time_stamps == NULL) {
     // We allocate the _time_stamps array lazily since logging can be enabled dynamically
     GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-    if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(time_stamps, &_time_stamps)) {
       // Someone already setup the time stamps
       FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
     }
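
Both this gcTaskThread.cpp hunk and the nmethod.cpp one above swap the "cmpxchg against NULL and compare the result" idiom for Atomic::replace_if_null, which states the intent (claim the slot only if nobody has yet) directly. The lazy-allocation race it protects, sketched on std::atomic rather than the HotSpot Atomic API:

    #include <atomic>

    struct TimeStamps { /* payload elided */ };

    static std::atomic<TimeStamps*> g_time_stamps{nullptr};

    // Publish 'value' only if the slot is still NULL; true means this caller won.
    template <typename T>
    bool replace_if_null(T* value, std::atomic<T*>* dest) {
      T* expected = nullptr;
      return dest->compare_exchange_strong(expected, value);
    }

    TimeStamps* lazy_time_stamps() {
      if (g_time_stamps.load() == nullptr) {
        TimeStamps* fresh = new TimeStamps();
        if (!replace_if_null(fresh, &g_time_stamps)) {
          delete fresh;                  // another thread installed its copy first
        }
      }
      return g_time_stamps.load();
    }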
--- a/src/hotspot/share/include/jvm.h	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/include/jvm.h	Thu Jan 18 22:05:24 2018 +0100
@@ -317,23 +317,18 @@
 JNIEXPORT jobject JNICALL
 JVM_NewMultiArray(JNIEnv *env, jclass eltClass, jintArray dim);
 
-/*
- * java.lang.Class and java.lang.ClassLoader
- */
-
-#define JVM_CALLER_DEPTH -1
 
 /*
  * Returns the immediate caller class of the native method invoking
  * JVM_GetCallerClass.  The Method.invoke and other frames due to
  * reflection machinery are skipped.
  *
- * The depth parameter must be -1 (JVM_DEPTH). The caller is expected
- * to be marked with sun.reflect.CallerSensitive.  The JVM will throw
- * an error if it is not marked propertly.
+ * The caller is expected to be marked with
+ * jdk.internal.reflect.CallerSensitive. The JVM will throw an
+ * error if it is not marked properly.
  */
 JNIEXPORT jclass JNICALL
-JVM_GetCallerClass(JNIEnv *env, int depth);
+JVM_GetCallerClass(JNIEnv *env);
 
 
 /*
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -804,7 +804,7 @@
   // it is not an interface.  The receiver for invokespecial calls within interface
   // methods must be checked for every call.
   InstanceKlass* sender = pool->pool_holder();
-  sender = sender->is_anonymous() ? sender->host_klass() : sender;
+  sender = sender->has_host_klass() ? sender->host_klass() : sender;
 
   switch (info.call_kind()) {
   case CallInfo::direct_call:
@@ -822,6 +822,7 @@
   case CallInfo::itable_call:
     cp_cache_entry->set_itable_call(
       bytecode,
+      info.resolved_klass(),
       info.resolved_method(),
       info.itable_index());
     break;
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -699,6 +699,7 @@
   // Estimate the number of static and aot call stubs that might be emitted.
   int static_call_stubs = 0;
   int aot_call_stubs = 0;
+  int trampoline_stubs = 0;
   objArrayOop sites = this->sites();
   for (int i = 0; i < sites->length(); i++) {
     oop site = sites->obj_at(i);
@@ -710,8 +711,18 @@
             JVMCI_ERROR_0("expected Integer id, got %s", id_obj->klass()->signature_name());
           }
           jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT));
-          if (id == INVOKESTATIC || id == INVOKESPECIAL) {
+          switch (id) {
+          case INVOKEINTERFACE:
+          case INVOKEVIRTUAL:
+            trampoline_stubs++;
+            break;
+          case INVOKESTATIC:
+          case INVOKESPECIAL:
             static_call_stubs++;
+            trampoline_stubs++;
+            break;
+          default:
+            break;
           }
         }
       }
@@ -726,6 +737,7 @@
     }
   }
   int size = static_call_stubs * CompiledStaticCall::to_interp_stub_size();
+  size += trampoline_stubs * CompiledStaticCall::to_trampoline_stub_size();
 #if INCLUDE_AOT
   size += aot_call_stubs * CompiledStaticCall::to_aot_stub_size();
 #endif
@@ -1171,7 +1183,7 @@
     }
 
     TRACE_jvmci_3("method call");
-    CodeInstaller::pd_relocate_JavaMethod(hotspot_method, pc_offset, CHECK);
+    CodeInstaller::pd_relocate_JavaMethod(buffer, hotspot_method, pc_offset, CHECK);
     if (_next_call_type == INVOKESTATIC || _next_call_type == INVOKESPECIAL) {
       // Need a static call stub for transitions from compiled to interpreted.
       CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset);
@@ -1282,4 +1294,3 @@
     }
   }
 }
-
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -185,7 +185,7 @@
   void pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS);
   void pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS);
   void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS);
-  void pd_relocate_JavaMethod(Handle method, jint pc_offset, TRAPS);
+  void pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle method, jint pc_offset, TRAPS);
   void pd_relocate_poll(address pc, jint mark, TRAPS);
 
   objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); }
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -749,8 +749,13 @@
 C2V_END
 
 C2V_VMENTRY(jobject, getImplementor, (JNIEnv *, jobject, jobject jvmci_type))
-  InstanceKlass* klass = (InstanceKlass*) CompilerToVM::asKlass(jvmci_type);
-  oop implementor = CompilerToVM::get_jvmci_type(klass->implementor(), CHECK_NULL);
+  Klass* klass = CompilerToVM::asKlass(jvmci_type);
+  if (!klass->is_interface()) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("Expected interface type, got %s", klass->external_name()));
+  }
+  InstanceKlass* iklass = InstanceKlass::cast(klass);
+  oop implementor = CompilerToVM::get_jvmci_type(iklass->implementor(), CHECK_NULL);
   return JNIHandles::make_local(THREAD, implementor);
 C2V_END
 
@@ -989,8 +994,12 @@
 C2V_END
 
 C2V_VMENTRY(jobject, getClassInitializer, (JNIEnv *, jobject, jobject jvmci_type))
-  InstanceKlass* klass = (InstanceKlass*) CompilerToVM::asKlass(jvmci_type);
-  oop result = CompilerToVM::get_jvmci_method(klass->class_initializer(), CHECK_NULL);
+  Klass* klass = CompilerToVM::asKlass(jvmci_type);
+  if (!klass->is_instance_klass()) {
+    return NULL;
+  }
+  InstanceKlass* iklass = InstanceKlass::cast(klass);
+  oop result = CompilerToVM::get_jvmci_method(iklass->class_initializer(), CHECK_NULL);
   return JNIHandles::make_local(THREAD, result);
 C2V_END
 
@@ -1424,6 +1433,7 @@
               Deoptimization::reassign_fields(fst.current(), fst.register_map(), scope->objects(), realloc_failures, false);
 
               GrowableArray<ScopeValue*>* local_values = scope->locals();
+              assert(local_values != NULL, "NULL locals");
               typeArrayOop array_oop = oopFactory::new_boolArray(local_values->length(), CHECK_NULL);
               typeArrayHandle array(THREAD, array_oop);
               for (int i = 0; i < local_values->length(); i++) {
@@ -1651,7 +1661,6 @@
 
     GrowableArray<ScopeValue*>* scopeLocals = cvf->scope()->locals();
     StackValueCollection* locals = cvf->locals();
-
     if (locals != NULL) {
       for (int i2 = 0; i2 < locals->size(); i2++) {
         StackValue* var = locals->at(i2);
@@ -1662,6 +1671,27 @@
         }
       }
     }
+
+    GrowableArray<ScopeValue*>* scopeExpressions = cvf->scope()->expressions();
+    StackValueCollection* expressions = cvf->expressions();
+    if (expressions != NULL) {
+      for (int i2 = 0; i2 < expressions->size(); i2++) {
+        StackValue* var = expressions->at(i2);
+        if (var->type() == T_OBJECT && scopeExpressions->at(i2)->is_object()) {
+          jvalue val;
+          val.l = (jobject) expressions->at(i2)->get_obj()();
+          cvf->update_stack(T_OBJECT, i2, val);
+        }
+      }
+    }
+
+    GrowableArray<MonitorValue*>* scopeMonitors = cvf->scope()->monitors();
+    GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
+    if (monitors != NULL) {
+      for (int i2 = 0; i2 < monitors->length(); i2++) {
+        cvf->update_monitor(i2, monitors->at(i2));
+      }
+    }
   }
 
   // all locals are materialized by now
--- a/src/hotspot/share/memory/allocation.inline.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/memory/allocation.inline.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -163,7 +163,7 @@
     return NULL;
   }
 
-  if (os::commit_memory(addr, size, !ExecMem, "Allocator (commit)")) {
+  if (os::commit_memory(addr, size, !ExecMem)) {
     return (E*)addr;
   } else {
     os::release_memory(addr, size);
--- a/src/hotspot/share/oops/compiledICHolder.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/compiledICHolder.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -32,8 +32,8 @@
 volatile int CompiledICHolder::_live_not_claimed_count;
 
 
-CompiledICHolder::CompiledICHolder(Method* method, Klass* klass)
-  : _holder_method(method), _holder_klass(klass) {
+CompiledICHolder::CompiledICHolder(Metadata* metadata, Klass* klass)
+  : _holder_metadata(metadata), _holder_klass(klass) {
 #ifdef ASSERT
   Atomic::inc(&_live_count);
   Atomic::inc(&_live_not_claimed_count);
@@ -47,12 +47,28 @@
 }
 #endif // ASSERT
 
+bool CompiledICHolder::is_loader_alive(BoolObjectClosure* is_alive) {
+  if (_holder_metadata->is_method()) {
+    if (!((Method*)_holder_metadata)->method_holder()->is_loader_alive(is_alive)) {
+      return false;
+    }
+  } else if (_holder_metadata->is_klass()) {
+    if (!((Klass*)_holder_metadata)->is_loader_alive(is_alive)) {
+      return false;
+    }
+  }
+  if (!_holder_klass->is_loader_alive(is_alive)) {
+    return false;
+  }
+  return true;
+}
+
 // Printing
 
 void CompiledICHolder::print_on(outputStream* st) const {
   st->print("%s", internal_name());
-  st->print(" - method: "); holder_method()->print_value_on(st); st->cr();
-  st->print(" - klass:  "); holder_klass()->print_value_on(st); st->cr();
+  st->print(" - metadata: "); holder_metadata()->print_value_on(st); st->cr();
+  st->print(" - klass:    "); holder_klass()->print_value_on(st); st->cr();
 }
 
 void CompiledICHolder::print_value_on(outputStream* st) const {
@@ -63,7 +79,7 @@
 // Verification
 
 void CompiledICHolder::verify_on(outputStream* st) {
-  guarantee(holder_method()->is_method(), "should be method");
+  guarantee(holder_metadata()->is_method() || holder_metadata()->is_klass(), "should be method or klass");
   guarantee(holder_klass()->is_klass(),   "should be klass");
 }
 
--- a/src/hotspot/share/oops/compiledICHolder.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/compiledICHolder.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -29,8 +29,9 @@
 #include "utilities/macros.hpp"
 
 // A CompiledICHolder* is a helper object for the inline cache implementation.
-// It holds an intermediate value (method+klass pair) used when converting from
-// compiled to an interpreted call.
+// It holds:
+//   (1) (method+klass pair) when converting from compiled to an interpreted call
+//   (2) (klass+klass pair) when calling itable stub from megamorphic compiled call
 //
 // These are always allocated in the C heap and are freed during a
 // safepoint by the ICBuffer logic.  It's unsafe to free them earlier
@@ -45,32 +46,33 @@
   static volatile int _live_not_claimed_count; // allocated but not yet in use so not
                                                // reachable by iterating over nmethods
 
-  Method* _holder_method;
+  Metadata* _holder_metadata;
   Klass*    _holder_klass;    // to avoid name conflict with oopDesc::_klass
   CompiledICHolder* _next;
 
  public:
   // Constructor
-  CompiledICHolder(Method* method, Klass* klass);
+  CompiledICHolder(Metadata* metadata, Klass* klass);
   ~CompiledICHolder() NOT_DEBUG_RETURN;
 
   static int live_count() { return _live_count; }
   static int live_not_claimed_count() { return _live_not_claimed_count; }
 
   // accessors
-  Method* holder_method() const     { return _holder_method; }
   Klass*    holder_klass()  const     { return _holder_klass; }
+  Metadata* holder_metadata() const   { return _holder_metadata; }
 
-  void set_holder_method(Method* m) { _holder_method = m; }
-  void set_holder_klass(Klass* k)   { _holder_klass = k; }
+  void set_holder_metadata(Metadata* m) { _holder_metadata = m; }
+  void set_holder_klass(Klass* k)     { _holder_klass = k; }
 
-  // interpreter support (offsets in bytes)
-  static int holder_method_offset()   { return offset_of(CompiledICHolder, _holder_method); }
+  static int holder_metadata_offset() { return offset_of(CompiledICHolder, _holder_metadata); }
   static int holder_klass_offset()    { return offset_of(CompiledICHolder, _holder_klass); }
 
   CompiledICHolder* next()     { return _next; }
   void set_next(CompiledICHolder* n) { _next = n; }
 
+  bool is_loader_alive(BoolObjectClosure* is_alive);
+
   // Verify
   void verify_on(outputStream* st);
 
--- a/src/hotspot/share/oops/constantPool.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/constantPool.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_OOPS_CONSTANTPOOLOOP_HPP
 #define SHARE_VM_OOPS_CONSTANTPOOLOOP_HPP
 
+#include "memory/allocation.inline.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/cpCache.hpp"
 #include "oops/objArrayOop.hpp"
@@ -1021,7 +1022,7 @@
         delete(cur);
       }
     }
-    delete _buckets;
+    FREE_C_HEAP_ARRAY(SymbolHashMapBucket, _buckets);
   }
 }; // End SymbolHashMap class
 
--- a/src/hotspot/share/oops/cpCache.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/cpCache.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -177,6 +177,7 @@
       // instruction somehow links to a non-interface method (in Object).
       // In that case, the method has no itable index and must be invoked as a virtual.
       // Set a flag to keep track of this corner case.
+      assert(method->is_public(), "Calling non-public method in Object with invokeinterface");
       change_to_virtual = true;
 
       // ...and fall through as if we were handling invokevirtual:
@@ -277,14 +278,16 @@
   set_direct_or_vtable_call(invoke_code, method, index, false);
 }
 
-void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, const methodHandle& method, int index) {
+void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code,
+                                             Klass* referenced_klass,
+                                             const methodHandle& method, int index) {
   assert(method->method_holder()->verify_itable_index(index), "");
   assert(invoke_code == Bytecodes::_invokeinterface, "");
   InstanceKlass* interf = method->method_holder();
   assert(interf->is_interface(), "must be an interface");
   assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
-  set_f1(interf);
-  set_f2(index);
+  set_f1(referenced_klass);
+  set_f2((intx)method());
   set_method_flags(as_TosState(method->result_type()),
                    0,  // no option bits
                    method()->size_of_parameters());
@@ -513,10 +516,23 @@
 
 
 #if INCLUDE_JVMTI
+
+void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) {
+  if (log_is_enabled(Info, redefine, class, update)) {
+    ResourceMark rm;
+    if (!(*trace_name_printed)) {
+      log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name());
+      *trace_name_printed = true;
+    }
+    log_debug(redefine, class, update, constantpool)
+          ("cpc %s entry update: %s(%s)", entry_type, new_method->name()->as_C_string(), new_method->signature()->as_C_string());
+  }
+}
+
 // RedefineClasses() API support:
 // If this ConstantPoolCacheEntry refers to old_method then update it
 // to refer to new_method.
-bool ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
+void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
        Method* new_method, bool * trace_name_printed) {
 
   if (is_vfinal()) {
@@ -525,63 +541,35 @@
       // match old_method so need an update
       // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
       _f2 = (intptr_t)new_method;
-      if (log_is_enabled(Info, redefine, class, update)) {
-        ResourceMark rm;
-        if (!(*trace_name_printed)) {
-          log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name());
-          *trace_name_printed = true;
-        }
-        log_debug(redefine, class, update, constantpool)
-          ("cpc vf-entry update: %s(%s)", new_method->name()->as_C_string(), new_method->signature()->as_C_string());
-      }
-      return true;
+      log_adjust("vfinal", old_method, new_method, trace_name_printed);
     }
-
-    // f1() is not used with virtual entries so bail out
-    return false;
+    return;
   }
 
-  if (_f1 == NULL) {
-    // NULL f1() means this is a virtual entry so bail out
-    // We are assuming that the vtable index does not need change.
-    return false;
+  assert (_f1 != NULL, "should not call with uninteresting entry");
+
+  if (!(_f1->is_method())) {
+    // _f1 is a Klass* for an interface, _f2 is the method
+    if (f2_as_interface_method() == old_method) {
+      _f2 = (intptr_t)new_method;
+      log_adjust("interface", old_method, new_method, trace_name_printed);
+    }
+  } else if (_f1 == old_method) {
+    _f1 = new_method;
+    log_adjust("special, static or dynamic", old_method, new_method, trace_name_printed);
   }
-
-  if (_f1 == old_method) {
-    _f1 = new_method;
-    if (log_is_enabled(Info, redefine, class, update)) {
-      ResourceMark rm;
-      if (!(*trace_name_printed)) {
-        log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name());
-        *trace_name_printed = true;
-      }
-      log_debug(redefine, class, update, constantpool)
-        ("cpc entry update: %s(%s)", new_method->name()->as_C_string(), new_method->signature()->as_C_string());
-    }
-    return true;
-  }
-
-  return false;
 }
 
 // a constant pool cache entry should never contain old or obsolete methods
 bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
-  if (is_vfinal()) {
-    // virtual and final so _f2 contains method ptr instead of vtable index
-    Metadata* f2 = (Metadata*)_f2;
-    // Return false if _f2 refers to an old or an obsolete method.
-    // _f2 == NULL || !_f2->is_method() are just as unexpected here.
-    return (f2 != NULL NOT_PRODUCT(&& f2->is_valid()) && f2->is_method() &&
-            !((Method*)f2)->is_old() && !((Method*)f2)->is_obsolete());
-  } else if (_f1 == NULL ||
-             (NOT_PRODUCT(_f1->is_valid() &&) !_f1->is_method())) {
-    // _f1 == NULL || !_f1->is_method() are OK here
+  Method* m = get_interesting_method_entry(NULL);
+  // return false if m refers to a non-deleted old or obsolete method
+  if (m != NULL) {
+    assert(m->is_valid() && m->is_method(), "m is a valid method");
+    return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete
+  } else {
     return true;
   }
-  // return false if _f1 refers to a non-deleted old or obsolete method
-  return (NOT_PRODUCT(_f1->is_valid() &&) _f1->is_method() &&
-          (f1_as_method()->is_deleted() ||
-          (!f1_as_method()->is_old() && !f1_as_method()->is_obsolete())));
 }
 
 Method* ConstantPoolCacheEntry::get_interesting_method_entry(Klass* k) {
@@ -598,10 +586,11 @@
     return NULL;
   } else {
     if (!(_f1->is_method())) {
-      // _f1 can also contain a Klass* for an interface
-      return NULL;
+      // _f1 is a Klass* for an interface
+      m = f2_as_interface_method();
+    } else {
+      m = f1_as_method();
     }
-    m = f1_as_method();
   }
   assert(m != NULL && m->is_method(), "sanity check");
   if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
--- a/src/hotspot/share/oops/cpCache.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/cpCache.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -249,6 +249,7 @@
 
   void set_itable_call(
     Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
+    Klass* referenced_klass,                     // the referenced klass in the InterfaceMethodref
     const methodHandle& method,                  // the resolved interface method
     int itable_index                             // index into itable for the method
   );
@@ -352,6 +353,7 @@
   bool      is_f1_null() const                   { Metadata* f1 = f1_ord(); return f1 == NULL; }  // classifies a CPC entry as unbound
   int       f2_as_index() const                  { assert(!is_vfinal(), ""); return (int) _f2; }
   Method*   f2_as_vfinal_method() const          { assert(is_vfinal(), ""); return (Method*)_f2; }
+  Method*   f2_as_interface_method() const       { assert(bytecode_1() == Bytecodes::_invokeinterface, ""); return (Method*)_f2; }
   intx flags_ord() const                         { return (intx)OrderAccess::load_acquire(&_flags); }
   int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
   int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
@@ -387,7 +389,7 @@
   // trace_name_printed is set to true if the current call has
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
-  bool adjust_method_entry(Method* old_method, Method* new_method,
+  void adjust_method_entry(Method* old_method, Method* new_method,
          bool* trace_name_printed);
   bool check_no_old_or_obsolete_entries();
   Method* get_interesting_method_entry(Klass* k);
--- a/src/hotspot/share/oops/instanceKlass.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -609,9 +609,11 @@
   InstanceKlass* host_klass() const              {
     InstanceKlass** hk = adr_host_klass();
     if (hk == NULL) {
+      assert(!is_anonymous(), "Anonymous classes have host klasses");
       return NULL;
     } else {
       assert(*hk != NULL, "host klass should always be set if the address is not null");
+      assert(is_anonymous(), "Only anonymous classes have host klasses");
       return *hk;
     }
   }
@@ -623,6 +625,9 @@
       *addr = host;
     }
   }
+  bool has_host_klass() const              {
+    return adr_host_klass() != NULL;
+  }
   bool is_anonymous() const                {
     return (_misc_flags & _misc_is_anonymous) != 0;
   }
--- a/src/hotspot/share/oops/klassVtable.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/klassVtable.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -86,13 +86,14 @@
 
   GrowableArray<Method*> new_mirandas(20);
   // compute the number of mirandas methods that must be added to the end
-  get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
+  get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces,
+               class_flags.is_interface());
   *num_new_mirandas = new_mirandas.length();
 
   // Interfaces do not need interface methods in their vtables
   // This includes miranda methods and during later processing, default methods
   if (!class_flags.is_interface()) {
-    vtable_length += *num_new_mirandas * vtableEntry::size();
+     vtable_length += *num_new_mirandas * vtableEntry::size();
   }
 
   if (Universe::is_bootstrapping() && vtable_length == 0) {
@@ -454,8 +455,13 @@
     } else {
       super_method = method_at(i);
     }
-    // Check if method name matches
-    if (super_method->name() == name && super_method->signature() == signature) {
+    // Check if method name matches.  Ignore match if klass is an interface and the
+    // matching method is a non-public java.lang.Object method.  (See JVMS 5.4.3.4)
+    // This is safe because the method at this slot should never get invoked.
+    // (TBD: put in a method to throw NoSuchMethodError if this slot is ever used.)
+    if (super_method->name() == name && super_method->signature() == signature &&
+        (!_klass->is_interface() ||
+         !SystemDictionary::is_nonpublic_Object_method(super_method))) {
 
       // get super_klass for method_holder for the found method
       InstanceKlass* super_klass =  super_method->method_holder();
@@ -713,7 +719,7 @@
   if (mhk->is_interface()) {
     assert(m->is_public(), "should be public");
     assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
-    if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
+    if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super(), klass()->is_interface())) {
       return true;
     }
   }
@@ -738,7 +744,10 @@
 // During the first run, the current instanceKlass has not yet been
 // created, the superclasses and superinterfaces do have instanceKlasses
 // but may not have vtables, the default_methods list is empty, no overpasses.
-// This is seen by default method creation.
+// Default method generation uses the all_mirandas array as the starter set for
+// maximally-specific default method calculation.  So, for both classes and
+// interfaces, it is necessary that the first pass will find all non-private
+// interface instance methods, whether or not they are concrete.
 //
 // Pass 2: recalculated during vtable initialization: only include abstract methods.
 // The goal of pass 2 is to walk through the superinterfaces to see if any of
@@ -772,7 +781,8 @@
 // Part of the Miranda Rights in the US mean that if you do not have
 // an attorney one will be appointed for you.
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
-                             Array<Method*>* default_methods, const Klass* super) {
+                             Array<Method*>* default_methods, const Klass* super,
+                             bool is_interface) {
   if (m->is_static() || m->is_private() || m->is_overpass()) {
     return false;
   }
@@ -800,8 +810,11 @@
 
   for (const Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super())
   {
-     if (InstanceKlass::cast(cursuper)->find_local_method(name, signature,
-           Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL) {
+     Method* found_mth = InstanceKlass::cast(cursuper)->find_local_method(name, signature,
+       Klass::find_overpass, Klass::skip_static, Klass::skip_private);
+     // Ignore non-public methods in java.lang.Object if klass is an interface.
+     if (found_mth != NULL && (!is_interface ||
+         !SystemDictionary::is_nonpublic_Object_method(found_mth))) {
        return false;
      }
   }
@@ -820,7 +833,7 @@
 void klassVtable::add_new_mirandas_to_lists(
     GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
     Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
-    Array<Method*>* default_methods, const Klass* super) {
+    Array<Method*>* default_methods, const Klass* super, bool is_interface) {
 
   // iterate thru the current interface's method to see if it a miranda
   int num_methods = current_interface_methods->length();
@@ -839,7 +852,7 @@
     }
 
     if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
-      if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
+      if (is_miranda(im, class_methods, default_methods, super, is_interface)) { // is it a miranda at all?
         const InstanceKlass *sk = InstanceKlass::cast(super);
         // check if it is a duplicate of a super's miranda
         if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::find_defaults) == NULL) {
@@ -858,7 +871,8 @@
                                const Klass* super,
                                Array<Method*>* class_methods,
                                Array<Method*>* default_methods,
-                               Array<Klass*>* local_interfaces) {
+                               Array<Klass*>* local_interfaces,
+                               bool is_interface) {
   assert((new_mirandas->length() == 0) , "current mirandas must be 0");
 
   // iterate thru the local interfaces looking for a miranda
@@ -867,7 +881,7 @@
     InstanceKlass *ik = InstanceKlass::cast(local_interfaces->at(i));
     add_new_mirandas_to_lists(new_mirandas, all_mirandas,
                               ik->methods(), class_methods,
-                              default_methods, super);
+                              default_methods, super, is_interface);
     // iterate thru each local's super interfaces
     Array<Klass*>* super_ifs = ik->transitive_interfaces();
     int num_super_ifs = super_ifs->length();
@@ -875,7 +889,7 @@
       InstanceKlass *sik = InstanceKlass::cast(super_ifs->at(j));
       add_new_mirandas_to_lists(new_mirandas, all_mirandas,
                                 sik->methods(), class_methods,
-                                default_methods, super);
+                                default_methods, super, is_interface);
     }
   }
 }
@@ -888,7 +902,8 @@
 int klassVtable::fill_in_mirandas(int initialized) {
   GrowableArray<Method*> mirandas(20);
   get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
-               ik()->default_methods(), ik()->local_interfaces());
+               ik()->default_methods(), ik()->local_interfaces(),
+               klass()->is_interface());
   for (int i = 0; i < mirandas.length(); i++) {
     if (log_develop_is_enabled(Trace, vtables)) {
       Method* meth = mirandas.at(i);
@@ -1185,7 +1200,6 @@
   Array<Method*>* methods = InstanceKlass::cast(interf)->methods();
   int nof_methods = methods->length();
   HandleMark hm;
-  assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
   Handle interface_loader (THREAD, InstanceKlass::cast(interf)->class_loader());
 
   int ime_count = method_count_for_interface(interf);
@@ -1354,8 +1368,10 @@
       }
     }
 
-    // Only count interfaces with at least one method
-    if (method_count > 0) {
+    // Visit all interfaces which either have any methods or can participate in receiver type check.
+    // We do not bother to count methods in transitive interfaces, although that would allow us to skip
+    // this step in the rare case of a zero-method interface extending another zero-method interface.
+    if (method_count > 0 || InstanceKlass::cast(intf)->transitive_interfaces()->length() > 0) {
       blk->doit(intf, method_count);
     }
   }
--- a/src/hotspot/share/oops/klassVtable.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/klassVtable.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -144,21 +144,24 @@
   bool is_miranda_entry_at(int i);
   int fill_in_mirandas(int initialized);
   static bool is_miranda(Method* m, Array<Method*>* class_methods,
-                         Array<Method*>* default_methods, const Klass* super);
+                         Array<Method*>* default_methods, const Klass* super,
+                         bool is_interface);
   static void add_new_mirandas_to_lists(
       GrowableArray<Method*>* new_mirandas,
       GrowableArray<Method*>* all_mirandas,
       Array<Method*>* current_interface_methods,
       Array<Method*>* class_methods,
       Array<Method*>* default_methods,
-      const Klass* super);
+      const Klass* super,
+      bool is_interface);
   static void get_mirandas(
       GrowableArray<Method*>* new_mirandas,
       GrowableArray<Method*>* all_mirandas,
       const Klass* super,
       Array<Method*>* class_methods,
       Array<Method*>* default_methods,
-      Array<Klass*>* local_interfaces);
+      Array<Klass*>* local_interfaces,
+      bool is_interface);
   void verify_against(outputStream* st, klassVtable* vt, int index);
   inline InstanceKlass* ik() const;
   // When loading a class from CDS archive at run time, and no class redefintion
--- a/src/hotspot/share/oops/method.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/method.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -446,7 +446,7 @@
 
 bool Method::init_method_counters(MethodCounters* counters) {
   // Try to install a pointer to MethodCounters, return true on success.
-  return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL;
+  return Atomic::replace_if_null(counters, &_method_counters);
 }
 
 void Method::cleanup_inline_caches() {
--- a/src/hotspot/share/oops/method.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/oops/method.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -698,6 +698,7 @@
   static ByteSize from_interpreted_offset()      { return byte_offset_of(Method, _from_interpreted_entry ); }
   static ByteSize interpreter_entry_offset()     { return byte_offset_of(Method, _i2i_entry ); }
   static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(Method) + wordSize);      }
+  static ByteSize itable_index_offset()          { return byte_offset_of(Method, _vtable_index ); }
 
   // for code generation
   static int method_data_offset_in_bytes()       { return offset_of(Method, _method_data); }
--- a/src/hotspot/share/opto/callnode.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/callnode.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -772,7 +772,7 @@
     ciKlass* boxing_klass = t_oop->klass();
     if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
       // Skip unrelated boxing methods.
-      Node* proj = proj_out(TypeFunc::Parms);
+      Node* proj = proj_out_or_null(TypeFunc::Parms);
       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
         return false;
       }
@@ -784,7 +784,7 @@
       }
       // May modify (by reflection) if an boxing object is passed
       // as argument or returned.
-      Node* proj = returns_pointer() ? proj_out(TypeFunc::Parms) : NULL;
+      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
       if (proj != NULL) {
         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
@@ -824,7 +824,7 @@
 Node *CallNode::result_cast() {
   Node *cast = NULL;
 
-  Node *p = proj_out(TypeFunc::Parms);
+  Node *p = proj_out_or_null(TypeFunc::Parms);
   if (p == NULL)
     return NULL;
 
@@ -1378,13 +1378,13 @@
       PhaseIterGVN *igvn = phase->is_IterGVN();
       // Unreachable fall through path (negative array length),
       // the allocation can only throw so disconnect it.
-      Node* proj = proj_out(TypeFunc::Control);
+      Node* proj = proj_out_or_null(TypeFunc::Control);
       Node* catchproj = NULL;
       if (proj != NULL) {
         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
           Node *cn = proj->fast_out(i);
           if (cn->is_Catch()) {
-            catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
+            catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
             break;
           }
         }
@@ -1442,7 +1442,7 @@
       // Create a cast which is control dependent on the initialization to
       // propagate the fact that the array length must be positive.
       length = new CastIINode(length, narrow_length_type);
-      length->set_req(0, initialization()->proj_out(0));
+      length->set_req(0, initialization()->proj_out_or_null(0));
     }
   }
 
--- a/src/hotspot/share/opto/cfgnode.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/cfgnode.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2373,7 +2373,7 @@
   if (can_reshape && !in(0)->is_Loop()) {
     // Dead code elimination can sometimes delete this projection so
     // if it's not there, there's nothing to do.
-    Node* fallthru = proj_out(0);
+    Node* fallthru = proj_out_or_null(0);
     if (fallthru != NULL) {
       phase->is_IterGVN()->replace_node(fallthru, in(0));
     }
--- a/src/hotspot/share/opto/divnode.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/divnode.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -154,8 +154,8 @@
   virtual bool is_CFG() const  { return false; }
   virtual uint ideal_reg() const { return NotAMachineReg; }
 
-  ProjNode* div_proj() { return proj_out(div_proj_num); }
-  ProjNode* mod_proj() { return proj_out(mod_proj_num); }
+  ProjNode* div_proj() { return proj_out_or_null(div_proj_num); }
+  ProjNode* mod_proj() { return proj_out_or_null(mod_proj_num); }
 };
 
 //------------------------------DivModINode---------------------------------------
--- a/src/hotspot/share/opto/escape.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/escape.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -366,7 +366,7 @@
       delayed_worklist->push(n);
       // Check if a call returns an object.
       if ((n->as_Call()->returns_pointer() &&
-           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
+           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
           (n->is_CallStaticJava() &&
            n->as_CallStaticJava()->is_boxing_method())) {
         add_call_node(n->as_Call());
@@ -2674,7 +2674,7 @@
   PhaseGVN* igvn = _igvn;
   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
   bool is_instance = (toop != NULL) && toop->is_known_instance();
-  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
+  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
   Node *prev = NULL;
   Node *result = orig_mem;
   while (prev != result) {
@@ -3028,7 +3028,7 @@
         // An allocation may have an Initialize which has raw stores. Scan
         // the users of the raw allocation result and push AddP users
         // on alloc_worklist.
-        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
+        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
         assert (raw_result != NULL, "must have an allocation result");
         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
           Node *use = raw_result->fast_out(i);
@@ -3219,7 +3219,7 @@
       // we don't need to do anything, but the users must be pushed
     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
       // we don't need to do anything, but the users must be pushed
-      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
+      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
       if (n == NULL)
         continue;
     } else if (n->Opcode() == Op_StrCompressedCopy ||
--- a/src/hotspot/share/opto/graphKit.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/graphKit.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3754,7 +3754,7 @@
 
 // Trace Allocate -> Proj[Parm] -> Initialize
 InitializeNode* AllocateNode::initialization() {
-  ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
+  ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress);
   if (rawoop == NULL)  return NULL;
   for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
     Node* init = rawoop->fast_out(i);
--- a/src/hotspot/share/opto/ifnode.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/ifnode.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -505,7 +505,7 @@
   //  Flip 1:  If (Bool[<] CmpU(l, LoadRange)) ...
   //  Flip 2:  If (Bool[<=] CmpU(LoadRange, l)) ...
 
-  ProjNode* iftrap = proj_out(flip_test == 2 ? true : false);
+  ProjNode* iftrap = proj_out_or_null(flip_test == 2 ? true : false);
   return iftrap;
 }
 
@@ -1195,14 +1195,17 @@
 // Check that the If that is in between the 2 integer comparisons has
 // no side effect
 bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) {
-  if (proj != NULL &&
-      proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
-      proj->outcnt() <= 2) {
+  if (proj == NULL) {
+    return false;
+  }
+  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
+  if (unc != NULL && proj->outcnt() <= 2) {
     if (proj->outcnt() == 1 ||
         // Allow simple null check from LoadRange
         (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
       CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
       CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
+      assert(dom_unc != NULL, "is_uncommon_trap_if_pattern returned NULL");
 
       // reroute_side_effect_free_unc changes the state of this
       // uncommon trap to restart execution at the previous
@@ -1471,7 +1474,7 @@
   // be skipped. For example, range check predicate has two checks
   // for lower and upper bounds.
   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
-  if ((unc_proj != NULL) && (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL)) {
+  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL) {
     prev_dom = idom;
   }
 
--- a/src/hotspot/share/opto/library_call.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/library_call.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1495,7 +1495,7 @@
     // escape analysis can go from the MemBarStoreStoreNode to the
     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
     // based on the escape status of the AllocateNode.
-    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
   }
   if (compress) {
     set_result(_gvn.transform(count));
@@ -1589,7 +1589,7 @@
       // escape analysis can go from the MemBarStoreStoreNode to the
       // AllocateNode and eliminate the MemBarStoreStoreNode if possible
       // based on the escape status of the AllocateNode.
-      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
     } else {
       insert_mem_bar(Op_MemBarCPUOrder);
     }
@@ -1675,7 +1675,7 @@
       // escape analysis can go from the MemBarStoreStoreNode to the
       // AllocateNode and eliminate the MemBarStoreStoreNode if possible
       // based on the escape status of the AllocateNode.
-      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
     } else {
       insert_mem_bar(Op_MemBarCPUOrder);
     }
@@ -4722,7 +4722,7 @@
     // escape analysis can go from the MemBarStoreStoreNode to the
     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
     // based on the escape status of the AllocateNode.
-    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
   } else {
     insert_mem_bar(Op_MemBarCPUOrder);
   }
@@ -5031,7 +5031,7 @@
     Node *mem = reset_memory();
     set_all_memory(mem);
     alloc->set_req(TypeFunc::Memory, mem);
-    set_control(init->proj_out(TypeFunc::Control));
+    set_control(init->proj_out_or_null(TypeFunc::Control));
     set_i_o(callprojs.fallthrough_ioproj);
 
     // Update memory as done in GraphKit::set_output_for_allocation()
@@ -5042,8 +5042,8 @@
     }
     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
     int            elemidx  = C->get_alias_index(telemref);
-    set_memory(init->proj_out(TypeFunc::Memory), Compile::AliasIdxRaw);
-    set_memory(init->proj_out(TypeFunc::Memory), elemidx);
+    set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
+    set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
 
     Node* allocx = _gvn.transform(alloc);
     assert(allocx == alloc, "where has the allocation gone?");
@@ -5360,7 +5360,7 @@
     // to finish initializing the allocated object.
     if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
       IfNode* iff = ctl->in(0)->as_If();
-      Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
+      Node* not_ctl = iff->proj_out_or_null(1 - ctl->as_Proj()->_con);
       assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
       if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
         ctl = iff->in(0);       // This test feeds the known slow_region.
--- a/src/hotspot/share/opto/loopTransform.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/loopTransform.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1017,7 +1017,6 @@
   CountedLoopNode *main_head = loop->_head->as_CountedLoop();
   assert( main_head->is_normal_loop(), "" );
   CountedLoopEndNode *main_end = main_head->loopexit();
-  guarantee(main_end != NULL, "no loop exit node");
   assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
 
   Node *pre_header= main_head->in(LoopNode::EntryControl);
@@ -1243,7 +1242,6 @@
   // Find common pieces of the loop being guarded with pre & post loops
   CountedLoopNode *main_head = loop->_head->as_CountedLoop();
   CountedLoopEndNode *main_end = main_head->loopexit();
-  guarantee(main_end != NULL, "no loop exit node");
   // diagnostic to show loop end is not properly formed
   assert(main_end->outcnt() == 2, "1 true, 1 false path only");
 
@@ -1293,7 +1291,6 @@
   // Find common pieces of the loop being guarded with pre & post loops
   CountedLoopNode *main_head = loop->_head->as_CountedLoop();
   CountedLoopEndNode *main_end = main_head->loopexit();
-  guarantee(main_end != NULL, "no loop exit node");
   // diagnostic to show loop end is not properly formed
   assert(main_end->outcnt() == 2, "1 true, 1 false path only");
 
@@ -1427,7 +1424,6 @@
   assert(LoopUnrollLimit, "");
   CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
   CountedLoopEndNode *loop_end = loop_head->loopexit();
-  assert(loop_end, "");
 #ifndef PRODUCT
   if (PrintOpto && VerifyLoopOptimizations) {
     tty->print("Unrolling ");
@@ -2972,7 +2968,7 @@
       }
       store = n;
       store_value = value;
-    } else if (n->is_If() && n != head->loopexit()) {
+    } else if (n->is_If() && n != head->loopexit_or_null()) {
       msg = "extra control flow";
       msg_node = n;
     }
@@ -3114,7 +3110,6 @@
   ok.set(store->in(MemNode::Memory)->_idx);
 
   CountedLoopEndNode* loop_exit = head->loopexit();
-  guarantee(loop_exit != NULL, "no loop exit node");
 
   // Loop structure is ok
   ok.set(head->_idx);
@@ -3204,7 +3199,7 @@
     return false;
   }
 
-  Node* exit = head->loopexit()->proj_out(0);
+  Node* exit = head->loopexit()->proj_out_or_null(0);
   if (exit == NULL) {
     return false;
   }
@@ -3280,8 +3275,8 @@
   call->init_req(TypeFunc::Control,   head->init_control());
   call->init_req(TypeFunc::I_O,       C->top());       // Does no I/O.
   call->init_req(TypeFunc::Memory,    mem_phi->in(LoopNode::EntryControl));
-  call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr));
-  call->init_req(TypeFunc::FramePtr,  C->start()->proj_out(TypeFunc::FramePtr));
+  call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out_or_null(TypeFunc::ReturnAdr));
+  call->init_req(TypeFunc::FramePtr,  C->start()->proj_out_or_null(TypeFunc::FramePtr));
   _igvn.register_new_node_with_optimizer(call);
   result_ctrl = new ProjNode(call,TypeFunc::Control);
   _igvn.register_new_node_with_optimizer(result_ctrl);
--- a/src/hotspot/share/opto/loopnode.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/loopnode.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,11 +68,11 @@
 bool LoopNode::is_valid_counted_loop() const {
   if (is_CountedLoop()) {
     CountedLoopNode*    l  = as_CountedLoop();
-    CountedLoopEndNode* le = l->loopexit();
+    CountedLoopEndNode* le = l->loopexit_or_null();
     if (le != NULL &&
-        le->proj_out(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
+        le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
       Node* phi  = l->phi();
-      Node* exit = le->proj_out(0 /* false */);
+      Node* exit = le->proj_out_or_null(0 /* false */);
       if (exit != NULL && exit->Opcode() == Op_IfFalse &&
           phi != NULL && phi->is_Phi() &&
           phi->in(LoopNode::LoopBackControl) == l->incr() &&
@@ -793,7 +793,7 @@
 
 #ifdef ASSERT
   assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
-  assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" );
+  assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" );
 #endif
 #ifndef PRODUCT
   if (TraceLoopOpts) {
@@ -917,7 +917,7 @@
       }
     }
     CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
-    assert(cle == inner->loopexit(), "mismatch");
+    assert(cle == inner->loopexit_or_null(), "mismatch");
     bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
     if (has_skeleton) {
       assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
@@ -1216,7 +1216,7 @@
   if (le == NULL) {
     return NULL;
   }
-  Node* c = le->proj_out(false);
+  Node* c = le->proj_out_or_null(false);
   if (c == NULL) {
     return NULL;
   }
@@ -1331,8 +1331,7 @@
           n->set_req(i, old_new[n->in(i)->_idx]);
         }
       }
-      if (n->in(0) != NULL) {
-        assert(n->in(0) == cle_tail, "node not on backedge?");
+      if (n->in(0) != NULL && n->in(0) == cle_tail) {
         n->set_req(0, le_tail);
       }
       igvn->register_new_node_with_optimizer(n);
@@ -1394,9 +1393,14 @@
           Node* uu = fast_out(j);
           if (uu->is_Phi()) {
             Node* be = uu->in(LoopNode::LoopBackControl);
-            while (be->is_Store() && old_new[be->_idx] != NULL) {
-              ShouldNotReachHere();
-              be = be->in(MemNode::Memory);
+            if (be->is_Store() && old_new[be->_idx] != NULL) {
+              assert(false, "store on the backedge + sunk stores: unsupported");
+              // drop outer loop
+              IfNode* outer_le = outer_loop_end();
+              Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
+              igvn->replace_node(outer_le, iff);
+              inner_cl->clear_strip_mined();
+              return;
             }
             if (be == last || be == first->in(MemNode::Memory)) {
               assert(phi == NULL, "only one phi");
@@ -1449,10 +1453,7 @@
           // Or fix the outer loop fix to include
           // that chain of stores.
           Node* be = phi->in(LoopNode::LoopBackControl);
-          while (be->is_Store() && old_new[be->_idx] != NULL) {
-            ShouldNotReachHere();
-            be = be->in(MemNode::Memory);
-          }
+          assert(!(be->is_Store() && old_new[be->_idx] != NULL), "store on the backedge + sunk stores: unsupported");
           if (be == first->in(MemNode::Memory)) {
             if (be == phi->in(LoopNode::LoopBackControl)) {
               igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
@@ -1489,8 +1490,8 @@
     } else {
       new_limit = igvn->transform(new SubINode(iv_phi, min));
     }
-    igvn->replace_input_of(inner_cle->cmp_node(), 2, new_limit);
     Node* cmp = inner_cle->cmp_node()->clone();
+    igvn->replace_input_of(cmp, 2, new_limit);
     Node* bol = inner_cle->in(CountedLoopEndNode::TestValue)->clone();
     cmp->set_req(2, limit);
     bol->set_req(1, igvn->transform(cmp));
--- a/src/hotspot/share/opto/loopnode.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/loopnode.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -215,6 +215,7 @@
 
   Node *init_control() const { return in(EntryControl); }
   Node *back_control() const { return in(LoopBackControl); }
+  CountedLoopEndNode *loopexit_or_null() const;
   CountedLoopEndNode *loopexit() const;
   Node *init_trip() const;
   Node *stride() const;
@@ -342,7 +343,7 @@
       return NULL;
     }
     Node *ln = iv_phi->in(0);
-    if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
+    if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit_or_null() == this) {
       return (CountedLoopNode*)ln;
     }
     return NULL;
@@ -354,7 +355,7 @@
 };
 
 
-inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
+inline CountedLoopEndNode *CountedLoopNode::loopexit_or_null() const {
   Node *bc = back_control();
   if( bc == NULL ) return NULL;
   Node *le = bc->in(0);
@@ -362,13 +363,18 @@
     return NULL;
   return (CountedLoopEndNode*)le;
 }
-inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
-inline Node *CountedLoopNode::stride() const { return loopexit() ? loopexit()->stride() : NULL; }
-inline int CountedLoopNode::stride_con() const { return loopexit() ? loopexit()->stride_con() : 0; }
-inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con(); }
-inline Node *CountedLoopNode::limit() const { return loopexit() ? loopexit()->limit() : NULL; }
-inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
-inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }
+inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
+  CountedLoopEndNode* cle = loopexit_or_null();
+  assert(cle != NULL, "loopexit is NULL");
+  return cle;
+}
+inline Node *CountedLoopNode::init_trip() const { return loopexit_or_null() ? loopexit()->init_trip() : NULL; }
+inline Node *CountedLoopNode::stride() const { return loopexit_or_null() ? loopexit()->stride() : NULL; }
+inline int CountedLoopNode::stride_con() const { return loopexit_or_null() ? loopexit()->stride_con() : 0; }
+inline bool CountedLoopNode::stride_is_con() const { return loopexit_or_null() && loopexit()->stride_is_con(); }
+inline Node *CountedLoopNode::limit() const { return loopexit_or_null() ? loopexit()->limit() : NULL; }
+inline Node *CountedLoopNode::incr() const { return loopexit_or_null() ? loopexit()->incr() : NULL; }
+inline Node *CountedLoopNode::phi() const { return loopexit_or_null() ? loopexit()->phi() : NULL; }
 
 //------------------------------LoopLimitNode-----------------------------
 // Counted Loop limit node which represents exact final iterator value:
--- a/src/hotspot/share/opto/loopopts.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/loopopts.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -239,7 +239,7 @@
   // Make control-dependent data Nodes on the live path (path that will remain
   // once the dominated IF is removed) become control-dependent on the
   // dominating projection.
-  Node* dp = iff->as_If()->proj_out(pop == Op_IfTrue);
+  Node* dp = iff->as_If()->proj_out_or_null(pop == Op_IfTrue);
 
   // Loop predicates may have depending checks which should not
   // be skipped. For example, range check predicate has two checks
@@ -1731,7 +1731,7 @@
     Node* sfpt = cl->outer_safepoint();
     CountedLoopEndNode* cle = cl->loopexit();
     CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
-    CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit();
+    CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
     Node* cle_out = cle->proj_out(false);
 
     Node* new_sfpt = NULL;
@@ -1956,7 +1956,7 @@
         if (head->is_strip_mined() && mode != IgnoreStripMined) {
           CountedLoopNode* cl = head->as_CountedLoop();
           CountedLoopEndNode* cle = cl->loopexit();
-          Node* cle_out = cle->proj_out(false);
+          Node* cle_out = cle->proj_out_or_null(false);
           if (use == cle_out) {
             IfNode* le = cl->outer_loop_end();
             use = le->proj_out(false);
--- a/src/hotspot/share/opto/macro.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/macro.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -496,7 +496,7 @@
   if (level <= 0) {
     return NULL; // Give up: phi tree too deep
   }
-  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
+  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 
   uint length = mem->req();
@@ -576,7 +576,7 @@
 
   int alias_idx = C->get_alias_index(adr_t);
   int offset = adr_t->offset();
-  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
+  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
   Node *alloc_ctrl = alloc->in(TypeFunc::Control);
   Node *alloc_mem = alloc->in(TypeFunc::Memory);
   Arena *a = Thread::current()->resource_area();
@@ -974,8 +974,8 @@
 }
 
 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
-  Node* ctl_proj = n->proj_out(TypeFunc::Control);
-  Node* mem_proj = n->proj_out(TypeFunc::Memory);
+  Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
+  Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
   if (ctl_proj != NULL) {
     igvn.replace_node(ctl_proj, n->in(0));
   }
@@ -1086,12 +1086,12 @@
         // Eliminate Initialize node.
         InitializeNode *init = use->as_Initialize();
         assert(init->outcnt() <= 2, "only a control and memory projection expected");
-        Node *ctrl_proj = init->proj_out(TypeFunc::Control);
+        Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
         if (ctrl_proj != NULL) {
            assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
           _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
         }
-        Node *mem_proj = init->proj_out(TypeFunc::Memory);
+        Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
         if (mem_proj != NULL) {
           Node *mem = init->in(TypeFunc::Memory);
 #ifdef ASSERT
@@ -1198,7 +1198,7 @@
 
 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
   // EA should remove all uses of non-escaping boxing node.
-  if (!C->eliminate_boxing() || boxing->proj_out(TypeFunc::Parms) != NULL) {
+  if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) {
     return false;
   }
 
@@ -1580,8 +1580,8 @@
         // before the InitializeNode happen before the storestore
         // barrier.
 
-        Node* init_ctrl = init->proj_out(TypeFunc::Control);
-        Node* init_mem = init->proj_out(TypeFunc::Memory);
+        Node* init_ctrl = init->proj_out_or_null(TypeFunc::Control);
+        Node* init_mem = init->proj_out_or_null(TypeFunc::Memory);
 
         MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
         transform_later(mb);
--- a/src/hotspot/share/opto/memnode.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/memnode.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -136,7 +136,7 @@
   if (!(is_instance || is_boxed_value_load))
     return mchain;  // don't try to optimize non-instance types
   uint instance_id = t_oop->instance_id();
-  Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
+  Node *start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
   Node *prev = NULL;
   Node *result = mchain;
   while (prev != result) {
--- a/src/hotspot/share/opto/multnode.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/multnode.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,8 +43,8 @@
 Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->clone(); }
 
 //------------------------------proj_out---------------------------------------
-// Get a named projection
-ProjNode* MultiNode::proj_out(uint which_proj) const {
+// Get a named projection or null if not found
+ProjNode* MultiNode::proj_out_or_null(uint which_proj) const {
   assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || which_proj == (uint)true || which_proj == (uint)false, "must be 1 or 0");
   assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || outcnt() == 2, "bad if #1");
   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
@@ -63,6 +63,13 @@
   return NULL;
 }
 
+// Get a named projection
+ProjNode* MultiNode::proj_out(uint which_proj) const {
+  ProjNode* p = proj_out_or_null(which_proj);
+  assert(p != NULL, "named projection %u not found", which_proj);
+  return p;
+}
+
 //=============================================================================
 //------------------------------ProjNode---------------------------------------
 uint ProjNode::hash() const {
@@ -214,8 +221,6 @@
   }
 
   ProjNode* other_proj = iff->proj_out(1-_con);
-  if (other_proj == NULL) // Should never happen, but make Parfait happy.
-      return NULL;
   CallStaticJavaNode* call = other_proj->is_uncommon_trap_proj(reason);
   if (call != NULL) {
     assert(reason == Deoptimization::Reason_none ||
--- a/src/hotspot/share/opto/multnode.hpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/multnode.hpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@
   virtual Node *match( const ProjNode *proj, const Matcher *m );
   virtual uint ideal_reg() const { return NotAMachineReg; }
   ProjNode* proj_out(uint which_proj) const; // Get a named projection
+  ProjNode* proj_out_or_null(uint which_proj) const;
 
 };
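The multnode change splits projection lookup into a null-returning search and a checked wrapper that asserts, so callers that require a projection fail loudly at the lookup instead of dereferencing a stray NULL later. A small self-contained sketch of that two-level pattern, with illustrative Multi/Proj types that are not the HotSpot node classes:

    // Illustrative sketch only, not HotSpot code.
    #include <cassert>
    #include <vector>

    struct Proj { unsigned which; };

    struct Multi {
      std::vector<Proj*> outs;
      // Null-returning search for callers that can cope with a missing projection.
      Proj* proj_out_or_null(unsigned which) const {
        for (Proj* p : outs)
          if (p->which == which) return p;
        return nullptr;
      }
      // Checked variant for callers that require the projection to exist.
      Proj* proj_out(unsigned which) const {
        Proj* p = proj_out_or_null(which);
        assert(p != nullptr && "named projection not found");
        return p;
      }
    };

    int main() {
      Proj t{1}, f{0};
      Multi m{{&t, &f}};
      return m.proj_out(1) == m.proj_out_or_null(1) ? 0 : 1;
    }
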
 
--- a/src/hotspot/share/opto/phaseX.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/phaseX.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1524,7 +1524,7 @@
     // receiver to know when to enable the regular fall-through path
     // in addition to the NullPtrException path.
     if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
-      Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
+      Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
       if (p != NULL) {
         add_users_to_worklist0(p);
       }
@@ -1617,12 +1617,12 @@
     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
       InitializeNode* init = use->as_Allocate()->initialization();
       if (init != NULL) {
-        Node* imem = init->proj_out(TypeFunc::Memory);
+        Node* imem = init->proj_out_or_null(TypeFunc::Memory);
         if (imem != NULL)  add_users_to_worklist0(imem);
       }
     }
     if (use_op == Op_Initialize) {
-      Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
+      Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
       if (imem != NULL)  add_users_to_worklist0(imem);
     }
     // Loading the java mirror from a klass oop requires two loads and the type
--- a/src/hotspot/share/opto/stringopts.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/stringopts.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -362,11 +362,11 @@
   // Eliminate Initialize node.
   assert(init->outcnt() <= 2, "only a control and memory projection expected");
   assert(init->req() <= InitializeNode::RawStores, "no pending inits");
-  Node *ctrl_proj = init->proj_out(TypeFunc::Control);
+  Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
   if (ctrl_proj != NULL) {
     C->gvn_replace_by(ctrl_proj, init->in(TypeFunc::Control));
   }
-  Node *mem_proj = init->proj_out(TypeFunc::Memory);
+  Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
   if (mem_proj != NULL) {
     Node *mem = init->in(TypeFunc::Memory);
     C->gvn_replace_by(mem_proj, mem);
@@ -891,7 +891,7 @@
       ctrl_path.push(cn);
       ctrl_path.push(cn->proj_out(0));
       ctrl_path.push(cn->proj_out(0)->unique_out());
-      Node* catchproj = cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0);
+      Node* catchproj = cn->proj_out(0)->unique_out()->as_Catch()->proj_out_or_null(0);
       if (catchproj != NULL) {
         ctrl_path.push(catchproj);
       }
@@ -1035,13 +1035,13 @@
   // by calls in the region.
   _stringopts->_visited.Clear();
   Node_List worklist;
-  Node* final_result = _end->proj_out(TypeFunc::Parms);
+  Node* final_result = _end->proj_out_or_null(TypeFunc::Parms);
   for (uint i = 0; i < _control.size(); i++) {
     CallNode* cnode = _control.at(i)->isa_Call();
     if (cnode != NULL) {
       _stringopts->_visited.test_set(cnode->_idx);
     }
-    Node* result = cnode != NULL ? cnode->proj_out(TypeFunc::Parms) : NULL;
+    Node* result = cnode != NULL ? cnode->proj_out_or_null(TypeFunc::Parms) : NULL;
     if (result != NULL && result != final_result) {
       worklist.push(result);
     }
--- a/src/hotspot/share/opto/superword.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/opto/superword.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,7 +190,7 @@
   int *ignored_loop_nodes = NEW_RESOURCE_ARRAY(int, ignored_size);
   Node_Stack nstack((int)ignored_size);
   CountedLoopNode *cl = lpt()->_head->as_CountedLoop();
-  Node *cl_exit = cl->loopexit();
+  Node *cl_exit = cl->loopexit_or_null();
   int rpo_idx = _post_block.length();
 
   assert(rpo_idx == 0, "post loop block is empty");
--- a/src/hotspot/share/prims/jni.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/prims/jni.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -2820,7 +2820,7 @@
   EntryProbe; \
   DT_VOID_RETURN_MARK(Get##Result##ArrayRegion); \
   typeArrayOop src = typeArrayOop(JNIHandles::resolve_non_null(array)); \
-  if (start < 0 || len < 0 || ((unsigned int)start + (unsigned int)len > (unsigned int)src->length())) { \
+  if (start < 0 || len < 0 || (start > src->length() - len)) { \
     THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); \
   } else { \
     if (len > 0) { \
@@ -2870,7 +2870,7 @@
   EntryProbe; \
   DT_VOID_RETURN_MARK(Set##Result##ArrayRegion); \
   typeArrayOop dst = typeArrayOop(JNIHandles::resolve_non_null(array)); \
-  if (start < 0 || len < 0 || ((unsigned int)start + (unsigned int)len > (unsigned int)dst->length())) { \
+  if (start < 0 || len < 0 || (start > dst->length() - len)) { \
     THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); \
   } else { \
     if (len > 0) { \
@@ -3106,7 +3106,7 @@
   DT_VOID_RETURN_MARK(GetStringRegion);
   oop s = JNIHandles::resolve_non_null(string);
   int s_len = java_lang_String::length(s);
-  if (start < 0 || len < 0 || start + len > s_len) {
+  if (start < 0 || len < 0 || start > s_len - len) {
     THROW(vmSymbols::java_lang_StringIndexOutOfBoundsException());
   } else {
     if (len > 0) {
@@ -3132,7 +3132,7 @@
   DT_VOID_RETURN_MARK(GetStringUTFRegion);
   oop s = JNIHandles::resolve_non_null(string);
   int s_len = java_lang_String::length(s);
-  if (start < 0 || len < 0 || start + len > s_len) {
+  if (start < 0 || len < 0 || start > s_len - len) {
     THROW(vmSymbols::java_lang_StringIndexOutOfBoundsException());
   } else {
     //%note jni_7
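The jni.cpp hunks above replace the test "start + len > length" with "start > length - len": once start and len are known to be non-negative, the subtraction cannot overflow, whereas the addition can wrap past INT_MAX and let an out-of-range region request slip through. A compilable sketch of the rewritten check; region_ok is an illustrative helper, not a JNI entry point:

    // Illustrative sketch of the overflow-safe bounds check.
    #include <climits>
    #include <cstdio>

    static bool region_ok(int start, int len, int limit) {
      // Mirrors the patched condition: all three operands are non-negative ints,
      // and limit - len cannot overflow, unlike start + len.
      return !(start < 0 || len < 0 || start > limit - len);
    }

    int main() {
      std::printf("%d\n", region_ok(2, 3, 10));            // 1: [2,5) fits in 10
      std::printf("%d\n", region_ok(5, 6, 10));            // 0: runs past the end
      std::printf("%d\n", region_ok(INT_MAX - 1, 2, 10));  // 0: rejected without wrapping
    }
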
--- a/src/hotspot/share/prims/jvm.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/prims/jvm.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -678,17 +678,9 @@
 // Misc. class handling ///////////////////////////////////////////////////////////
 
 
-JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
+JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env))
   JVMWrapper("JVM_GetCallerClass");
 
-  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation; or
-  // sun.reflect.Reflection.getCallerClass with a depth parameter is provided
-  // temporarily for existing code to use until a replacement API is defined.
-  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL || depth != JVM_CALLER_DEPTH) {
-    Klass* k = thread->security_get_caller_class(depth);
-    return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
-  }
-
   // Getting the class of the caller frame.
   //
   // The call stack at this point looks something like this:
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -127,7 +127,7 @@
 
 int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
   for (;;) {
-    if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (Atomic::replace_if_null(Self, &_owner)) {
        return OS_OK ;
     }
 
@@ -139,7 +139,7 @@
     Node._next  = _EntryList ;
     _EntryList  = &Node ;
     OrderAccess::fence() ;
-    if (_owner == NULL && Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) {
         _EntryList = Node._next ;
         RawMonitor_lock->unlock() ;
         return OS_OK ;
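The jvmtiRawMonitor hunks swap a raw Atomic::cmpxchg against NULL for Atomic::replace_if_null, which states the "claim the slot only if nobody owns it" intent directly. A sketch of that idiom in portable C++, with std::atomic standing in for HotSpot's Atomic class (a stand-in, not the real implementation):

    // Illustrative sketch of the replace_if_null idiom.
    #include <atomic>

    template <typename T>
    bool replace_if_null(T* new_value, std::atomic<T*>& dest) {
      T* expected = nullptr;
      // Succeeds only when dest still holds nullptr, i.e. the monitor is unowned.
      return dest.compare_exchange_strong(expected, new_value);
    }

    struct Thread {};

    int main() {
      std::atomic<Thread*> owner{nullptr};
      Thread self;
      bool acquired = replace_if_null(&self, owner);   // true: ownership taken
      bool again    = replace_if_null(&self, owner);   // false: already owned
      return (acquired && !again) ? 0 : 1;
    }
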
--- a/src/hotspot/share/prims/methodHandles.cpp	Thu Jan 11 22:05:09 2018 +0100
+++ b/src/hotspot/share/prims/methodHandles.cpp	Thu Jan 18 22:05:24 2018 +0100
@@ -122,6 +122,48 @@
   ALL_KINDS      = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE
 };
 
+int MethodHandles::ref_kind_to_flags(int ref_kind) {
+