changeset 57623:3696d32b89d9 nestmates

Merge
author mchung
date Tue, 29 Oct 2019 19:49:55 -0700
parents d022bcd97043 9261ad32cba9
children 7764761b3a98
files make/hotspot/symbols/symbols-unix src/hotspot/cpu/ppc/templateTable_ppc_64.cpp src/hotspot/share/aot/aotCodeHeap.cpp src/hotspot/share/ci/ciInstanceKlass.cpp src/hotspot/share/classfile/classFileParser.cpp src/hotspot/share/classfile/classLoader.cpp src/hotspot/share/classfile/defaultMethods.cpp src/hotspot/share/classfile/javaClasses.cpp src/hotspot/share/classfile/javaClasses.hpp src/hotspot/share/classfile/systemDictionary.cpp src/hotspot/share/classfile/systemDictionary.hpp src/hotspot/share/classfile/systemDictionaryShared.cpp src/hotspot/share/classfile/verifier.cpp src/hotspot/share/classfile/vmSymbols.cpp src/hotspot/share/classfile/vmSymbols.hpp src/hotspot/share/include/jvm.h src/hotspot/share/interpreter/linkResolver.cpp src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp src/hotspot/share/jvmci/vmStructs_jvmci.cpp src/hotspot/share/oops/instanceKlass.cpp src/hotspot/share/oops/instanceKlass.hpp src/hotspot/share/oops/klass.cpp src/hotspot/share/oops/klass.hpp src/hotspot/share/oops/klassVtable.cpp src/hotspot/share/oops/method.hpp src/hotspot/share/opto/c2compiler.cpp src/hotspot/share/opto/library_call.cpp src/hotspot/share/prims/jni.cpp src/hotspot/share/prims/jvm.cpp src/hotspot/share/prims/jvmtiExport.cpp src/hotspot/share/prims/jvmtiRedefineClasses.cpp src/hotspot/share/prims/methodHandles.cpp src/hotspot/share/runtime/compilationPolicy.cpp src/hotspot/share/runtime/compilationPolicy.hpp src/hotspot/share/runtime/tieredThresholdPolicy.cpp src/hotspot/share/runtime/tieredThresholdPolicy.hpp src/hotspot/share/runtime/vmStructs.cpp src/java.base/share/classes/java/lang/ClassLoader.java src/java.base/share/classes/java/lang/System.java src/java.base/share/native/libjava/Class.c src/java.base/share/native/libjava/ClassLoader.c src/java.base/share/native/libjava/verify_stub.c src/java.base/share/native/libverify/check_format.c src/java.base/unix/native/libjava/jdk_util_md.c src/java.desktop/unix/native/common/awt/awt_Font.h src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java src/jdk.compiler/share/classes/com/sun/tools/javac/util/Names.java src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/JMXServiceProvider.java src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/package-info.java test/hotspot/gtest/oops/test_markOop.cpp test/hotspot/jtreg/ProblemList.txt test/hotspot/jtreg/compiler/graalunit/EA9Test.java test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/JLModule.java test/hotspot/jtreg/serviceability/jvmti/RedefineClasses/RedefineDeleteJmethod.java test/hotspot/jtreg/serviceability/jvmti/RedefineClasses/libRedefineDeleteJmethod.c test/hotspot/jtreg/vmTestbase/jit/graph/CGTThread.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt0/cgt0.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt1/cgt1.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt10/cgt10.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt11/cgt11.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt2/cgt2.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt3/cgt3.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt4/cgt4.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt5/cgt5.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt6/cgt6.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt7/cgt7.java 
test/hotspot/jtreg/vmTestbase/jit/graph/cgt8/cgt8.java test/hotspot/jtreg/vmTestbase/jit/graph/cgt9/cgt9.java test/jdk/ProblemList.txt test/jdk/com/sun/jdi/RedefineNestmateAttr/TestNestmateAttr.java test/jdk/java/math/BigInteger/DivisionOverflow.java test/jdk/java/math/BigInteger/StringConstructorOverflow.java test/jdk/java/math/BigInteger/SymmetricRangeTests.java
diffstat 1118 files changed, 30565 insertions(+), 17091 deletions(-)
--- a/.hgtags	Tue Oct 08 17:58:39 2019 -0700
+++ b/.hgtags	Tue Oct 29 19:49:55 2019 -0700
@@ -590,3 +590,6 @@
 778fc2dcbdaa8981e07e929a2cacef979c72261e jdk-14+15
 d29f0181ba424a95d881aba5eabf2e393abcc70f jdk-14+16
 5c83830390baafb76a1fbe33443c57620bd45fb9 jdk-14+17
+e84d8379815ba0d3e50fb096d28c25894cb50b8c jdk-14+18
+9b67dd88a9313e982ec5f710a7747161bc8f0c23 jdk-14+19
+54ffb15c48399dd59922ee22bb592d815307e77c jdk-14+20
--- a/doc/building.html	Tue Oct 08 17:58:39 2019 -0700
+++ b/doc/building.html	Tue Oct 29 19:49:55 2019 -0700
@@ -281,7 +281,7 @@
 <tbody>
 <tr class="odd">
 <td style="text-align: left;">Linux</td>
-<td style="text-align: left;">gcc 8.2.0</td>
+<td style="text-align: left;">gcc 8.3.0</td>
 </tr>
 <tr class="even">
 <td style="text-align: left;">macOS</td>
@@ -293,14 +293,14 @@
 </tr>
 <tr class="even">
 <td style="text-align: left;">Windows</td>
-<td style="text-align: left;">Microsoft Visual Studio 2017 update 15.9.6</td>
+<td style="text-align: left;">Microsoft Visual Studio 2017 update 15.9.16</td>
 </tr>
 </tbody>
 </table>
 <p>All compilers are expected to be able to compile to the C99 language standard, as some C99 features are used in the source code. Microsoft Visual Studio doesn't fully support C99 so in practice shared code is limited to using C99 features that it does support.</p>
 <h3 id="gcc">gcc</h3>
 <p>The minimum accepted version of gcc is 4.8. Older versions will generate a warning by <code>configure</code> and are unlikely to work.</p>
-<p>The JDK is currently known to be able to compile with at least version 7.4 of gcc.</p>
+<p>The JDK is currently known to be able to compile with at least version 8.3 of gcc.</p>
 <p>In general, any version between these two should be usable.</p>
 <h3 id="clang">clang</h3>
 <p>The minimum accepted version of clang is 3.2. Older versions will not be accepted by <code>configure</code>.</p>
--- a/doc/building.md	Tue Oct 08 17:58:39 2019 -0700
+++ b/doc/building.md	Tue Oct 29 19:49:55 2019 -0700
@@ -323,10 +323,10 @@
 
  Operating system   Toolchain version
  ------------------ -------------------------------------------------------
- Linux              gcc 8.2.0
+ Linux              gcc 8.3.0
  macOS              Apple Xcode 10.1 (using clang 10.0.0)
  Solaris            Oracle Solaris Studio 12.6 (with compiler version 5.15)
- Windows            Microsoft Visual Studio 2017 update 15.9.6
+ Windows            Microsoft Visual Studio 2017 update 15.9.16
 
 All compilers are expected to be able to compile to the C99 language standard,
 as some C99 features are used in the source code. Microsoft Visual Studio
@@ -338,7 +338,7 @@
 The minimum accepted version of gcc is 4.8. Older versions will generate a warning
 by `configure` and are unlikely to work.
 
-The JDK is currently known to be able to compile with at least version 7.4 of
+The JDK is currently known to be able to compile with at least version 8.3 of
 gcc.
 
 In general, any version between these two should be usable.
--- a/make/Bundles.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/Bundles.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -50,6 +50,7 @@
 #     files or directories may contain spaces.
 # BASE_DIRS : Base directories for the root dir in the bundle.
 # SUBDIR : Optional name of root dir in bundle.
+# OUTPUTDIR : Optionally override output dir
 SetupBundleFile = $(NamedParamsMacroTemplate)
 define SetupBundleFileBody
 
@@ -70,8 +71,11 @@
 
   $$(call SetIfEmpty, $1_UNZIP_DEBUGINFO, false)
 
-  $(BUNDLES_OUTPUTDIR)/$$($1_BUNDLE_NAME): $$($1_FILES)
+  $$(call SetIfEmpty, $1_OUTPUTDIR, $$(BUNDLES_OUTPUTDIR))
+
+  $$($1_OUTPUTDIR)/$$($1_BUNDLE_NAME): $$($1_FILES)
 	$$(call MakeTargetDir)
+	$$(call LogWarn, Creating $$($1_BUNDLE_NAME))
         # If any of the files contain a space in the file name, FindFiles
         # will have replaced it with ?. Tar does not accept that so need to
         # switch it back.
@@ -137,7 +141,7 @@
           endif
         endif
 
-  $1 += $(BUNDLES_OUTPUTDIR)/$$($1_BUNDLE_NAME)
+  $1 += $$($1_OUTPUTDIR)/$$($1_BUNDLE_NAME)
 
 endef
 
@@ -165,7 +169,7 @@
 
 ################################################################################
 
-ifneq ($(filter product-bundles legacy-bundles, $(MAKECMDGOALS)), )
+ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
 
   SYMBOLS_EXCLUDE_PATTERN := %.debuginfo %.diz %.pdb %.map
 
--- a/make/CompileInterimLangtools.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/CompileInterimLangtools.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -49,6 +49,13 @@
 TARGETS += $(patsubst %, $(BUILDTOOLS_OUTPUTDIR)/gensrc/%/module-info.java, \
     $(INTERIM_LANGTOOLS_MODULES))
 
+$(eval $(call SetupCopyFiles, COPY_PREVIEW_FEATURES, \
+    FILES := $(TOPDIR)/src/java.base/share/classes/jdk/internal/PreviewFeature.java, \
+    DEST := $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/, \
+))
+
+TARGETS += $(COPY_PREVIEW_FEATURES)
+
 ################################################################################
 # Setup the rules to build interim langtools, which is compiled by the boot
 # javac and can be run on the boot jdk. This will be used to compile the rest of
@@ -72,13 +79,15 @@
       BIN := $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules/$1.interim, \
       ADD_JAVAC_FLAGS := --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules \
           $$(INTERIM_LANGTOOLS_ADD_EXPORTS) \
+          --patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim \
+          --add-exports java.base/jdk.internal=jdk.compiler.interim \
           -Xlint:-module, \
   ))
 
   $1_DEPS_INTERIM := $$(addsuffix .interim, $$(filter \
       $$(INTERIM_LANGTOOLS_BASE_MODULES), $$(call FindTransitiveDepsForModule, $1)))
 
-  $$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d))
+  $$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d)) $(COPY_PREVIEW_FEATURES)
 
   TARGETS += $$(BUILD_$1.interim)
 endef
--- a/make/CreateJmods.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/CreateJmods.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -86,16 +86,18 @@
 # from there. These files were explicitly filtered or modified in <module>-copy
 # targets. For the rest, just pick up everything from the source legal dirs.
 LEGAL_NOTICES := \
-    $(SUPPORT_OUTPUTDIR)/modules_legal/common \
+    $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/common) \
     $(if $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
       $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
       $(call FindModuleLegalSrcDirs, $(MODULE)) \
     )
 
-LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
-DEPS += $(call FindFiles, $(LEGAL_NOTICES))
+ifneq ($(strip $(LEGAL_NOTICES)), )
+  LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
+  DEPS += $(call FindFiles, $(LEGAL_NOTICES))
 
-JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
+  JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
+endif
 
 ifeq ($(filter-out jdk.incubator.%, $(MODULE)), )
   JMOD_FLAGS += --do-not-resolve-by-default
--- a/make/Docs.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/Docs.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -95,6 +95,7 @@
     -tag see \
     -taglet build.tools.taglet.ExtLink \
     -taglet build.tools.taglet.Incubating \
+    -taglet build.tools.taglet.Preview \
     -tagletpath $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
     $(CUSTOM_JAVADOC_TAGS) \
     #
@@ -191,26 +192,26 @@
 ################################################################################
 # Functions
 
-# Helper function for creating a png file from a dot file generated by the
+# Helper function for creating a svg file from a dot file generated by the
 # GenGraphs tool.
 # param 1: SetupJavadocGeneration namespace ($1)
 # param 2: module name
 #
-define setup_gengraph_dot_to_png
+define setup_gengraph_dot_to_svg
   $1_$2_DOT_SRC :=  $$($1_GENGRAPHS_DIR)/$2.dot
-  $1_$2_PNG_TARGET := $$($1_TARGET_DIR)/$2/module-graph.png
+  $1_$2_SVG_TARGET := $$($1_TARGET_DIR)/$2/module-graph.svg
 
-    # For each module needing a graph, create a png file from the dot file
+    # For each module needing a graph, create a svg file from the dot file
     # generated by the GenGraphs tool and store it in the target dir.
-    $$(eval $$(call SetupExecute, gengraphs_png_$1_$2, \
+    $$(eval $$(call SetupExecute, gengraphs_svg_$1_$2, \
         INFO := Running dot for module graphs for $2, \
         DEPS := $$(gengraphs_$1_TARGET), \
-        OUTPUT_FILE := $$($1_$2_PNG_TARGET), \
+        OUTPUT_FILE := $$($1_$2_SVG_TARGET), \
         SUPPORT_DIR := $$($1_GENGRAPHS_DIR), \
-        COMMAND := $$(DOT) -Tpng -o $$($1_$2_PNG_TARGET) $$($1_$2_DOT_SRC), \
+        COMMAND := $$(DOT) -Tsvg -o $$($1_$2_SVG_TARGET) $$($1_$2_DOT_SRC), \
     ))
 
-  $1_MODULEGRAPH_TARGETS += $$($1_$2_PNG_TARGET)
+  $1_MODULEGRAPH_TARGETS += $$($1_$2_SVG_TARGET)
 endef
 
 # Helper function to create the overview.html file to use with the -overview
@@ -281,7 +282,7 @@
 
   ifeq ($$(ENABLE_FULL_DOCS), true)
     # Tell the ModuleGraph taglet to generate html links to soon-to-be-created
-    # png files with module graphs.
+    # svg files with module graphs.
     $1_JAVA_ARGS += -DenableModuleGraph=true
   endif
 
@@ -361,8 +362,8 @@
   $1_JAVADOC_TARGETS := $$(javadoc_$1_TARGET)
 
   ifeq ($$(ENABLE_FULL_DOCS), true)
-    # We have asked ModuleGraph to generate links to png files. Now we must
-    # produce the png files.
+    # We have asked ModuleGraph to generate links to svg files. Now we must
+    # produce the svg files.
 
     # Locate which modules has the @moduleGraph tag in their module-info.java
     $1_MODULES_NEEDING_GRAPH := $$(strip $$(foreach m, $$($1_ALL_MODULES), \
@@ -387,11 +388,11 @@
             --dot-attributes $$(GENGRAPHS_PROPS), \
     ))
 
-    # For each module needing a graph, create a png file from the dot file
+    # For each module needing a graph, create a svg file from the dot file
     # generated by the GenGraphs tool and store it in the target dir.
     # They will depend on gengraphs_$1_TARGET, and will be added to $1.
     $$(foreach m, $$($1_MODULES_NEEDING_GRAPH), \
-      $$(eval $$(call setup_gengraph_dot_to_png,$1,$$m)) \
+      $$(eval $$(call setup_gengraph_dot_to_svg,$1,$$m)) \
     )
   endif
 endef
--- a/make/RunTests.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/RunTests.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -185,13 +185,13 @@
 	    $$(FIXPATH) $$(JDK_UNDER_TEST)/bin/jaotc \
 	        $$($1_JAOTC_OPTS) --output $$@ --module $$($1_MODULE) \
 	)
-	$$(call ExecuteWithLog, $$@.check, \
+	$$(call ExecuteWithLog, $$@.check, ( \
 	    $$(FIXPATH) $$(JDK_UNDER_TEST)/bin/java \
 	        $$($1_VM_OPTIONS) -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions \
 	        -XX:+PrintAOT -XX:+UseAOTStrictLoading \
 	        -XX:AOTLibrary=$$@ -version \
 	         > $$@.verify-aot \
-	)
+	))
 
   $1_AOT_OPTIONS += -XX:+UnlockExperimentalVMOptions
   $1_AOT_OPTIONS += -XX:AOTLibrary=$$($1_AOT_LIB)
@@ -593,7 +593,7 @@
 	$$(call LogWarn)
 	$$(call LogWarn, Running test '$$($1_TEST)')
 	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
-	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/gtest, \
+	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/gtest, ( \
 	    $$(FIXPATH) $$(TEST_IMAGE_DIR)/hotspot/gtest/$$($1_VARIANT)/gtestLauncher \
 	        -jdk $(JDK_UNDER_TEST) $$($1_GTEST_FILTER) \
 	        --gtest_output=xml:$$($1_TEST_RESULTS_DIR)/gtest.xml \
@@ -602,7 +602,7 @@
 	        > >($(TEE) $$($1_TEST_RESULTS_DIR)/gtest.txt) \
 	    && $$(ECHO) $$$$? > $$($1_EXITCODE) \
 	    || $$(ECHO) $$$$? > $$($1_EXITCODE) \
-	)
+	))
 
   $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/gtest.txt
 
@@ -705,7 +705,7 @@
 	$$(call LogWarn)
 	$$(call LogWarn, Running test '$$($1_TEST)')
 	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
-	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/micro, \
+	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/micro, ( \
 	    $$(FIXPATH) $$($1_MICRO_TEST_JDK)/bin/java $$($1_MICRO_JAVA_OPTIONS) \
 	        -jar $$($1_MICRO_BENCHMARKS_JAR) \
 	        $$($1_MICRO_ITER) $$($1_MICRO_FORK) $$($1_MICRO_TIME) \
@@ -715,7 +715,7 @@
 	        > >($(TEE) $$($1_TEST_RESULTS_DIR)/micro.txt) \
 	    && $$(ECHO) $$$$? > $$($1_EXITCODE) \
 	    || $$(ECHO) $$$$? > $$($1_EXITCODE) \
-	)
+	))
 
   $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/micro.txt
 
@@ -928,7 +928,7 @@
 	$$(call LogWarn)
 	$$(call LogWarn, Running test '$$($1_TEST)')
 	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
-	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, \
+	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, ( \
 	    $$(COV_ENVIRONMENT) \
 	    $$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
 	        -Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
@@ -943,7 +943,7 @@
 	        $$($1_TEST_NAME) \
 	    && $$(ECHO) $$$$? > $$($1_EXITCODE) \
 	    || $$(ECHO) $$$$? > $$($1_EXITCODE) \
-	)
+	))
 
   $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/text/stats.txt
 
@@ -1019,12 +1019,12 @@
 	$$(call LogWarn)
 	$$(call LogWarn, Running test '$$($1_TEST)')
 	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
-	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/test-execution, \
+	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/test-execution, ( \
 	    $$($1_TEST_COMMAND_LINE) \
 	        > >($(TEE) $$($1_TEST_RESULTS_DIR)/test-output.txt) \
 	    && $$(ECHO) $$$$? > $$($1_EXITCODE) \
 	    || $$(ECHO) $$$$? > $$($1_EXITCODE) \
-	)
+	))
 
   $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/gtest.txt
 
--- a/make/RunTestsPrebuilt.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/RunTestsPrebuilt.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -230,7 +230,7 @@
   NUM_CORES := $(shell /usr/sbin/sysctl -n hw.ncpu)
   MEMORY_SIZE := $(shell $(EXPR) `/usr/sbin/sysctl -n hw.memsize` / 1024 / 1024)
 else ifeq ($(OPENJDK_TARGET_OS), solaris)
-  NUM_CORES := $(shell LC_MESSAGES=C /usr/sbin/psrinfo -v | $(GREP) -c on-line)
+  NUM_CORES := $(shell /usr/sbin/psrinfo -v | $(GREP) -c on-line)
   MEMORY_SIZE := $(shell \
       /usr/sbin/prtconf 2> /dev/null | $(GREP) "^Memory [Ss]ize" | $(AWK) '{print $$3}' \
   )
--- a/make/RunTestsPrebuiltSpec.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/RunTestsPrebuiltSpec.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -27,6 +27,9 @@
 # Fake minimalistic spec file for RunTestsPrebuilt.gmk.
 ################################################################################
 
+# Make sure all shell commands are executed with the C locale
+export LC_ALL := C
+
 define VerifyVariable
   ifeq ($$($1), )
     $$(info Error: Variable $1 is missing, needed by RunTestPrebuiltSpec.gmk)
--- a/make/autoconf/basics.m4	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/autoconf/basics.m4	Tue Oct 29 19:49:55 2019 -0700
@@ -427,7 +427,7 @@
   # Save the path variable before it gets changed
   ORIGINAL_PATH="$PATH"
   AC_SUBST(ORIGINAL_PATH)
-  DATE_WHEN_CONFIGURED=`LANG=C date`
+  DATE_WHEN_CONFIGURED=`date`
   AC_SUBST(DATE_WHEN_CONFIGURED)
   AC_MSG_NOTICE([Configuration created at $DATE_WHEN_CONFIGURED.])
 ])
@@ -489,31 +489,43 @@
       # for unknown variables in the end.
       CONFIGURE_OVERRIDDEN_VARIABLES="$try_remove_var"
 
+      tool_override=[$]$1
+      AC_MSG_NOTICE([User supplied override $1="$tool_override"])
+
       # Check if we try to supply an empty value
-      if test "x[$]$1" = x; then
-        AC_MSG_NOTICE([Setting user supplied tool $1= (no value)])
+      if test "x$tool_override" = x; then
         AC_MSG_CHECKING([for $1])
         AC_MSG_RESULT([disabled])
       else
+        # Split up override in command part and argument part
+        tool_and_args=($tool_override)
+        [ tool_command=${tool_and_args[0]} ]
+        [ unset 'tool_and_args[0]' ]
+        [ tool_args=${tool_and_args[@]} ]
+
         # Check if the provided tool contains a complete path.
-        tool_specified="[$]$1"
-        tool_basename="${tool_specified##*/}"
-        if test "x$tool_basename" = "x$tool_specified"; then
+        tool_basename="${tool_command##*/}"
+        if test "x$tool_basename" = "x$tool_command"; then
           # A command without a complete path is provided, search $PATH.
-          AC_MSG_NOTICE([Will search for user supplied tool $1=$tool_basename])
+          AC_MSG_NOTICE([Will search for user supplied tool "$tool_basename"])
           AC_PATH_PROG($1, $tool_basename)
           if test "x[$]$1" = x; then
-            AC_MSG_ERROR([User supplied tool $tool_basename could not be found])
+            AC_MSG_ERROR([User supplied tool $1="$tool_basename" could not be found])
           fi
         else
           # Otherwise we believe it is a complete path. Use it as it is.
-          AC_MSG_NOTICE([Will use user supplied tool $1=$tool_specified])
-          AC_MSG_CHECKING([for $1])
-          if test ! -x "$tool_specified"; then
+          AC_MSG_NOTICE([Will use user supplied tool "$tool_command"])
+          AC_MSG_CHECKING([for $tool_command])
+          if test ! -x "$tool_command"; then
             AC_MSG_RESULT([not found])
-            AC_MSG_ERROR([User supplied tool $1=$tool_specified does not exist or is not executable])
+            AC_MSG_ERROR([User supplied tool $1="$tool_command" does not exist or is not executable])
           fi
-          AC_MSG_RESULT([$tool_specified])
+           $1="$tool_command"
+          AC_MSG_RESULT([found])
+        fi
+        if test "x$tool_args" != x; then
+          # If we got arguments, re-append them to the command after the fixup.
+          $1="[$]$1 $tool_args"
         fi
       fi
     fi
--- a/make/autoconf/build-performance.m4	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/autoconf/build-performance.m4	Tue Oct 29 19:49:55 2019 -0700
@@ -35,7 +35,7 @@
     FOUND_CORES=yes
   elif test -x /usr/sbin/psrinfo; then
     # Looks like a Solaris system
-    NUM_CORES=`LC_MESSAGES=C /usr/sbin/psrinfo -v | grep -c on-line`
+    NUM_CORES=`/usr/sbin/psrinfo -v | grep -c on-line`
     FOUND_CORES=yes
   elif test -x /usr/sbin/sysctl; then
     # Looks like a MacOSX system
--- a/make/autoconf/configure	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/autoconf/configure	Tue Oct 29 19:49:55 2019 -0700
@@ -43,6 +43,9 @@
 export CONFIG_SHELL=$BASH
 export _as_can_reexec=no
 
+# Make sure all shell commands are executed with the C locale
+export LC_ALL=C
+
 if test "x$CUSTOM_CONFIG_DIR" != x; then
   custom_hook=$CUSTOM_CONFIG_DIR/custom-hook.m4
   if test ! -e $custom_hook; then
--- a/make/autoconf/flags-cflags.m4	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/autoconf/flags-cflags.m4	Tue Oct 29 19:49:55 2019 -0700
@@ -170,11 +170,11 @@
       DISABLE_WARNING_PREFIX="-erroff="
       CFLAGS_WARNINGS_ARE_ERRORS="-errwarn=%all"
 
-      WARNINGS_ENABLE_ALL_CFLAGS="-v"
-      WARNINGS_ENABLE_ALL_CXXFLAGS="+w"
+      WARNINGS_ENABLE_ALL_CFLAGS="-v -fd -xtransition"
+      WARNINGS_ENABLE_ALL_CXXFLAGS="+w +w2"
 
-      DISABLED_WARNINGS_C=""
-      DISABLED_WARNINGS_CXX=""
+      DISABLED_WARNINGS_C="E_OLD_STYLE_FUNC_DECL E_OLD_STYLE_FUNC_DEF E_SEMANTICS_OF_OP_CHG_IN_ANSI_C E_NO_REPLACEMENT_IN_STRING E_DECLARATION_IN_CODE"
+      DISABLED_WARNINGS_CXX="inllargeuse inllargeint notused wemptydecl notemsource"
       ;;
 
     gcc)
--- a/make/autoconf/jdk-options.m4	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/autoconf/jdk-options.m4	Tue Oct 29 19:49:55 2019 -0700
@@ -599,7 +599,14 @@
     AC_MSG_RESULT([yes, forced])
     ENABLE_GENERATE_CLASSLIST="true"
     if test "x$ENABLE_GENERATE_CLASSLIST_POSSIBLE" = "xfalse"; then
-      AC_MSG_WARN([Generation of classlist might not be possible with JVM Variants $JVM_VARIANTS and enable-cds=$ENABLE_CDS])
+      if test "x$ENABLE_CDS" = "xfalse"; then
+        # In GenerateLinkOptData.gmk, DumpLoadedClassList is used to generate the
+        # classlist file. It never will work in this case since the VM will report
+        # an error for DumpLoadedClassList when CDS is disabled.
+        AC_MSG_ERROR([Generation of classlist is not possible with enable-cds=false])
+      else
+        AC_MSG_WARN([Generation of classlist might not be possible with JVM Variants $JVM_VARIANTS and enable-cds=$ENABLE_CDS])
+      fi
     fi
   elif test "x$enable_generate_classlist" = "xno"; then
     AC_MSG_RESULT([no, forced])
--- a/make/autoconf/spec.gmk.in	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/autoconf/spec.gmk.in	Tue Oct 29 19:49:55 2019 -0700
@@ -51,6 +51,9 @@
 # What make to use for main processing, after bootstrapping top-level Makefile.
 MAKE := @MAKE@
 
+# Make sure all shell commands are executed with the C locale
+export LC_ALL := C
+
 # The default make arguments
 MAKE_ARGS = $(MAKE_LOG_FLAGS) -r -R -I $(TOPDIR)/make/common SPEC=$(SPEC) \
     MAKE_LOG_FLAGS="$(MAKE_LOG_FLAGS)" $(MAKE_LOG_VARS)
--- a/make/autoconf/toolchain_windows.m4	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/autoconf/toolchain_windows.m4	Tue Oct 29 19:49:55 2019 -0700
@@ -209,6 +209,8 @@
   eval SDK_INSTALL_DIR="\${VS_SDK_INSTALLDIR_${VS_VERSION}}"
   eval VS_ENV_ARGS="\${VS_ENV_ARGS_${VS_VERSION}}"
   eval VS_TOOLSET_SUPPORTED="\${VS_TOOLSET_SUPPORTED_${VS_VERSION}}"
+    
+  VS_ENV_CMD="" 
 
   # When using --with-tools-dir, assume it points to the correct and default
   # version of Visual Studio or that --with-toolchain-version was also set.
@@ -227,8 +229,6 @@
     fi
   fi
 
-  VS_ENV_CMD=""
-
   if test "x$VS_COMNTOOLS" != x; then
     TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([${VS_VERSION}],
         [$VS_COMNTOOLS/../..], [$VS_COMNTOOLS_VAR variable])
--- a/make/common/JavaCompilation.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/common/JavaCompilation.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -122,7 +122,7 @@
     $$($1_BIN)$$($1_MODULE_SUBDIR)$$($2_TARGET) : $2
 	$$(call LogInfo, Cleaning $$(patsubst $(OUTPUTDIR)/%,%, $$@))
 	$$(call MakeTargetDir)
-	export LC_ALL=C ; ( $(CAT) $$< && $(ECHO) "" ) \
+	( $(CAT) $$< && $(ECHO) "" ) \
 	    | $(SED) -e 's/\([^\\]\):/\1\\:/g' -e 's/\([^\\]\)=/\1\\=/g' \
 	        -e 's/\([^\\]\)!/\1\\!/g' -e 's/^[ 	]*#.*/#/g' \
 	    | $(SED) -f "$(TOPDIR)/make/common/support/unicode2x.sed" \
--- a/make/common/MakeBase.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/common/MakeBase.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -564,8 +564,8 @@
 # Param 1 - The path to base the name of the log file / command line file on
 # Param 2 - The command to run
 ExecuteWithLog = \
-  $(call LogCmdlines, Exececuting: [$(strip $2)]) \
-  $(call MakeDir, $(dir $(strip $1))) \
+  $(call LogCmdlines, Executing: [$(strip $2)]) \
+  $(call MakeDir, $(dir $(strip $1)) $(MAKESUPPORT_OUTPUTDIR)/failure-logs) \
   $(call WriteFile, $2, $(strip $1).cmdline) \
   ( $(RM) $(strip $1).log && $(strip $2) > >($(TEE) -a $(strip $1).log) 2> >($(TEE) -a $(strip $1).log >&2) || \
       ( exitcode=$(DOLLAR)? && \
--- a/make/common/TestFilesCompilation.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/common/TestFilesCompilation.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -98,7 +98,7 @@
         DISABLED_WARNINGS_gcc := format undef unused-function unused-value, \
         DISABLED_WARNINGS_clang := undef format-nonliteral \
             missing-field-initializers sometimes-uninitialized, \
-        DISABLED_WARNINGS_CXX_solstudio := wvarhidenmem, \
+        DISABLED_WARNINGS_CXX_solstudio := wvarhidenmem doubunder, \
         LIBS := $$($1_LIBS_$$(name)), \
         TOOLCHAIN := $(if $$(filter %.cpp, $$(file)), TOOLCHAIN_LINK_CXX, TOOLCHAIN_DEFAULT), \
         OPTIMIZATION := $$(if $$($1_OPTIMIZATION_$$(name)),$$($1_OPTIMIZATION_$$(name)),LOW), \
--- a/make/conf/jib-profiles.js	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/conf/jib-profiles.js	Tue Oct 29 19:49:55 2019 -0700
@@ -839,13 +839,17 @@
     if (testedProfile == null) {
         testedProfile = input.build_os + "-" + input.build_cpu;
     }
-    var testedProfileJDK = testedProfile + ".jdk";
-    var testedProfileTest = ""
-    if (testedProfile.endsWith("-jcov")) {
-        testedProfileTest = testedProfile.substring(0, testedProfile.length - "-jcov".length) + ".test";
+    var testedProfileJdk = testedProfile + ".jdk";
+    // Make it possible to use the test image from a different profile
+    var testImageProfile;
+    if (input.testImageProfile != null) {
+        testImageProfile = input.testImageProfile;
+    } else if (testedProfile.endsWith("-jcov")) {
+        testImageProfile = testedProfile.substring(0, testedProfile.length - "-jcov".length);
     } else {
-        testedProfileTest = testedProfile + ".test";
+        testImageProfile = testedProfile;
     }
+    var testedProfileTest = testImageProfile + ".test"
     var testOnlyMake = [ "run-test-prebuilt", "LOG_CMDLINES=true", "JTREG_VERBOSE=fail,error,time" ];
     if (testedProfile.endsWith("-gcov")) {
         testOnlyMake = concat(testOnlyMake, "GCOV_ENABLED=true")
@@ -855,14 +859,14 @@
             target_os: input.build_os,
             target_cpu: input.build_cpu,
             dependencies: [
-                "jtreg", "gnumake", "boot_jdk", "devkit", "jib", "jcov", testedProfileJDK,
+                "jtreg", "gnumake", "boot_jdk", "devkit", "jib", "jcov", testedProfileJdk,
                 testedProfileTest
             ],
             src: "src.conf",
             make_args: testOnlyMake,
             environment: {
                 "BOOT_JDK": common.boot_jdk_home,
-                "JDK_IMAGE_DIR": input.get(testedProfileJDK, "home_path"),
+                "JDK_IMAGE_DIR": input.get(testedProfileJdk, "home_path"),
                 "TEST_IMAGE_DIR": input.get(testedProfileTest, "home_path")
             },
             labels: "test"
@@ -871,10 +875,10 @@
 
     // If actually running the run-test-prebuilt profile, verify that the input
     // variable is valid and if so, add the appropriate target_* values from
-    // the tested profile.
+    // the tested profile. Use testImageProfile value as backup.
     if (input.profile == "run-test-prebuilt") {
-        if (profiles[testedProfile] == null) {
-            error("testedProfile is not defined: " + testedProfile);
+        if (profiles[testedProfile] == null && profiles[testImageProfile] == null) {
+            error("testedProfile is not defined: " + testedProfile + " " + testImageProfile);
         }
     }
     if (profiles[testedProfile] != null) {
@@ -882,6 +886,11 @@
             = profiles[testedProfile]["target_os"];
         testOnlyProfilesPrebuilt["run-test-prebuilt"]["target_cpu"]
             = profiles[testedProfile]["target_cpu"];
+    } else if (profiles[testImageProfile] != null) {
+        testOnlyProfilesPrebuilt["run-test-prebuilt"]["target_os"]
+            = profiles[testImageProfile]["target_os"];
+        testOnlyProfilesPrebuilt["run-test-prebuilt"]["target_cpu"]
+            = profiles[testImageProfile]["target_cpu"];
     }
     profiles = concatObjects(profiles, testOnlyProfilesPrebuilt);
 
@@ -944,11 +953,11 @@
 var getJibProfilesDependencies = function (input, common) {
 
     var devkit_platform_revisions = {
-        linux_x64: "gcc8.2.0-OL6.4+1.0",
+        linux_x64: "gcc8.3.0-OL6.4+1.0",
         macosx_x64: "Xcode10.1-MacOSX10.14+1.0",
         solaris_x64: "SS12u4-Solaris11u1+1.0",
         solaris_sparcv9: "SS12u6-Solaris11u3+1.0",
-        windows_x64: "VS2017-15.9.6+1.0",
+        windows_x64: "VS2017-15.9.16+1.0",
         linux_aarch64: "gcc8.2.0-Fedora27+1.0",
         linux_arm: "gcc8.2.0-Fedora27+1.0",
         linux_ppc64le: "gcc8.2.0-Fedora27+1.0",
@@ -1346,3 +1355,8 @@
              || (input.build_os == "linux"
                  && java.lang.System.getProperty("os.version").contains("Microsoft")));
 }
+
+var error = function (s) {
+    java.lang.System.err.println("[ERROR] " + s);
+    exit(1);
+};
--- a/make/data/charsetmapping/SingleByte-X.java.template	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/data/charsetmapping/SingleByte-X.java.template	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,7 @@
     }
 
     public CharsetDecoder newDecoder() {
-        return new SingleByte.Decoder(this, b2c, $ASCIICOMPATIBLE$);
+        return new SingleByte.Decoder(this, b2c, $ASCIICOMPATIBLE$, $LATIN1DECODABLE$);
     }
 
     public CharsetEncoder newEncoder() {
--- a/make/data/lsrdata/language-subtag-registry.txt	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/data/lsrdata/language-subtag-registry.txt	Tue Oct 29 19:49:55 2019 -0700
@@ -1,4 +1,4 @@
-File-Date: 2019-04-03
+File-Date: 2019-09-16
 %%
 Type: language
 Subtag: aa
@@ -2096,6 +2096,8 @@
 Subtag: ais
 Description: Nataoran Amis
 Added: 2009-07-29
+Deprecated: 2019-04-16
+Comments: see ami, szy
 %%
 Type: language
 Subtag: ait
@@ -2633,6 +2635,7 @@
 Type: language
 Subtag: ant
 Description: Antakarinya
+Description: Antikarinya
 Added: 2009-07-29
 %%
 Type: language
@@ -3094,6 +3097,8 @@
 Subtag: asd
 Description: Asas
 Added: 2009-07-29
+Deprecated: 2019-04-16
+Preferred-Value: snz
 %%
 Type: language
 Subtag: ase
@@ -4135,7 +4140,7 @@
 %%
 Type: language
 Subtag: bck
-Description: Bunaba
+Description: Bunuba
 Added: 2009-07-29
 %%
 Type: language
@@ -6930,7 +6935,7 @@
 %%
 Type: language
 Subtag: bym
-Description: Bidyara
+Description: Bidjara
 Added: 2009-07-29
 %%
 Type: language
@@ -7564,6 +7569,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: cey
+Description: Ekai Chin
+Added: 2019-04-16
+%%
+Type: language
 Subtag: cfa
 Description: Dijim-Bwilim
 Added: 2009-07-29
@@ -9439,6 +9449,7 @@
 Type: language
 Subtag: dif
 Description: Dieri
+Description: Diyari
 Added: 2009-07-29
 %%
 Type: language
@@ -9515,6 +9526,8 @@
 Subtag: dit
 Description: Dirari
 Added: 2009-07-29
+Deprecated: 2019-04-29
+Preferred-Value: dif
 %%
 Type: language
 Subtag: diu
@@ -9560,6 +9573,7 @@
 Type: language
 Subtag: djd
 Description: Djamindjung
+Description: Ngaliwurru
 Added: 2009-07-29
 %%
 Type: language
@@ -9603,6 +9617,7 @@
 %%
 Type: language
 Subtag: djn
+Description: Jawoyn
 Description: Djauan
 Added: 2009-07-29
 %%
@@ -10191,6 +10206,8 @@
 Subtag: dud
 Description: Hun-Saare
 Added: 2009-07-29
+Deprecated: 2019-04-16
+Comments: see uth, uss
 %%
 Type: language
 Subtag: due
@@ -10382,6 +10399,7 @@
 Type: language
 Subtag: dyn
 Description: Dyangadi
+Description: Dhanggatti
 Added: 2009-07-29
 %%
 Type: language
@@ -10396,6 +10414,7 @@
 %%
 Type: language
 Subtag: dyy
+Description: Djabugay
 Description: Dyaabugay
 Added: 2009-07-29
 %%
@@ -11672,7 +11691,7 @@
 %%
 Type: language
 Subtag: gbd
-Description: Karadjeri
+Description: Karajarri
 Added: 2009-07-29
 %%
 Type: language
@@ -12056,7 +12075,7 @@
 %%
 Type: language
 Subtag: gge
-Description: Guragone
+Description: Gurr-goni
 Added: 2009-07-29
 %%
 Type: language
@@ -12169,7 +12188,7 @@
 %%
 Type: language
 Subtag: gia
-Description: Kitja
+Description: Kija
 Added: 2009-07-29
 %%
 Type: language
@@ -12955,7 +12974,7 @@
 %%
 Type: language
 Subtag: gue
-Description: Gurinji
+Description: Gurindji
 Added: 2009-07-29
 %%
 Type: language
@@ -15292,6 +15311,7 @@
 Type: language
 Subtag: jay
 Description: Yan-nhangu
+Description: Nhangu
 Added: 2009-07-29
 %%
 Type: language
@@ -15488,6 +15508,7 @@
 %%
 Type: language
 Subtag: jig
+Description: Jingulu
 Description: Djingili
 Added: 2009-07-29
 %%
@@ -17222,6 +17243,7 @@
 Type: language
 Subtag: kkp
 Description: Gugubera
+Description: Koko-Bera
 Added: 2009-07-29
 %%
 Type: language
@@ -17266,6 +17288,7 @@
 %%
 Type: language
 Subtag: kky
+Description: Guugu Yimidhirr
 Description: Guguyimidjir
 Added: 2009-07-29
 %%
@@ -18320,6 +18343,7 @@
 Type: language
 Subtag: ktd
 Description: Kokata
+Description: Kukatha
 Added: 2009-07-29
 %%
 Type: language
@@ -19341,6 +19365,7 @@
 Subtag: lba
 Description: Lui
 Added: 2009-07-29
+Deprecated: 2019-04-16
 %%
 Type: language
 Subtag: lbb
@@ -19396,7 +19421,7 @@
 %%
 Type: language
 Subtag: lbn
-Description: Lamet
+Description: Rmeet
 Added: 2009-07-29
 %%
 Type: language
@@ -19446,6 +19471,7 @@
 %%
 Type: language
 Subtag: lby
+Description: Lamalama
 Description: Lamu-Lamu
 Added: 2009-07-29
 %%
@@ -20162,6 +20188,8 @@
 Subtag: llo
 Description: Khlor
 Added: 2009-07-29
+Deprecated: 2019-04-16
+Preferred-Value: ngt
 %%
 Type: language
 Subtag: llp
@@ -20654,6 +20682,11 @@
 Macrolanguage: luy
 %%
 Type: language
+Subtag: lsn
+Description: Tibetan Sign Language
+Added: 2019-04-16
+%%
+Type: language
 Subtag: lso
 Description: Laos Sign Language
 Added: 2009-07-29
@@ -20680,6 +20713,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: lsv
+Description: Sivia Sign Language
+Added: 2019-04-16
+%%
+Type: language
 Subtag: lsy
 Description: Mauritian Sign Language
 Added: 2010-03-11
@@ -20848,6 +20886,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: lvi
+Description: Lavi
+Added: 2019-04-16
+%%
+Type: language
 Subtag: lvk
 Description: Lavukaleve
 Added: 2009-07-29
@@ -21454,7 +21497,7 @@
 %%
 Type: language
 Subtag: mec
-Description: Mara
+Description: Marra
 Added: 2009-07-29
 %%
 Type: language
@@ -21523,7 +21566,7 @@
 %%
 Type: language
 Subtag: mep
-Description: Miriwung
+Description: Miriwoong
 Added: 2009-07-29
 %%
 Type: language
@@ -21660,7 +21703,7 @@
 %%
 Type: language
 Subtag: mfr
-Description: Marithiel
+Description: Marrithiyel
 Added: 2009-07-29
 %%
 Type: language
@@ -22853,12 +22896,13 @@
 %%
 Type: language
 Subtag: mpb
+Description: Malak Malak
 Description: Mullukmulluk
 Added: 2009-07-29
 %%
 Type: language
 Subtag: mpc
-Description: Mangarayi
+Description: Mangarrayi
 Added: 2009-07-29
 %%
 Type: language
@@ -22889,6 +22933,7 @@
 Type: language
 Subtag: mpj
 Description: Martu Wangka
+Description: Wangkajunga
 Added: 2009-07-29
 %%
 Type: language
@@ -24015,6 +24060,8 @@
 Subtag: myd
 Description: Maramba
 Added: 2009-07-29
+Deprecated: 2019-04-16
+Preferred-Value: aog
 %%
 Type: language
 Subtag: mye
@@ -24040,6 +24087,7 @@
 Subtag: myi
 Description: Mina (India)
 Added: 2009-07-29
+Deprecated: 2019-04-16
 %%
 Type: language
 Subtag: myj
@@ -24375,7 +24423,7 @@
 %%
 Type: language
 Subtag: nay
-Description: Narrinyeri
+Description: Ngarrindjeri
 Added: 2009-07-29
 %%
 Type: language
@@ -24432,7 +24480,7 @@
 %%
 Type: language
 Subtag: nbj
-Description: Ngarinman
+Description: Ngarinyman
 Added: 2009-07-29
 %%
 Type: language
@@ -24467,7 +24515,7 @@
 %%
 Type: language
 Subtag: nbr
-Description: Numana-Nunku-Gbantu-Numbu
+Description: Numana
 Added: 2009-07-29
 %%
 Type: language
@@ -24559,7 +24607,7 @@
 %%
 Type: language
 Subtag: nck
-Description: Nakara
+Description: Na-kara
 Added: 2009-07-29
 %%
 Type: language
@@ -24931,7 +24979,7 @@
 %%
 Type: language
 Subtag: ngh
-Description: Nǀu
+Description: Nǁng
 Added: 2009-07-29
 %%
 Type: language
@@ -25176,7 +25224,7 @@
 %%
 Type: language
 Subtag: nig
-Description: Ngalakan
+Description: Ngalakgan
 Added: 2009-07-29
 %%
 Type: language
@@ -25798,6 +25846,8 @@
 Subtag: nns
 Description: Ningye
 Added: 2009-07-29
+Deprecated: 2019-04-16
+Preferred-Value: nbr
 %%
 Type: language
 Subtag: nnt
@@ -26658,7 +26708,7 @@
 %%
 Type: language
 Subtag: nyh
-Description: Nyigina
+Description: Nyikina
 Added: 2009-07-29
 %%
 Type: language
@@ -26713,7 +26763,7 @@
 %%
 Type: language
 Subtag: nys
-Description: Nyunga
+Description: Nyungar
 Added: 2009-07-29
 %%
 Type: language
@@ -28707,6 +28757,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: pnd
+Description: Mpinda
+Added: 2019-04-16
+%%
+Type: language
 Subtag: pne
 Description: Western Penan
 Added: 2009-07-29
@@ -28794,6 +28849,7 @@
 %%
 Type: language
 Subtag: pnw
+Description: Banyjima
 Description: Panytyima
 Added: 2009-07-29
 %%
@@ -29251,7 +29307,8 @@
 %%
 Type: language
 Subtag: pti
-Description: Pintiini
+Description: Pindiini
+Description: Wangkatha
 Added: 2009-07-29
 %%
 Type: language
@@ -30133,6 +30190,7 @@
 %%
 Type: language
 Subtag: ril
+Description: Riang Lang
 Description: Riang (Myanmar)
 Added: 2009-07-29
 %%
@@ -30153,7 +30211,7 @@
 %%
 Type: language
 Subtag: rit
-Description: Ritarungo
+Description: Ritharrngu
 Added: 2009-07-29
 %%
 Type: language
@@ -30219,7 +30277,7 @@
 %%
 Type: language
 Subtag: rmb
-Description: Rembarunga
+Description: Rembarrnga
 Added: 2009-07-29
 %%
 Type: language
@@ -30641,6 +30699,7 @@
 Type: language
 Subtag: rxw
 Description: Karuwali
+Description: Garuwali
 Added: 2013-09-10
 %%
 Type: language
@@ -32206,7 +32265,7 @@
 %%
 Type: language
 Subtag: snz
-Description: Sinsauru
+Description: Kou
 Added: 2009-07-29
 %%
 Type: language
@@ -32883,6 +32942,7 @@
 Subtag: suj
 Description: Shubi
 Added: 2009-07-29
+Comments: see also xsj
 %%
 Type: language
 Subtag: suk
@@ -33312,6 +33372,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: szy
+Description: Sakizaya
+Added: 2019-04-16
+%%
+Type: language
 Subtag: taa
 Description: Lower Tanana
 Added: 2009-07-29
@@ -33465,6 +33530,7 @@
 %%
 Type: language
 Subtag: tbh
+Description: Dharawal
 Description: Thurawal
 Added: 2009-07-29
 %%
@@ -33644,6 +33710,7 @@
 Type: language
 Subtag: tcs
 Description: Torres Strait Creole
+Description: Yumplatok
 Added: 2009-07-29
 %%
 Type: language
@@ -34067,6 +34134,7 @@
 %%
 Type: language
 Subtag: thd
+Description: Kuuk Thaayorre
 Description: Thayore
 Added: 2009-07-29
 %%
@@ -34310,6 +34378,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: tjj
+Description: Tjungundji
+Added: 2019-04-16
+%%
+Type: language
 Subtag: tjl
 Description: Tai Laing
 Added: 2012-08-12
@@ -34330,6 +34403,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: tjp
+Description: Tjupany
+Added: 2019-04-16
+%%
+Type: language
 Subtag: tjs
 Description: Southern Tujia
 Added: 2009-07-29
@@ -35679,6 +35757,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: tvx
+Description: Taivoan
+Added: 2019-04-16
+%%
+Type: language
 Subtag: tvy
 Description: Timor Pidgin
 Added: 2009-07-29
@@ -36230,7 +36313,7 @@
 %%
 Type: language
 Subtag: ulk
-Description: Meriam
+Description: Meriam Mir
 Added: 2009-07-29
 %%
 Type: language
@@ -36280,6 +36363,7 @@
 %%
 Type: language
 Subtag: umg
+Description: Morrobalama
 Description: Umbuygamu
 Added: 2009-07-29
 %%
@@ -36550,6 +36634,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: uss
+Description: us-Saare
+Added: 2019-04-16
+%%
+Type: language
 Subtag: usu
 Description: Uya
 Added: 2009-07-29
@@ -36565,6 +36654,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: uth
+Description: ut-Hun
+Added: 2019-04-16
+%%
+Type: language
 Subtag: utp
 Description: Amba (Solomon Islands)
 Added: 2009-07-29
@@ -37178,7 +37272,7 @@
 %%
 Type: language
 Subtag: waq
-Description: Wageman
+Description: Wagiman
 Added: 2009-07-29
 %%
 Type: language
@@ -37301,7 +37395,7 @@
 %%
 Type: language
 Subtag: wbt
-Description: Wanman
+Description: Warnman
 Added: 2009-07-29
 %%
 Type: language
@@ -37448,6 +37542,7 @@
 %%
 Type: language
 Subtag: wgg
+Description: Wangkangurru
 Description: Wangganguru
 Added: 2009-07-29
 %%
@@ -37521,7 +37616,7 @@
 %%
 Type: language
 Subtag: wig
-Description: Wik-Ngathana
+Description: Wik Ngathan
 Added: 2009-07-29
 %%
 Type: language
@@ -37625,6 +37720,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: wkr
+Description: Keerray-Woorroong
+Added: 2019-04-16
+%%
+Type: language
 Subtag: wku
 Description: Kunduvadi
 Added: 2009-07-29
@@ -37857,10 +37957,12 @@
 Type: language
 Subtag: wny
 Description: Wanyi
+Description: Waanyi
 Added: 2012-08-12
 %%
 Type: language
 Subtag: woa
+Description: Kuwema
 Description: Tyaraity
 Added: 2009-07-29
 %%
@@ -37951,6 +38053,7 @@
 %%
 Type: language
 Subtag: wrb
+Description: Waluwarra
 Description: Warluwara
 Added: 2009-07-29
 %%
@@ -37962,11 +38065,12 @@
 Type: language
 Subtag: wrg
 Description: Warungu
+Description: Gudjal
 Added: 2009-07-29
 %%
 Type: language
 Subtag: wrh
-Description: Wiradhuri
+Description: Wiradjuri
 Added: 2009-07-29
 %%
 Type: language
@@ -38439,6 +38543,7 @@
 %%
 Type: language
 Subtag: xby
+Description: Batjala
 Description: Batyala
 Added: 2013-09-10
 %%
@@ -38998,7 +39103,7 @@
 %%
 Type: language
 Subtag: xmh
-Description: Kuku-Muminh
+Description: Kugu-Muminh
 Added: 2009-07-29
 %%
 Type: language
@@ -39423,8 +39528,7 @@
 Subtag: xsj
 Description: Subi
 Added: 2009-07-29
-Deprecated: 2015-02-12
-Preferred-Value: suj
+Comments: see also suj
 %%
 Type: language
 Subtag: xsl
@@ -40258,6 +40362,7 @@
 %%
 Type: language
 Subtag: yin
+Description: Riang Lai
 Description: Yinchia
 Added: 2009-07-29
 %%
@@ -41562,12 +41667,13 @@
 %%
 Type: language
 Subtag: zml
-Description: Madngele
+Description: Matngala
 Added: 2009-07-29
 %%
 Type: language
 Subtag: zmm
 Description: Marimanindji
+Description: Marramaninyshi
 Added: 2009-07-29
 %%
 Type: language
@@ -43019,6 +43125,13 @@
 Prefix: sgn
 %%
 Type: extlang
+Subtag: lsn
+Description: Tibetan Sign Language
+Added: 2019-04-16
+Preferred-Value: lsn
+Prefix: sgn
+%%
+Type: extlang
 Subtag: lso
 Description: Laos Sign Language
 Added: 2009-07-29
@@ -43041,6 +43154,13 @@
 Prefix: sgn
 %%
 Type: extlang
+Subtag: lsv
+Description: Sivia Sign Language
+Added: 2019-04-16
+Preferred-Value: lsv
+Prefix: sgn
+%%
+Type: extlang
 Subtag: lsy
 Description: Mauritian Sign Language
 Added: 2010-03-11
@@ -43966,6 +44086,11 @@
 Added: 2005-10-16
 %%
 Type: script
+Subtag: Chrs
+Description: Chorasmian
+Added: 2019-09-11
+%%
+Type: script
 Subtag: Cirt
 Description: Cirth
 Added: 2005-10-16
@@ -44002,6 +44127,11 @@
 Added: 2005-10-16
 %%
 Type: script
+Subtag: Diak
+Description: Dives Akuru
+Added: 2019-09-11
+%%
+Type: script
 Subtag: Dogr
 Description: Dogra
 Added: 2017-01-13
@@ -44839,6 +44969,11 @@
 Added: 2005-10-16
 %%
 Type: script
+Subtag: Yezi
+Description: Yezidi
+Added: 2019-09-11
+%%
+Type: script
 Subtag: Yiii
 Description: Yi
 Added: 2005-10-16
@@ -45683,7 +45818,7 @@
 %%
 Type: region
 Subtag: MK
-Description: The Former Yugoslav Republic of Macedonia
+Description: North Macedonia
 Added: 2005-10-16
 %%
 Type: region
--- a/make/devkit/Tools.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/devkit/Tools.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -79,20 +79,19 @@
 # Define external dependencies
 
 # Latest that could be made to work.
-GCC_VER := 8.2.0
-ifeq ($(GCC_VER), 8.2.0)
-  gcc_ver := gcc-8.2.0
-  binutils_ver := binutils-2.30
-  ccache_ver := ccache-3.5.1a
-  CCACHE_DIRNAME := ccache-3.5.1
+GCC_VER := 8.3.0
+ifeq ($(GCC_VER), 8.3.0)
+  gcc_ver := gcc-8.3.0
+  binutils_ver := binutils-2.32
+  ccache_ver := 3.7.3
   mpfr_ver := mpfr-3.1.5
   gmp_ver := gmp-6.1.2
   mpc_ver := mpc-1.0.3
-  gdb_ver := gdb-8.2.1
+  gdb_ver := gdb-8.3
 else ifeq ($(GCC_VER), 7.3.0)
   gcc_ver := gcc-7.3.0
   binutils_ver := binutils-2.30
-  ccache_ver := ccache-3.3.6
+  ccache_ver := 3.3.6
   mpfr_ver := mpfr-3.1.5
   gmp_ver := gmp-6.1.2
   mpc_ver := mpc-1.0.3
@@ -100,7 +99,7 @@
 else ifeq ($(GCC_VER), 4.9.2)
   gcc_ver := gcc-4.9.2
   binutils_ver := binutils-2.25
-  ccache_ver := ccache-3.2.1
+  ccache_ver := 3.2.1
   mpfr_ver := mpfr-3.0.1
   gmp_ver := gmp-4.3.2
   mpc_ver := mpc-1.0.1
@@ -111,7 +110,7 @@
 
 GCC := http://ftp.gnu.org/pub/gnu/gcc/$(gcc_ver)/$(gcc_ver).tar.xz
 BINUTILS := http://ftp.gnu.org/pub/gnu/binutils/$(binutils_ver).tar.xz
-CCACHE := https://samba.org/ftp/ccache/$(ccache_ver).tar.xz
+CCACHE := https://github.com/ccache/ccache/releases/download/v$(ccache_ver)/ccache-$(ccache_ver).tar.xz
 MPFR := https://www.mpfr.org/${mpfr_ver}/${mpfr_ver}.tar.bz2
 GMP := http://ftp.gnu.org/pub/gnu/gmp/${gmp_ver}.tar.bz2
 MPC := http://ftp.gnu.org/pub/gnu/mpc/${mpc_ver}.tar.gz
--- a/make/devkit/createWindowsDevkit2017.sh	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/devkit/createWindowsDevkit2017.sh	Tue Oct 29 19:49:55 2019 -0700
@@ -32,10 +32,7 @@
 VS_VERSION_NUM_NODOT="150"
 VS_DLL_VERSION="140"
 SDK_VERSION="10"
-SDK_FULL_VERSION="10.0.16299.0"
 MSVC_DIR="Microsoft.VC141.CRT"
-MSVC_FULL_VERSION="14.12.25827"
-REDIST_FULL_VERSION="14.12.25810"
 
 SCRIPT_DIR="$(cd "$(dirname $0)" > /dev/null && pwd)"
 BUILD_DIR="${SCRIPT_DIR}/../../build/devkit"
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -73,7 +73,7 @@
 	($(CD) $(GENSRC_DIR)/META-INF/providers && \
 	    p=""; \
 	    impl=""; \
-	    for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
+	    for i in $$($(NAWK) '$$0=FILENAME" "$$0' * | $(SORT) -k 2 | $(SED) 's/ .*//'); do \
 	      c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
 	      if test x$$p != x$$c; then \
                 if test x$$p != x; then \
--- a/make/hotspot/gensrc/GensrcAdlc.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -75,7 +75,6 @@
       OUTPUT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/adlc, \
       DEBUG_SYMBOLS := false, \
       DISABLED_WARNINGS_clang := tautological-compare, \
-      DISABLED_WARNINGS_solstudio := notemsource, \
       DEFINE_THIS_FILE := false, \
   ))
 
--- a/make/hotspot/lib/CompileJvm.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/hotspot/lib/CompileJvm.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -78,14 +78,14 @@
 ################################################################################
 # Disabled warnings
 
-DISABLED_WARNINGS_gcc := extra parentheses comment unknown-pragmas address \
+DISABLED_WARNINGS_gcc := parentheses comment unknown-pragmas address \
     delete-non-virtual-dtor char-subscripts array-bounds int-in-bool-context \
     ignored-qualifiers  missing-field-initializers implicit-fallthrough \
     empty-body strict-overflow sequence-point maybe-uninitialized \
-    misleading-indentation
+    misleading-indentation cast-function-type
 
 ifeq ($(call check-jvm-feature, zero), true)
-  DISABLED_WARNINGS_gcc += return-type switch
+  DISABLED_WARNINGS_gcc += return-type switch clobbered
 endif
 
 DISABLED_WARNINGS_clang := tautological-compare \
--- a/make/hotspot/symbols/symbols-unix	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/hotspot/symbols/symbols-unix	Tue Oct 29 19:49:55 2019 -0700
@@ -97,6 +97,7 @@
 JVM_GetDeclaredClasses
 JVM_GetDeclaringClass
 JVM_GetEnclosingMethodInfo
+JVM_GetExtendedNPEMessage
 JVM_GetFieldIxModifiers
 JVM_GetFieldTypeAnnotations
 JVM_GetInheritedAccessControlContext
--- a/make/jdk/src/classes/build/tools/charsetmapping/SBCS.java	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/jdk/src/classes/build/tools/charsetmapping/SBCS.java	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
         String hisName = cs.hisName;
         String pkgName = cs.pkgName;
         boolean isASCII = cs.isASCII;
+        boolean isLatin1Decodable = true;
 
         StringBuilder b2cSB = new StringBuilder();
         StringBuilder b2cNRSB = new StringBuilder();
@@ -69,6 +70,9 @@
                 c2bOff += 0x100;
                 c2bIndex[e.cp>>8] = 1;
             }
+            if (e.cp > 0xFF) {
+                isLatin1Decodable = false;
+            }
         }
 
         Formatter fm = new Formatter(b2cSB);
@@ -178,6 +182,9 @@
             if (line.indexOf("$ASCIICOMPATIBLE$") != -1) {
                 line = line.replace("$ASCIICOMPATIBLE$", isASCII ? "true" : "false");
             }
+            if (line.indexOf("$LATIN1DECODABLE$") != -1) {
+                line = line.replace("$LATIN1DECODABLE$", isLatin1Decodable ? "true" : "false");
+            }
             if (line.indexOf("$B2CTABLE$") != -1) {
                 line = line.replace("$B2CTABLE$", b2c);
             }
--- a/make/jdk/src/classes/build/tools/taglet/ModuleGraph.java	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/jdk/src/classes/build/tools/taglet/ModuleGraph.java	Tue Oct 29 19:49:55 2019 -0700
@@ -64,7 +64,7 @@
         }
 
         String moduleName = ((ModuleElement) element).getQualifiedName().toString();
-        String imageFile = "module-graph.png";
+        String imageFile = "module-graph.svg";
         int thumbnailHeight = -1;
         String hoverImage = "";
         if (!moduleName.equals("java.base")) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/jdk/src/classes/build/tools/taglet/Preview.java	Tue Oct 29 19:49:55 2019 -0700
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package build.tools.taglet;
+
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Predicate;
+import javax.lang.model.element.Element;
+import com.sun.source.doctree.DocTree;
+import com.sun.source.doctree.TextTree;
+import com.sun.source.doctree.UnknownInlineTagTree;
+import jdk.javadoc.doclet.Taglet;
+import static jdk.javadoc.doclet.Taglet.Location.*;
+
+/**
+ * An block tag to insert a standard warning about a preview API.
+ */
+public class Preview implements Taglet {
+
+    /** Returns the set of locations in which a taglet may be used. */
+    @Override
+    public Set<Location> getAllowedLocations() {
+        return EnumSet.of(MODULE, PACKAGE, TYPE, CONSTRUCTOR, METHOD, FIELD);
+    }
+
+    @Override
+    public boolean isInlineTag() {
+        return true;
+    }
+
+    @Override
+    public String getName() {
+        return "preview";
+    }
+
+    @Override
+    public String toString(List<? extends DocTree> tags, Element elem) {
+        UnknownInlineTagTree previewTag = (UnknownInlineTagTree) tags.get(0);
+        List<? extends DocTree> previewContent = previewTag.getContent();
+        String previewText = ((TextTree) previewContent.get(0)).getBody();
+        String[] summaryAndDetails = previewText.split("\n\r?\n\r?");
+        String summary = summaryAndDetails[0];
+        String details = summaryAndDetails.length > 1 ? summaryAndDetails[1] : summaryAndDetails[0];
+        StackTraceElement[] stackTrace = new Exception().getStackTrace();
+        Predicate<StackTraceElement> isSummary =
+                el -> el.getClassName().endsWith("HtmlDocletWriter") &&
+                      el.getMethodName().equals("addSummaryComment");
+        if (Arrays.stream(stackTrace).anyMatch(isSummary)) {
+            return "<div style=\"display:inline-block; font-weight:bold\">" + summary + "</div><br>";
+        }
+        return "<div style=\"border: 1px solid red; border-radius: 5px; padding: 5px; display:inline-block; font-size: larger\">" + details + "</div><br>";
+    }
+}
+
--- a/make/launcher/Launcher-jdk.pack.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/launcher/Launcher-jdk.pack.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -87,6 +87,7 @@
     CFLAGS_solaris := -KPIC, \
     CFLAGS_macosx := -fPIC, \
     DISABLED_WARNINGS_clang := format-nonliteral, \
+    DISABLED_WARNINGS_solstudio := wunreachable, \
     LDFLAGS := $(LDFLAGS_JDKEXE) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LIBS := $(UNPACKEXE_LIBS) $(LIBCXX), \
--- a/make/lib/Awt2dLibraries.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/lib/Awt2dLibraries.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -229,7 +229,6 @@
         format-nonliteral parentheses unused-value unused-function, \
     DISABLED_WARNINGS_clang := logical-op-parentheses extern-initializer \
         sign-compare format-nonliteral, \
-    DISABLED_WARNINGS_solstudio := E_DECLARATION_IN_CODE, \
     DISABLED_WARNINGS_microsoft := 4244 4267 4996, \
     ASFLAGS := $(LIBAWT_ASFLAGS), \
     LDFLAGS := $(LDFLAGS_JDKLIB) $(call SET_SHARED_LIBRARY_ORIGIN), \
@@ -339,8 +338,8 @@
             implicit-fallthrough undef unused-function, \
         DISABLED_WARNINGS_clang := parentheses format undef \
             logical-op-parentheses format-nonliteral int-conversion, \
-        DISABLED_WARNINGS_solstudio := E_DECLARATION_IN_CODE \
-            E_ASSIGNMENT_TYPE_MISMATCH E_NON_CONST_INIT, \
+        DISABLED_WARNINGS_solstudio := E_ASSIGNMENT_TYPE_MISMATCH \
+             E_NON_CONST_INIT, \
         LDFLAGS := $(LDFLAGS_JDKLIB) \
             $(call SET_SHARED_LIBRARY_ORIGIN) \
             -L$(INSTALL_LIBRARIES_HERE), \
@@ -620,7 +619,8 @@
         E_ENUM_VAL_OVERFLOWS_INT_MAX, \
     DISABLED_WARNINGS_CXX_solstudio := \
         truncwarn wvarhidenmem wvarhidemem wbadlkginit identexpected \
-        hidevf w_novirtualdescr arrowrtn2 refmemnoconstr_aggr unknownpragma, \
+        hidevf w_novirtualdescr arrowrtn2 refmemnoconstr_aggr unknownpragma \
+        doubunder wunreachable, \
     DISABLED_WARNINGS_microsoft := 4267 4244 4018 4090 4996 4146 4334 4819 4101 4068 4805 4138, \
     LDFLAGS := $(subst -Xlinker -z -Xlinker defs,, \
         $(subst -Wl$(COMMA)-z$(COMMA)defs,,$(LDFLAGS_JDKLIB))) $(LDFLAGS_CXX_JDK) \
@@ -848,8 +848,7 @@
           maybe-uninitialized shift-negative-value implicit-fallthrough \
           unused-function, \
       DISABLED_WARNINGS_clang := incompatible-pointer-types sign-compare, \
-      DISABLED_WARNINGS_solstudio := E_NEWLINE_NOT_LAST E_DECLARATION_IN_CODE \
-          E_STATEMENT_NOT_REACHED, \
+      DISABLED_WARNINGS_solstudio := E_STATEMENT_NOT_REACHED, \
       DISABLED_WARNINGS_microsoft := 4018 4244 4267, \
       LDFLAGS := $(LDFLAGS_JDKLIB) \
           $(call SET_SHARED_LIBRARY_ORIGIN), \
--- a/make/lib/CoreLibraries.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/lib/CoreLibraries.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -23,8 +23,6 @@
 # questions.
 #
 
-WIN_VERIFY_LIB := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libverify/verify.lib
-
 # Hook to include the corresponding custom file, if present.
 $(eval $(call IncludeCustomExtension, lib/CoreLibraries.gmk))
 
@@ -110,14 +108,14 @@
     LDFLAGS_macosx := -L$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/, \
     LDFLAGS_windows := -delayload:shell32.dll, \
     LIBS := $(BUILD_LIBFDLIBM_TARGET), \
-    LIBS_unix := -ljvm -lverify, \
+    LIBS_unix := -ljvm, \
     LIBS_linux := $(LIBDL), \
     LIBS_solaris := -lsocket -lnsl -lscf $(LIBDL), \
     LIBS_aix := $(LIBDL) $(LIBM),\
     LIBS_macosx := -framework CoreFoundation \
         -framework Foundation \
         -framework SystemConfiguration, \
-    LIBS_windows := jvm.lib $(WIN_VERIFY_LIB) \
+    LIBS_windows := jvm.lib \
         shell32.lib delayimp.lib \
         advapi32.lib version.lib, \
 ))
@@ -214,9 +212,6 @@
     CFLAGS := $(CFLAGS_JDKLIB) $(LIBJLI_CFLAGS), \
     DISABLED_WARNINGS_gcc := unused-function, \
     DISABLED_WARNINGS_clang := sometimes-uninitialized format-nonliteral, \
-    DISABLED_WARNINGS_solstudio := \
-        E_ASM_DISABLES_OPTIMIZATION \
-        E_STATEMENT_NOT_REACHED, \
     LDFLAGS := $(LDFLAGS_JDKLIB) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LIBS_unix := $(LIBZ_LIBS), \
--- a/make/lib/Lib-jdk.hotspot.agent.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/lib/Lib-jdk.hotspot.agent.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -61,7 +61,7 @@
     DISABLED_WARNINGS_microsoft := 4267, \
     DISABLED_WARNINGS_gcc := sign-compare pointer-arith, \
     DISABLED_WARNINGS_clang := sign-compare pointer-arith format-nonliteral, \
-    DISABLED_WARNINGS_CXX_solstudio := truncwarn unknownpragma, \
+    DISABLED_WARNINGS_CXX_solstudio := truncwarn unknownpragma doubunder, \
     CFLAGS := $(CFLAGS_JDKLIB) $(SA_CFLAGS), \
     CXXFLAGS := $(CXXFLAGS_JDKLIB) $(SA_CFLAGS) $(SA_CXXFLAGS), \
     EXTRA_SRC := $(LIBSA_EXTRA_SRC), \
--- a/make/lib/Lib-jdk.pack.gmk	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/lib/Lib-jdk.pack.gmk	Tue Oct 29 19:49:55 2019 -0700
@@ -38,6 +38,7 @@
     EXTRA_HEADER_DIRS := $(call GetJavaHeaderDir, java.base), \
     DISABLED_WARNINGS_gcc := implicit-fallthrough, \
     DISABLED_WARNINGS_clang := format-nonliteral, \
+    DISABLED_WARNINGS_solstudio := wunreachable, \
     LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_windows := -map:$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/unpack.map -debug, \
--- a/make/scripts/compare.sh	Tue Oct 08 17:58:39 2019 -0700
+++ b/make/scripts/compare.sh	Tue Oct 29 19:49:55 2019 -0700
@@ -34,6 +34,9 @@
     exit 1
 fi
 
+# Make sure all shell commands are executed with the C locale
+export LC_ALL=C
+
 if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
     FULLDUMP_CMD="$OTOOL -v -V -h -X -d"
     LDD_CMD="$OTOOL -L"
@@ -110,7 +113,7 @@
         "
   fi
 elif [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
-  DIS_DIFF_FILTER="LANG=C $SED \
+  DIS_DIFF_FILTER="$SED \
       -e 's/0x[0-9a-f]\{3,16\}/<HEXSTR>/g' -e 's/^[0-9a-f]\{12,20\}/<ADDR>/' \
       -e 's/-20[0-9][0-9]-[0-1][0-9]-[0-3][0-9]-[0-2][0-9]\{5\}/<DATE>/g' \
       -e 's/), built on .*/), <DATE>/' \
@@ -134,7 +137,7 @@
 
     if [[ "$THIS_FILE" = *"META-INF/MANIFEST.MF" ]]; then
         # Filter out date string, ant version and java version differences.
-        TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \
+        TMP=$($DIFF $OTHER_FILE $THIS_FILE | \
             $GREP '^[<>]' | \
             $SED -e '/[<>] Ant-Version: Apache Ant .*/d' \
                  -e '/[<>] Created-By: .* (Oracle [Corpatin)]*/d' \
@@ -142,7 +145,7 @@
                  -e '/[<>].*[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}-b[0-9]\{2\}.*/d')
     fi
     if test "x$SUFFIX" = "xjava"; then
-        TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \
+        TMP=$($DIFF $OTHER_FILE $THIS_FILE | \
             $GREP '^[<>]' | \
             $SED -e '/[<>] \* from.*\.idl/d' \
                  -e '/[<>] .*[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}-b[0-9]\{2\}.*/d' \
@@ -197,7 +200,7 @@
     fi
     if test "x$SUFFIX" = "xproperties"; then
         # Filter out date string differences.
-        TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \
+        TMP=$($DIFF $OTHER_FILE $THIS_FILE | \
             $GREP '^[<>]' | \
             $SED -e '/[<>].*[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}.*/d')
     fi
@@ -207,7 +210,7 @@
             -e 's/<font size=-1>/<font size=\"-1\">/g'"
         $CAT $THIS_FILE | eval "$HTML_FILTER" > $THIS_FILE.filtered
         $CAT $OTHER_FILE | eval "$HTML_FILTER" > $OTHER_FILE.filtered
-        TMP=$(LC_ALL=C $DIFF $OTHER_FILE.filtered $THIS_FILE.filtered | \
+        TMP=$($DIFF $OTHER_FILE.filtered $THIS_FILE.filtered | \
             $GREP '^[<>]' | \
             $SED -e '/[<>] <!-- Generated by javadoc .* on .* -->/d' \
                  -e '/[<>] <meta name="date" content=".*">/d' )
@@ -554,11 +557,11 @@
     CONTENTS_DIFF_FILE=$WORK_DIR/$ZIP_FILE.diff
     # On solaris, there is no -q option.
     if [ "$OPENJDK_TARGET_OS" = "solaris" ]; then
-        LC_ALL=C $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \
+        $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \
             | $GREP -v -e "^<" -e "^>" -e "^Common subdirectories:" \
             > $CONTENTS_DIFF_FILE
     else
-        LC_ALL=C $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE
+        $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE
     fi
 
     ONLY_OTHER=$($GREP "^Only in $OTHER_UNZIPDIR" $CONTENTS_DIFF_FILE)
@@ -605,11 +608,11 @@
             if [ -n "$SHOW_DIFFS" ]; then
                 for i in $(cat $WORK_DIR/$ZIP_FILE.difflist) ; do
                     if [ -f "${OTHER_UNZIPDIR}/$i.javap" ]; then
-                        LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap
+                        $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap
                     elif [ -f "${OTHER_UNZIPDIR}/$i.cleaned" ]; then
-                        LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i
+                        $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i
                     else
-                        LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i
+                        $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i
                     fi
                 done
             fi
@@ -642,7 +645,7 @@
     $JMOD list $THIS_JMOD | sort > $THIS_JMOD_LIST
     $JMOD list $OTHER_JMOD | sort > $OTHER_JMOD_LIST
     JMOD_LIST_DIFF_FILE=$WORK_DIR/$JMOD_FILE.list.diff
-    LC_ALL=C $DIFF $THIS_JMOD_LIST $OTHER_JMOD_LIST > $JMOD_LIST_DIFF_FILE
+    $DIFF $THIS_JMOD_LIST $OTHER_JMOD_LIST > $JMOD_LIST_DIFF_FILE
 
     ONLY_THIS=$($GREP "^<" $JMOD_LIST_DIFF_FILE)
     ONLY_OTHER=$($GREP "^>" $JMOD_LIST_DIFF_FILE)
@@ -924,7 +927,7 @@
             > $WORK_FILE_BASE.symbols.this
     fi
 
-    LC_ALL=C $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff
+    $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff
     if [ -s $WORK_FILE_BASE.symbols.diff ]; then
         SYM_MSG=" diff  "
         if [[ "$ACCEPTED_SYM_DIFF" != *"$BIN_FILE"* ]]; then
@@ -964,9 +967,9 @@
                     | $UNIQ > $WORK_FILE_BASE.deps.this.uniq)
         (cd $FILE_WORK_DIR && $RM -f $NAME)
 
-        LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this \
+        $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this \
               > $WORK_FILE_BASE.deps.diff
-        LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq \
+        $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq \
               > $WORK_FILE_BASE.deps.diff.uniq
 
         if [ -s $WORK_FILE_BASE.deps.diff ]; then
@@ -1016,7 +1019,7 @@
             > $WORK_FILE_BASE.fulldump.this  2>&1 &
         wait
 
-        LC_ALL=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this \
+        $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this \
             > $WORK_FILE_BASE.fulldump.diff
 
         if [ -s $WORK_FILE_BASE.fulldump.diff ]; then
@@ -1063,7 +1066,7 @@
             | eval "$this_DIS_DIFF_FILTER" > $WORK_FILE_BASE.dis.this  2>&1 &
         wait
 
-        LC_ALL=C $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff
+        $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff
 
         if [ -s $WORK_FILE_BASE.dis.diff ]; then
             DIS_DIFF_SIZE=$(ls -n $WORK_FILE_BASE.dis.diff | awk '{print $5}')
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Tue Oct 29 19:49:55 2019 -0700
@@ -2513,17 +2513,8 @@
     __ INSN(REG, as_Register(BASE));                                    \
   }
 
-typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
-typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
-typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
-                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
-
-  // Used for all non-volatile memory accesses.  The use of
-  // $mem->opcode() to discover whether this pattern uses sign-extended
-  // offsets is something of a kludge.
-  static void loadStore(MacroAssembler masm, mem_insn insn,
-                         Register reg, int opcode,
-                         Register base, int index, int size, int disp)
+
+static Address mem2address(int opcode, Register base, int index, int size, int disp)
   {
     Address::extend scale;
 
@@ -2542,16 +2533,34 @@
     }
 
     if (index == -1) {
-      (masm.*insn)(reg, Address(base, disp));
+      return Address(base, disp);
     } else {
       assert(disp == 0, "unsupported address mode: disp = %d", disp);
-      (masm.*insn)(reg, Address(base, as_Register(index), scale));
+      return Address(base, as_Register(index), scale);
     }
   }
 
+
+typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
+typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
+typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
+typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
+                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
+
+  // Used for all non-volatile memory accesses.  The use of
+  // $mem->opcode() to discover whether this pattern uses sign-extended
+  // offsets is something of a kludge.
+  static void loadStore(MacroAssembler masm, mem_insn insn,
+                        Register reg, int opcode,
+                        Register base, int index, int size, int disp)
+  {
+    Address addr = mem2address(opcode, base, index, size, disp);
+    (masm.*insn)(reg, addr);
+  }
+
   static void loadStore(MacroAssembler masm, mem_float_insn insn,
-                         FloatRegister reg, int opcode,
-                         Register base, int index, int size, int disp)
+                        FloatRegister reg, int opcode,
+                        Register base, int index, int size, int disp)
   {
     Address::extend scale;
 
@@ -2573,8 +2582,8 @@
   }
 
   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
-                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
-                         int opcode, Register base, int index, int size, int disp)
+                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
+                        int opcode, Register base, int index, int size, int disp)
   {
     if (index == -1) {
       (masm.*insn)(reg, T, Address(base, disp));
@@ -3791,7 +3800,7 @@
     static const int hi[Op_RegL + 1] = { // enum name
       0,                                 // Op_Node
       0,                                 // Op_Set
-      OptoReg::Bad,                       // Op_RegN
+      OptoReg::Bad,                      // Op_RegN
       OptoReg::Bad,                      // Op_RegI
       R0_H_num,                          // Op_RegP
       OptoReg::Bad,                      // Op_RegF
@@ -6923,7 +6932,7 @@
 instruct loadP(iRegPNoSp dst, memory mem)
 %{
   match(Set dst (LoadP mem));
-  predicate(!needs_acquiring_load(n));
+  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
 
   ins_cost(4 * INSN_COST);
   format %{ "ldr  $dst, $mem\t# ptr" %}
@@ -7616,6 +7625,7 @@
 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadP mem));
+  predicate(n->as_Load()->barrier_data() == 0);
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldar  $dst, $mem\t# ptr" %}
@@ -8552,6 +8562,7 @@
 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 
   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   ins_cost(2 * VOLATILE_REF_COST);
 
   effect(KILL cr);
@@ -8665,7 +8676,7 @@
 
 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 
-  predicate(needs_acquiring_load_exclusive(n));
+  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
   ins_cost(VOLATILE_REF_COST);
 
@@ -8796,6 +8807,7 @@
 %}
 
 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
   effect(TEMP_DEF res, KILL cr);
@@ -8895,7 +8907,7 @@
 %}
 
 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
-  predicate(needs_acquiring_load_exclusive(n));
+  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
   ins_cost(VOLATILE_REF_COST);
   effect(TEMP_DEF res, KILL cr);
@@ -8996,6 +9008,7 @@
 %}
 
 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
   effect(KILL cr);
@@ -9103,8 +9116,8 @@
 %}
 
 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
-  predicate(needs_acquiring_load_exclusive(n));
   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
   ins_cost(VOLATILE_REF_COST);
   effect(KILL cr);
   format %{
@@ -9154,6 +9167,7 @@
 %}
 
 instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   match(Set prev (GetAndSetP mem newv));
   ins_cost(2 * VOLATILE_REF_COST);
   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
@@ -9197,7 +9211,7 @@
 %}
 
 instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
-  predicate(needs_acquiring_load_exclusive(n));
+  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
   match(Set prev (GetAndSetP mem newv));
   ins_cost(VOLATILE_REF_COST);
   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
--- a/src/hotspot/cpu/aarch64/abstractInterpreter_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/abstractInterpreter_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
 #include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/method.hpp"
 #include "runtime/frame.inline.hpp"
 #include "utilities/align.hpp"
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -162,16 +162,12 @@
   // Creation also verifies the object.
   NativeMovConstReg* method_holder
     = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
-#ifndef PRODUCT
+
+#ifdef ASSERT
   NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
+  verify_mt_safe(callee, entry, method_holder, jump);
+#endif
 
-  // read the value once
-  volatile intptr_t data = method_holder->data();
-  assert(data == 0 || data == (intptr_t)callee(),
-         "a) MT-unsafe modification of inline cache");
-  assert(data == 0 || jump->jump_destination() == entry,
-         "b) MT-unsafe modification of inline cache");
-#endif
   // Update stub.
   method_holder->set_data((intptr_t)callee());
   NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry);
@@ -189,6 +185,10 @@
   NativeMovConstReg* method_holder
     = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
   method_holder->set_data(0);
+  if (!static_stub->is_aot()) {
+    NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
+    jump->set_jump_destination((address)-1);
+  }
 }
 
 //-----------------------------------------------------------------------------
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -58,7 +58,7 @@
       Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
       __ ldrb(rscratch1, gc_state);
       if (dest_uninitialized) {
-        __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
+        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
       } else {
         __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
         __ tst(rscratch1, rscratch2);
@@ -262,7 +262,7 @@
   __ leave();
 }
 
-void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Register tmp) {
+void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Address load_addr) {
   if (!ShenandoahLoadRefBarrier) {
     return;
   }
@@ -272,6 +272,8 @@
   Label is_null;
   Label done;
 
+  __ block_comment("load_reference_barrier_native { ");
+
   __ cbz(dst, is_null);
 
   __ enter();
@@ -285,6 +287,7 @@
   __ mov(rscratch2, dst);
   __ push_call_clobbered_registers();
   __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native));
+  __ lea(r1, load_addr);
   __ mov(r0, rscratch2);
   __ blr(lr);
   __ mov(rscratch2, r0);
@@ -294,6 +297,7 @@
   __ bind(done);
   __ leave();
   __ bind(is_null);
+  __ block_comment("} load_reference_barrier_native");
 }
 
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
@@ -327,20 +331,32 @@
   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
   bool on_reference = on_weak || on_phantom;
-  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
+  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
+
+  Register result_dst = dst;
+
+  if (on_oop) {
+    // We want to preserve src
+    if (dst == src.base() || dst == src.index()) {
+      dst = rscratch1;
+    }
+    assert_different_registers(dst, src.base(), src.index());
+  }
 
   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
   if (on_oop) {
-     if (not_in_heap) {
-       if (ShenandoahHeap::heap()->is_traversal_mode()) {
-         load_reference_barrier(masm, dst, tmp1);
-         keep_alive = true;
-       } else {
-         load_reference_barrier_native(masm, dst, tmp1);
-       }
-     } else {
-       load_reference_barrier(masm, dst, tmp1);
-     }
+    if (not_in_heap && !is_traversal_mode) {
+      load_reference_barrier_native(masm, dst, src);
+    } else {
+      load_reference_barrier(masm, dst, tmp1);
+    }
+
+    if (dst != result_dst) {
+      __ mov(result_dst, dst);
+      dst = result_dst;
+    }
+
     if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
       __ enter();
       satb_write_barrier_pre(masm /* masm */,
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -58,7 +58,7 @@
   void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
   void load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp);
   void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp);
-  void load_reference_barrier_native(MacroAssembler* masm, Register dst, Register tmp);
+  void load_reference_barrier_native(MacroAssembler* masm, Register dst, Address load_addr);
 
   address generate_shenandoah_lrb(StubCodeGenerator* cgen);
 
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -24,22 +24,23 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "code/codeBlob.hpp"
+#include "code/vmreg.inline.hpp"
 #include "gc/z/zBarrier.inline.hpp"
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zBarrierSetRuntime.hpp"
+#include "gc/z/zThreadLocalData.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
 #ifdef COMPILER1
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "gc/z/c1/zBarrierSetC1.hpp"
 #endif // COMPILER1
-
-#include "gc/z/zThreadLocalData.hpp"
-
-ZBarrierSetAssembler::ZBarrierSetAssembler() :
-    _load_barrier_slow_stub(),
-    _load_barrier_weak_slow_stub() {}
+#ifdef COMPILER2
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif // COMPILER2
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
@@ -66,7 +67,7 @@
   assert_different_registers(rscratch1, rscratch2, src.base());
   assert_different_registers(rscratch1, rscratch2, dst);
 
-  RegSet savedRegs = RegSet::range(r0,r28) - RegSet::of(dst, rscratch1, rscratch2);
+  RegSet savedRegs = RegSet::range(r0, r28) - RegSet::of(dst, rscratch1, rscratch2);
 
   Label done;
 
@@ -206,7 +207,8 @@
 
   // The Address offset is too large to direct load - -784. Our range is +127, -128.
   __ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
-      in_bytes(JavaThread::jni_environment_offset())));
+              in_bytes(JavaThread::jni_environment_offset())));
+
   // Load address bad mask
   __ add(tmp, jni_env, tmp);
   __ ldr(tmp, Address(tmp));
@@ -294,12 +296,12 @@
   __ prologue("zgc_load_barrier stub", false);
 
   // We don't use push/pop_clobbered_registers() - we need to pull out the result from r0.
-  for (int i = 0; i < 32; i +=2) {
-    __ stpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ pre(sp,-16)));
+  for (int i = 0; i < 32; i += 2) {
+    __ stpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ pre(sp,-16)));
   }
 
-  RegSet saveRegs = RegSet::range(r0,r28) - RegSet::of(r0);
-  __ push(saveRegs, sp);
+  const RegSet save_regs = RegSet::range(r1, r28);
+  __ push(save_regs, sp);
 
   // Setup arguments
   __ load_parameter(0, c_rarg0);
@@ -307,98 +309,161 @@
 
   __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
 
-  __ pop(saveRegs, sp);
+  __ pop(save_regs, sp);
 
-  for (int i = 30; i >0; i -=2) {
-      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ post(sp, 16)));
-    }
+  for (int i = 30; i >= 0; i -= 2) {
+    __ ldpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ post(sp, 16)));
+  }
 
   __ epilogue();
 }
 #endif // COMPILER1
 
-#undef __
-#define __ cgen->assembler()->
+#ifdef COMPILER2
 
-// Generates a register specific stub for calling
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-//
-// The raddr register serves as both input and output for this stub. When the stub is
-// called the raddr register contains the object field address (oop*) where the bad oop
-// was loaded from, which caused the slow path to be taken. On return from the stub the
-// raddr register contains the good/healed oop returned from
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
-  // Don't generate stub for invalid registers
-  if (raddr == zr || raddr == r29 || raddr == r30) {
-    return NULL;
+OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
+  if (!OptoReg::is_reg(opto_reg)) {
+    return OptoReg::Bad;
   }
 
-  // Create stub name
-  char name[64];
-  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
-  os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
-
-  __ align(CodeEntryAlignment);
-  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
-  address start = __ pc();
-
-  // Save live registers
-  RegSet savedRegs = RegSet::range(r0,r18) - RegSet::of(raddr);
-
-  __ enter();
-  __ push(savedRegs, sp);
-
-  // Setup arguments
-  if (raddr != c_rarg1) {
-    __ mov(c_rarg1, raddr);
+  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+  if (vm_reg->is_FloatRegister()) {
+    return opto_reg & ~1;
   }
 
-  __ ldr(c_rarg0, Address(raddr));
+  return opto_reg;
+}
 
-  // Call barrier function
-  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+#undef __
+#define __ _masm->
 
-  // Move result returned in r0 to raddr, if needed
-  if (raddr != r0) {
-    __ mov(raddr, r0);
+class ZSaveLiveRegisters {
+private:
+  MacroAssembler* const _masm;
+  RegSet                _gp_regs;
+  RegSet                _fp_regs;
+
+public:
+  void initialize(ZLoadBarrierStubC2* stub) {
+    // Create mask of live registers
+    RegMask live = stub->live();
+
+    // Record registers that need to be saved/restored
+    while (live.is_NotEmpty()) {
+      const OptoReg::Name opto_reg = live.find_first_elem();
+      live.Remove(opto_reg);
+      if (OptoReg::is_reg(opto_reg)) {
+        const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+        if (vm_reg->is_Register()) {
+          _gp_regs += RegSet::of(vm_reg->as_Register());
+        } else if (vm_reg->is_FloatRegister()) {
+          _fp_regs += RegSet::of((Register)vm_reg->as_FloatRegister());
+        } else {
+          fatal("Unknown register type");
+        }
+      }
+    }
+
+    // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
+    _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref());
   }
 
-  __ pop(savedRegs, sp);
-  __ leave();
-  __ ret(lr);
+  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+      _masm(masm),
+      _gp_regs(),
+      _fp_regs() {
 
-  return start;
+    // Figure out what registers to save/restore
+    initialize(stub);
+
+    // Save registers
+    __ push(_gp_regs, sp);
+    __ push_fp(_fp_regs, sp);
+  }
+
+  ~ZSaveLiveRegisters() {
+    // Restore registers
+    __ pop_fp(_fp_regs, sp);
+    __ pop(_gp_regs, sp);
+  }
+};
+
+#undef __
+#define __ _masm->
+
+class ZSetupArguments {
+private:
+  MacroAssembler* const _masm;
+  const Register        _ref;
+  const Address         _ref_addr;
+
+public:
+  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+      _masm(masm),
+      _ref(stub->ref()),
+      _ref_addr(stub->ref_addr()) {
+
+    // Setup arguments
+    if (_ref_addr.base() == noreg) {
+      // No self healing
+      if (_ref != c_rarg0) {
+        __ mov(c_rarg0, _ref);
+      }
+      __ mov(c_rarg1, 0);
+    } else {
+      // Self healing
+      if (_ref == c_rarg0) {
+        // _ref is already at correct place
+        __ lea(c_rarg1, _ref_addr);
+      } else if (_ref != c_rarg1) {
+        // _ref is in wrong place, but not in c_rarg1, so fix it first
+        __ lea(c_rarg1, _ref_addr);
+        __ mov(c_rarg0, _ref);
+      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
+        assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
+        __ mov(c_rarg0, _ref);
+        __ lea(c_rarg1, _ref_addr);
+      } else {
+        assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
+        if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) {
+          __ mov(rscratch2, c_rarg1);
+          __ lea(c_rarg1, _ref_addr);
+          __ mov(c_rarg0, rscratch2);
+        } else {
+          ShouldNotReachHere();
+        }
+      }
+    }
+  }
+
+  ~ZSetupArguments() {
+    // Transfer result
+    if (_ref != r0) {
+      __ mov(_ref, r0);
+    }
+  }
+};
+
+#undef __
+#define __ masm->
+
+void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
+  BLOCK_COMMENT("ZLoadBarrierStubC2");
+
+  // Stub entry
+  __ bind(*stub->entry());
+
+  {
+    ZSaveLiveRegisters save_live_registers(masm, stub);
+    ZSetupArguments setup_arguments(masm, stub);
+    __ mov(rscratch1, stub->slow_path());
+    __ blr(rscratch1);
+  }
+
+  // Stub exit
+  __ b(*stub->continuation());
 }
 
 #undef __
 
-static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
-  const int nregs = 28;              // Exclude FP, XZR, SP from calculation.
-  const int code_size = nregs * 254; // Rough estimate of code size
-
-  ResourceMark rm;
-
-  CodeBuffer buf(BufferBlob::create(label, code_size));
-  StubCodeGenerator cgen(&buf);
-
-  for (int i = 0; i < nregs; i++) {
-    const Register reg = as_Register(i);
-    stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
-  }
-}
-
-void ZBarrierSetAssembler::barrier_stubs_init() {
-  barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
-  barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
-}
-
-address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
-  return _load_barrier_slow_stub[reg->encoding()];
-}
-
-address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
-  return _load_barrier_weak_slow_stub[reg->encoding()];
-}
+#endif // COMPILER2
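
ZSetupArguments must place ref in c_rarg0 and the field address in c_rarg1 even when either input already occupies one of those registers, ordering the moves so neither input is clobbered before it is read and rotating through a scratch register only for the full swap. A toy model of that case analysis, with registers as plain array slots (all names hypothetical):

    #include <cassert>

    enum { R0 = 0, R1 = 1, SCRATCH = 2, NREGS = 8 };

    // Move the value in regs[ref] to R0 and the value in regs[addr] to R1
    // using only register-to-register moves, without losing either input.
    static void setup_args(int regs[NREGS], int ref, int addr) {
      if (ref == R0) {
        regs[R1] = regs[addr];          // ref already in place
      } else if (ref != R1) {
        regs[R1] = regs[addr];          // addr may live in R0; read it first
        regs[R0] = regs[ref];           // ref is outside R0/R1, still intact
      } else if (addr != R0) {
        regs[R0] = regs[ref];           // vacate R1 into R0 first
        regs[R1] = regs[addr];
      } else {
        regs[SCRATCH] = regs[ref];      // ref in R1, addr in R0: rotate
        regs[R1] = regs[addr];
        regs[R0] = regs[SCRATCH];
      }
    }

    int main() {
      int regs[NREGS] = {10, 11, 0, 13, 14, 15, 16, 17};
      setup_args(regs, /*ref=*/R1, /*addr=*/R0);   // the swap case
      assert(regs[R0] == 11 && regs[R1] == 10);
      return 0;
    }
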
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -24,6 +24,12 @@
 #ifndef CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
 #define CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
 
+#include "code/vmreg.hpp"
+#include "oops/accessDecorators.hpp"
+#ifdef COMPILER2
+#include "opto/optoreg.hpp"
+#endif // COMPILER2
+
 #ifdef COMPILER1
 class LIR_Assembler;
 class LIR_OprDesc;
@@ -32,14 +38,13 @@
 class ZLoadBarrierStubC1;
 #endif // COMPILER1
 
+#ifdef COMPILER2
+class Node;
+class ZLoadBarrierStubC2;
+#endif // COMPILER2
+
 class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
-private:
-  address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
-  address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
-
 public:
-  ZBarrierSetAssembler();
-
   virtual void load_at(MacroAssembler* masm,
                        DecoratorSet decorators,
                        BasicType type,
@@ -83,10 +88,13 @@
                                              DecoratorSet decorators) const;
 #endif // COMPILER1
 
-  virtual void barrier_stubs_init();
+#ifdef COMPILER2
+  OptoReg::Name refine_register(const Node* node,
+                                OptoReg::Name opto_reg);
 
-  address load_barrier_slow_stub(Register reg);
-  address load_barrier_weak_slow_stub(Register reg);
+  void generate_c2_load_barrier_stub(MacroAssembler* masm,
+                                     ZLoadBarrierStubC2* stub) const;
+#endif // COMPILER2
 };
 
 #endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -40,7 +40,7 @@
 //  +--------------------------------+ 0x0000014000000000 (20TB)
 //  |         Remapped View          |
 //  +--------------------------------+ 0x0000010000000000 (16TB)
-//  |     (Reserved, but unused)     |
+//  .                                .
 //  +--------------------------------+ 0x00000c0000000000 (12TB)
 //  |         Marked1 View           |
 //  +--------------------------------+ 0x0000080000000000 (8TB)
@@ -75,7 +75,7 @@
 //  +--------------------------------+ 0x0000280000000000 (40TB)
 //  |         Remapped View          |
 //  +--------------------------------+ 0x0000200000000000 (32TB)
-//  |     (Reserved, but unused)     |
+//  .                                .
 //  +--------------------------------+ 0x0000180000000000 (24TB)
 //  |         Marked1 View           |
 //  +--------------------------------+ 0x0000100000000000 (16TB)
@@ -110,7 +110,7 @@
 //  +--------------------------------+ 0x0000500000000000 (80TB)
 //  |         Remapped View          |
 //  +--------------------------------+ 0x0000400000000000 (64TB)
-//  |     (Reserved, but unused)     |
+//  .                                .
 //  +--------------------------------+ 0x0000300000000000 (48TB)
 //  |         Marked1 View           |
 //  +--------------------------------+ 0x0000200000000000 (32TB)
@@ -142,8 +142,7 @@
 size_t ZPlatformAddressOffsetBits() {
   const size_t min_address_offset_bits = 42; // 4TB
   const size_t max_address_offset_bits = 44; // 16TB
-  const size_t virtual_to_physical_ratio = 7; // 7:1
-  const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * virtual_to_physical_ratio);
+  const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
   const size_t address_offset_bits = log2_intptr(address_offset);
   return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
 }
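
With the hard-coded 7:1 ratio replaced by the shared ZVirtualToPhysicalRatio constant, the computation reads: round MaxHeapSize times the ratio up to a power of two, take log2, and clamp the result to the 42..44 bit range. A worked example under assumed values (the heap size and ratio below are illustrative, not the platform's actual constants):

    #include <cstdio>

    // ceil(log2(v)): the bit count of the next power of two >= v.
    static int log2_up(unsigned long long v) {
      int bits = 0;
      while ((1ULL << bits) < v) bits++;
      return bits;
    }

    int main() {
      const unsigned long long max_heap = 8ULL << 30;  // assume an 8GB heap
      const unsigned long long ratio = 16;             // assumed virtual:physical ratio
      const int min_bits = 42, max_bits = 44;
      int bits = log2_up(max_heap * ratio);            // 128GB -> 37 bits
      if (bits < min_bits) bits = min_bits;
      if (bits > max_bits) bits = max_bits;
      std::printf("address offset bits = %d\n", bits); // clamped up to 42
      return 0;
    }
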
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -24,24 +24,13 @@
 #ifndef CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
 #define CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
 
-//
-// Page Allocation Tiers
-// ---------------------
-//
-//  Page Type     Page Size     Object Size Limit     Object Alignment
-//  ------------------------------------------------------------------
-//  Small         2M            <= 265K               <MinObjAlignmentInBytes>
-//  Medium        32M           <= 4M                 4K
-//  Large         X*M           > 4M                  2M
-//  ------------------------------------------------------------------
-//
 const size_t ZPlatformGranuleSizeShift      = 21; // 2MB
-const size_t ZPlatformMaxHeapSizeShift      = 46; // 16TB
+const size_t ZPlatformHeapViews             = 3;
 const size_t ZPlatformNMethodDisarmedOffset = 4;
 const size_t ZPlatformCacheLineSize         = 64;
 
-uintptr_t    ZPlatformAddressBase();
-size_t       ZPlatformAddressOffsetBits();
-size_t       ZPlatformAddressMetadataShift();
+uintptr_t ZPlatformAddressBase();
+size_t ZPlatformAddressOffsetBits();
+size_t ZPlatformAddressMetadataShift();
 
 #endif // CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad	Tue Oct 29 19:49:55 2019 -0700
@@ -24,155 +24,244 @@
 source_hpp %{
 
 #include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
 
 %}
 
 source %{
 
-#include "gc/z/zBarrierSetAssembler.hpp"
+static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
+  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
+  __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+  __ andr(tmp, tmp, ref);
+  __ cbnz(tmp, *stub->entry());
+  __ bind(*stub->continuation());
+}
 
-static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, 
-                                    Register base, int index, int scale, 
-                                    int disp, bool weak) {
-  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
-                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
-
-  if (index == -1) {
-    if (disp != 0) {
-      __ lea(dst, Address(base, disp));
-    } else {
-       __ mov(dst, base);
-    }
-  } else {
-    Register index_reg = as_Register(index);
-    if (disp == 0) {
-      __ lea(dst, Address(base, index_reg, Address::lsl(scale)));
-    } else {
-      __ lea(dst, Address(base, disp));
-      __ lea(dst, Address(dst, index_reg, Address::lsl(scale)));
-    }
-  }
-
-  __ far_call(RuntimeAddress(stub));
+static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
+  __ b(*stub->entry());
+  __ bind(*stub->continuation());
 }
 
 %}
 
-//
-// Execute ZGC load barrier (strong) slow path
-//
-instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr,
-    vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
-    vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
-    vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
-    vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
-    vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
-    vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
-    vRegD_V30 v30, vRegD_V31 v31) %{
-  match(Set dst (LoadBarrierSlowReg src dst));
-  predicate(!n->as_LoadBarrierSlowReg()->is_weak());
+// Load Pointer
+instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr)
+%{
+  match(Set dst (LoadP mem));
+  predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierStrong));
+  effect(TEMP dst, KILL cr);
 
-  effect(KILL cr,
-     KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
-     KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
-     KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
-     KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
-     KILL v29, KILL v30, KILL v31);
+  ins_cost(4 * INSN_COST);
 
-  format %{ "lea $dst, $src\n\t"
-            "call #ZLoadBarrierSlowPath" %}
+  format %{ "ldr  $dst, $mem" %}
 
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
-                            $src$$index, $src$$scale, $src$$disp, false);
+    const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+    __ ldr($dst$$Register, ref_addr);
+    if (barrier_data() != ZLoadBarrierElided) {
+      z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, false /* weak */);
+    }
   %}
-  ins_pipe(pipe_slow);
+
+  ins_pipe(iload_reg_mem);
 %}
 
-//
-// Execute ZGC load barrier (weak) slow path
-//
-instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr,
-    vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
-    vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
-    vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
-    vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
-    vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
-    vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
-    vRegD_V30 v30, vRegD_V31 v31) %{
-  match(Set dst (LoadBarrierSlowReg src dst));
-  predicate(n->as_LoadBarrierSlowReg()->is_weak());
+// Load Weak Pointer
+instruct zLoadWeakP(iRegPNoSp dst, memory mem, rFlagsReg cr)
+%{
+  match(Set dst (LoadP mem));
+  predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierWeak));
+  effect(TEMP dst, KILL cr);
 
-  effect(KILL cr,
-     KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
-     KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
-     KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
-     KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
-     KILL v29, KILL v30, KILL v31);
+  ins_cost(4 * INSN_COST);
 
-  format %{ "lea $dst, $src\n\t"
-            "call #ZLoadBarrierSlowPath" %}
+  format %{ "ldr  $dst, $mem" %}
 
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
-                            $src$$index, $src$$scale, $src$$disp, true);
+    const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+    __ ldr($dst$$Register, ref_addr);
+    z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, true /* weak */);
   %}
-  ins_pipe(pipe_slow);
+
+  ins_pipe(iload_reg_mem);
 %}
 
+// Load Pointer Volatile
+instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
+%{
+  match(Set dst (LoadP mem));
+  predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
+  effect(TEMP dst, KILL cr);
 
-// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
-// but doesn't affect output.
+  ins_cost(VOLATILE_REF_COST);
 
-instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem,
-                               iRegP oldval, iRegP newval, iRegP keepalive,
-                               rFlagsReg cr) %{
-  match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval)));
-  ins_cost(2 * VOLATILE_REF_COST);
-  effect(TEMP_DEF res, KILL cr);
-  format %{
-    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+  format %{ "ldar  $dst, $mem\t" %}
+
+  ins_encode %{
+    __ ldar($dst$$Register, $mem$$Register);
+    if (barrier_data() != ZLoadBarrierElided) {
+      z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, false /* weak */);
+    }
   %}
-  ins_encode %{
-    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
-               Assembler::xword, /*acquire*/ false, /*release*/ true,
-               /*weak*/ false, $res$$Register);
-  %}
-  ins_pipe(pipe_slow);
+
+  ins_pipe(pipe_serial);
 %}
 
-instruct z_compareAndSwapP(iRegINoSp res,
-                           indirect mem,
-                           iRegP oldval, iRegP newval, iRegP keepalive,
-                            rFlagsReg cr) %{
-
-  match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
-  match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
+instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+  effect(KILL cr, TEMP_DEF res);
 
   ins_cost(2 * VOLATILE_REF_COST);
 
-  effect(KILL cr);
+  format %{ "cmpxchg $mem, $oldval, $newval\n\t"
+            "cset    $res, EQ" %}
 
- format %{
-    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
-    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
- %}
-
- ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
-            aarch64_enc_cset_eq(res));
+  ins_encode %{
+    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+               false /* acquire */, true /* release */, false /* weak */, rscratch2);
+    __ cset($res$$Register, Assembler::EQ);
+    if (barrier_data() != ZLoadBarrierElided) {
+      Label good;
+      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+      __ andr(rscratch1, rscratch1, rscratch2);
+      __ cbz(rscratch1, good);
+      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
+      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+                 false /* acquire */, true /* release */, false /* weak */, rscratch2);
+      __ cset($res$$Register, Assembler::EQ);
+      __ bind(good);
+    }
+  %}
 
   ins_pipe(pipe_slow);
 %}
 
-
-instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev,
-                        iRegP keepalive) %{
-  match(Set prev (ZGetAndSetP mem (Binary newv keepalive)));
+instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+  predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
+  effect(KILL cr, TEMP_DEF res);
 
   ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "cmpxchg $mem, $oldval, $newval\n\t"
+           "cset    $res, EQ" %}
+
+  ins_encode %{
+    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+               true /* acquire */, true /* release */, false /* weak */, rscratch2);
+    __ cset($res$$Register, Assembler::EQ);
+    if (barrier_data() != ZLoadBarrierElided) {
+      Label good;
+      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+      __ andr(rscratch1, rscratch1, rscratch2);
+      __ cbz(rscratch1, good);
+      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
+      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+                 true /* acquire */, true /* release */, false /* weak */, rscratch2);
+      __ cset($res$$Register, Assembler::EQ);
+      __ bind(good);
+    }
+  %}
+
+  ins_pipe(pipe_slow);
+%}
+
+instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+  effect(TEMP_DEF res, KILL cr);
+
+  ins_cost(2 * VOLATILE_REF_COST);
+
+  format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
+
+  ins_encode %{
+    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
+    if (barrier_data() != ZLoadBarrierElided) {
+      Label good;
+      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+      __ andr(rscratch1, rscratch1, $res$$Register);
+      __ cbz(rscratch1, good);
+      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+                 false /* acquire */, true /* release */, false /* weak */, $res$$Register);
+      __ bind(good);
+    }
+  %}
+
+  ins_pipe(pipe_slow);
+%}
+
+instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+  predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+  effect(TEMP_DEF res, KILL cr);
+
+  ins_cost(2 * VOLATILE_REF_COST);
+
+  format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
+
+  ins_encode %{
+    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
+    if (barrier_data() != ZLoadBarrierElided) {
+      Label good;
+      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+      __ andr(rscratch1, rscratch1, $res$$Register);
+      __ cbz(rscratch1, good);
+      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+                 true /* acquire */, true /* release */, false /* weak */, $res$$Register);
+      __ bind(good);
+    }
+  %}
+
+  ins_pipe(pipe_slow);
+%}
+
+instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+  match(Set prev (GetAndSetP mem newv));
+  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+  effect(TEMP_DEF prev, KILL cr);
+
+  ins_cost(2 * VOLATILE_REF_COST);
+
   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
+
   ins_encode %{
-    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
+    __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
+    if (barrier_data() != ZLoadBarrierElided) {
+      z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
+    }
+  %}
+
+  ins_pipe(pipe_serial);
+%}
+
+instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+  match(Set prev (GetAndSetP mem newv));
+  predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
+  effect(TEMP_DEF prev, KILL cr);
+
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
+
+  ins_encode %{
+    __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
+    if (barrier_data() != ZLoadBarrierElided) {
+      z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
+    }
   %}
   ins_pipe(pipe_serial);
 %}
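
All the new z* patterns share one fast path: load the reference, AND it with the thread-local address bad mask, and branch to the out-of-line stub only when a bad bit is set; the stub heals the field and hands back the good reference before execution falls through at the continuation. Sketched below in plain C++ (the bad-bit choice and the healing function are invented for the example; this is the barrier's shape, not HotSpot's API):

    #include <cstdint>

    // Fast path: ldr dst, [mem]; andr tmp, bad_mask, dst; cbnz tmp, stub.
    static uintptr_t load_ref(volatile uintptr_t* addr, uintptr_t bad_mask,
                              uintptr_t (*slow_path)(volatile uintptr_t*, uintptr_t)) {
      uintptr_t ref = *addr;
      if ((ref & bad_mask) != 0) {
        ref = slow_path(addr, ref);   // out of line: heal *addr, return good ref
      }
      return ref;                     // stub->continuation()
    }

    // Stand-in slow path: pretend bit 0 is the bad bit and self-heal the field.
    static uintptr_t heal(volatile uintptr_t* addr, uintptr_t ref) {
      const uintptr_t good = ref & ~uintptr_t(1);
      *addr = good;
      return good;
    }

    int main() {
      volatile uintptr_t field = 0x1001;
      return load_ref(&field, 0x1, heal) == 0x1000 ? 0 : 1;
    }
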
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -2132,6 +2132,65 @@
 
   return count;
 }
+
+// Push lots of registers in the bit set supplied.  Don't push sp.
+// Return the number of words pushed
+int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
+  int words_pushed = 0;
+
+  // Scan bitset to accumulate register pairs
+  unsigned char regs[32];
+  int count = 0;
+  for (int reg = 0; reg <= 31; reg++) {
+    if (1 & bitset)
+      regs[count++] = reg;
+    bitset >>= 1;
+  }
+  regs[count++] = zr->encoding_nocheck();
+  count &= ~1;  // Only push an even number of regs
+
+  // Always pushing full 128 bit registers.
+  if (count) {
+    stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
+    words_pushed += 2;
+  }
+  for (int i = 2; i < count; i += 2) {
+    stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
+    words_pushed += 2;
+  }
+
+  assert(words_pushed == count, "oops, pushed != count");
+  return count;
+}
+
+int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
+  int words_pushed = 0;
+
+  // Scan bitset to accumulate register pairs
+  unsigned char regs[32];
+  int count = 0;
+  for (int reg = 0; reg <= 31; reg++) {
+    if (1 & bitset)
+      regs[count++] = reg;
+    bitset >>= 1;
+  }
+  regs[count++] = zr->encoding_nocheck();
+  count &= ~1;
+
+  for (int i = 2; i < count; i += 2) {
+    ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
+    words_pushed += 2;
+  }
+  if (count) {
+    ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
+    words_pushed += 2;
+  }
+
+  assert(words_pushed == count, "oops, pushed != count");
+
+  return count;
+}
+
 #ifdef ASSERT
 void MacroAssembler::verify_heapbase(const char* msg) {
 #if 0
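
push_fp and pop_fp scan the bitset into an array of register numbers, pad with a dummy register so the count is even, and then emit one 128-bit store-pair or load-pair per two registers. The pairing scan in isolation, as a runnable sketch (the filler value is illustrative):

    #include <cassert>

    // Collect the indices of set bits, pad to an even count with a filler
    // register, and return how many registers will actually be pushed.
    static int pair_up(unsigned int bitset, unsigned char regs[32]) {
      int count = 0;
      for (int reg = 0; reg <= 31; reg++) {
        if (bitset & 1)
          regs[count++] = (unsigned char)reg;
        bitset >>= 1;
      }
      regs[count++] = 31;   // zr-style filler, dropped again if not needed
      return count & ~1;    // only an even number of registers is pushed
    }

    int main() {
      unsigned char regs[32];
      // v0, v1, v5 live -> pairs (v0, v1) and (v5, filler)
      int count = pair_up((1u << 0) | (1u << 1) | (1u << 5), regs);
      assert(count == 4 && regs[0] == 0 && regs[1] == 1 && regs[2] == 5);
      return 0;
    }
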
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -442,12 +442,18 @@
   int push(unsigned int bitset, Register stack);
   int pop(unsigned int bitset, Register stack);
 
+  int push_fp(unsigned int bitset, Register stack);
+  int pop_fp(unsigned int bitset, Register stack);
+
   void mov(Register dst, Address a);
 
 public:
   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
 
+  void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
+  void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }
+
   // Push and pop everything that might be clobbered by a native
   // runtime call except rscratch1 and rscratch2.  (They are always
   // scratch, so we don't have to protect them.)  Only save the lower
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -332,9 +332,14 @@
 
   // We use jump to self as the unresolved address which the inline
   // cache code (and relocs) know about
+  // As a special case we also use the sequence movptr(r, 0); br(r);
+  // i.e. a jump to 0, when we need to leave space for a wide immediate
+  // load
 
-  // return -1 if jump to self
-  dest = (dest == (address) this) ? (address) -1 : dest;
+  // return -1 if jump to self or to 0
+  if ((dest == (address)this) || dest == 0) {
+    dest = (address) -1;
+  }
   return dest;
 }
 
@@ -356,9 +361,13 @@
 
   // We use jump to self as the unresolved address which the inline
   // cache code (and relocs) know about
+  // As a special case we also use a jump to 0 when first generating
+  // a general jump
 
-  // return -1 if jump to self
-  dest = (dest == (address) this) ? (address) -1 : dest;
+  // return -1 if jump to self or to 0
+  if ((dest == (address)this) || dest == 0) {
+    dest = (address) -1;
+  }
   return dest;
 }
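
A standalone model of the sentinel mapping in the two hunks above: a jump to
self (the unresolved inline-cache marker) or to address 0 (the wide-immediate
placeholder) is reported as -1.  Types simplified; illustrative only.

    #include <cassert>
    #include <cstdint>

    static uintptr_t jump_destination(uintptr_t insn_addr, uintptr_t dest) {
      // -1 is the "unresolved" sentinel the inline cache code expects
      if (dest == insn_addr || dest == 0)
        return (uintptr_t)-1;
      return dest;
    }

    int main() {
      assert(jump_destination(0x1000, 0x1000) == (uintptr_t)-1); // to self
      assert(jump_destination(0x1000, 0)      == (uintptr_t)-1); // to 0
      assert(jump_destination(0x1000, 0x2000) == 0x2000);        // resolved
      return 0;
    }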
 
--- a/src/hotspot/cpu/aarch64/register_aarch64.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/register_aarch64.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -230,6 +230,11 @@
     return *this;
   }
 
+  RegSet &operator-=(const RegSet aSet) {
+    *this = *this - aSet;
+    return *this;
+  }
+
   static RegSet of(Register r1) {
     return RegSet(r1);
   }
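
A toy model of the subtraction-assignment operator added above; the real
RegSet wraps a register bit mask, so -= is just mask clearing.  Illustrative
only.

    #include <cassert>
    #include <cstdint>

    struct RegSet {
      uint32_t bits;
      RegSet operator-(const RegSet o) const { return RegSet{bits & ~o.bits}; }
      RegSet& operator-=(const RegSet o) { *this = *this - o; return *this; }
    };

    int main() {
      RegSet live{0x0E};          // r1, r2, r3
      live -= RegSet{0x04};       // drop r2 from the set
      assert(live.bits == 0x0A);  // r1, r3 remain
      return 0;
    }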
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -34,6 +34,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/arm/abstractInterpreter_arm.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/arm/abstractInterpreter_arm.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -27,6 +27,7 @@
 #include "interpreter/bytecode.hpp"
 #include "interpreter/interpreter.hpp"
 #include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/method.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -774,7 +774,7 @@
   bool is_oop = type == T_OBJECT || type == T_ARRAY;
   LIR_Opr result = new_register(type);
   value.load_item();
-  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+  assert(type == T_INT || is_oop || (type == T_LONG && VM_Version::supports_ldrexd()), "unexpected type");
   LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
   __ xchg(addr, value.result(), result, tmp);
   return result;
@@ -783,7 +783,7 @@
 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
   LIR_Opr result = new_register(type);
   value.load_item();
-  assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
+  assert(type == T_INT || (type == T_LONG && VM_Version::supports_ldrexd()), "unexpected type");
   LIR_Opr tmp = new_register(type);
   __ xadd(addr, value.result(), result, tmp);
   return result;
--- a/src/hotspot/cpu/arm/compiledIC_arm.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,16 +115,7 @@
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
-  // read the value once
-  volatile intptr_t data = method_holder->data();
-  volatile address destination = jump->jump_destination();
-  assert(data == 0 || data == (intptr_t)callee(),
-         "a) MT-unsafe modification of inline cache");
-  assert(destination == (address)-1 || destination == entry,
-         "b) MT-unsafe modification of inline cache");
-#endif
+  verify_mt_safe(callee, entry, method_holder, jump);
 
   // Update stub.
   method_holder->set_data((intptr_t)callee());
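
Several platforms in this change replace an inline #ifdef ASSERT block with
the shared verify_mt_safe() helper.  A minimal model of the invariant it
checks, with plain integers standing in for Method* and code addresses;
illustrative only.

    #include <cassert>
    #include <cstdint>

    static void verify_mt_safe(intptr_t data, intptr_t callee,
                               uintptr_t destination, uintptr_t entry) {
      // The stub must be observed either unset or already set to the same
      // callee/entry -- anything else is an MT-unsafe modification.
      assert(data == 0 || data == callee);
      assert(destination == (uintptr_t)-1 || destination == entry);
    }

    int main() {
      verify_mt_safe(0, 0x1000, (uintptr_t)-1, 0x2000);  // unresolved stub
      verify_mt_safe(0x1000, 0x1000, 0x2000, 0x2000);    // resolved, same
      return 0;
    }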
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -32,6 +32,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/align.hpp"
--- a/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
 #include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/method.hpp"
 #include "runtime/frame.inline.hpp"
 #include "utilities/debug.hpp"
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -444,6 +444,9 @@
     FDIV_OPCODE   = (63u << OPCODE_SHIFT |  18u << 1),
     FDIVS_OPCODE  = (59u << OPCODE_SHIFT |  18u << 1),
     FMR_OPCODE    = (63u << OPCODE_SHIFT |  72u << 1),
+    FRIN_OPCODE   = (63u << OPCODE_SHIFT | 392u << 1),
+    FRIP_OPCODE   = (63u << OPCODE_SHIFT | 456u << 1),
+    FRIM_OPCODE   = (63u << OPCODE_SHIFT | 488u << 1),
     // These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
     // on Power7.  Do not use.
     // MFFGPR_OPCODE  = (31u << OPCODE_SHIFT | 607u << 1),
@@ -545,6 +548,9 @@
     XVMSUBADP_OPCODE=(60u << OPCODE_SHIFT |  113u << 3),
     XVNMSUBASP_OPCODE=(60u<< OPCODE_SHIFT |  209u << 3),
     XVNMSUBADP_OPCODE=(60u<< OPCODE_SHIFT |  241u << 3),
+    XVRDPI_OPCODE  = (60u << OPCODE_SHIFT |  201u << 2),
+    XVRDPIM_OPCODE = (60u << OPCODE_SHIFT |  249u << 2),
+    XVRDPIP_OPCODE = (60u << OPCODE_SHIFT |  233u << 2),
 
     // Deliver A Random Number (introduced with POWER9)
     DARN_OPCODE    = (31u << OPCODE_SHIFT |  755u << 1),
@@ -1981,6 +1987,10 @@
   inline void fmr(  FloatRegister d, FloatRegister b);
   inline void fmr_( FloatRegister d, FloatRegister b);
 
+  inline void frin( FloatRegister d, FloatRegister b);
+  inline void frip( FloatRegister d, FloatRegister b);
+  inline void frim( FloatRegister d, FloatRegister b);
+
   //  inline void mffgpr( FloatRegister d, Register b);
   //  inline void mftgpr( Register d, FloatRegister b);
   inline void cmpb(   Register a, Register s, Register b);
@@ -2241,6 +2251,9 @@
   inline void xvmsubadp(VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xvnmsubasp(VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xvnmsubadp(VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void xvrdpi(   VectorSRegister d, VectorSRegister b);
+  inline void xvrdpim(  VectorSRegister d, VectorSRegister b);
+  inline void xvrdpip(  VectorSRegister d, VectorSRegister b);
 
   // VSX Extended Mnemonics
   inline void xxspltd(  VectorSRegister d, VectorSRegister a, int x);
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -675,6 +675,10 @@
 inline void Assembler::fmr( FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(0)); }
 inline void Assembler::fmr_(FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(1)); }
 
+inline void Assembler::frin( FloatRegister d, FloatRegister b) { emit_int32( FRIN_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::frip( FloatRegister d, FloatRegister b) { emit_int32( FRIP_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::frim( FloatRegister d, FloatRegister b) { emit_int32( FRIM_OPCODE | frt(d) | frb(b) | rc(0)); }
+
 // These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
 // on Power7.  Do not use.
 //inline void Assembler::mffgpr( FloatRegister d, Register b)   { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
@@ -796,6 +800,10 @@
 inline void Assembler::xvmsubadp( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XVMSUBADP_OPCODE  | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::xvnmsubasp(VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XVNMSUBASP_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::xvnmsubadp(VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XVNMSUBADP_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+inline void Assembler::xvrdpi(    VectorSRegister d, VectorSRegister b)                  { emit_int32( XVRDPI_OPCODE  | vsrt(d) | vsrb(b)); }
+inline void Assembler::xvrdpim(   VectorSRegister d, VectorSRegister b)                  { emit_int32( XVRDPIM_OPCODE | vsrt(d) | vsrb(b)); }
+inline void Assembler::xvrdpip(   VectorSRegister d, VectorSRegister b)                  { emit_int32( XVRDPIP_OPCODE | vsrt(d) | vsrb(b)); }
+
 inline void Assembler::mtvrd(   VectorRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
 inline void Assembler::mfvrd(   Register        a, VectorRegister d)         { emit_int32( MFVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
 inline void Assembler::mtvrwz(  VectorRegister  d, Register a)               { emit_int32( MTVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
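
The new opcode constants above follow the usual PPC instruction encoding: a
6-bit primary opcode in the high bits (OPCODE_SHIFT is 26) plus an extended
opcode in a low field.  A standalone sketch of that construction; the field
layout shown is illustrative.

    #include <cassert>
    #include <cstdint>

    static const unsigned OPCODE_SHIFT = 26;  // 6-bit primary opcode field

    static uint32_t make_xform(uint32_t primary, uint32_t extended) {
      return (primary << OPCODE_SHIFT) | (extended << 1);
    }

    int main() {
      // frin: primary opcode 63, extended opcode 392, as defined above
      const uint32_t FRIN_OPCODE = make_xform(63u, 392u);
      assert(FRIN_OPCODE == ((63u << 26) | (392u << 1)));
      return 0;
    }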
--- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -178,15 +178,7 @@
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
 
-#ifdef ASSERT
-  // read the value once
-  volatile intptr_t data = method_holder->data();
-  volatile address destination = jump->jump_destination();
-  assert(data == 0 || data == (intptr_t)callee(),
-         "a) MT-unsafe modification of inline cache");
-  assert(destination == (address)-1 || destination == entry,
-         "b) MT-unsafe modification of inline cache");
-#endif
+  verify_mt_safe(callee, entry, method_holder, jump);
 
   // Update stub.
   method_holder->set_data((intptr_t)callee());
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -32,6 +32,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_ppc.hpp"
+#include "oops/klass.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/icache.hpp"
--- a/src/hotspot/cpu/ppc/ppc.ad	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/ppc.ad	Tue Oct 29 19:49:55 2019 -0700
@@ -972,6 +972,8 @@
   // To keep related declarations/definitions/uses close together,
   // we switch between source %{ }% and source_hpp %{ }% freely as needed.
 
+#include "opto/convertnode.hpp"
+
   // Returns true if Node n is followed by a MemBar node that
   // will do an acquire. If so, this node must not do the acquire
   // operation.
@@ -2272,6 +2274,7 @@
   case Op_AddVL:
   case Op_SubVL:
   case Op_MulVI:
+  case Op_RoundDoubleModeV:
     return SuperwordUseVSX;
   case Op_PopCountVI:
     return (SuperwordUseVSX && UsePopCountInstruction);
@@ -14454,6 +14457,53 @@
   ins_pipe(pipe_class_default);
 %}
 
+// Round Instructions
+instruct roundD_reg(regD dst, regD src, immI8 rmode) %{
+  match(Set dst (RoundDoubleMode src rmode));
+  format %{ "RoundDoubleMode $src,$rmode" %}
+  size(4);
+  ins_encode %{
+    switch ($rmode$$constant) {
+      case RoundDoubleModeNode::rmode_rint:
+        __ frin($dst$$FloatRegister, $src$$FloatRegister);
+        break;
+      case RoundDoubleModeNode::rmode_floor:
+        __ frim($dst$$FloatRegister, $src$$FloatRegister);
+        break;
+      case RoundDoubleModeNode::rmode_ceil:
+        __ frip($dst$$FloatRegister, $src$$FloatRegister);
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Vector Round Instructions
+instruct vround2D_reg(vecX dst, vecX src, immI8 rmode) %{
+  match(Set dst (RoundDoubleModeV src rmode));
+  predicate(n->as_Vector()->length() == 2);
+  format %{ "RoundDoubleModeV $src,$rmode" %}
+  size(4);
+  ins_encode %{
+    switch ($rmode$$constant) {
+      case RoundDoubleModeNode::rmode_rint:
+        __ xvrdpi($dst$$VectorSRegister, $src$$VectorSRegister);
+        break;
+      case RoundDoubleModeNode::rmode_floor:
+        __ xvrdpim($dst$$VectorSRegister, $src$$VectorSRegister);
+        break;
+      case RoundDoubleModeNode::rmode_ceil:
+        __ xvrdpip($dst$$VectorSRegister, $src$$VectorSRegister);
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
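
A standalone model of the rounding-mode dispatch above: the immediate rmode
selects rint, floor, or ceil, which the encodings map to frin/frim/frip
(scalar) and xvrdpi/xvrdpim/xvrdpip (vector).  The enum values here are
assumptions for illustration only.

    #include <cassert>
    #include <cmath>

    enum RoundMode { rmode_rint, rmode_floor, rmode_ceil };

    static double round_double(double v, RoundMode m) {
      switch (m) {
        case rmode_rint:  return std::rint(v);   // frin / xvrdpi
        case rmode_floor: return std::floor(v);  // frim / xvrdpim
        case rmode_ceil:  return std::ceil(v);   // frip / xvrdpip
      }
      return v;  // not reached
    }

    int main() {
      assert(round_double(2.5, rmode_floor) == 2.0);
      assert(round_double(2.5, rmode_ceil)  == 3.0);
      return 0;
    }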
 // Vector Negate Instructions
 
 instruct vneg4F_reg(vecX dst, vecX src) %{
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -34,6 +34,7 @@
 #include "interpreter/interp_masm.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -592,10 +592,10 @@
   __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
   if (pass_oop) {
     __ mr(R5_ARG3, Rexception);
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception));
   } else {
     __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception));
   }
 
   // Throw exception.
@@ -2105,7 +2105,7 @@
     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
     __ ld(R4_ARG2, 0, R18_locals);
-    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+    __ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp);
     __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
     __ cmpdi(CCR0, R4_ARG2, 0);
     __ beq(CCR0, L_done);
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -32,6 +32,7 @@
 #include "interpreter/templateInterpreter.hpp"
 #include "interpreter/templateTable.hpp"
 #include "memory/universe.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/methodHandles.hpp"
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -30,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "vmreg_ppc.inline.hpp"
--- a/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
 #include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/method.hpp"
 #include "runtime/frame.inline.hpp"
 #include "utilities/debug.hpp"
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -104,19 +104,7 @@
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
-  // A generated lambda form might be deleted from the Lambdaform
-  // cache in MethodTypeForm.  If a jit compiled lambdaform method
-  // becomes not entrant and the cache access returns null, the new
-  // resolve will lead to a new generated LambdaForm.
-  volatile intptr_t data = method_holder->data();
-  volatile address destination = jump->jump_destination();
-  assert(data == 0 || data == (intptr_t)callee() || callee->is_compiled_lambda_form(),
-         "a) MT-unsafe modification of inline cache");
-  assert(destination == (address)-1 || destination == entry,
-         "b) MT-unsafe modification of inline cache");
-#endif
+  verify_mt_safe(callee, entry, method_holder, jump);
 
   // Update stub.
   method_holder->set_data((intptr_t)callee(), relocInfo::metadata_type);
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1072,8 +1072,7 @@
 void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) {
 
   if (UseHeavyMonitors) {
-    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
-            monitor, /*check_for_exceptions=*/ true);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
     return;
   }
 
@@ -1147,8 +1146,7 @@
   // The lock has been converted into a heavy lock and hence
   // we need to get into the slow case.
   z_stg(object, obj_entry);   // Restore object entry, has been cleared above.
-  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
-          monitor,  /*check_for_exceptions=*/false);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
 
   // }
 
@@ -2095,7 +2093,7 @@
     Label jvmti_post_done;
     MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
     z_bre(jvmti_post_done);
-    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry), /*check_exceptions=*/false);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
     bind(jvmti_post_done);
   }
 }
@@ -2129,7 +2127,7 @@
     MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
     z_bre(jvmti_post_done);
     if (!native_method) push(state); // see frame::interpreter_frame_result()
-    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), /*check_exceptions=*/false);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
     if (!native_method) pop(state);
     bind(jvmti_post_done);
   }
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -33,6 +33,7 @@
 #include "interpreter/interp_masm.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "registerSaver_s390.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -30,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/klassVtable.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "vmreg_s390.inline.hpp"
--- a/src/hotspot/cpu/sparc/abstractInterpreter_sparc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/sparc/abstractInterpreter_sparc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
 #include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/method.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/frame.inline.hpp"
--- a/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,16 +104,7 @@
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
-  // read the value once
-  volatile intptr_t data = method_holder->data();
-  volatile address destination = jump->jump_destination();
-  assert(data == 0 || data == (intptr_t)callee(),
-         "a) MT-unsafe modification of inline cache");
-  assert(destination == (address)-1 || destination == entry,
-         "b) MT-unsafe modification of inline cache");
-#endif
+  verify_mt_safe(callee, entry, method_holder, jump);
 
   // Update stub.
   method_holder->set_data((intptr_t)callee());
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -32,6 +32,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "ci/ciMethod.hpp"
 #include "interpreter/interpreter.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/frame.inline.hpp"
 
 
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -157,16 +157,7 @@
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
-  Method* old_method = reinterpret_cast<Method*>(method_holder->data());
-  address destination = jump->jump_destination();
-  assert(old_method == NULL || old_method == callee() ||
-         !old_method->method_holder()->is_loader_alive(),
-         "a) MT-unsafe modification of inline cache");
-  assert(destination == (address)-1 || destination == entry,
-         "b) MT-unsafe modification of inline cache");
-#endif
+  verify_mt_safe(callee, entry, method_holder, jump);
 
   // Update stub.
   method_holder->set_data((intptr_t)callee());
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -247,54 +247,6 @@
   __ bind(done);
 }
 
-void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
-  assert(ShenandoahCASBarrier, "should be enabled");
-  Label is_null;
-  __ testptr(dst, dst);
-  __ jcc(Assembler::zero, is_null);
-  resolve_forward_pointer_not_null(masm, dst, tmp);
-  __ bind(is_null);
-}
-
-void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
-  assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
-  // The below loads the mark word, checks if the lowest two bits are
-  // set, and if so, clear the lowest two bits and copy the result
-  // to dst. Otherwise it leaves dst alone.
-  // Implementing this is surprisingly awkward. I do it here by:
-  // - Inverting the mark word
-  // - Test lowest two bits == 0
-  // - If so, set the lowest two bits
-  // - Invert the result back, and copy to dst
-
-  bool borrow_reg = (tmp == noreg);
-  if (borrow_reg) {
-    // No free registers available. Make one useful.
-    tmp = LP64_ONLY(rscratch1) NOT_LP64(rdx);
-    if (tmp == dst) {
-      tmp = LP64_ONLY(rscratch2) NOT_LP64(rcx);
-    }
-    __ push(tmp);
-  }
-
-  assert_different_registers(dst, tmp);
-
-  Label done;
-  __ movptr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
-  __ notptr(tmp);
-  __ testb(tmp, markWord::marked_value);
-  __ jccb(Assembler::notZero, done);
-  __ orptr(tmp, markWord::marked_value);
-  __ notptr(tmp);
-  __ mov(dst, tmp);
-  __ bind(done);
-
-  if (borrow_reg) {
-    __ pop(tmp);
-  }
-}
-
-
 void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) {
   assert(ShenandoahLoadRefBarrier, "Should be enabled");
 
@@ -333,7 +285,7 @@
 #endif
 }
 
-void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst) {
+void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Address src) {
   if (!ShenandoahLoadRefBarrier) {
     return;
   }
@@ -341,6 +293,7 @@
   Label done;
   Label not_null;
   Label slow_path;
+  __ block_comment("load_reference_barrier_native { ");
 
   // null check
   __ testptr(dst, dst);
@@ -371,7 +324,7 @@
   __ bind(slow_path);
 
   if (dst != rax) {
-    __ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
+    __ push(rax);
   }
   __ push(rcx);
   __ push(rdx);
@@ -388,8 +341,9 @@
   __ push(r15);
 #endif
 
-  __ movptr(rdi, rax);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native), rdi);
+  assert_different_registers(dst, rsi);
+  __ lea(rsi, src);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native), dst, rsi);
 
 #ifdef _LP64
   __ pop(r15);
@@ -407,10 +361,12 @@
   __ pop(rcx);
 
   if (dst != rax) {
-    __ xchgptr(rax, dst); // Swap back obj with rax.
+    __ movptr(dst, rax);
+    __ pop(rax);
   }
 
   __ bind(done);
+  __ block_comment("load_reference_barrier_native { ");
 }
 
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
@@ -474,14 +430,43 @@
   bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
   bool keep_alive = ((decorators & AS_NO_KEEPALIVE) == 0) || is_traversal_mode;
 
+  Register result_dst = dst;
+  bool use_tmp1_for_dst = false;
+
+  if (on_oop) {
+    // We want to preserve src
+    if (dst == src.base() || dst == src.index()) {
+      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
+      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
+        dst = tmp1;
+        use_tmp1_for_dst = true;
+      } else {
+        dst = rdi;
+        __ push(dst);
+      }
+    }
+    assert_different_registers(dst, src.base(), src.index());
+  }
+
   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+
   if (on_oop) {
     if (not_in_heap && !is_traversal_mode) {
-      load_reference_barrier_native(masm, dst);
+      load_reference_barrier_native(masm, dst, src);
     } else {
       load_reference_barrier(masm, dst);
     }
 
+    if (dst != result_dst) {
+      __ movptr(result_dst, dst);
+
+      if (!use_tmp1_for_dst) {
+        __ pop(dst);
+      }
+
+      dst = result_dst;
+    }
+
     if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
       const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
       assert_different_registers(dst, tmp1, tmp_thread);
@@ -572,8 +557,9 @@
                                                 bool exchange, Register tmp1, Register tmp2) {
   assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
   assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
+  assert_different_registers(oldval, newval, tmp1, tmp2);
 
-  Label retry, done;
+  Label L_success, L_failure;
 
   // Remember oldval for retry logic below
 #ifdef _LP64
@@ -585,8 +571,10 @@
     __ movptr(tmp1, oldval);
   }
 
-  // Step 1. Try to CAS with given arguments. If successful, then we are done,
-  // and can safely return.
+  // Step 1. Fast-path.
+  //
+  // Try to CAS with given arguments. If successful, then we are done.
+
   if (os::is_MP()) __ lock();
 #ifdef _LP64
   if (UseCompressedOops) {
@@ -596,21 +584,32 @@
   {
     __ cmpxchgptr(newval, addr);
   }
-  __ jcc(Assembler::equal, done, true);
+  __ jcc(Assembler::equal, L_success);
 
   // Step 2. CAS had failed. This may be a false negative.
   //
   // The trouble comes when we compare the to-space pointer with the from-space
-  // pointer to the same object. To resolve this, it will suffice to resolve both
-  // oldval and the value from memory -- this will give both to-space pointers.
+  // pointer to the same object. To resolve this, it will suffice to resolve
+  // the value from memory -- this will give both to-space pointers.
   // If they mismatch, then it was a legitimate failure.
   //
+  // Before resorting to the resolve sequence, see if we can avoid the whole
+  // shebang with filters.
+
+  // Filter: when the offending in-memory value is NULL, the failure is definitely legitimate
+  __ testptr(oldval, oldval);
+  __ jcc(Assembler::zero, L_failure);
+
+  // Filter: when the heap is stable, the failure is definitely legitimate
 #ifdef _LP64
-  if (UseCompressedOops) {
-    __ decode_heap_oop(tmp1);
-  }
+  const Register thread = r15_thread;
+#else
+  const Register thread = tmp2;
+  __ get_thread(thread);
 #endif
-  resolve_forward_pointer(masm, tmp1);
+  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
+  __ jcc(Assembler::zero, L_failure);
 
 #ifdef _LP64
   if (UseCompressedOops) {
@@ -621,18 +620,70 @@
   {
     __ movptr(tmp2, oldval);
   }
-  resolve_forward_pointer(masm, tmp2);
 
+  // Decode offending in-memory value.
+  // Test if-forwarded
+  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
+  __ jcc(Assembler::noParity, L_failure);  // When odd number of bits, then not forwarded
+  __ jcc(Assembler::zero, L_failure);      // When it is 00, then also not forwarded
+
+  // Load and mask forwarding pointer
+  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
+  __ shrptr(tmp2, 2);
+  __ shlptr(tmp2, 2);
+
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ decode_heap_oop(tmp1); // decode for comparison
+  }
+#endif
+
+  // Now we have the forwarded offender in tmp2.
+  // Compare, and if they don't match, we have a legitimate failure
   __ cmpptr(tmp1, tmp2);
-  __ jcc(Assembler::notEqual, done, true);
+  __ jcc(Assembler::notEqual, L_failure);
 
-  // Step 3. Try to CAS again with resolved to-space pointers.
+  // Step 3. Need to fix the memory ptr before continuing.
   //
-  // Corner case: it may happen that somebody stored the from-space pointer
-  // to memory while we were preparing for retry. Therefore, we can fail again
-  // on retry, and so need to do this in loop, always resolving the failure
-  // witness.
-  __ bind(retry);
+  // At this point, we have from-space oldval in the register, and its to-space
+  // address is in tmp2. Let's try to update it into memory. We don't care if it
+  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
+  // If this fixup fails, this means somebody else beat us to it, and necessarily
+  // with to-space ptr store. We still have to do the retry, because the GC might
+  // have updated the reference for us.
+
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ encode_heap_oop(tmp2); // previously decoded at step 2.
+  }
+#endif
+
+  if (os::is_MP()) __ lock();
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ cmpxchgl(tmp2, addr);
+  } else
+#endif
+  {
+    __ cmpxchgptr(tmp2, addr);
+  }
+
+  // Step 4. Try to CAS again.
+  //
+  // This is guaranteed not to have false negatives, because oldval is definitely
+  // to-space, and the memory pointer is to-space as well. Nothing is able to store
+  // a from-space ptr into memory anymore. Make sure oldval is restored, after being
+  // garbled during retries.
+  //
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ movl(oldval, tmp2);
+  } else
+#endif
+  {
+    __ movptr(oldval, tmp2);
+  }
+
   if (os::is_MP()) __ lock();
 #ifdef _LP64
   if (UseCompressedOops) {
@@ -642,41 +693,28 @@
   {
     __ cmpxchgptr(newval, addr);
   }
-  __ jcc(Assembler::equal, done, true);
+  if (!exchange) {
+    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
+  }
 
-#ifdef _LP64
-  if (UseCompressedOops) {
-    __ movl(tmp2, oldval);
-    __ decode_heap_oop(tmp2);
-  } else
-#endif
-  {
-    __ movptr(tmp2, oldval);
-  }
-  resolve_forward_pointer(masm, tmp2);
+  // Step 5. If we need a boolean result out of CAS, set the flag appropriately,
+  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
+  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.
 
-  __ cmpptr(tmp1, tmp2);
-  __ jcc(Assembler::equal, retry, true);
+  if (exchange) {
+    __ bind(L_failure);
+    __ bind(L_success);
+  } else {
+    assert(res != NULL, "need result register");
 
-  // Step 4. If we need a boolean result out of CAS, check the flag again,
-  // and promote the result. Note that we handle the flag from both the CAS
-  // itself and from the retry loop.
-  __ bind(done);
-  if (!exchange) {
-    assert(res != NULL, "need result register");
-#ifdef _LP64
-    __ setb(Assembler::equal, res);
-    __ movzbl(res, res);
-#else
-    // Need something else to clean the result, because some registers
-    // do not have byte encoding that movzbl wants. Cannot do the xor first,
-    // because it modifies the flags.
-    Label res_non_zero;
+    Label exit;
+    __ bind(L_failure);
+    __ xorptr(res, res);
+    __ jmpb(exit);
+
+    __ bind(L_success);
     __ movptr(res, 1);
-    __ jcc(Assembler::equal, res_non_zero, true);
-    __ xorptr(res, res);
-    __ bind(res_non_zero);
-#endif
+    __ bind(exit);
   }
 }
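
For reference, a pseudo-model in plain C++ of the CAS algorithm implemented
above: a failed CAS may be a false negative when memory still holds a
from-space pointer to the same object, so the failure is filtered, memory is
fixed up to the to-space pointer, and the CAS is retried once.  Atomics stand
in for the generated code and forwarding is modeled with a toy tag bit;
illustrative only, not the HotSpot implementation.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    typedef uintptr_t oop;

    // Toy forwarding: the low bit marks a from-space alias of an object.
    static oop resolve_forwardee(oop v) { return v & ~(oop)1; }

    static bool cas_oop(std::atomic<oop>& addr, oop expected, oop newval) {
      oop witness = expected;
      // Step 1. Fast path: try the CAS with the given arguments.
      if (addr.compare_exchange_strong(witness, newval))
        return true;
      // Step 2. Filters: a null witness, or one that does not forward to
      // the expected value, is a legitimate failure.
      if (witness == 0 || resolve_forwardee(witness) != expected)
        return false;
      // Step 3. Fix memory up to the to-space pointer; the outcome does not
      // matter -- if someone beats us, they stored a to-space ptr as well.
      oop from_space = witness;
      addr.compare_exchange_strong(from_space, expected);
      // Step 4. Retry: no false negatives remain, since nothing can store a
      // from-space pointer anymore.
      witness = expected;
      return addr.compare_exchange_strong(witness, newval);
    }

    int main() {
      std::atomic<oop> slot(0x1001);          // from-space alias of 0x1000
      assert(cas_oop(slot, 0x1000, 0x2000));  // succeeds after fix-up
      assert(slot.load() == 0x2000);
      return 0;
    }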
 
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -55,9 +55,6 @@
                                     bool tosca_live,
                                     bool expand_call);
 
-  void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
-  void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
-
   void load_reference_barrier_not_null(MacroAssembler* masm, Register dst);
 
   void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp);
@@ -76,7 +73,7 @@
 #endif
 
   void load_reference_barrier(MacroAssembler* masm, Register dst);
-  void load_reference_barrier_native(MacroAssembler* masm, Register dst);
+  void load_reference_barrier_native(MacroAssembler* masm, Register dst, Address src);
 
   void cmpxchg_oop(MacroAssembler* masm,
                    Register res, Address addr, Register oldval, Register newval,
--- a/src/hotspot/cpu/x86/gc/z/zArguments_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zArguments_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -23,20 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/z/zArguments.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/debug.hpp"
 
 void ZArguments::initialize_platform() {
-#ifdef COMPILER2
-  // The C2 barrier slow path expects vector registers to be least
-  // 16 bytes wide, which is the minimum width available on all
-  // x86-64 systems. However, the user could have speficied a lower
-  // number on the command-line, in which case we print a warning
-  // and raise it to 16.
-  if (MaxVectorSize < 16) {
-    warning("ZGC requires MaxVectorSize to be at least 16");
-    FLAG_SET_DEFAULT(MaxVectorSize, 16);
-  }
-#endif
+  // Does nothing
 }
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -24,22 +24,22 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "code/codeBlob.hpp"
+#include "code/vmreg.inline.hpp"
 #include "gc/z/zBarrier.inline.hpp"
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zBarrierSetRuntime.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
 #ifdef COMPILER1
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "gc/z/c1/zBarrierSetC1.hpp"
 #endif // COMPILER1
-
-ZBarrierSetAssembler::ZBarrierSetAssembler() :
-    _load_barrier_slow_stub(),
-    _load_barrier_weak_slow_stub() {}
+#ifdef COMPILER2
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif // COMPILER2
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
@@ -344,137 +344,327 @@
 
 #endif // COMPILER1
 
-#undef __
-#define __ cgen->assembler()->
+#ifdef COMPILER2
 
-// Generates a register specific stub for calling
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-//
-// The raddr register serves as both input and output for this stub. When the stub is
-// called the raddr register contains the object field address (oop*) where the bad oop
-// was loaded from, which caused the slow path to be taken. On return from the stub the
-// raddr register contains the good/healed oop returned from
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
-  // Don't generate stub for invalid registers
-  if (raddr == rsp || raddr == r15) {
-    return NULL;
+OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
+  if (!OptoReg::is_reg(opto_reg)) {
+    return OptoReg::Bad;
   }
 
-  // Create stub name
-  char name[64];
-  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
-  os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
-
-  __ align(CodeEntryAlignment);
-  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
-  address start = __ pc();
-
-  // Save live registers
-  if (raddr != rax) {
-    __ push(rax);
-  }
-  if (raddr != rcx) {
-    __ push(rcx);
-  }
-  if (raddr != rdx) {
-    __ push(rdx);
-  }
-  if (raddr != rsi) {
-    __ push(rsi);
-  }
-  if (raddr != rdi) {
-    __ push(rdi);
-  }
-  if (raddr != r8) {
-    __ push(r8);
-  }
-  if (raddr != r9) {
-    __ push(r9);
-  }
-  if (raddr != r10) {
-    __ push(r10);
-  }
-  if (raddr != r11) {
-    __ push(r11);
+  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+  if (vm_reg->is_XMMRegister()) {
+    opto_reg &= ~15;
+    switch (node->ideal_reg()) {
+      case Op_VecX:
+        opto_reg |= 2;
+        break;
+      case Op_VecY:
+        opto_reg |= 4;
+        break;
+      case Op_VecZ:
+        opto_reg |= 8;
+        break;
+      default:
+        opto_reg |= 1;
+        break;
+    }
   }
 
-  // Setup arguments
-  if (raddr != c_rarg1) {
-    __ movq(c_rarg1, raddr);
-  }
-  __ movq(c_rarg0, Address(raddr, 0));
+  return opto_reg;
+}
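
A small model of the liveness encoding used here: for XMM registers the low
four bits of an opto_reg record how much of the register is live (1 =
scalar, 2 = 16-byte X, 4 = 32-byte Y, 8 = 64-byte Z), and xmm_slot_size()
below recovers the byte size as (opto_reg & 15) << 3.  Illustrative only.

    #include <cassert>

    static int encode(int opto_reg, int kind_bits) {
      return (opto_reg & ~15) | kind_bits;  // as in refine_register above
    }

    static int slot_size(int opto_reg) {
      return (opto_reg & 15) << 3;  // live-size bits -> bytes
    }

    int main() {
      assert(slot_size(encode(16, 1)) == 8);   // scalar: 8 live bytes
      assert(slot_size(encode(16, 2)) == 16);  // VecX:  16 live bytes
      assert(slot_size(encode(16, 4)) == 32);  // VecY:  32 live bytes
      assert(slot_size(encode(16, 8)) == 64);  // VecZ:  64 live bytes
      return 0;
    }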
 
-  // Call barrier function
-  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
+extern int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
+                            int stack_offset, int reg, uint ireg, outputStream* st);
 
-  // Move result returned in rax to raddr, if needed
-  if (raddr != rax) {
-    __ movq(raddr, rax);
+#undef __
+#define __ _masm->
+
+class ZSaveLiveRegisters {
+private:
+  struct XMMRegisterData {
+    XMMRegister _reg;
+    int         _size;
+
+    // Used by GrowableArray::find()
+    bool operator == (const XMMRegisterData& other) {
+      return _reg == other._reg;
+    }
+  };
+
+  MacroAssembler* const          _masm;
+  GrowableArray<Register>        _gp_registers;
+  GrowableArray<XMMRegisterData> _xmm_registers;
+  int                            _spill_size;
+  int                            _spill_offset;
+
+  static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
+    if (left->_size == right->_size) {
+      return 0;
+    }
+
+    return (left->_size < right->_size) ? -1 : 1;
   }
 
-  // Restore saved registers
-  if (raddr != r11) {
-    __ pop(r11);
-  }
-  if (raddr != r10) {
-    __ pop(r10);
-  }
-  if (raddr != r9) {
-    __ pop(r9);
-  }
-  if (raddr != r8) {
-    __ pop(r8);
-  }
-  if (raddr != rdi) {
-    __ pop(rdi);
-  }
-  if (raddr != rsi) {
-    __ pop(rsi);
-  }
-  if (raddr != rdx) {
-    __ pop(rdx);
-  }
-  if (raddr != rcx) {
-    __ pop(rcx);
-  }
-  if (raddr != rax) {
-    __ pop(rax);
+  static int xmm_slot_size(OptoReg::Name opto_reg) {
+    // The low order 4 bits denote how much of the XMM register is live
+    return (opto_reg & 15) << 3;
   }
 
-  __ ret(0);
+  static uint xmm_ideal_reg_for_size(int reg_size) {
+    switch (reg_size) {
+    case 8:
+      return Op_VecD;
+    case 16:
+      return Op_VecX;
+    case 32:
+      return Op_VecY;
+    case 64:
+      return Op_VecZ;
+    default:
+      fatal("Invalid register size %d", reg_size);
+      return 0;
+    }
+  }
 
-  return start;
+  bool xmm_needs_vzeroupper() const {
+    return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
+  }
+
+  void xmm_register_save(const XMMRegisterData& reg_data) {
+    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
+    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
+    _spill_offset -= reg_data._size;
+    vec_spill_helper(__ code(), false /* do_size */, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+  }
+
+  void xmm_register_restore(const XMMRegisterData& reg_data) {
+    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
+    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
+    vec_spill_helper(__ code(), false /* do_size */, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+    _spill_offset += reg_data._size;
+  }
+
+  void gp_register_save(Register reg) {
+    _spill_offset -= 8;
+    __ movq(Address(rsp, _spill_offset), reg);
+  }
+
+  void gp_register_restore(Register reg) {
+    __ movq(reg, Address(rsp, _spill_offset));
+    _spill_offset += 8;
+  }
+
+  void initialize(ZLoadBarrierStubC2* stub) {
+    // Create mask of caller saved registers that need to
+    // be saved/restored if live
+    RegMask caller_saved;
+    caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
+    caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));
+
+    // Create mask of live registers
+    RegMask live = stub->live();
+    if (stub->tmp() != noreg) {
+      live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
+    }
+
+    int gp_spill_size = 0;
+    int xmm_spill_size = 0;
+
+    // Record registers that need to be saved/restored
+    while (live.is_NotEmpty()) {
+      const OptoReg::Name opto_reg = live.find_first_elem();
+      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+
+      live.Remove(opto_reg);
+
+      if (vm_reg->is_Register()) {
+        if (caller_saved.Member(opto_reg)) {
+          _gp_registers.append(vm_reg->as_Register());
+          gp_spill_size += 8;
+        }
+      } else if (vm_reg->is_XMMRegister()) {
+        // We encode in the low order 4 bits of the opto_reg how large a part of the register is live
+        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
+        const int reg_size = xmm_slot_size(opto_reg);
+        const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
+        const int reg_index = _xmm_registers.find(reg_data);
+        if (reg_index == -1) {
+          // Not previously appended
+          _xmm_registers.append(reg_data);
+          xmm_spill_size += reg_size;
+        } else {
+          // Previously appended, update size
+          const int reg_size_prev = _xmm_registers.at(reg_index)._size;
+          if (reg_size > reg_size_prev) {
+            _xmm_registers.at_put(reg_index, reg_data);
+            xmm_spill_size += reg_size - reg_size_prev;
+          }
+        }
+      } else {
+        fatal("Unexpected register type");
+      }
+    }
+
+    // Sort by size, largest first
+    _xmm_registers.sort(xmm_compare_register_size);
+
+    // Stack pointer must be 16 bytes aligned for the call
+    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size, 16);
+  }
+
+public:
+  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+      _masm(masm),
+      _gp_registers(),
+      _xmm_registers(),
+      _spill_size(0),
+      _spill_offset(0) {
+
+    //
+    // Stack layout after registers have been spilled:
+    //
+    // | ...            | original rsp, 16 bytes aligned
+    // ------------------
+    // | zmm0 high      |
+    // | ...            |
+    // | zmm0 low       | 16 bytes aligned
+    // | ...            |
+    // | ymm1 high      |
+    // | ...            |
+    // | ymm1 low       | 16 bytes aligned
+    // | ...            |
+    // | xmmN high      |
+    // | ...            |
+    // | xmmN low       | 8 bytes aligned
+    // | reg0           | 8 bytes aligned
+    // | reg1           |
+    // | ...            |
+    // | regN           | new rsp, if 16 bytes aligned
+    // | <padding>      | else new rsp, 16 bytes aligned
+    // ------------------
+    //
+
+    // Figure out what registers to save/restore
+    initialize(stub);
+
+    // Allocate stack space
+    if (_spill_size > 0) {
+      __ subptr(rsp, _spill_size);
+    }
+
+    // Save XMM/YMM/ZMM registers
+    for (int i = 0; i < _xmm_registers.length(); i++) {
+      xmm_register_save(_xmm_registers.at(i));
+    }
+
+    if (xmm_needs_vzeroupper()) {
+      __ vzeroupper();
+    }
+
+    // Save general purpose registers
+    for (int i = 0; i < _gp_registers.length(); i++) {
+      gp_register_save(_gp_registers.at(i));
+    }
+  }
+
+  ~ZSaveLiveRegisters() {
+    // Restore general purpose registers
+    for (int i = _gp_registers.length() - 1; i >= 0; i--) {
+      gp_register_restore(_gp_registers.at(i));
+    }
+
+    __ vzeroupper();
+
+    // Restore XMM/YMM/ZMM registers
+    for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
+      xmm_register_restore(_xmm_registers.at(i));
+    }
+
+    // Free stack space
+    if (_spill_size > 0) {
+      __ addptr(rsp, _spill_size);
+    }
+  }
+};
+
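
A worked model of the spill accounting ZSaveLiveRegisters performs: sum the
general-purpose and vector spill sizes, then round up to 16 bytes so the
stack pointer stays aligned for the call.  align_up() mirrors the HotSpot
utility of the same name; the register counts are illustrative.

    #include <cassert>
    #include <cstddef>

    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t gp_spill_size  = 3 * 8;    // three 64-bit registers live
      const size_t xmm_spill_size = 16 + 32;  // one xmm and one ymm live
      const size_t spill = align_up(gp_spill_size + xmm_spill_size, 16);
      assert(spill == 80);  // 72 bytes rounded up to a 16-byte multiple
      return 0;
    }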
+class ZSetupArguments {
+private:
+  MacroAssembler* const _masm;
+  const Register        _ref;
+  const Address         _ref_addr;
+
+public:
+  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+      _masm(masm),
+      _ref(stub->ref()),
+      _ref_addr(stub->ref_addr()) {
+
+    // Setup arguments
+    if (_ref_addr.base() == noreg) {
+      // No self healing
+      if (_ref != c_rarg0) {
+        __ movq(c_rarg0, _ref);
+      }
+      __ xorq(c_rarg1, c_rarg1);
+    } else {
+      // Self healing
+      if (_ref == c_rarg0) {
+        __ lea(c_rarg1, _ref_addr);
+      } else if (_ref != c_rarg1) {
+        __ lea(c_rarg1, _ref_addr);
+        __ movq(c_rarg0, _ref);
+      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
+        __ movq(c_rarg0, _ref);
+        __ lea(c_rarg1, _ref_addr);
+      } else {
+        __ xchgq(c_rarg0, c_rarg1);
+        if (_ref_addr.base() == c_rarg0) {
+          __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
+        } else if (_ref_addr.index() == c_rarg0) {
+          __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
+        } else {
+          ShouldNotReachHere();
+        }
+      }
+    }
+  }
+
+  ~ZSetupArguments() {
+    // Transfer result
+    if (_ref != rax) {
+      __ movq(_ref, rax);
+    }
+  }
+};
+
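
A minimal model of the clobber-avoidance ZSetupArguments handles above:
moving {ref -> c_rarg0, &ref_addr -> c_rarg1} must never read a source after
it has been overwritten, so the one overlapping case is resolved with a
swap, like the xchgq above.  Illustrative only.

    #include <cassert>
    #include <utility>

    struct Regs { long arg0, arg1; };

    // Hard case: ref lives in arg1 while the address is based on arg0.
    // Either plain move order would clobber a source, so swap first and
    // rebuild the address from the moved base (the lea after xchgq above).
    static void setup(Regs& r, long disp) {
      std::swap(r.arg0, r.arg1);  // ref now in arg0, old base in arg1
      r.arg1 = r.arg1 + disp;     // lea arg1, [moved base + disp]
    }

    int main() {
      Regs r = { 0x1000 /* base */, 0x42 /* ref */ };
      setup(r, 16);
      assert(r.arg0 == 0x42 && r.arg1 == 0x1010);
      return 0;
    }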
+#undef __
+#define __ masm->
+
+void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
+  BLOCK_COMMENT("ZLoadBarrierStubC2");
+
+  // Stub entry
+  __ bind(*stub->entry());
+
+  {
+    ZSaveLiveRegisters save_live_registers(masm, stub);
+    ZSetupArguments setup_arguments(masm, stub);
+    __ call(RuntimeAddress(stub->slow_path()));
+  }
+
+  // Stub exit
+  __ jmp(*stub->continuation());
 }
 
 #undef __
 
-static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
-  const int nregs = RegisterImpl::number_of_registers;
-  const int code_size = nregs * 128; // Rough estimate of code size
-
-  ResourceMark rm;
-
-  CodeBuffer buf(BufferBlob::create(label, code_size));
-  StubCodeGenerator cgen(&buf);
-
-  for (int i = 0; i < nregs; i++) {
-    const Register reg = as_Register(i);
-    stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
-  }
-}
-
-void ZBarrierSetAssembler::barrier_stubs_init() {
-  barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
-  barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
-}
-
-address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
-  return _load_barrier_slow_stub[reg->encoding()];
-}
-
-address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
-  return _load_barrier_weak_slow_stub[reg->encoding()];
-}
+#endif // COMPILER2
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -24,6 +24,14 @@
 #ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
 #define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
 
+#include "code/vmreg.hpp"
+#include "oops/accessDecorators.hpp"
+#ifdef COMPILER2
+#include "opto/optoreg.hpp"
+#endif // COMPILER2
+
+class MacroAssembler;
+
 #ifdef COMPILER1
 class LIR_Assembler;
 class LIR_OprDesc;
@@ -32,14 +40,13 @@
 class ZLoadBarrierStubC1;
 #endif // COMPILER1
 
+#ifdef COMPILER2
+class Node;
+class ZLoadBarrierStubC2;
+#endif // COMPILER2
+
 class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
-private:
-  address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
-  address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
-
 public:
-  ZBarrierSetAssembler();
-
   virtual void load_at(MacroAssembler* masm,
                        DecoratorSet decorators,
                        BasicType type,
@@ -82,10 +89,13 @@
                                              DecoratorSet decorators) const;
 #endif // COMPILER1
 
-  virtual void barrier_stubs_init();
+#ifdef COMPILER2
+  OptoReg::Name refine_register(const Node* node,
+                                OptoReg::Name opto_reg);
 
-  address load_barrier_slow_stub(Register reg);
-  address load_barrier_weak_slow_stub(Register reg);
+  void generate_c2_load_barrier_stub(MacroAssembler* masm,
+                                     ZLoadBarrierStubC2* stub) const;
+#endif // COMPILER2
 };
 
 #endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -40,7 +40,7 @@
 //  +--------------------------------+ 0x0000014000000000 (20TB)
 //  |         Remapped View          |
 //  +--------------------------------+ 0x0000010000000000 (16TB)
-//  |     (Reserved, but unused)     |
+//  .                                .
 //  +--------------------------------+ 0x00000c0000000000 (12TB)
 //  |         Marked1 View           |
 //  +--------------------------------+ 0x0000080000000000 (8TB)
@@ -75,7 +75,7 @@
 //  +--------------------------------+ 0x0000280000000000 (40TB)
 //  |         Remapped View          |
 //  +--------------------------------+ 0x0000200000000000 (32TB)
-//  |     (Reserved, but unused)     |
+//  .                                .
 //  +--------------------------------+ 0x0000180000000000 (24TB)
 //  |         Marked1 View           |
 //  +--------------------------------+ 0x0000100000000000 (16TB)
@@ -110,7 +110,7 @@
 //  +--------------------------------+ 0x0000500000000000 (80TB)
 //  |         Remapped View          |
 //  +--------------------------------+ 0x0000400000000000 (64TB)
-//  |     (Reserved, but unused)     |
+//  .                                .
 //  +--------------------------------+ 0x0000300000000000 (48TB)
 //  |         Marked1 View           |
 //  +--------------------------------+ 0x0000200000000000 (32TB)
@@ -142,8 +142,7 @@
 size_t ZPlatformAddressOffsetBits() {
   const size_t min_address_offset_bits = 42; // 4TB
   const size_t max_address_offset_bits = 44; // 16TB
-  const size_t virtual_to_physical_ratio = 7; // 7:1
-  const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * virtual_to_physical_ratio);
+  const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
   const size_t address_offset_bits = log2_intptr(address_offset);
   return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
 }
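
A worked model of the clamped computation above: round the heap's virtual
footprint up to a power of two, take log2, and clamp to the supported 42..44
bit range.  The old 7:1 ratio is assumed to carry over unchanged into
ZVirtualToPhysicalRatio; illustrative only, on a 64-bit build.

    #include <cassert>
    #include <cstddef>

    static size_t round_up_power_of_2(size_t v) {
      size_t r = 1;
      while (r < v) r <<= 1;
      return r;
    }

    static size_t log2_floor(size_t v) {
      size_t n = 0;
      while (v >>= 1) n++;
      return n;
    }

    int main() {
      const size_t ratio    = 7;                // ZVirtualToPhysicalRatio
      const size_t max_heap = (size_t)2 << 40;  // 2TB heap
      size_t bits = log2_floor(round_up_power_of_2(max_heap * ratio));
      if (bits < 42) bits = 42;                 // min: 4TB
      if (bits > 44) bits = 44;                 // max: 16TB
      assert(bits == 44);                       // 14TB rounds up to 16TB
      return 0;
    }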
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -24,24 +24,13 @@
 #ifndef CPU_X86_GC_Z_ZGLOBALS_X86_HPP
 #define CPU_X86_GC_Z_ZGLOBALS_X86_HPP
 
-//
-// Page Allocation Tiers
-// ---------------------
-//
-//  Page Type     Page Size     Object Size Limit     Object Alignment
-//  ------------------------------------------------------------------
-//  Small         2M            <= 265K               <MinObjAlignmentInBytes>
-//  Medium        32M           <= 4M                 4K
-//  Large         X*M           > 4M                  2M
-//  ------------------------------------------------------------------
-//
 const size_t ZPlatformGranuleSizeShift      = 21; // 2MB
-const size_t ZPlatformMaxHeapSizeShift      = 46; // 16TB
+const size_t ZPlatformHeapViews             = 3;
 const size_t ZPlatformNMethodDisarmedOffset = 4;
 const size_t ZPlatformCacheLineSize         = 64;
 
-uintptr_t    ZPlatformAddressBase();
-size_t       ZPlatformAddressOffsetBits();
-size_t       ZPlatformAddressMetadataShift();
+uintptr_t ZPlatformAddressBase();
+size_t ZPlatformAddressOffsetBits();
+size_t ZPlatformAddressMetadataShift();
 
 #endif // CPU_X86_GC_Z_ZGLOBALS_X86_HPP
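
ZPlatformHeapViews = 3 replaces the per-platform ZPlatformMaxHeapSizeShift: sizing is now expressed as a count of heap views (the Marked0, Marked1 and Remapped bands in the layout diagrams above), each of which maps the same physical memory at a different virtual base. A minimal model of that multi-mapping, with bases taken from the smallest layout diagram; the mapping itself is only sketched:

    #include <cstdint>
    #include <cstdio>

    static const int      kHeapViews = 3;   // ZPlatformHeapViews
    static const uint64_t kViewBase[kHeapViews] = {
      uint64_t(4)  << 40,   // Marked0  view starts at 4TB
      uint64_t(8)  << 40,   // Marked1  view starts at 8TB
      uint64_t(16) << 40    // Remapped view starts at 16TB
    };

    // The same heap offset is reachable through every view; ZGC encodes
    // GC state in the choice of view a reference points into.
    static uint64_t view_address(int view, uint64_t heap_offset) {
      return kViewBase[view] + heap_offset;
    }

    int main() {
      for (int v = 0; v < kHeapViews; v++) {
        printf("view %d: %#llx\n", v,
               (unsigned long long)view_address(v, 0x1000));
      }
      return 0;
    }
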
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Tue Oct 29 19:49:55 2019 -0700
@@ -24,190 +24,144 @@
 source_hpp %{
 
 #include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
 
 %}
 
 source %{
 
-#include "gc/z/zBarrierSetAssembler.hpp"
+static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
+  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
+  __ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+  __ jcc(Assembler::notZero, *stub->entry());
+  __ bind(*stub->continuation());
+}
 
-static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
-  assert(dst != rsp, "Invalid register");
-  assert(dst != r15, "Invalid register");
-
-  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
-                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
-  __ lea(dst, src);
-  __ call(RuntimeAddress(stub));
+static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
+  __ jmp(*stub->entry());
+  __ bind(*stub->continuation());
 }
 
 %}
 
-// For XMM and YMM enabled processors
-instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
-                                      rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
-                                      rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                      rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                                      rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
-  match(Set dst (LoadBarrierSlowReg src dst));
-  predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());
+// Load Pointer
+instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
+%{
+  predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
+  match(Set dst (LoadP mem));
+  effect(KILL cr, TEMP dst);
 
-  effect(KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15);
+  ins_cost(125);
 
-  format %{ "lea $dst, $src\n\t"
-            "call #ZLoadBarrierSlowPath" %}
+  format %{ "movq     $dst, $mem" %}
 
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
+    __ movptr($dst$$Register, $mem$$Address);
+    if (barrier_data() != ZLoadBarrierElided) {
+      z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, false /* weak */);
+    }
   %}
-  ins_pipe(pipe_slow);
+
+  ins_pipe(ialu_reg_mem);
 %}
 
-// For ZMM enabled processors
-instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
-                                rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
-                                rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                                rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
-                                rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
-                                rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
-                                rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
-                                rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+// Load Weak Pointer
+instruct zLoadWeakP(rRegP dst, memory mem, rFlagsReg cr)
+%{
+  predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierWeak);
+  match(Set dst (LoadP mem));
+  effect(KILL cr, TEMP dst);
 
-  match(Set dst (LoadBarrierSlowReg src dst));
-  predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());
+  ins_cost(125);
 
-  effect(KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15,
-         KILL x16, KILL x17, KILL x18, KILL x19,
-         KILL x20, KILL x21, KILL x22, KILL x23,
-         KILL x24, KILL x25, KILL x26, KILL x27,
-         KILL x28, KILL x29, KILL x30, KILL x31);
-
-  format %{ "lea $dst, $src\n\t"
-            "call #ZLoadBarrierSlowPath" %}
+  format %{ "movq     $dst, $mem" %}
 
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
+    __ movptr($dst$$Register, $mem$$Address);
+    z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, true /* weak */);
   %}
-  ins_pipe(pipe_slow);
+
+  ins_pipe(ialu_reg_mem);
 %}
 
-// For XMM and YMM enabled processors
-instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
-                                          rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
-                                          rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                          rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                                          rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
-  match(Set dst (LoadBarrierSlowReg src dst));
-  predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());
+instruct zCompareAndExchangeP(memory mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
+  match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
+  predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+  effect(KILL cr, TEMP tmp);
 
-  effect(KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15);
-
-  format %{ "lea $dst, $src\n\t"
-            "call #ZLoadBarrierSlowPath" %}
+  format %{ "lock\n\t"
+            "cmpxchgq $newval, $mem" %}
 
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
+    if (barrier_data() != ZLoadBarrierElided) {
+      __ movptr($tmp$$Register, $oldval$$Register);
+    }
+    __ lock();
+    __ cmpxchgptr($newval$$Register, $mem$$Address);
+    if (barrier_data() != ZLoadBarrierElided) {
+      Label good;
+      __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+      __ jcc(Assembler::zero, good);
+      z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
+      __ movptr($oldval$$Register, $tmp$$Register);
+      __ lock();
+      __ cmpxchgptr($newval$$Register, $mem$$Address);
+      __ bind(good);
+    }
   %}
-  ins_pipe(pipe_slow);
+
+  ins_pipe(pipe_cmpxchg);
 %}
 
-// For ZMM enabled processors
-instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
-                                    rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
-                                    rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                    rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                                    rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
-                                    rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
-                                    rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
-                                    rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
-                                    rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+instruct zCompareAndSwapP(rRegI res, memory mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
+  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+  predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+  effect(KILL cr, KILL oldval, TEMP tmp);
 
-  match(Set dst (LoadBarrierSlowReg src dst));
-  predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());
-
-  effect(KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15,
-         KILL x16, KILL x17, KILL x18, KILL x19,
-         KILL x20, KILL x21, KILL x22, KILL x23,
-         KILL x24, KILL x25, KILL x26, KILL x27,
-         KILL x28, KILL x29, KILL x30, KILL x31);
-
-  format %{ "lea $dst, $src\n\t"
-            "call #ZLoadBarrierSlowPath" %}
+  format %{ "lock\n\t"
+            "cmpxchgq $newval, $mem\n\t"
+            "sete     $res\n\t"
+            "movzbl   $res, $res" %}
 
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
+    if (barrier_data() != ZLoadBarrierElided) {
+      __ movptr($tmp$$Register, $oldval$$Register);
+    }
+    __ lock();
+    __ cmpxchgptr($newval$$Register, $mem$$Address);
+    if (barrier_data() != ZLoadBarrierElided) {
+      Label good;
+      __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+      __ jcc(Assembler::zero, good);
+      z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
+      __ movptr($oldval$$Register, $tmp$$Register);
+      __ lock();
+      __ cmpxchgptr($newval$$Register, $mem$$Address);
+      __ bind(good);
+      __ cmpptr($tmp$$Register, $oldval$$Register);
+    }
+    __ setb(Assembler::equal, $res$$Register);
+    __ movzbl($res$$Register, $res$$Register);
   %}
-  ins_pipe(pipe_slow);
+
+  ins_pipe(pipe_cmpxchg);
 %}
 
-// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
-// but doesn't affect output.
+instruct zXChgP(memory mem, rRegP newval, rFlagsReg cr) %{
+  match(Set newval (GetAndSetP mem newval));
+  predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+  effect(KILL cr);
 
-instruct z_compareAndExchangeP(
-        memory mem_ptr,
-        rax_RegP oldval, rRegP newval, rRegP keepalive,
-        rFlagsReg cr) %{
-    predicate(VM_Version::supports_cx8());
-    match(Set oldval (ZCompareAndExchangeP (Binary mem_ptr keepalive) (Binary oldval newval)));
-    effect(KILL cr);
+  format %{ "xchgq    $newval, $mem" %}
 
-    format %{ "cmpxchgq $mem_ptr,$newval\t# "
-              "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
-    opcode(0x0F, 0xB1);
-    ins_encode(lock_prefix,
-            REX_reg_mem_wide(newval, mem_ptr),
-            OpcP, OpcS,
-            reg_mem(newval, mem_ptr)  // lock cmpxchg
-    );
-    ins_pipe( pipe_cmpxchg );
+  ins_encode %{
+    __ xchgptr($newval$$Register, $mem$$Address);
+    if (barrier_data() != ZLoadBarrierElided) {
+      z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, false /* weak */);
+    }
+  %}
+
+  ins_pipe(pipe_cmpxchg);
 %}
-
-instruct z_compareAndSwapP(rRegI res,
-                         memory mem_ptr,
-                         rax_RegP oldval, rRegP newval, rRegP keepalive,
-                         rFlagsReg cr) %{
-  predicate(VM_Version::supports_cx8());
-  match(Set res (ZCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
-  match(Set res (ZWeakCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
-  effect(KILL cr, KILL oldval);
-
-  format %{ "cmpxchgq $mem_ptr,$newval\t# "
-            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
-            "sete    $res\n\t"
-            "movzbl  $res, $res" %}
-  opcode(0x0F, 0xB1);
-  ins_encode(lock_prefix,
-          REX_reg_mem_wide(newval, mem_ptr),
-          OpcP, OpcS,
-          reg_mem(newval, mem_ptr),
-          REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
-          REX_reg_breg(res, res), // movzbl
-          Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
-  ins_pipe( pipe_cmpxchg );
-%}
-
-instruct z_xchgP( memory mem, rRegP newval, rRegP keepalive) %{
-  match(Set newval (ZGetAndSetP mem (Binary newval keepalive)));
-  format %{ "XCHGQ  $newval,[$mem]" %}
-  ins_encode %{
-    __ xchgq($newval$$Register, $mem$$Address);
-  %}
-  ins_pipe( pipe_cmpxchg );
-%}
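
All of the rewritten patterns above share one fast-path shape: perform the plain memory operation, test the loaded reference against the thread-local bad mask, and branch into a ZLoadBarrierStubC2 only when mask bits are set. A simplified C++ sketch of that shape; address_bad_mask and slow_path_heal are illustrative stand-ins, not the real ZGC implementation:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for ZThreadLocalData::address_bad_mask (assumes 64-bit).
    static uintptr_t address_bad_mask = uintptr_t(1) << 46;

    // Stand-in for the out-of-line ZLoadBarrierStubC2: "heal" the
    // reference and write it back so later loads take the fast path.
    static uintptr_t slow_path_heal(uintptr_t ref, uintptr_t* addr) {
      uintptr_t healed = ref & ~address_bad_mask;
      *addr = healed;
      return healed;
    }

    static uintptr_t load_ref(uintptr_t* addr) {
      uintptr_t ref = *addr;                    // movptr in zLoadP
      if ((ref & address_bad_mask) != 0) {      // testptr + jcc(notZero)
        ref = slow_path_heal(ref, addr);        // stub entry
      }
      return ref;                               // stub continuation
    }

    int main() {
      uintptr_t field = (uintptr_t(1) << 46) | 0x1000;
      printf("healed ref = %#zx\n", (size_t)load_ref(&field));
      return 0;
    }

The cmpxchg patterns add one wrinkle: when the expected value fails the mask test, they heal it through the slow path and retry the cmpxchg once before computing the result.
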
--- a/src/hotspot/cpu/x86/globals_x86.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/globals_x86.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -211,5 +211,15 @@
           "Use BMI2 instructions")                                          \
                                                                             \
   diagnostic(bool, UseLibmIntrinsic, true,                                  \
-          "Use Libm Intrinsics")
+          "Use Libm Intrinsics")                                            \
+                                                                            \
+  /* Minimum array size in bytes to use AVX512 intrinsics */                \
+  /* for copy, inflate and fill which don't bail out early based on any */  \
+  /* condition. When this value is set to zero compare operations like */   \
+  /* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
+  diagnostic(int, AVX3Threshold, 4096,                                      \
+             "Minimum array size in bytes to use AVX512 intrinsics"         \
+             "for copy, inflate and fill. When this value is set as zero"   \
+             "compare operations can also use AVX512 intrinsics.")          \
+          range(0, max_jint)
 #endif // CPU_X86_GLOBALS_X86_HPP
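
Restating the new flag's gating rule: copy, inflate and fill take the 512-bit path only for arrays of at least AVX3Threshold bytes, while compare-style intrinsics (compare, vectorizedMismatch, compress) use AVX-512 only when the threshold is set to zero. A small sketch of that decision; the helper names are illustrative, not HotSpot API:

    #include <cstdio>

    static const int AVX3Threshold = 4096;   // default from globals_x86.hpp

    static bool use_avx512_for_fill(int byte_count) {
      return byte_count >= AVX3Threshold;
    }

    static bool use_avx512_for_compare() {
      return AVX3Threshold == 0;
    }

    int main() {
      printf("fill 1KB with AVX-512? %d\n", use_avx512_for_fill(1024));   // 0
      printf("fill 8KB with AVX-512? %d\n", use_avx512_for_fill(8192));   // 1
      printf("compare with AVX-512?  %d\n", use_avx512_for_compare());    // 0
      return 0;
    }
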
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -824,11 +824,13 @@
 }
 
 void MacroAssembler::stop(const char* msg) {
-  address rip = pc();
-  pusha(); // get regs on stack
+  if (ShowMessageBoxOnError) {
+    address rip = pc();
+    pusha(); // get regs on stack
+    lea(c_rarg1, InternalAddress(rip));
+    movq(c_rarg2, rsp); // pass pointer to regs array
+  }
   lea(c_rarg0, ExternalAddress((address) msg));
-  lea(c_rarg1, InternalAddress(rip));
-  movq(c_rarg2, rsp); // pass pointer to regs array
   andq(rsp, -16); // align stack as required by ABI
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
   hlt();
@@ -6350,7 +6352,7 @@
   movptr(result, str1);
   if (UseAVX >= 2) {
     cmpl(cnt1, stride);
-    jcc(Assembler::less, SCAN_TO_CHAR_LOOP);
+    jcc(Assembler::less, SCAN_TO_CHAR);
     cmpl(cnt1, 2*stride);
     jcc(Assembler::less, SCAN_TO_8_CHAR_INIT);
     movdl(vec1, ch);
@@ -6377,10 +6379,8 @@
   }
   bind(SCAN_TO_8_CHAR);
   cmpl(cnt1, stride);
-  if (UseAVX >= 2) {
-    jcc(Assembler::less, SCAN_TO_CHAR);
-  } else {
-    jcc(Assembler::less, SCAN_TO_CHAR_LOOP);
+  jcc(Assembler::less, SCAN_TO_CHAR);
+  if (UseAVX < 2) {
     movdl(vec1, ch);
     pshuflw(vec1, vec1, 0x00);
     pshufd(vec1, vec1, 0);
@@ -6593,7 +6593,7 @@
     bind(COMPARE_WIDE_VECTORS_LOOP);
 
 #ifdef _LP64
-    if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
+    if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
       cmpl(cnt2, stride2x2);
       jccb(Assembler::below, COMPARE_WIDE_VECTORS_LOOP_AVX2);
       testl(cnt2, stride2x2-1);   // cnt2 holds the vector count
@@ -6853,7 +6853,7 @@
   testl(len, len);
   jcc(Assembler::zero, FALSE_LABEL);
 
-  if ((UseAVX > 2) && // AVX512
+  if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
 
@@ -6926,7 +6926,7 @@
   } else {
     movl(result, len); // copy
 
-    if (UseAVX == 2 && UseSSE >= 2) {
+    if (UseAVX >= 2 && UseSSE >= 2) {
       // With AVX2, use 32-byte vector compare
       Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
 
@@ -7099,14 +7099,12 @@
     lea(ary2, Address(ary2, limit, Address::times_1));
     negptr(limit);
 
-    bind(COMPARE_WIDE_VECTORS);
-
 #ifdef _LP64
-    if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
+    if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
       Label COMPARE_WIDE_VECTORS_LOOP_AVX2, COMPARE_WIDE_VECTORS_LOOP_AVX3;
 
       cmpl(limit, -64);
-      jccb(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);
+      jcc(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);
 
       bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop
 
@@ -7139,7 +7137,7 @@
 
     }//if (VM_Version::supports_avx512vlbw())
 #endif //_LP64
-
+    bind(COMPARE_WIDE_VECTORS);
     vmovdqu(vec1, Address(ary1, limit, Address::times_1));
     vmovdqu(vec2, Address(ary2, limit, Address::times_1));
     vpxor(vec1, vec2);
@@ -7365,32 +7363,33 @@
       assert( UseSSE >= 2, "supported cpu only" );
       Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
       movdl(xtmp, value);
-      if (UseAVX > 2 && UseUnalignedLoadStores) {
+      if (UseAVX >= 2 && UseUnalignedLoadStores) {
+        Label L_check_fill_32_bytes;
+        if (UseAVX > 2) {
+          // Fill 64-byte chunks
+          Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
+
+          // If number of bytes to fill < AVX3Threshold, perform fill using AVX2
+          cmpl(count, AVX3Threshold);
+          jccb(Assembler::below, L_check_fill_64_bytes_avx2);
+
+          vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
+
+          subl(count, 16 << shift);
+          jccb(Assembler::less, L_check_fill_32_bytes);
+          align(16);
+
+          BIND(L_fill_64_bytes_loop_avx3);
+          evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
+          addptr(to, 64);
+          subl(count, 16 << shift);
+          jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
+          jmpb(L_check_fill_32_bytes);
+
+          BIND(L_check_fill_64_bytes_avx2);
+        }
         // Fill 64-byte chunks
-        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
-        vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
-
-        subl(count, 16 << shift);
-        jcc(Assembler::less, L_check_fill_32_bytes);
-        align(16);
-
-        BIND(L_fill_64_bytes_loop);
-        evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
-        addptr(to, 64);
-        subl(count, 16 << shift);
-        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
-
-        BIND(L_check_fill_32_bytes);
-        addl(count, 8 << shift);
-        jccb(Assembler::less, L_check_fill_8_bytes);
-        vmovdqu(Address(to, 0), xtmp);
-        addptr(to, 32);
-        subl(count, 8 << shift);
-
-        BIND(L_check_fill_8_bytes);
-      } else if (UseAVX == 2 && UseUnalignedLoadStores) {
-        // Fill 64-byte chunks
-        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
+        Label L_fill_64_bytes_loop;
         vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
 
         subl(count, 16 << shift);
@@ -8104,12 +8103,13 @@
   shlq(length);
   xorq(result, result);
 
-  if ((UseAVX > 2) &&
+  if ((AVX3Threshold == 0) && (UseAVX > 2) &&
       VM_Version::supports_avx512vlbw()) {
     Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
 
     cmpq(length, 64);
     jcc(Assembler::less, VECTOR32_TAIL);
+
     movq(tmp1, length);
     andq(tmp1, 0x3F);      // tail count
     andq(length, ~(0x3F)); //vector count
@@ -9566,7 +9566,7 @@
   // save length for return
   push(len);
 
-  if ((UseAVX > 2) && // AVX512
+  if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
 
@@ -9758,7 +9758,7 @@
 //   }
 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
   XMMRegister tmp1, Register tmp2) {
-  Label copy_chars_loop, done, below_threshold;
+  Label copy_chars_loop, done, below_threshold, avx3_threshold;
   // rsi: src
   // rdi: dst
   // rdx: len
@@ -9768,7 +9768,7 @@
   // rdi holds start addr of destination char[]
   // rdx holds length
   assert_different_registers(src, dst, len, tmp2);
-
+  movl(tmp2, len);
   if ((UseAVX > 2) && // AVX512
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
@@ -9780,9 +9780,11 @@
     testl(len, -16);
     jcc(Assembler::zero, below_threshold);
 
+    testl(len, -1 * AVX3Threshold);
+    jcc(Assembler::zero, avx3_threshold);
+
     // In order to use only one arithmetic operation for the main loop we use
     // this pre-calculation
-    movl(tmp2, len);
     andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
     andl(len, -32);     // vector count
     jccb(Assembler::zero, copy_tail);
@@ -9813,12 +9815,11 @@
     evmovdquw(Address(dst, 0), k2, tmp1, Assembler::AVX_512bit);
 
     jmp(done);
+    bind(avx3_threshold);
   }
   if (UseSSE42Intrinsics) {
     Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
 
-    movl(tmp2, len);
-
     if (UseAVX > 1) {
       andl(tmp2, (16 - 1));
       andl(len, -16);
@@ -9843,13 +9844,7 @@
 
       bind(below_threshold);
       bind(copy_new_tail);
-      if ((UseAVX > 2) &&
-        VM_Version::supports_avx512vlbw() &&
-        VM_Version::supports_bmi2()) {
-        movl(tmp2, len);
-      } else {
-        movl(len, tmp2);
-      }
+      movl(len, tmp2);
       andl(tmp2, 0x00000007);
       andl(len, 0xFFFFFFF8);
       jccb(Assembler::zero, copy_tail);
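
As a scalar reference for the inflate intrinsic being retuned above: byte_array_inflate widens each Latin-1 byte to a UTF-16 char, and the vector loops do the same zero-extension 16 or 32 elements per iteration. A minimal C++ equivalent:

    #include <cstdint>
    #include <cstdio>

    static void inflate(const uint8_t* src, uint16_t* dst, int len) {
      for (int i = 0; i < len; i++) {
        dst[i] = src[i];   // zero-extend, as vpmovzxbw does per lane
      }
    }

    int main() {
      const uint8_t src[5] = {'H', 'e', 'l', 'l', 'o'};
      uint16_t dst[5];
      inflate(src, dst, 5);
      printf("dst[0] = %u\n", dst[0]);   // 72
      return 0;
    }
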
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -114,7 +114,8 @@
       // short offset operators (jmp and jcc)
       char* disp = (char*) &branch[1];
       int imm8 = target - (address) &disp[1];
-      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", file, line);
+      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
+                file == NULL ? "<NULL>" : file, line);
       *disp = imm8;
     } else {
       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -34,6 +34,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
@@ -1303,6 +1304,97 @@
   }
 }
 
+// Registers that need to be saved for a runtime call
+static Register caller_saved_registers[] = {
+  rcx, rdx, rsi, rdi
+};
+
+// Save caller saved registers except r1 and r2
+static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+  int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+  for (int index = 0; index < reg_len; index ++) {
+    Register this_reg = caller_saved_registers[index];
+    if (this_reg != r1 && this_reg != r2) {
+      __ push(this_reg);
+    }
+  }
+}
+
+// Restore caller saved registers except r1 and r2
+static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+  int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+  for (int index = reg_len - 1; index >= 0; index --) {
+    Register this_reg = caller_saved_registers[index];
+    if (this_reg != r1 && this_reg != r2) {
+      __ pop(this_reg);
+    }
+  }
+}
+
+// Pin object, return pinned object or null in rax
+static void gen_pin_object(MacroAssembler* masm,
+                           Register thread, VMRegPair reg) {
+  __ block_comment("gen_pin_object {");
+
+  Label is_null;
+  Register tmp_reg = rax;
+  VMRegPair tmp(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    simple_move32(masm, reg, tmp);
+    reg = tmp;
+  } else {
+    __ movl(tmp_reg, reg.first()->as_Register());
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+
+  // Save registers that may be used by runtime call
+  Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+  save_registers_except(masm, arg, thread);
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
+    thread, reg.first()->as_Register());
+
+  // Restore saved registers
+  restore_registers_except(masm, arg, thread);
+
+  __ bind(is_null);
+  __ block_comment("} gen_pin_object");
+}
+
+// Unpin object
+static void gen_unpin_object(MacroAssembler* masm,
+                             Register thread, VMRegPair reg) {
+  __ block_comment("gen_unpin_object {");
+  Label is_null;
+
+  // temp register
+  __ push(rax);
+  Register tmp_reg = rax;
+  VMRegPair tmp(tmp_reg->as_VMReg());
+
+  simple_move32(masm, reg, tmp);
+
+  __ testptr(rax, rax);
+  __ jccb(Assembler::equal, is_null);
+
+  // Save registers that may be used by runtime call
+  Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+  save_registers_except(masm, arg, thread);
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
+    thread, rax);
+
+  // Restore saved registers
+  restore_registers_except(masm, arg, thread);
+  __ bind(is_null);
+  __ pop(rax);
+  __ block_comment("} gen_unpin_object");
+}
+
 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
 // forced.  Save down any oops in registers and describe them in an
@@ -1836,7 +1928,7 @@
 
   __ get_thread(thread);
 
-  if (is_critical_native) {
+  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
   }
@@ -1874,6 +1966,11 @@
   //
   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
 
+  // Inbound arguments that need to be pinned for critical natives
+  GrowableArray<int> pinned_args(total_in_args);
+  // Current stack slot for storing register based array argument
+  int pinned_slot = oop_handle_offset;
+
   // Mark location of rbp,
   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
 
@@ -1885,7 +1982,28 @@
     switch (in_sig_bt[i]) {
       case T_ARRAY:
         if (is_critical_native) {
-          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+          VMRegPair in_arg = in_regs[i];
+          if (Universe::heap()->supports_object_pinning()) {
+            // gen_pin_object handles save and restore
+            // of any clobbered registers
+            gen_pin_object(masm, thread, in_arg);
+            pinned_args.append(i);
+
+            // rax has pinned array
+            VMRegPair result_reg(rax->as_VMReg());
+            if (!in_arg.first()->is_stack()) {
+              assert(pinned_slot <= stack_slots, "overflow");
+              simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
+              pinned_slot += VMRegImpl::slots_per_word;
+            } else {
+              // Write back pinned value, it will be used to unpin this argument
+              __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
+            }
+            // We have the array in register, use it
+            in_arg = result_reg;
+          }
+
+          unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
           c_arg++;
           break;
         }
@@ -2078,6 +2196,26 @@
   default       : ShouldNotReachHere();
   }
 
+  // unpin pinned arguments
+  pinned_slot = oop_handle_offset;
+  if (pinned_args.length() > 0) {
+    // save return value that may be overwritten otherwise.
+    save_native_result(masm, ret_type, stack_slots);
+    for (int index = 0; index < pinned_args.length(); index ++) {
+      int i = pinned_args.at(index);
+      assert(pinned_slot <= stack_slots, "overflow");
+      if (!in_regs[i].first()->is_stack()) {
+        int offset = pinned_slot * VMRegImpl::stack_slot_size;
+        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+        pinned_slot += VMRegImpl::slots_per_word;
+      }
+      // gen_pin_object handles save and restore
+      // of any other clobbered registers
+      gen_unpin_object(masm, thread, in_regs[i]);
+    }
+    restore_native_result(masm, ret_type, stack_slots);
+  }
+
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
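
For heaps that support object pinning, the code above sidesteps the GCLocker protocol entirely: each array argument is pinned before the critical native runs and unpinned afterwards, with the native result saved across the unpin calls. A C++ pseudocode sketch of that protocol; Heap, pin_object and unpin_object are stand-ins for the Universe and SharedRuntime entry points:

    #include <cstdio>
    #include <vector>

    struct Heap {
      bool supports_object_pinning() const { return true; }
    };

    static void* pin_object(void* obj)   { return obj; }   // forbid moving obj
    static void  unpin_object(void* obj) { (void)obj; }    // allow moving again

    static long critical_native(void* array) { return array != nullptr; }

    static long call_critical(Heap& heap, void* array_arg) {
      std::vector<void*> pinned;
      if (heap.supports_object_pinning() && array_arg != nullptr) {
        array_arg = pin_object(array_arg);   // gen_pin_object
        pinned.push_back(array_arg);
      }
      long result = critical_native(array_arg);
      for (void* p : pinned) {
        unpin_object(p);                     // gen_unpin_object
      }
      return result;                         // restored native result
    }

    int main() {
      Heap heap;
      int data[4] = {1, 2, 3, 4};
      printf("result = %ld\n", call_critical(heap, data));
      return 0;
    }
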
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -41,6 +41,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -1288,30 +1288,58 @@
     if (UseUnalignedLoadStores) {
       Label L_end;
       // Copy 64-bytes per iteration
-      __ BIND(L_loop);
       if (UseAVX > 2) {
+        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;
+
+        __ BIND(L_copy_bytes);
+        __ cmpptr(qword_count, (-1 * AVX3Threshold / 8));
+        __ jccb(Assembler::less, L_above_threshold);
+        __ jmpb(L_below_threshold);
+
+        __ bind(L_loop_avx512);
         __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
         __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
-      } else if (UseAVX == 2) {
+        __ bind(L_above_threshold);
+        __ addptr(qword_count, 8);
+        __ jcc(Assembler::lessEqual, L_loop_avx512);
+        __ jmpb(L_32_byte_head);
+
+        __ bind(L_loop_avx2);
         __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
         __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
         __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
         __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
+        __ bind(L_below_threshold);
+        __ addptr(qword_count, 8);
+        __ jcc(Assembler::lessEqual, L_loop_avx2);
+
+        __ bind(L_32_byte_head);
+        __ subptr(qword_count, 4);  // sub(8) and add(4)
+        __ jccb(Assembler::greater, L_end);
       } else {
-        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
-        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
-        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
-        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
-        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
-        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
-        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
-        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
+        __ BIND(L_loop);
+        if (UseAVX == 2) {
+          __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
+          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
+          __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
+          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
+        } else {
+          __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
+          __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
+          __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
+          __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
+          __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
+          __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
+          __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
+          __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
+        }
+
+        __ BIND(L_copy_bytes);
+        __ addptr(qword_count, 8);
+        __ jcc(Assembler::lessEqual, L_loop);
+        __ subptr(qword_count, 4);  // sub(8) and add(4)
+        __ jccb(Assembler::greater, L_end);
       }
-      __ BIND(L_copy_bytes);
-      __ addptr(qword_count, 8);
-      __ jcc(Assembler::lessEqual, L_loop);
-      __ subptr(qword_count, 4);  // sub(8) and add(4)
-      __ jccb(Assembler::greater, L_end);
       // Copy trailing 32 bytes
       if (UseAVX >= 2) {
         __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@@ -1368,31 +1396,59 @@
     if (UseUnalignedLoadStores) {
       Label L_end;
       // Copy 64-bytes per iteration
-      __ BIND(L_loop);
       if (UseAVX > 2) {
+        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;
+
+        __ BIND(L_copy_bytes);
+        __ cmpptr(qword_count, (AVX3Threshold / 8));
+        __ jccb(Assembler::greater, L_above_threshold);
+        __ jmpb(L_below_threshold);
+
+        __ BIND(L_loop_avx512);
         __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
         __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
-      } else if (UseAVX == 2) {
+        __ bind(L_above_threshold);
+        __ subptr(qword_count, 8);
+        __ jcc(Assembler::greaterEqual, L_loop_avx512);
+        __ jmpb(L_32_byte_head);
+
+        __ bind(L_loop_avx2);
         __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
         __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
-        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
-        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
+        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
+        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
+        __ bind(L_below_threshold);
+        __ subptr(qword_count, 8);
+        __ jcc(Assembler::greaterEqual, L_loop_avx2);
+
+        __ bind(L_32_byte_head);
+        __ addptr(qword_count, 4);  // add(8) and sub(4)
+        __ jccb(Assembler::less, L_end);
       } else {
-        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
-        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
-        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
-        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
-        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
-        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
-        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
-        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
+        __ BIND(L_loop);
+        if (UseAVX == 2) {
+          __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
+          __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
+          __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
+          __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
+        } else {
+          __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
+          __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
+          __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
+          __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
+          __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
+          __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
+          __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
+          __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
+        }
+
+        __ BIND(L_copy_bytes);
+        __ subptr(qword_count, 8);
+        __ jcc(Assembler::greaterEqual, L_loop);
+
+        __ addptr(qword_count, 4);  // add(8) and sub(4)
+        __ jccb(Assembler::less, L_end);
       }
-      __ BIND(L_copy_bytes);
-      __ subptr(qword_count, 8);
-      __ jcc(Assembler::greaterEqual, L_loop);
-
-      __ addptr(qword_count, 4);  // add(8) and sub(4)
-      __ jccb(Assembler::less, L_end);
       // Copy trailing 32 bytes
       if (UseAVX >= 2) {
         __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
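
Both copy stubs now choose between an 8-qwords-per-iteration AVX-512 loop and the 4-qword AVX2 loop by comparing qword_count against AVX3Threshold / 8 up front. A scalar model of that dispatch, with memcpy standing in for the vector moves (assumes 8-byte long, i.e. LP64):

    #include <cstdio>
    #include <cstring>

    static const long AVX3Threshold = 4096;

    static void copy_qwords(long* to, const long* from, long qword_count) {
      long i = 0;
      if (qword_count >= AVX3Threshold / 8) {
        for (; i + 8 <= qword_count; i += 8)   // evmovdqul, AVX_512bit
          memcpy(to + i, from + i, 64);
      }
      for (; i + 4 <= qword_count; i += 4)     // vmovdqu, 32-byte chunks
        memcpy(to + i, from + i, 32);
      for (; i < qword_count; i++)             // trailing qwords
        to[i] = from[i];
    }

    int main() {
      long src[1024], dst[1024];
      for (int i = 0; i < 1024; i++) src[i] = i;
      copy_qwords(dst, src, 1024);
      printf("dst[1023] = %ld\n", dst[1023]);   // 1023
      return 0;
    }
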
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Tue Oct 29 19:49:55 2019 -0700
@@ -381,6 +381,10 @@
     __ cmpl(rax, 0xE0);
     __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
 
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+    __ movl(rax, Address(rsi, 0));
+    __ cmpl(rax, 0x50654);              // If it is Skylake
+    __ jcc(Assembler::equal, legacy_setup);
     // If UseAVX is uninitialized or is set by the user to include EVEX
     if (use_evex) {
       // EVEX setup: run in lowest evex mode
@@ -465,6 +469,11 @@
     __ cmpl(rax, 0xE0);
     __ jcc(Assembler::notEqual, legacy_save_restore);
 
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+    __ movl(rax, Address(rsi, 0));
+    __ cmpl(rax, 0x50654);              // If it is Skylake
+    __ jcc(Assembler::equal, legacy_save_restore);
+
     // If UseAVX is uninitialized or is set by the user to include EVEX
     if (use_evex) {
       // EVEX check: run in lowest evex mode
@@ -660,6 +669,9 @@
   }
   if (FLAG_IS_DEFAULT(UseAVX)) {
     FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
+    if (is_intel_family_core() && _model == CPU_MODEL_SKYLAKE && _stepping < 5) {
+      FLAG_SET_DEFAULT(UseAVX, 2);  // Set UseAVX=2 for Skylake
+    }
   } else if (UseAVX > use_avx_limit) {
     warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
     FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
@@ -1059,6 +1071,13 @@
   }
 #endif // COMPILER2 && ASSERT
 
+  if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
+    if (!is_power_of_2(AVX3Threshold)) {
+      warning("AVX3Threshold must be a power of 2");
+      FLAG_SET_DEFAULT(AVX3Threshold, 4096);
+    }
+  }
+
 #ifdef _LP64
   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
     UseMultiplyToLenIntrinsic = true;
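
The cpuid value 0x50654 tested above decodes to family 6, model 0x55, stepping 4, i.e. an early Skylake server part; the same parts are what the UseAVX default is capped at 2 for, presumably to avoid the heavy frequency reduction 512-bit code incurs there. A sketch of the resulting default selection; the helper and its arguments are illustrative:

    #include <cstdio>

    static const int CPU_MODEL_SKYLAKE = 0x55;   // from vm_version_x86.hpp

    static int default_use_avx(int use_avx_limit, bool intel_core,
                               int model, int stepping) {
      if (intel_core && model == CPU_MODEL_SKYLAKE && stepping < 5) {
        return 2;   // cap the default at AVX2 on early Skylake steppings
      }
      return use_avx_limit;
    }

    int main() {
      printf("Skylake stepping 4: UseAVX=%d\n",
             default_use_avx(3, true, CPU_MODEL_SKYLAKE, 4));   // 2
      printf("stepping 7 part:    UseAVX=%d\n",
             default_use_avx(3, true, CPU_MODEL_SKYLAKE, 7));   // 3
      return 0;
    }
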
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Tue Oct 29 19:49:55 2019 -0700
@@ -366,7 +366,7 @@
     CPU_MODEL_HASWELL_E3     = 0x3c,
     CPU_MODEL_HASWELL_E7     = 0x3f,
     CPU_MODEL_BROADWELL      = 0x3d,
-    CPU_MODEL_SKYLAKE        = CPU_MODEL_HASWELL_E3
+    CPU_MODEL_SKYLAKE        = 0x55
   };
 
   // cpuid information block.  All info derived from executing cpuid with
--- a/src/hotspot/cpu/x86/x86.ad	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/x86.ad	Tue Oct 29 19:49:55 2019 -0700
@@ -1097,138 +1097,6 @@
 reg_class_dynamic vectorz_reg(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() %} );
 reg_class_dynamic vectorz_reg_vl(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} );
 
-reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
-reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
-reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
-
-reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
-reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
-reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
-
-reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
-reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
-reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
-
-reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
-reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
-reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
-
-reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
-reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
-reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
-
-reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
-reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
-reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
-
-reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
-reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
-reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
-
-reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
-reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
-reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
-
-#ifdef _LP64
-
-reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
-reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
-reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
-
-reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
-reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
-reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
-
-reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
-reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
-reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
-
-reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
-reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
-reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
-
-reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
-reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
-reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
-
-reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
-reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
-reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
-
-reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
-reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
-reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
-
-reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
-reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
-reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
-
-reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
-reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
-reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
-
-reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
-reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
-reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
-
-reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
-reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
-reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
-
-reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
-reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
-reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
-
-reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
-reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
-reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
-
-reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
-reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
-reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
-
-reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
-reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
-reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
-
-reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
-reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
-reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
-
-reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
-reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
-reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
-
-reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
-reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
-reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
-
-reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
-reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
-reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
-
-reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
-reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
-reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
-
-reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
-reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
-reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
-
-reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
-reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
-reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
-
-reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
-reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
-reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
-
-reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
-reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
-reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
-
-#endif
-
 %}
 
 
@@ -1412,7 +1280,7 @@
     case Op_AbsVS:
     case Op_AbsVI:
     case Op_AddReductionVI:
-      if (UseSSE < 3) // requires at least SSE3
+      if (UseSSE < 3 || !VM_Version::supports_ssse3()) // requires at least SSSE3
         ret_value = false;
       break;
     case Op_MulReductionVI:
@@ -1800,8 +1668,8 @@
   return (UseAVX > 2) ? 6 : 4;
 }
 
-static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
-                            int stack_offset, int reg, uint ireg, outputStream* st) {
+int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
+                     int stack_offset, int reg, uint ireg, outputStream* st) {
   // In 64-bit VM size calculation is very complex. Emitting instructions
   // into scratch buffer is used to get size in 64-bit VM.
   LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
@@ -3861,7 +3729,7 @@
 %}
 
 instruct Repl2F_zero(vecD dst, immF0 zero) %{
-  predicate(n->as_Vector()->length() == 2 && UseAVX < 3);
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (ReplicateF zero));
   format %{ "xorps   $dst,$dst\t! replicate2F zero" %}
   ins_encode %{
@@ -3871,7 +3739,7 @@
 %}
 
 instruct Repl4F_zero(vecX dst, immF0 zero) %{
-  predicate(n->as_Vector()->length() == 4 && UseAVX < 3);
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (ReplicateF zero));
   format %{ "xorps   $dst,$dst\t! replicate4F zero" %}
   ins_encode %{
@@ -3881,7 +3749,7 @@
 %}
 
 instruct Repl8F_zero(vecY dst, immF0 zero) %{
-  predicate(n->as_Vector()->length() == 8 && UseAVX < 3);
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 0);
   match(Set dst (ReplicateF zero));
   format %{ "vxorps  $dst,$dst,$dst\t! replicate8F zero" %}
   ins_encode %{
@@ -3955,7 +3823,7 @@
 
 // Replicate double (8 byte) scalar zero to be vector
 instruct Repl2D_zero(vecX dst, immD0 zero) %{
-  predicate(n->as_Vector()->length() == 2 && UseAVX < 3);
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (ReplicateD zero));
   format %{ "xorpd   $dst,$dst\t! replicate2D zero" %}
   ins_encode %{
@@ -3965,7 +3833,7 @@
 %}
 
 instruct Repl4D_zero(vecY dst, immD0 zero) %{
-  predicate(n->as_Vector()->length() == 4 && UseAVX < 3);
+  predicate(n->as_Vector()->length() == 4 && UseAVX > 0);
   match(Set dst (ReplicateD zero));
   format %{ "vxorpd  $dst,$dst,$dst,vect256\t! replicate4D zero" %}
   ins_encode %{
@@ -4890,42 +4758,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct Repl2F_zero_evex(vecD dst, immF0 zero) %{
-  predicate(n->as_Vector()->length() == 2 && UseAVX > 2);
-  match(Set dst (ReplicateF zero));
-  format %{ "vpxor  $dst k0,$dst,$dst\t! replicate2F zero" %}
-  ins_encode %{
-    // Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl4F_zero_evex(vecX dst, immF0 zero) %{
-  predicate(n->as_Vector()->length() == 4 && UseAVX > 2);
-  match(Set dst (ReplicateF zero));
-  format %{ "vpxor  $dst k0,$dst,$dst\t! replicate4F zero" %}
-  ins_encode %{
-    // Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl8F_zero_evex(vecY dst, immF0 zero) %{
-  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
-  match(Set dst (ReplicateF zero));
-  format %{ "vpxor  $dst k0,$dst,$dst\t! replicate8F zero" %}
-  ins_encode %{
-    // Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
 instruct Repl16F_zero_evex(vecZ dst, immF0 zero) %{
   predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
   match(Set dst (ReplicateF zero));
@@ -4982,30 +4814,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct Repl2D_zero_evex(vecX dst, immD0 zero) %{
-  predicate(n->as_Vector()->length() == 2 && UseAVX > 2);
-  match(Set dst (ReplicateD zero));
-  format %{ "vpxor  $dst k0,$dst,$dst\t! replicate2D zero" %}
-  ins_encode %{
-    // Use vpxor in place of vxorpd since EVEX has a constriant on dq for vxorpd: this is a 512-bit operation
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl4D_zero_evex(vecY dst, immD0 zero) %{
-  predicate(n->as_Vector()->length() == 4 && UseAVX > 2);
-  match(Set dst (ReplicateD zero));
-  format %{ "vpxor  $dst k0,$dst,$dst\t! replicate4D zero" %}
-  ins_encode %{
-    // Use vpxor in place of vxorpd since EVEX has a constriant on dq for vxorpd: this is a 512-bit operation
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
 instruct Repl8D_zero_evex(vecZ dst, immD0 zero) %{
   predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
   match(Set dst (ReplicateD zero));
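
One subtle fix in this file: the AddReductionVI predicate previously accepted UseSSE >= 3, but the reduction is built on phaddd, which is an SSSE3 instruction rather than SSE3, hence the added supports_ssse3() check. A small intrinsics example of the same horizontal reduction (compile with -mssse3):

    #include <tmmintrin.h>   // SSSE3 intrinsics
    #include <cstdio>

    static int add_reduce4(__m128i v) {
      v = _mm_hadd_epi32(v, v);   // {a+b, c+d, a+b, c+d}
      v = _mm_hadd_epi32(v, v);   // {a+b+c+d, ...}
      return _mm_cvtsi128_si32(v);
    }

    int main() {
      __m128i v = _mm_set_epi32(4, 3, 2, 1);
      printf("sum = %d\n", add_reduce4(v));   // 10
      return 0;
    }
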
--- a/src/hotspot/cpu/x86/x86_64.ad	Tue Oct 08 17:58:39 2019 -0700
+++ b/src/hotspot/cpu/x86/x86_64.ad	Tue Oct 29 19:49:55 2019 -0700
@@ -1058,8 +1058,8 @@
 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
                           int src_hi, int dst_hi, uint ireg, outputStream* st);
 
-static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
-                            int stack_offset, int reg, uint ireg, outputStream* st);
+int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
+                     int stack_offset, int reg, uint ireg, outputStream* st);
 
 static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
                                       int dst_offset, uint ireg, outputStream* st) {
@@ -4260,200 +4260,6 @@
   %}
 %}
 
-// Operands for bound floating pointer register arguments
-operand rxmm0() %{
-  constraint(ALLOC_IN_RC(xmm0_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm1() %{
-  constraint(ALLOC_IN_RC(xmm1_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm2() %{
-  constraint(ALLOC_IN_RC(xmm2_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm3() %{
-  constraint(ALLOC_IN_RC(xmm3_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm4() %{
-  constraint(ALLOC_IN_RC(xmm4_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm5() %{
-  constraint(ALLOC_IN_RC(xmm5_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm6() %{
-  constraint(ALLOC_IN_RC(xmm6_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm7() %{
-  constraint(ALLOC_IN_RC(xmm7_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm8() %{
-  constraint(ALLOC_IN_RC(xmm8_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm9() %{
-  constraint(ALLOC_IN_RC(xmm9_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm10() %{
-  constraint(ALLOC_IN_RC(xmm10_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm11() %{
-  constraint(ALLOC_IN_RC(xmm11_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm12() %{
-  constraint(ALLOC_IN_RC(xmm12_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm13() %{
-  constraint(ALLOC_IN_RC(xmm13_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm14() %{
-  constraint(ALLOC_IN_RC(xmm14_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm15() %{
-  constraint(ALLOC_IN_RC(xmm15_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm16() %{
-  constraint(ALLOC_IN_RC(xmm16_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm17() %{
-  constraint(ALLOC_IN_RC(xmm17_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm18() %{
-  constraint(ALLOC_IN_RC(xmm18_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm19() %{
-  constraint(ALLOC_IN_RC(xmm19_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm20() %{
-  constraint(ALLOC_IN_RC(xmm20_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm21() %{
-  constraint(ALLOC_IN_RC(xmm21_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm22() %{
-  constraint(ALLOC_IN_RC(xmm22_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm23() %{
-  constraint(ALLOC_IN_RC(xmm23_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm24() %{
-  constraint(ALLOC_IN_RC(xmm24_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm25() %{
-  constraint(ALLOC_IN_RC(xmm25_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm26() %{
-  constraint(ALLOC_IN_RC(xmm26_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm27() %{
-  constraint(ALLOC_IN_RC(xmm27_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm28() %{
-  constraint(ALLOC_IN_RC(xmm28_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm29() %{
-  constraint(ALLOC_IN_RC(xmm29_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm30() %{
-  constraint(ALLOC_IN_RC(xmm30_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-operand rxmm31() %{
-  constraint(ALLOC_IN_RC(xmm31_reg));
-  match(VecX);
-  format%{%}
-  interface(REG_INTER);
-%}
-
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used to simplify
 // instruction definitions by not requiring the AD writer to specify separate
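
Each rxmm0..rxmm31 operand deleted above bound a VecX value to one specific XMM register through a single-register allocation class (ALLOC_IN_RC(xmmN_reg)); operands pinned this way were apparently needed only by the old C2 load-barrier stubs, and the redesign retires them. A rough model of a one-register class as a bitmask; all names here are hypothetical, not HotSpot's:

    #include <cstdint>
    #include <cstdio>

    // Model of ALLOC_IN_RC(xmmN_reg): a register class is a mask of the
    // registers the allocator may choose; binding an operand to one fixed
    // register means a class whose mask has exactly one bit set.
    using RegMask = uint32_t;

    constexpr RegMask single_xmm(int n) { return RegMask(1) << n; }
    constexpr RegMask any_xmm = 0xFFFFFFFFu;  // xmm0..xmm31

    int main() {
      RegMask xmm7_reg = single_xmm(7);
      printf("xmm7-only class: 0x%08X  any-xmm class: 0x%08X\n",
             (unsigned)xmm7_reg, (unsigned)any_xmm);
      return 0;
    }
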
@@ -5346,6 +5152,7 @@
 instruct loadP(rRegP dst, memory mem)
 %{
   match(Set dst (LoadP mem));
+  predicate(n->as_Load()->barrier_data() == 0);
 
   ins_cost(125); // XXX
   format %{ "movq    $dst, $mem\t# ptr" %}
@@ -7794,6 +7601,7 @@
                            rax_RegP oldval, rRegP newval,
                            rFlagsReg cr)
 %{
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
 
   format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
@@ -7845,7 +7653,7 @@
                          rax_RegP oldval, rRegP newval,
                          rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8());
+  predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
@@ -8087,7 +7895,7 @@
                          rax_RegP oldval, rRegP newval,
                          rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8());
+  predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
   match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
   effect(KILL cr);
 
@@ -8232,6 +8040,7 @@
 
 instruct xchgP( memory mem, rRegP newval) %{
   match(Set newval (GetAndSetP mem newval));
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   format %{ "XCHGQ  $newval,[$mem]" %}
   ins_encode %{
     __ xchgq($newval$$Register, $mem$$Address);
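
The same gating extends to the atomic pointer operations (StorePConditional, CompareAndSwapP/WeakCompareAndSwapP, CompareAndExchangeP, GetAndSetP): each predicate now also requires barrier_data() == 0 on the LoadStore node, and for the CAS-style rules the new condition is ANDed with the existing VM_Version::supports_cx8() check. A sketch of that combined predicate under the same hypothetical-model assumptions as above:

    #include <cstdint>

    // Combined predicate from the CAS hunks:
    //   VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0
    struct LoadStore { uint8_t barrier_data; };

    bool supports_cx8() { return true; }  // stand-in for VM_Version::supports_cx8()

    bool cas_rule_applies(const LoadStore& n) {
      return supports_cx8() && n.barrier_data == 0;
    }

    int main() {
      LoadStore plain{0}, barriered{1};
      return (cas_rule_applies(plain) && !cas_rule_applies(barriered)) ? 0 : 1;
    }
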
@@ -11974,6 +11783,7 @@
 instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
 %{
   match(Set cr (CmpP op1 (LoadP op2)));
+  predicate(n->in(2)->as_Load()->barrier_data() == 0);
 
   ins_cost(500); // XXX
   format %{ "cmpq    $op1, $op2\t# ptr" %}
@@ -11999,7 +11809,8 @@
 // and raw pointers have no anti-dependencies.
 instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
 %{
-  predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none);