changeset 58498:3e6ad3a4a05d (branch: records)

manual merge with default
author vromero
date Tue, 22 Oct 2019 20:49:40 -0400
parents a06475cac3d9 62e16c1e0f66
children 239cc1135ec1
files make/hotspot/symbols/symbols-unix src/hotspot/share/classfile/classFileParser.cpp src/hotspot/share/classfile/javaClasses.cpp src/hotspot/share/classfile/javaClasses.hpp src/hotspot/share/classfile/systemDictionary.hpp src/hotspot/share/classfile/vmSymbols.hpp src/hotspot/share/include/jvm.h src/hotspot/share/oops/instanceKlass.cpp src/hotspot/share/prims/jvm.cpp src/hotspot/share/prims/jvmtiRedefineClasses.cpp src/java.base/share/classes/module-info.java src/java.desktop/unix/native/common/awt/awt_Font.h src/jdk.compiler/share/classes/com/sun/tools/javac/code/Flags.java src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symtab.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Annotate.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Check.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Resolve.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TypeEnter.java src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassReader.java src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties src/jdk.compiler/share/classes/com/sun/tools/javac/tree/JCTree.java src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeMaker.java src/jdk.compiler/share/classes/com/sun/tools/javac/util/Names.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/Utils.java test/jdk/java/math/BigInteger/DivisionOverflow.java test/jdk/java/math/BigInteger/StringConstructorOverflow.java test/jdk/java/math/BigInteger/SymmetricRangeTests.java test/langtools/tools/javac/diags/examples.not-yet.txt
diffstat 405 files changed, 12012 insertions(+), 3547 deletions(-)
--- a/.hgtags	Tue Oct 22 16:42:23 2019 -0400
+++ b/.hgtags	Tue Oct 22 20:49:40 2019 -0400
@@ -591,3 +591,4 @@
 d29f0181ba424a95d881aba5eabf2e393abcc70f jdk-14+16
 5c83830390baafb76a1fbe33443c57620bd45fb9 jdk-14+17
 e84d8379815ba0d3e50fb096d28c25894cb50b8c jdk-14+18
+9b67dd88a9313e982ec5f710a7747161bc8f0c23 jdk-14+19
--- a/make/Bundles.gmk	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/Bundles.gmk	Tue Oct 22 20:49:40 2019 -0400
@@ -50,6 +50,7 @@
 #     files or directories may contain spaces.
 # BASE_DIRS : Base directories for the root dir in the bundle.
 # SUBDIR : Optional name of root dir in bundle.
+# OUTPUTDIR : Optionally override output dir
 SetupBundleFile = $(NamedParamsMacroTemplate)
 define SetupBundleFileBody
 
@@ -70,8 +71,11 @@
 
   $$(call SetIfEmpty, $1_UNZIP_DEBUGINFO, false)
 
-  $(BUNDLES_OUTPUTDIR)/$$($1_BUNDLE_NAME): $$($1_FILES)
+  $$(call SetIfEmpty, $1_OUTPUTDIR, $$(BUNDLES_OUTPUTDIR))
+
+  $$($1_OUTPUTDIR)/$$($1_BUNDLE_NAME): $$($1_FILES)
 	$$(call MakeTargetDir)
+	$$(call LogWarn, Creating $$($1_BUNDLE_NAME))
         # If any of the files contain a space in the file name, FindFiles
         # will have replaced it with ?. Tar does not accept that so need to
         # switch it back.
@@ -137,7 +141,7 @@
           endif
         endif
 
-  $1 += $(BUNDLES_OUTPUTDIR)/$$($1_BUNDLE_NAME)
+  $1 += $$($1_OUTPUTDIR)/$$($1_BUNDLE_NAME)
 
 endef
 
@@ -165,7 +169,7 @@
 
 ################################################################################
 
-ifneq ($(filter product-bundles legacy-bundles, $(MAKECMDGOALS)), )
+ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
 
   SYMBOLS_EXCLUDE_PATTERN := %.debuginfo %.diz %.pdb %.map
 
--- a/make/CompileInterimLangtools.gmk	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/CompileInterimLangtools.gmk	Tue Oct 22 20:49:40 2019 -0400
@@ -49,6 +49,13 @@
 TARGETS += $(patsubst %, $(BUILDTOOLS_OUTPUTDIR)/gensrc/%/module-info.java, \
     $(INTERIM_LANGTOOLS_MODULES))
 
+$(eval $(call SetupCopyFiles, COPY_PREVIEW_FEATURES, \
+    FILES := $(TOPDIR)/src/java.base/share/classes/jdk/internal/PreviewFeature.java, \
+    DEST := $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/, \
+))
+
+TARGETS += $(COPY_PREVIEW_FEATURES)
+
 ################################################################################
 # Setup the rules to build interim langtools, which is compiled by the boot
 # javac and can be run on the boot jdk. This will be used to compile the rest of
@@ -72,13 +79,15 @@
       BIN := $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules/$1.interim, \
       ADD_JAVAC_FLAGS := --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules \
           $$(INTERIM_LANGTOOLS_ADD_EXPORTS) \
+          --patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim \
+          --add-exports java.base/jdk.internal=jdk.compiler.interim \
           -Xlint:-module, \
   ))
 
   $1_DEPS_INTERIM := $$(addsuffix .interim, $$(filter \
       $$(INTERIM_LANGTOOLS_BASE_MODULES), $$(call FindTransitiveDepsForModule, $1)))
 
-  $$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d))
+  $$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d)) $(COPY_PREVIEW_FEATURES)
 
   TARGETS += $$(BUILD_$1.interim)
 endef
--- a/make/Docs.gmk	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/Docs.gmk	Tue Oct 22 20:49:40 2019 -0400
@@ -95,6 +95,7 @@
     -tag see \
     -taglet build.tools.taglet.ExtLink \
     -taglet build.tools.taglet.Incubating \
+    -taglet build.tools.taglet.Preview \
     -tagletpath $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
     $(CUSTOM_JAVADOC_TAGS) \
     #
@@ -191,26 +192,26 @@
 ################################################################################
 # Functions
 
-# Helper function for creating a png file from a dot file generated by the
+# Helper function for creating a svg file from a dot file generated by the
 # GenGraphs tool.
 # param 1: SetupJavadocGeneration namespace ($1)
 # param 2: module name
 #
-define setup_gengraph_dot_to_png
+define setup_gengraph_dot_to_svg
   $1_$2_DOT_SRC :=  $$($1_GENGRAPHS_DIR)/$2.dot
-  $1_$2_PNG_TARGET := $$($1_TARGET_DIR)/$2/module-graph.png
+  $1_$2_SVG_TARGET := $$($1_TARGET_DIR)/$2/module-graph.svg
 
-    # For each module needing a graph, create a png file from the dot file
+    # For each module needing a graph, create a svg file from the dot file
     # generated by the GenGraphs tool and store it in the target dir.
-    $$(eval $$(call SetupExecute, gengraphs_png_$1_$2, \
+    $$(eval $$(call SetupExecute, gengraphs_svg_$1_$2, \
         INFO := Running dot for module graphs for $2, \
         DEPS := $$(gengraphs_$1_TARGET), \
-        OUTPUT_FILE := $$($1_$2_PNG_TARGET), \
+        OUTPUT_FILE := $$($1_$2_SVG_TARGET), \
         SUPPORT_DIR := $$($1_GENGRAPHS_DIR), \
-        COMMAND := $$(DOT) -Tpng -o $$($1_$2_PNG_TARGET) $$($1_$2_DOT_SRC), \
+        COMMAND := $$(DOT) -Tsvg -o $$($1_$2_SVG_TARGET) $$($1_$2_DOT_SRC), \
     ))
 
-  $1_MODULEGRAPH_TARGETS += $$($1_$2_PNG_TARGET)
+  $1_MODULEGRAPH_TARGETS += $$($1_$2_SVG_TARGET)
 endef
 
 # Helper function to create the overview.html file to use with the -overview
@@ -281,7 +282,7 @@
 
   ifeq ($$(ENABLE_FULL_DOCS), true)
     # Tell the ModuleGraph taglet to generate html links to soon-to-be-created
-    # png files with module graphs.
+    # svg files with module graphs.
     $1_JAVA_ARGS += -DenableModuleGraph=true
   endif
 
@@ -361,8 +362,8 @@
   $1_JAVADOC_TARGETS := $$(javadoc_$1_TARGET)
 
   ifeq ($$(ENABLE_FULL_DOCS), true)
-    # We have asked ModuleGraph to generate links to png files. Now we must
-    # produce the png files.
+    # We have asked ModuleGraph to generate links to svg files. Now we must
+    # produce the svg files.
 
     # Locate which modules has the @moduleGraph tag in their module-info.java
     $1_MODULES_NEEDING_GRAPH := $$(strip $$(foreach m, $$($1_ALL_MODULES), \
@@ -387,11 +388,11 @@
             --dot-attributes $$(GENGRAPHS_PROPS), \
     ))
 
-    # For each module needing a graph, create a png file from the dot file
+    # For each module needing a graph, create a svg file from the dot file
     # generated by the GenGraphs tool and store it in the target dir.
     # They will depend on gengraphs_$1_TARGET, and will be added to $1.
     $$(foreach m, $$($1_MODULES_NEEDING_GRAPH), \
-      $$(eval $$(call setup_gengraph_dot_to_png,$1,$$m)) \
+      $$(eval $$(call setup_gengraph_dot_to_svg,$1,$$m)) \
     )
   endif
 endef
--- a/make/autoconf/basics.m4	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/autoconf/basics.m4	Tue Oct 22 20:49:40 2019 -0400
@@ -489,31 +489,43 @@
       # for unknown variables in the end.
       CONFIGURE_OVERRIDDEN_VARIABLES="$try_remove_var"
 
+      tool_override=[$]$1
+      AC_MSG_NOTICE([User supplied override $1="$tool_override"])
+
       # Check if we try to supply an empty value
-      if test "x[$]$1" = x; then
-        AC_MSG_NOTICE([Setting user supplied tool $1= (no value)])
+      if test "x$tool_override" = x; then
         AC_MSG_CHECKING([for $1])
         AC_MSG_RESULT([disabled])
       else
+        # Split up override in command part and argument part
+        tool_and_args=($tool_override)
+        [ tool_command=${tool_and_args[0]} ]
+        [ unset 'tool_and_args[0]' ]
+        [ tool_args=${tool_and_args[@]} ]
+
         # Check if the provided tool contains a complete path.
-        tool_specified="[$]$1"
-        tool_basename="${tool_specified##*/}"
-        if test "x$tool_basename" = "x$tool_specified"; then
+        tool_basename="${tool_command##*/}"
+        if test "x$tool_basename" = "x$tool_command"; then
           # A command without a complete path is provided, search $PATH.
-          AC_MSG_NOTICE([Will search for user supplied tool $1=$tool_basename])
+          AC_MSG_NOTICE([Will search for user supplied tool "$tool_basename"])
           AC_PATH_PROG($1, $tool_basename)
           if test "x[$]$1" = x; then
-            AC_MSG_ERROR([User supplied tool $tool_basename could not be found])
+            AC_MSG_ERROR([User supplied tool $1="$tool_basename" could not be found])
           fi
         else
           # Otherwise we believe it is a complete path. Use it as it is.
-          AC_MSG_NOTICE([Will use user supplied tool $1=$tool_specified])
-          AC_MSG_CHECKING([for $1])
-          if test ! -x "$tool_specified"; then
+          AC_MSG_NOTICE([Will use user supplied tool "$tool_command"])
+          AC_MSG_CHECKING([for $tool_command])
+          if test ! -x "$tool_command"; then
             AC_MSG_RESULT([not found])
-            AC_MSG_ERROR([User supplied tool $1=$tool_specified does not exist or is not executable])
+            AC_MSG_ERROR([User supplied tool $1="$tool_command" does not exist or is not executable])
           fi
-          AC_MSG_RESULT([$tool_specified])
+           $1="$tool_command"
+          AC_MSG_RESULT([found])
+        fi
+        if test "x$tool_args" != x; then
+          # If we got arguments, re-append them to the command after the fixup.
+          $1="[$]$1 $tool_args"
         fi
       fi
     fi
--- a/make/common/MakeBase.gmk	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/common/MakeBase.gmk	Tue Oct 22 20:49:40 2019 -0400
@@ -564,8 +564,8 @@
 # Param 1 - The path to base the name of the log file / command line file on
 # Param 2 - The command to run
 ExecuteWithLog = \
-  $(call LogCmdlines, Exececuting: [$(strip $2)]) \
-  $(call MakeDir, $(dir $(strip $1))) \
+  $(call LogCmdlines, Executing: [$(strip $2)]) \
+  $(call MakeDir, $(dir $(strip $1)) $(MAKESUPPORT_OUTPUTDIR)/failure-logs) \
   $(call WriteFile, $2, $(strip $1).cmdline) \
   ( $(RM) $(strip $1).log && $(strip $2) > >($(TEE) -a $(strip $1).log) 2> >($(TEE) -a $(strip $1).log >&2) || \
       ( exitcode=$(DOLLAR)? && \
--- a/make/conf/jib-profiles.js	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/conf/jib-profiles.js	Tue Oct 22 20:49:40 2019 -0400
@@ -839,13 +839,17 @@
     if (testedProfile == null) {
         testedProfile = input.build_os + "-" + input.build_cpu;
     }
-    var testedProfileJDK = testedProfile + ".jdk";
-    var testedProfileTest = ""
-    if (testedProfile.endsWith("-jcov")) {
-        testedProfileTest = testedProfile.substring(0, testedProfile.length - "-jcov".length) + ".test";
+    var testedProfileJdk = testedProfile + ".jdk";
+    // Make it possible to use the test image from a different profile
+    var testImageProfile;
+    if (input.testImageProfile != null) {
+        testImageProfile = input.testImageProfile;
+    } else if (testedProfile.endsWith("-jcov")) {
+        testImageProfile = testedProfile.substring(0, testedProfile.length - "-jcov".length);
     } else {
-        testedProfileTest = testedProfile + ".test";
+        testImageProfile = testedProfile;
     }
+    var testedProfileTest = testImageProfile + ".test"
     var testOnlyMake = [ "run-test-prebuilt", "LOG_CMDLINES=true", "JTREG_VERBOSE=fail,error,time" ];
     if (testedProfile.endsWith("-gcov")) {
         testOnlyMake = concat(testOnlyMake, "GCOV_ENABLED=true")
@@ -855,14 +859,14 @@
             target_os: input.build_os,
             target_cpu: input.build_cpu,
             dependencies: [
-                "jtreg", "gnumake", "boot_jdk", "devkit", "jib", "jcov", testedProfileJDK,
+                "jtreg", "gnumake", "boot_jdk", "devkit", "jib", "jcov", testedProfileJdk,
                 testedProfileTest
             ],
             src: "src.conf",
             make_args: testOnlyMake,
             environment: {
                 "BOOT_JDK": common.boot_jdk_home,
-                "JDK_IMAGE_DIR": input.get(testedProfileJDK, "home_path"),
+                "JDK_IMAGE_DIR": input.get(testedProfileJdk, "home_path"),
                 "TEST_IMAGE_DIR": input.get(testedProfileTest, "home_path")
             },
             labels: "test"
@@ -871,10 +875,10 @@
 
     // If actually running the run-test-prebuilt profile, verify that the input
     // variable is valid and if so, add the appropriate target_* values from
-    // the tested profile.
+    // the tested profile. Use testImageProfile value as backup.
     if (input.profile == "run-test-prebuilt") {
-        if (profiles[testedProfile] == null) {
-            error("testedProfile is not defined: " + testedProfile);
+        if (profiles[testedProfile] == null && profiles[testImageProfile] == null) {
+            error("testedProfile is not defined: " + testedProfile + " " + testImageProfile);
         }
     }
     if (profiles[testedProfile] != null) {
@@ -882,6 +886,11 @@
             = profiles[testedProfile]["target_os"];
         testOnlyProfilesPrebuilt["run-test-prebuilt"]["target_cpu"]
             = profiles[testedProfile]["target_cpu"];
+    } else if (profiles[testImageProfile] != null) {
+        testOnlyProfilesPrebuilt["run-test-prebuilt"]["target_os"]
+            = profiles[testImageProfile]["target_os"];
+        testOnlyProfilesPrebuilt["run-test-prebuilt"]["target_cpu"]
+            = profiles[testImageProfile]["target_cpu"];
     }
     profiles = concatObjects(profiles, testOnlyProfilesPrebuilt);
 
@@ -1346,3 +1355,8 @@
              || (input.build_os == "linux"
                  && java.lang.System.getProperty("os.version").contains("Microsoft")));
 }
+
+var error = function (s) {
+    java.lang.System.err.println("[ERROR] " + s);
+    exit(1);
+};
--- a/make/hotspot/symbols/symbols-unix	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/hotspot/symbols/symbols-unix	Tue Oct 22 20:49:40 2019 -0400
@@ -97,6 +97,7 @@
 JVM_GetDeclaredClasses
 JVM_GetDeclaringClass
 JVM_GetEnclosingMethodInfo
+JVM_GetExtendedNPEMessage
 JVM_GetFieldIxModifiers
 JVM_GetFieldTypeAnnotations
 JVM_GetInheritedAccessControlContext
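
The JVM_GetExtendedNPEMessage export added above is the JVM entry point behind helpful NullPointerException messages (JEP 358). As a hedged sketch of what it acts on, the chained dereference below throws an NPE whose extended message pinpoints the null sub-expression; the -XX:+ShowCodeDetailsInExceptionMessages flag is my assumption for enabling the feature and is not part of this changeset.

// NpeDemo.java -- illustrative only; run with
//   java -XX:+ShowCodeDetailsInExceptionMessages NpeDemo   (flag name assumed)
public class NpeDemo {
    static class Node { Node next; String name; }

    public static void main(String[] args) {
        Node head = new Node();
        // head.next is null, so this chained access throws NullPointerException;
        // the extended message reports which part of the chain was null.
        System.out.println(head.next.name.length());
    }
}
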
--- a/make/jdk/src/classes/build/tools/taglet/ModuleGraph.java	Tue Oct 22 16:42:23 2019 -0400
+++ b/make/jdk/src/classes/build/tools/taglet/ModuleGraph.java	Tue Oct 22 20:49:40 2019 -0400
@@ -64,7 +64,7 @@
         }
 
         String moduleName = ((ModuleElement) element).getQualifiedName().toString();
-        String imageFile = "module-graph.png";
+        String imageFile = "module-graph.svg";
         int thumbnailHeight = -1;
         String hoverImage = "";
         if (!moduleName.equals("java.base")) {
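
For context, this taglet fires on module declarations whose doc comment carries the @moduleGraph tag (Docs.gmk locates them by grepping module-info.java), and after this change the generated link points at module-graph.svg instead of a png. A minimal sketch of such a declaration, with an invented module and package name:

/**
 * Defines a demo API.
 *
 * @moduleGraph
 * @since 14
 */
module demo.api {
    // hypothetical module; the exported package is illustrative
    exports com.example.demo;
}
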
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/jdk/src/classes/build/tools/taglet/Preview.java	Tue Oct 22 20:49:40 2019 -0400
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package build.tools.taglet;
+
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Predicate;
+import javax.lang.model.element.Element;
+import com.sun.source.doctree.DocTree;
+import com.sun.source.doctree.TextTree;
+import com.sun.source.doctree.UnknownInlineTagTree;
+import jdk.javadoc.doclet.Taglet;
+import static jdk.javadoc.doclet.Taglet.Location.*;
+
+/**
+ * An inline tag to insert a standard warning about a preview API.
+ */
+public class Preview implements Taglet {
+
+    /** Returns the set of locations in which a taglet may be used. */
+    @Override
+    public Set<Location> getAllowedLocations() {
+        return EnumSet.of(MODULE, PACKAGE, TYPE, CONSTRUCTOR, METHOD, FIELD);
+    }
+
+    @Override
+    public boolean isInlineTag() {
+        return true;
+    }
+
+    @Override
+    public String getName() {
+        return "preview";
+    }
+
+    @Override
+    public String toString(List<? extends DocTree> tags, Element elem) {
+        UnknownInlineTagTree previewTag = (UnknownInlineTagTree) tags.get(0);
+        List<? extends DocTree> previewContent = previewTag.getContent();
+        String previewText = ((TextTree) previewContent.get(0)).getBody();
+        String[] summaryAndDetails = previewText.split("\n\r?\n\r?");
+        String summary = summaryAndDetails[0];
+        String details = summaryAndDetails.length > 1 ? summaryAndDetails[1] : summaryAndDetails[0];
+        StackTraceElement[] stackTrace = new Exception().getStackTrace();
+        Predicate<StackTraceElement> isSummary =
+                el -> el.getClassName().endsWith("HtmlDocletWriter") &&
+                      el.getMethodName().equals("addSummaryComment");
+        if (Arrays.stream(stackTrace).anyMatch(isSummary)) {
+            return "<div style=\"display:inline-block; font-weight:bold\">" + summary + "</div><br>";
+        }
+        return "<div style=\"border: 1px solid red; border-radius: 5px; padding: 5px; display:inline-block; font-size: larger\">" + details + "</div><br>";
+    }
+}
+
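
This taglet is registered in Docs.gmk via -taglet build.tools.taglet.Preview, so an API doc comment can carry an inline {@preview ...} tag: the text before the first blank line becomes the bold summary used in summary tables, and the following paragraph becomes the red-bordered details box on the detail page. A sketch of how such a comment might look, on an interface invented for illustration:

public interface ShapeSupport {
    /**
     * Returns a description of this shape.
     *
     * {@preview Associated with a preview feature of the Java language.
     *
     *           This method is associated with a preview feature of the Java
     *           language. Preview features may be removed in a future release
     *           or upgraded to permanent features.}
     *
     * @return a description of this shape
     */
    String describe();
}
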
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -58,7 +58,7 @@
       Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
       __ ldrb(rscratch1, gc_state);
       if (dest_uninitialized) {
-        __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
+        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
       } else {
         __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
         __ tst(rscratch1, rscratch2);
@@ -262,7 +262,7 @@
   __ leave();
 }
 
-void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Register tmp) {
+void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Address load_addr) {
   if (!ShenandoahLoadRefBarrier) {
     return;
   }
@@ -272,6 +272,8 @@
   Label is_null;
   Label done;
 
+  __ block_comment("load_reference_barrier_native { ");
+
   __ cbz(dst, is_null);
 
   __ enter();
@@ -285,6 +287,7 @@
   __ mov(rscratch2, dst);
   __ push_call_clobbered_registers();
   __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native));
+  __ lea(r1, load_addr);
   __ mov(r0, rscratch2);
   __ blr(lr);
   __ mov(rscratch2, r0);
@@ -294,6 +297,7 @@
   __ bind(done);
   __ leave();
   __ bind(is_null);
+  __ block_comment("} load_reference_barrier_native");
 }
 
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
@@ -327,20 +331,32 @@
   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
   bool on_reference = on_weak || on_phantom;
-  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
+  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
+
+  Register result_dst = dst;
+
+  if (on_oop) {
+    // We want to preserve src
+    if (dst == src.base() || dst == src.index()) {
+      dst = rscratch1;
+    }
+    assert_different_registers(dst, src.base(), src.index());
+  }
 
   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
   if (on_oop) {
-     if (not_in_heap) {
-       if (ShenandoahHeap::heap()->is_traversal_mode()) {
-         load_reference_barrier(masm, dst, tmp1);
-         keep_alive = true;
-       } else {
-         load_reference_barrier_native(masm, dst, tmp1);
-       }
-     } else {
-       load_reference_barrier(masm, dst, tmp1);
-     }
+    if (not_in_heap && !is_traversal_mode) {
+      load_reference_barrier_native(masm, dst, src);
+    } else {
+      load_reference_barrier(masm, dst, tmp1);
+    }
+
+    if (dst != result_dst) {
+      __ mov(result_dst, dst);
+      dst = result_dst;
+    }
+
     if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
       __ enter();
       satb_write_barrier_pre(masm /* masm */,
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -58,7 +58,7 @@
   void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
   void load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp);
   void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp);
-  void load_reference_barrier_native(MacroAssembler* masm, Register dst, Register tmp);
+  void load_reference_barrier_native(MacroAssembler* masm, Register dst, Address load_addr);
 
   address generate_shenandoah_lrb(StubCodeGenerator* cgen);
 
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -333,7 +333,7 @@
 #endif
 }
 
-void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst) {
+void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Address src) {
   if (!ShenandoahLoadRefBarrier) {
     return;
   }
@@ -341,6 +341,7 @@
   Label done;
   Label not_null;
   Label slow_path;
+  __ block_comment("load_reference_barrier_native { ");
 
   // null check
   __ testptr(dst, dst);
@@ -371,7 +372,7 @@
   __ bind(slow_path);
 
   if (dst != rax) {
-    __ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
+    __ push(rax);
   }
   __ push(rcx);
   __ push(rdx);
@@ -388,8 +389,9 @@
   __ push(r15);
 #endif
 
-  __ movptr(rdi, rax);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native), rdi);
+  assert_different_registers(dst, rsi);
+  __ lea(rsi, src);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native), dst, rsi);
 
 #ifdef _LP64
   __ pop(r15);
@@ -407,10 +409,12 @@
   __ pop(rcx);
 
   if (dst != rax) {
-    __ xchgptr(rax, dst); // Swap back obj with rax.
+    __ movptr(dst, rax);
+    __ pop(rax);
   }
 
   __ bind(done);
+  __ block_comment("load_reference_barrier_native { ");
 }
 
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
@@ -474,14 +478,43 @@
   bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
   bool keep_alive = ((decorators & AS_NO_KEEPALIVE) == 0) || is_traversal_mode;
 
+  Register result_dst = dst;
+  bool use_tmp1_for_dst = false;
+
+  if (on_oop) {
+    // We want to preserve src
+    if (dst == src.base() || dst == src.index()) {
+      // Use tmp1 for dst if possible, as it is not used in BarrierAssembler::load_at()
+      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
+        dst = tmp1;
+        use_tmp1_for_dst = true;
+      } else {
+        dst = rdi;
+        __ push(dst);
+      }
+    }
+    assert_different_registers(dst, src.base(), src.index());
+  }
+
   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+
   if (on_oop) {
     if (not_in_heap && !is_traversal_mode) {
-      load_reference_barrier_native(masm, dst);
+      load_reference_barrier_native(masm, dst, src);
     } else {
       load_reference_barrier(masm, dst);
     }
 
+    if (dst != result_dst) {
+      __ movptr(result_dst, dst);
+
+      if (!use_tmp1_for_dst) {
+        __ pop(dst);
+      }
+
+      dst = result_dst;
+    }
+
     if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
       const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
       assert_different_registers(dst, tmp1, tmp_thread);
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -76,7 +76,7 @@
 #endif
 
   void load_reference_barrier(MacroAssembler* masm, Register dst);
-  void load_reference_barrier_native(MacroAssembler* masm, Register dst);
+  void load_reference_barrier_native(MacroAssembler* masm, Register dst, Address src);
 
   void cmpxchg_oop(MacroAssembler* masm,
                    Register res, Address addr, Register oldval, Register newval,
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -6352,7 +6352,7 @@
   movptr(result, str1);
   if (UseAVX >= 2) {
     cmpl(cnt1, stride);
-    jcc(Assembler::less, SCAN_TO_CHAR_LOOP);
+    jcc(Assembler::less, SCAN_TO_CHAR);
     cmpl(cnt1, 2*stride);
     jcc(Assembler::less, SCAN_TO_8_CHAR_INIT);
     movdl(vec1, ch);
@@ -6379,10 +6379,8 @@
   }
   bind(SCAN_TO_8_CHAR);
   cmpl(cnt1, stride);
-  if (UseAVX >= 2) {
-    jcc(Assembler::less, SCAN_TO_CHAR);
-  } else {
-    jcc(Assembler::less, SCAN_TO_CHAR_LOOP);
+  jcc(Assembler::less, SCAN_TO_CHAR);
+  if (UseAVX < 2) {
     movdl(vec1, ch);
     pshuflw(vec1, vec1, 0x00);
     pshufd(vec1, vec1, 0);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -114,7 +114,8 @@
       // short offset operators (jmp and jcc)
       char* disp = (char*) &branch[1];
       int imm8 = target - (address) &disp[1];
-      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", file, line);
+      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
+                file == NULL ? "<NULL>" : file, line);
       *disp = imm8;
     } else {
       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1304,6 +1304,97 @@
   }
 }
 
+// Registers need to be saved for runtime call
+static Register caller_saved_registers[] = {
+  rcx, rdx, rsi, rdi
+};
+
+// Save caller saved registers except r1 and r2
+static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+  int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+  for (int index = 0; index < reg_len; index ++) {
+    Register this_reg = caller_saved_registers[index];
+    if (this_reg != r1 && this_reg != r2) {
+      __ push(this_reg);
+    }
+  }
+}
+
+// Restore caller saved registers except r1 and r2
+static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+  int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+  for (int index = reg_len - 1; index >= 0; index --) {
+    Register this_reg = caller_saved_registers[index];
+    if (this_reg != r1 && this_reg != r2) {
+      __ pop(this_reg);
+    }
+  }
+}
+
+// Pin object, return pinned object or null in rax
+static void gen_pin_object(MacroAssembler* masm,
+                           Register thread, VMRegPair reg) {
+  __ block_comment("gen_pin_object {");
+
+  Label is_null;
+  Register tmp_reg = rax;
+  VMRegPair tmp(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    simple_move32(masm, reg, tmp);
+    reg = tmp;
+  } else {
+    __ movl(tmp_reg, reg.first()->as_Register());
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+
+  // Save registers that may be used by runtime call
+  Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+  save_registers_except(masm, arg, thread);
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
+    thread, reg.first()->as_Register());
+
+  // Restore saved registers
+  restore_registers_except(masm, arg, thread);
+
+  __ bind(is_null);
+  __ block_comment("} gen_pin_object");
+}
+
+// Unpin object
+static void gen_unpin_object(MacroAssembler* masm,
+                             Register thread, VMRegPair reg) {
+  __ block_comment("gen_unpin_object {");
+  Label is_null;
+
+  // temp register
+  __ push(rax);
+  Register tmp_reg = rax;
+  VMRegPair tmp(tmp_reg->as_VMReg());
+
+  simple_move32(masm, reg, tmp);
+
+  __ testptr(rax, rax);
+  __ jccb(Assembler::equal, is_null);
+
+  // Save registers that may be used by runtime call
+  Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+  save_registers_except(masm, arg, thread);
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
+    thread, rax);
+
+  // Restore saved registers
+  restore_registers_except(masm, arg, thread);
+  __ bind(is_null);
+  __ pop(rax);
+  __ block_comment("} gen_unpin_object");
+}
+
 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
 // forced.  Save down any oops in registers and describe them in an
@@ -1837,7 +1928,7 @@
 
   __ get_thread(thread);
 
-  if (is_critical_native) {
+  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
   }
@@ -1875,6 +1966,11 @@
   //
   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
 
+  // Inbound arguments that need to be pinned for critical natives
+  GrowableArray<int> pinned_args(total_in_args);
+  // Current stack slot for storing register based array argument
+  int pinned_slot = oop_handle_offset;
+
   // Mark location of rbp,
   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
 
@@ -1886,7 +1982,28 @@
     switch (in_sig_bt[i]) {
       case T_ARRAY:
         if (is_critical_native) {
-          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+          VMRegPair in_arg = in_regs[i];
+          if (Universe::heap()->supports_object_pinning()) {
+            // gen_pin_object handles save and restore
+            // of any clobbered registers
+            gen_pin_object(masm, thread, in_arg);
+            pinned_args.append(i);
+
+            // rax has pinned array
+            VMRegPair result_reg(rax->as_VMReg());
+            if (!in_arg.first()->is_stack()) {
+              assert(pinned_slot <= stack_slots, "overflow");
+              simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
+              pinned_slot += VMRegImpl::slots_per_word;
+            } else {
+              // Write back pinned value, it will be used to unpin this argument
+              __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
+            }
+            // We have the array in register, use it
+            in_arg = result_reg;
+          }
+
+          unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
           c_arg++;
           break;
         }
@@ -2079,6 +2196,26 @@
   default       : ShouldNotReachHere();
   }
 
+  // unpin pinned arguments
+  pinned_slot = oop_handle_offset;
+  if (pinned_args.length() > 0) {
+    // save return value that may be overwritten otherwise.
+    save_native_result(masm, ret_type, stack_slots);
+    for (int index = 0; index < pinned_args.length(); index ++) {
+      int i = pinned_args.at(index);
+      assert(pinned_slot <= stack_slots, "overflow");
+      if (!in_regs[i].first()->is_stack()) {
+        int offset = pinned_slot * VMRegImpl::stack_slot_size;
+        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+        pinned_slot += VMRegImpl::slots_per_word;
+      }
+      // gen_pin_object handles save and restore
+      // of any other clobbered registers
+      gen_unpin_object(masm, thread, in_regs[i]);
+    }
+    restore_native_result(masm, ret_type, stack_slots);
+  }
+
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
--- a/src/hotspot/cpu/x86/x86.ad	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/cpu/x86/x86.ad	Tue Oct 22 20:49:40 2019 -0400
@@ -1280,7 +1280,7 @@
     case Op_AbsVS:
     case Op_AbsVI:
     case Op_AddReductionVI:
-      if (UseSSE < 3) // requires at least SSE3
+      if (UseSSE < 3 || !VM_Version::supports_ssse3()) // requires at least SSSE3
         ret_value = false;
       break;
     case Op_MulReductionVI:
--- a/src/hotspot/os/aix/os_aix.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/aix/os_aix.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1034,8 +1034,6 @@
 }
 
 bool os::supports_vtime() { return true; }
-bool os::enable_vtime()   { return false; }
-bool os::vtime_enabled()  { return false; }
 
 double os::elapsedVTime() {
   struct rusage usage;
@@ -2643,8 +2641,24 @@
   60              // 11 CriticalPriority
 };
 
+static int prio_init() {
+  if (ThreadPriorityPolicy == 1) {
+    if (geteuid() != 0) {
+      if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) {
+        warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \
+                "e.g., being the root user. If the necessary permission is not " \
+                "possessed, changes to priority will be silently ignored.");
+      }
+    }
+  }
+  if (UseCriticalJavaThreadPriority) {
+    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
+  }
+  return 0;
+}
+
 OSReturn os::set_native_priority(Thread* thread, int newpri) {
-  if (!UseThreadPriorities) return OS_OK;
+  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
   pthread_t thr = thread->osthread()->pthread_id();
   int policy = SCHED_OTHER;
   struct sched_param param;
@@ -2659,7 +2673,7 @@
 }
 
 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
-  if (!UseThreadPriorities) {
+  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
     *priority_ptr = java_to_os_priority[NormPriority];
     return OS_OK;
   }
@@ -3569,6 +3583,9 @@
     }
   }
 
+  // initialize thread priority policy
+  prio_init();
+
   return JNI_OK;
 }
 
@@ -3606,11 +3623,6 @@
   return;
 }
 
-bool os::distribute_processes(uint length, uint* distribution) {
-  // Not yet implemented.
-  return false;
-}
-
 bool os::bind_to_processor(uint processor_id) {
   // Not yet implemented.
   return false;
--- a/src/hotspot/os/aix/os_aix.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/aix/os_aix.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -44,7 +44,6 @@
 
   static julong _physical_memory;
   static pthread_t _main_thread;
-  static Mutex* _createThread_lock;
   static int _page_size;
 
   // -1 = uninitialized, 0 = AIX, 1 = OS/400 (PASE)
@@ -90,8 +89,6 @@
  public:
   static void init_thread_fpu_state();
   static pthread_t main_thread(void)                                { return _main_thread; }
-  static void set_createThread_lock(Mutex* lk)                      { _createThread_lock = lk; }
-  static Mutex* createThread_lock(void)                             { return _createThread_lock; }
   static void hotspot_sigmask(Thread* thread);
 
   // Given an address, returns the size of the page backing that address
--- a/src/hotspot/os/aix/os_aix.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/aix/os_aix.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -64,8 +64,6 @@
   ::dlclose(lib);
 }
 
-inline const int os::default_file_open_flags() { return 0;}
-
 inline jlong os::lseek(int fd, jlong offset, int whence) {
   return (jlong) ::lseek64(fd, offset, whence);
 }
--- a/src/hotspot/os/bsd/os_bsd.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -877,8 +877,6 @@
 }
 
 bool os::supports_vtime() { return true; }
-bool os::enable_vtime()   { return false; }
-bool os::vtime_enabled()  { return false; }
 
 double os::elapsedVTime() {
   // better than nothing, but not much
@@ -3282,11 +3280,6 @@
 #endif
 }
 
-bool os::distribute_processes(uint length, uint* distribution) {
-  // Not yet implemented.
-  return false;
-}
-
 bool os::bind_to_processor(uint processor_id) {
   // Not yet implemented.
   return false;
--- a/src/hotspot/os/bsd/os_bsd.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/bsd/os_bsd.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -67,8 +67,6 @@
   ::dlclose(lib);
 }
 
-inline const int os::default_file_open_flags() { return 0;}
-
 inline jlong os::lseek(int fd, jlong offset, int whence) {
   return (jlong) ::lseek(fd, offset, whence);
 }
--- a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -22,7 +22,7 @@
  */
 
 #include "gc/z/zErrno.hpp"
-#include "gc/z/zCPU.hpp"
+#include "gc/z/zCPU.inline.hpp"
 #include "gc/z/zNUMA.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
--- a/src/hotspot/os/linux/osContainer_linux.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/linux/osContainer_linux.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -131,14 +131,34 @@
      * hierarchy. If set to true consider also memory.stat
      * file if everything else seems unlimited */
     bool _uses_mem_hierarchy;
+    volatile jlong _memory_limit_in_bytes;
+    volatile jlong _next_check_counter;
 
  public:
     CgroupMemorySubsystem(char *root, char *mountpoint) : CgroupSubsystem::CgroupSubsystem(root, mountpoint) {
       _uses_mem_hierarchy = false;
+      _memory_limit_in_bytes = -1;
+      _next_check_counter = min_jlong;
+
     }
 
     bool is_hierarchical() { return _uses_mem_hierarchy; }
     void set_hierarchical(bool value) { _uses_mem_hierarchy = value; }
+
+    bool should_check_memory_limit() {
+      return os::elapsed_counter() > _next_check_counter;
+    }
+    jlong memory_limit_in_bytes() { return _memory_limit_in_bytes; }
+    void set_memory_limit_in_bytes(jlong value) {
+      _memory_limit_in_bytes = value;
+      // max memory limit is unlikely to change, but we want to remain
+      // responsive to configuration changes. A very short (20ms) grace time
+      // between re-reads avoids excessive overhead during startup without
+      // significantly reducing the VM's ability to promptly react to reduced
+      // memory availability
+      _next_check_counter = os::elapsed_counter() + (NANOSECS_PER_SEC/50);
+    }
+
 };
 
 CgroupMemorySubsystem* memory = NULL;
@@ -461,6 +481,16 @@
  *    OSCONTAINER_ERROR for not supported
  */
 jlong OSContainer::memory_limit_in_bytes() {
+  if (!memory->should_check_memory_limit()) {
+    return memory->memory_limit_in_bytes();
+  }
+  jlong memory_limit = read_memory_limit_in_bytes();
+  // Update CgroupMemorySubsystem to avoid re-reading container settings too often
+  memory->set_memory_limit_in_bytes(memory_limit);
+  return memory_limit;
+}
+
+jlong OSContainer::read_memory_limit_in_bytes() {
   GET_CONTAINER_INFO(julong, memory, "/memory.limit_in_bytes",
                      "Memory Limit is: " JULONG_FORMAT, JULONG_FORMAT, memlimit);
 
--- a/src/hotspot/os/linux/osContainer_linux.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/linux/osContainer_linux.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -36,6 +36,7 @@
  private:
   static bool   _is_initialized;
   static bool   _is_containerized;
+  static jlong read_memory_limit_in_bytes();
 
  public:
   static void init();
--- a/src/hotspot/os/linux/os_linux.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/linux/os_linux.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -148,11 +148,9 @@
 
 int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
 int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
-Mutex* os::Linux::_createThread_lock = NULL;
 pthread_t os::Linux::_main_thread;
 int os::Linux::_page_size = -1;
 bool os::Linux::_supports_fast_thread_cpu_time = false;
-uint32_t os::Linux::_os_version = 0;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
 
@@ -1364,8 +1362,6 @@
 }
 
 bool os::supports_vtime() { return true; }
-bool os::enable_vtime()   { return false; }
-bool os::vtime_enabled()  { return false; }
 
 double os::elapsedVTime() {
   struct rusage usage;
@@ -4823,48 +4819,6 @@
   return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
 }
 
-void os::Linux::initialize_os_info() {
-  assert(_os_version == 0, "OS info already initialized");
-
-  struct utsname _uname;
-
-  uint32_t major;
-  uint32_t minor;
-  uint32_t fix;
-
-  int rc;
-
-  // Kernel version is unknown if
-  // verification below fails.
-  _os_version = 0x01000000;
-
-  rc = uname(&_uname);
-  if (rc != -1) {
-
-    rc = sscanf(_uname.release,"%d.%d.%d", &major, &minor, &fix);
-    if (rc == 3) {
-
-      if (major < 256 && minor < 256 && fix < 256) {
-        // Kernel version format is as expected,
-        // set it overriding unknown state.
-        _os_version = (major << 16) |
-                      (minor << 8 ) |
-                      (fix   << 0 ) ;
-      }
-    }
-  }
-}
-
-uint32_t os::Linux::os_version() {
-  assert(_os_version != 0, "not initialized");
-  return _os_version & 0x00FFFFFF;
-}
-
-bool os::Linux::os_version_is_known() {
-  assert(_os_version != 0, "not initialized");
-  return _os_version & 0x01000000 ? false : true;
-}
-
 /////
 // glibc on Linux platform uses non-documented flag
 // to indicate, that some special sort of signal
@@ -5084,8 +5038,6 @@
 
   Linux::initialize_system_info();
 
-  Linux::initialize_os_info();
-
   os::Linux::CPUPerfTicks pticks;
   bool res = os::Linux::get_tick_information(&pticks, -1);
 
@@ -5262,9 +5214,6 @@
     }
   }
 
-  // Initialize lock used to serialize thread creation (see os::create_thread)
-  Linux::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));
-
   // at-exit methods are called in the reverse order of their registration.
   // atexit functions are called on return from main or as a result of a
   // call to exit(3C). There can be only 32 of these functions registered
@@ -5465,11 +5414,6 @@
   }
 }
 
-bool os::distribute_processes(uint length, uint* distribution) {
-  // Not yet implemented.
-  return false;
-}
-
 bool os::bind_to_processor(uint processor_id) {
   // Not yet implemented.
   return false;
--- a/src/hotspot/os/linux/os_linux.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/linux/os_linux.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -55,20 +55,10 @@
   static GrowableArray<int>* _cpu_to_node;
   static GrowableArray<int>* _nindex_to_node;
 
-  // 0x00000000 = uninitialized,
-  // 0x01000000 = kernel version unknown,
-  // otherwise a 32-bit number:
-  // Ox00AABBCC
-  // AA, Major Version
-  // BB, Minor Version
-  // CC, Fix   Version
-  static uint32_t _os_version;
-
  protected:
 
   static julong _physical_memory;
   static pthread_t _main_thread;
-  static Mutex* _createThread_lock;
   static int _page_size;
 
   static julong available_memory();
@@ -136,8 +126,6 @@
   // returns kernel thread id (similar to LWP id on Solaris), which can be
   // used to access /proc
   static pid_t gettid();
-  static void set_createThread_lock(Mutex* lk)                      { _createThread_lock = lk; }
-  static Mutex* createThread_lock(void)                             { return _createThread_lock; }
   static void hotspot_sigmask(Thread* thread);
 
   static address   initial_thread_stack_bottom(void)                { return _initial_thread_stack_bottom; }
@@ -196,7 +184,6 @@
 
   // Stack overflow handling
   static bool manually_expand_stack(JavaThread * t, address addr);
-  static int max_register_window_saves_before_flushing();
 
   // fast POSIX clocks support
   static void fast_thread_clock_init(void);
@@ -211,10 +198,6 @@
 
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
-  static void initialize_os_info();
-  static bool os_version_is_known();
-  static uint32_t os_version();
-
   // Stack repair handling
 
   // none present
--- a/src/hotspot/os/linux/os_linux.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/linux/os_linux.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -59,8 +59,6 @@
   ::dlclose(lib);
 }
 
-inline const int os::default_file_open_flags() { return 0;}
-
 inline jlong os::lseek(int fd, jlong offset, int whence) {
   return (jlong) ::lseek64(fd, offset, whence);
 }
--- a/src/hotspot/os/posix/os_posix.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/posix/os_posix.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -167,11 +167,6 @@
   return n;
 }
 
-bool os::is_debugger_attached() {
-  // not implemented
-  return false;
-}
-
 void os::wait_for_keypress_at_exit(void) {
   // don't do anything on posix platforms
   return;
--- a/src/hotspot/os/solaris/os_solaris.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -265,8 +265,6 @@
   }
 }
 
-static int _processors_online = 0;
-
 jint os::Solaris::_os_thread_limit = 0;
 volatile jint os::Solaris::_os_thread_count = 0;
 
@@ -291,7 +289,6 @@
 
 void os::Solaris::initialize_system_info() {
   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
-  _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
                                      (julong)sysconf(_SC_PAGESIZE);
 }
@@ -320,7 +317,6 @@
     // Query the number of cpus available to us.
     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
-      _processors_online = pset_cpus;
       return pset_cpus;
     }
   }
@@ -328,136 +324,6 @@
   return online_cpus;
 }
 
-static bool find_processors_in_pset(psetid_t        pset,
-                                    processorid_t** id_array,
-                                    uint_t*         id_length) {
-  bool result = false;
-  // Find the number of processors in the processor set.
-  if (pset_info(pset, NULL, id_length, NULL) == 0) {
-    // Make up an array to hold their ids.
-    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
-    // Fill in the array with their processor ids.
-    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
-      result = true;
-    }
-  }
-  return result;
-}
-
-// Callers of find_processors_online() must tolerate imprecise results --
-// the system configuration can change asynchronously because of DR
-// or explicit psradm operations.
-//
-// We also need to take care that the loop (below) terminates as the
-// number of processors online can change between the _SC_NPROCESSORS_ONLN
-// request and the loop that builds the list of processor ids.   Unfortunately
-// there's no reliable way to determine the maximum valid processor id,
-// so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
-// man pages, which claim the processor id set is "sparse, but
-// not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
-// exit the loop.
-//
-// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
-// not available on S8.0.
-
-static bool find_processors_online(processorid_t** id_array,
-                                   uint*           id_length) {
-  const processorid_t MAX_PROCESSOR_ID = 100000;
-  // Find the number of processors online.
-  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
-  // Make up an array to hold their ids.
-  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
-  // Processors need not be numbered consecutively.
-  long found = 0;
-  processorid_t next = 0;
-  while (found < *id_length && next < MAX_PROCESSOR_ID) {
-    processor_info_t info;
-    if (processor_info(next, &info) == 0) {
-      // NB, PI_NOINTR processors are effectively online ...
-      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
-        (*id_array)[found] = next;
-        found += 1;
-      }
-    }
-    next += 1;
-  }
-  if (found < *id_length) {
-    // The loop above didn't identify the expected number of processors.
-    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
-    // and re-running the loop, above, but there's no guarantee of progress
-    // if the system configuration is in flux.  Instead, we just return what
-    // we've got.  Note that in the worst case find_processors_online() could
-    // return an empty set.  (As a fall-back in the case of the empty set we
-    // could just return the ID of the current processor).
-    *id_length = found;
-  }
-
-  return true;
-}
-
-static bool assign_distribution(processorid_t* id_array,
-                                uint           id_length,
-                                uint*          distribution,
-                                uint           distribution_length) {
-  // We assume we can assign processorid_t's to uint's.
-  assert(sizeof(processorid_t) == sizeof(uint),
-         "can't convert processorid_t to uint");
-  // Quick check to see if we won't succeed.
-  if (id_length < distribution_length) {
-    return false;
-  }
-  // Assign processor ids to the distribution.
-  // Try to shuffle processors to distribute work across boards,
-  // assuming 4 processors per board.
-  const uint processors_per_board = ProcessDistributionStride;
-  // Find the maximum processor id.
-  processorid_t max_id = 0;
-  for (uint m = 0; m < id_length; m += 1) {
-    max_id = MAX2(max_id, id_array[m]);
-  }
-  // The next id, to limit loops.
-  const processorid_t limit_id = max_id + 1;
-  // Make up markers for available processors.
-  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
-  for (uint c = 0; c < limit_id; c += 1) {
-    available_id[c] = false;
-  }
-  for (uint a = 0; a < id_length; a += 1) {
-    available_id[id_array[a]] = true;
-  }
-  // Step by "boards", then by "slot", copying to "assigned".
-  // NEEDS_CLEANUP: The assignment of processors should be stateful,
-  //                remembering which processors have been assigned by
-  //                previous calls, etc., so as to distribute several
-  //                independent calls of this method.  What we'd like is
-  //                It would be nice to have an API that let us ask
-  //                how many processes are bound to a processor,
-  //                but we don't have that, either.
-  //                In the short term, "board" is static so that
-  //                subsequent distributions don't all start at board 0.
-  static uint board = 0;
-  uint assigned = 0;
-  // Until we've found enough processors ....
-  while (assigned < distribution_length) {
-    // ... find the next available processor in the board.
-    for (uint slot = 0; slot < processors_per_board; slot += 1) {
-      uint try_id = board * processors_per_board + slot;
-      if ((try_id < limit_id) && (available_id[try_id] == true)) {
-        distribution[assigned] = try_id;
-        available_id[try_id] = false;
-        assigned += 1;
-        break;
-      }
-    }
-    board += 1;
-    if (board * processors_per_board + 0 >= limit_id) {
-      board = 0;
-    }
-  }
-  FREE_C_HEAP_ARRAY(bool, available_id);
-  return true;
-}
-
 void os::set_native_thread_name(const char *name) {
   if (Solaris::_pthread_setname_np != NULL) {
     // Only the first 31 bytes of 'name' are processed by pthread_setname_np
@@ -470,31 +336,6 @@
   }
 }
 
-bool os::distribute_processes(uint length, uint* distribution) {
-  bool result = false;
-  // Find the processor id's of all the available CPUs.
-  processorid_t* id_array  = NULL;
-  uint           id_length = 0;
-  // There are some races between querying information and using it,
-  // since processor sets can change dynamically.
-  psetid_t pset = PS_NONE;
-  // Are we running in a processor set?
-  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
-    result = find_processors_in_pset(pset, &id_array, &id_length);
-  } else {
-    result = find_processors_online(&id_array, &id_length);
-  }
-  if (result == true) {
-    if (id_length >= length) {
-      result = assign_distribution(id_array, id_length, distribution, length);
-    } else {
-      result = false;
-    }
-  }
-  FREE_C_HEAP_ARRAY(processorid_t, id_array);
-  return result;
-}
-
 bool os::bind_to_processor(uint processor_id) {
   // We assume that a processorid_t can be stored in a uint.
   assert(sizeof(uint) == sizeof(processorid_t),
@@ -1237,8 +1078,6 @@
 }
 
 bool os::supports_vtime() { return true; }
-bool os::enable_vtime() { return false; }
-bool os::vtime_enabled() { return false; }
 
 double os::elapsedVTime() {
   return (double)gethrvtime() / (double)hrtime_hz;
--- a/src/hotspot/os/solaris/os_solaris.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/solaris/os_solaris.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -271,10 +271,6 @@
 
   static void correct_stack_boundaries_for_primordial_thread(Thread* thr);
 
-  // Stack overflow handling
-
-  static int max_register_window_saves_before_flushing();
-
   // Stack repair handling
 
   // none present
--- a/src/hotspot/os/solaris/os_solaris.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/solaris/os_solaris.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -61,8 +61,6 @@
 
 inline void os::dll_unload(void *lib) { ::dlclose(lib); }
 
-inline const int os::default_file_open_flags() { return 0;}
-
 //////////////////////////////////////////////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////////////
 
--- a/src/hotspot/os/windows/os_windows.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/windows/os_windows.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -826,11 +826,6 @@
   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 }
 
-bool os::distribute_processes(uint length, uint* distribution) {
-  // Not yet implemented.
-  return false;
-}
-
 bool os::bind_to_processor(uint processor_id) {
   // Not yet implemented.
   return false;
@@ -911,8 +906,6 @@
 }
 
 bool os::supports_vtime() { return true; }
-bool os::enable_vtime() { return false; }
-bool os::vtime_enabled() { return false; }
 
 double os::elapsedVTime() {
   FILETIME created;
@@ -3904,12 +3897,6 @@
   _setmode(_fileno(stderr), _O_BINARY);
 }
 
-
-bool os::is_debugger_attached() {
-  return IsDebuggerPresent() ? true : false;
-}
-
-
 void os::wait_for_keypress_at_exit(void) {
   if (PauseAtExit) {
     fprintf(stderr, "Press any key to continue...\n");
--- a/src/hotspot/os/windows/os_windows.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os/windows/os_windows.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -30,8 +30,6 @@
 
 inline const char* os::dll_file_extension()            { return ".dll"; }
 
-inline const int os::default_file_open_flags() { return O_BINARY | O_NOINHERIT;}
-
 inline void  os::dll_unload(void *lib) {
   ::FreeLibrary((HMODULE)lib);
 }
--- a/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -86,12 +86,6 @@
 size_t os::Posix::_java_thread_min_stack_allowed = 86 * K;
 size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;
 
-int os::Solaris::max_register_window_saves_before_flushing() {
-  // We should detect this at run time. For now, filling
-  // in with a constant.
-  return 8;
-}
-
 static void handle_unflushed_register_windows(gwindows_t *win) {
   int restore_count = win->wbcnt;
   int i;
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -93,7 +93,7 @@
   Handle protection_domain(thread, caller->method_holder()->protection_domain());
 
   // Ignore wrapping L and ;
-  if (name[0] == 'L') {
+  if (name[0] == JVM_SIGNATURE_CLASS) {
     assert(len > 2, "small name %s", name);
     name++;
     len -= 2;
--- a/src/hotspot/share/c1/c1_InstructionPrinter.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/c1/c1_InstructionPrinter.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,19 +33,11 @@
 #ifndef PRODUCT
 
 const char* InstructionPrinter::basic_type_name(BasicType type) {
-  switch (type) {
-    case T_BOOLEAN: return "boolean";
-    case T_BYTE   : return "byte";
-    case T_CHAR   : return "char";
-    case T_SHORT  : return "short";
-    case T_INT    : return "int";
-    case T_LONG   : return "long";
-    case T_FLOAT  : return "float";
-    case T_DOUBLE : return "double";
-    case T_ARRAY  : return "array";
-    case T_OBJECT : return "object";
-    default       : return "???";
+  const char* n = type2name(type);
+  if (n == NULL || type > T_VOID) {
+    return "???";
   }
+  return n;
 }
 
 
--- a/src/hotspot/share/c1/c1_ValueStack.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/c1/c1_ValueStack.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -42,31 +42,21 @@
   verify();
 }
 
-
 ValueStack::ValueStack(ValueStack* copy_from, Kind kind, int bci)
   : _scope(copy_from->scope())
   , _caller_state(copy_from->caller_state())
   , _bci(bci)
   , _kind(kind)
-  , _locals()
-  , _stack()
+  , _locals(copy_from->locals_size_for_copy(kind))
+  , _stack(copy_from->stack_size_for_copy(kind))
   , _locks(copy_from->locks_size() == 0 ? NULL : new Values(copy_from->locks_size()))
 {
   assert(kind != EmptyExceptionState || !Compilation::current()->env()->should_retain_local_variables(), "need locals");
   if (kind != EmptyExceptionState) {
-    // only allocate space if we need to copy the locals-array
-    _locals = Values(copy_from->locals_size());
     _locals.appendAll(&copy_from->_locals);
   }
 
   if (kind != ExceptionState && kind != EmptyExceptionState) {
-    if (kind == Parsing) {
-      // stack will be modified, so reserve enough space to avoid resizing
-      _stack = Values(scope()->method()->max_stack());
-    } else {
-      // stack will not be modified, so do not waste space
-      _stack = Values(copy_from->stack_size());
-    }
     _stack.appendAll(&copy_from->_stack);
   }
 
@@ -77,6 +67,25 @@
   verify();
 }
 
+int ValueStack::locals_size_for_copy(Kind kind) const {
+  if (kind != EmptyExceptionState) {
+    return locals_size();
+  }
+  return 0;
+}
+
+int ValueStack::stack_size_for_copy(Kind kind) const {
+  if (kind != ExceptionState && kind != EmptyExceptionState) {
+    if (kind == Parsing) {
+      // stack will be modified, so reserve enough space to avoid resizing
+      return scope()->method()->max_stack();
+    } else {
+      // stack will not be modified, so do not waste space
+      return stack_size();
+    }
+  }
+  return 0;
+}
 
 bool ValueStack::is_same(ValueStack* s) {
   if (scope() != s->scope()) return false;
--- a/src/hotspot/share/c1/c1_ValueStack.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/c1/c1_ValueStack.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -65,6 +65,8 @@
   // for simplified copying
   ValueStack(ValueStack* copy_from, Kind kind, int bci);
 
+  int locals_size_for_copy(Kind kind) const;
+  int stack_size_for_copy(Kind kind) const;
  public:
   // creation
   ValueStack(IRScope* scope, ValueStack* caller_state);
--- a/src/hotspot/share/ci/ciEnv.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/ci/ciEnv.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -413,8 +413,8 @@
 
   // Now we need to check the SystemDictionary
   Symbol* sym = name->get_symbol();
-  if (sym->char_at(0) == 'L' &&
-    sym->char_at(sym->utf8_length()-1) == ';') {
+  if (sym->char_at(0) == JVM_SIGNATURE_CLASS &&
+      sym->char_at(sym->utf8_length()-1) == JVM_SIGNATURE_ENDCLASS) {
     // This is a name from a signature.  Strip off the trimmings.
     // Call recursive to keep scope of strippedsym.
     TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
@@ -440,7 +440,7 @@
 
   // setup up the proper type to return on OOM
   ciKlass* fail_type;
-  if (sym->char_at(0) == '[') {
+  if (sym->char_at(0) == JVM_SIGNATURE_ARRAY) {
     fail_type = _unloaded_ciobjarrayklass;
   } else {
     fail_type = _unloaded_ciinstance_klass;
@@ -466,8 +466,8 @@
   // we must build an array type around it.  The CI requires array klasses
   // to be loaded if their element klasses are loaded, except when memory
   // is exhausted.
-  if (sym->char_at(0) == '[' &&
-      (sym->char_at(1) == '[' || sym->char_at(1) == 'L')) {
+  if (sym->char_at(0) == JVM_SIGNATURE_ARRAY &&
+      (sym->char_at(1) == JVM_SIGNATURE_ARRAY || sym->char_at(1) == JVM_SIGNATURE_CLASS)) {
     // We have an unloaded array.
     // Build it on the fly if the element class exists.
     TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
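
The hunks in this file, like those in the other ci/ and classfile/ files below, only rename bare character literals to the JVM_SIGNATURE_* constants; the descriptor handling itself is unchanged. As a reminder of the convention these checks rely on: 'L' (JVM_SIGNATURE_CLASS) and ';' (JVM_SIGNATURE_ENDCLASS) wrap a class name, and '[' (JVM_SIGNATURE_ARRAY) prefixes each array dimension. A minimal standalone sketch (illustrative helper, not HotSpot code):

    #include <string>

    // Illustrative only: strip the "L...;" wrapping that the code above handles
    // with JVM_SIGNATURE_CLASS and JVM_SIGNATURE_ENDCLASS.
    static std::string strip_class_descriptor(const std::string& sig) {
      if (sig.size() > 2 && sig.front() == 'L' && sig.back() == ';') {
        return sig.substr(1, sig.size() - 2);  // "Ljava/lang/String;" -> "java/lang/String"
      }
      return sig;  // primitive and array descriptors ("[...") pass through unchanged
    }
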
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,7 +116,7 @@
                                  jobject loader, jobject protection_domain)
   : ciKlass(name, T_OBJECT)
 {
-  assert(name->char_at(0) != '[', "not an instance klass");
+  assert(name->char_at(0) != JVM_SIGNATURE_ARRAY, "not an instance klass");
   _init_state = (InstanceKlass::ClassState)0;
   _nonstatic_field_size = -1;
   _has_nonstatic_fields = false;
--- a/src/hotspot/share/ci/ciMetadata.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/ci/ciMetadata.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -51,7 +51,6 @@
   virtual bool is_metadata() const          { return true; }
 
   virtual bool is_type() const              { return false; }
-  virtual bool is_cpcache() const           { return false; }
   virtual bool is_return_address() const    { return false; }
   virtual bool is_method() const            { return false; }
   virtual bool is_method_data() const       { return false; }
--- a/src/hotspot/share/ci/ciObjArrayKlass.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/ci/ciObjArrayKlass.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,16 +112,16 @@
   Symbol* base_name_sym = element_name->get_symbol();
   char* name;
 
-  if (base_name_sym->char_at(0) == '[' ||
-      (base_name_sym->char_at(0) == 'L' &&  // watch package name 'Lxx'
-       base_name_sym->char_at(element_len-1) == ';')) {
+  if (base_name_sym->char_at(0) == JVM_SIGNATURE_ARRAY ||
+      (base_name_sym->char_at(0) == JVM_SIGNATURE_CLASS &&  // watch package name 'Lxx'
+       base_name_sym->char_at(element_len-1) == JVM_SIGNATURE_ENDCLASS)) {
 
     int new_len = element_len + dimension + 1; // for the ['s and '\0'
     name = CURRENT_THREAD_ENV->name_buffer(new_len);
 
     int pos = 0;
     for ( ; pos < dimension; pos++) {
-      name[pos] = '[';
+      name[pos] = JVM_SIGNATURE_ARRAY;
     }
     strncpy(name+pos, (char*)element_name->base(), element_len);
     name[new_len-1] = '\0';
@@ -133,11 +133,11 @@
     name = CURRENT_THREAD_ENV->name_buffer(new_len);
     int pos = 0;
     for ( ; pos < dimension; pos++) {
-      name[pos] = '[';
+      name[pos] = JVM_SIGNATURE_ARRAY;
     }
-    name[pos++] = 'L';
+    name[pos++] = JVM_SIGNATURE_CLASS;
     strncpy(name+pos, (char*)element_name->base(), element_len);
-    name[new_len-2] = ';';
+    name[new_len-2] = JVM_SIGNATURE_ENDCLASS;
     name[new_len-1] = '\0';
   }
   return ciSymbol::make(name);
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -486,7 +486,7 @@
 
   // Two cases: this is an unloaded ObjArrayKlass or an
   // unloaded InstanceKlass.  Deal with both.
-  if (name->char_at(0) == '[') {
+  if (name->char_at(0) == JVM_SIGNATURE_ARRAY) {
     // Decompose the name.
     FieldArrayInfo fd;
     BasicType element_type = FieldType::get_array_info(name->get_symbol(),
--- a/src/hotspot/share/ci/ciReplay.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/ci/ciReplay.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -814,18 +814,18 @@
     }
 
     oop java_mirror = k->java_mirror();
-    if (field_signature[0] == '[') {
+    if (field_signature[0] == JVM_SIGNATURE_ARRAY) {
       int length = parse_int("array length");
       oop value = NULL;
 
-      if (field_signature[1] == '[') {
+      if (field_signature[1] == JVM_SIGNATURE_ARRAY) {
         // multi dimensional array
         ArrayKlass* kelem = (ArrayKlass *)parse_klass(CHECK);
         if (kelem == NULL) {
           return;
         }
         int rank = 0;
-        while (field_signature[rank] == '[') {
+        while (field_signature[rank] == JVM_SIGNATURE_ARRAY) {
           rank++;
         }
         jint* dims = NEW_RESOURCE_ARRAY(jint, rank);
@@ -851,7 +851,8 @@
           value = oopFactory::new_intArray(length, CHECK);
         } else if (strcmp(field_signature, "[J") == 0) {
           value = oopFactory::new_longArray(length, CHECK);
-        } else if (field_signature[0] == '[' && field_signature[1] == 'L') {
+        } else if (field_signature[0] == JVM_SIGNATURE_ARRAY &&
+                   field_signature[1] == JVM_SIGNATURE_CLASS) {
           Klass* kelem = resolve_klass(field_signature + 1, CHECK);
           value = oopFactory::new_objArray(kelem, length, CHECK);
         } else {
@@ -892,7 +893,7 @@
       } else if (strcmp(field_signature, "Ljava/lang/String;") == 0) {
         Handle value = java_lang_String::create_from_str(string_value, CHECK);
         java_mirror->obj_field_put(fd.offset(), value());
-      } else if (field_signature[0] == 'L') {
+      } else if (field_signature[0] == JVM_SIGNATURE_CLASS) {
         Klass* k = resolve_klass(string_value, CHECK);
         oop value = InstanceKlass::cast(k)->allocate_instance(CHECK);
         java_mirror->obj_field_put(fd.offset(), value);
--- a/src/hotspot/share/classfile/classFileParser.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -731,7 +731,7 @@
           const unsigned int name_len = name->utf8_length();
           if (tag == JVM_CONSTANT_Methodref &&
               name_len != 0 &&
-              name->char_at(0) == '<' &&
+              name->char_at(0) == JVM_SIGNATURE_SPECIAL &&
               name != vmSymbols::object_initializer_name()) {
             classfile_parse_error(
               "Bad method name at constant pool index %u in class file %s",
@@ -5171,24 +5171,25 @@
   if (length == 0) return false;  // Must have at least one char.
   for (const char* p = name; p != name + length; p++) {
     switch(*p) {
-      case '.':
-      case ';':
-      case '[':
+      case JVM_SIGNATURE_DOT:
+      case JVM_SIGNATURE_ENDCLASS:
+      case JVM_SIGNATURE_ARRAY:
         // do not permit '.', ';', or '['
         return false;
-      case '/':
+      case JVM_SIGNATURE_SLASH:
         // check for '//' or leading or trailing '/' which are not legal
         // unqualified name must not be empty
         if (type == ClassFileParser::LegalClass) {
-          if (p == name || p+1 >= name+length || *(p+1) == '/') {
+          if (p == name || p+1 >= name+length ||
+              *(p+1) == JVM_SIGNATURE_SLASH) {
             return false;
           }
         } else {
           return false;   // do not permit '/' unless it's class name
         }
         break;
-      case '<':
-      case '>':
+      case JVM_SIGNATURE_SPECIAL:
+      case JVM_SIGNATURE_ENDSPECIAL:
         // do not permit '<' or '>' in method names
         if (type == ClassFileParser::LegalMethod) {
           return false;
@@ -5225,7 +5226,7 @@
         last_is_slash = false;
         continue;
       }
-      if (slash_ok && ch == '/') {
+      if (slash_ok && ch == JVM_SIGNATURE_SLASH) {
         if (last_is_slash) {
           return NULL;  // Don't permit consecutive slashes
         }
@@ -5305,14 +5306,14 @@
         const char* const p = skip_over_field_name(signature + 1, true, --length);
 
         // The next character better be a semicolon
-        if (p && (p - signature) > 1 && p[0] == ';') {
+        if (p && (p - signature) > 1 && p[0] == JVM_SIGNATURE_ENDCLASS) {
           return p + 1;
         }
       }
       else {
         // Skip leading 'L' and ignore first appearance of ';'
         signature++;
-        const char* c = (const char*) memchr(signature, ';', length - 1);
+        const char* c = (const char*) memchr(signature, JVM_SIGNATURE_ENDCLASS, length - 1);
         // Format check signature
         if (c != NULL) {
           int newlen = c - (char*) signature;
@@ -5361,7 +5362,7 @@
       p = skip_over_field_signature(bytes, false, length, CHECK);
       legal = (p != NULL) && ((p - bytes) == (int)length);
     } else if (_major_version < JAVA_1_5_VERSION) {
-      if (bytes[0] != '<') {
+      if (bytes[0] != JVM_SIGNATURE_SPECIAL) {
         p = skip_over_field_name(bytes, true, length);
         legal = (p != NULL) && ((p - bytes) == (int)length);
       }
@@ -5396,7 +5397,7 @@
 
   if (length > 0) {
     if (_major_version < JAVA_1_5_VERSION) {
-      if (bytes[0] != '<') {
+      if (bytes[0] != JVM_SIGNATURE_SPECIAL) {
         const char* p = skip_over_field_name(bytes, false, length);
         legal = (p != NULL) && ((p - bytes) == (int)length);
       }
@@ -5429,7 +5430,7 @@
   bool legal = false;
 
   if (length > 0) {
-    if (bytes[0] == '<') {
+    if (bytes[0] == JVM_SIGNATURE_SPECIAL) {
       if (name == vmSymbols::object_initializer_name() || name == vmSymbols::class_initializer_name()) {
         legal = true;
       }
@@ -5513,7 +5514,7 @@
     // The first non-signature thing better be a ')'
     if ((length > 0) && (*p++ == JVM_SIGNATURE_ENDFUNC)) {
       length--;
-      if (name->utf8_length() > 0 && name->char_at(0) == '<') {
+      if (name->utf8_length() > 0 && name->char_at(0) == JVM_SIGNATURE_SPECIAL) {
         // All internal methods must return void
         if ((length == 1) && (p[0] == JVM_SIGNATURE_VOID)) {
           return args_size;
@@ -5916,7 +5917,7 @@
 // its _class_name field.
 void ClassFileParser::prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS) {
   ResourceMark rm(THREAD);
-  assert(strrchr(_class_name->as_C_string(), '/') == NULL,
+  assert(strrchr(_class_name->as_C_string(), JVM_SIGNATURE_SLASH) == NULL,
          "Unsafe anonymous class should not be in a package");
   const char* host_pkg_name =
     ClassLoader::package_from_name(unsafe_anonymous_host->name()->as_C_string(), NULL);
@@ -5949,7 +5950,7 @@
   assert(_unsafe_anonymous_host != NULL, "Expected an unsafe anonymous class");
 
   const jbyte* anon_last_slash = UTF8::strrchr((const jbyte*)_class_name->base(),
-                                               _class_name->utf8_length(), '/');
+                                               _class_name->utf8_length(), JVM_SIGNATURE_SLASH);
   if (anon_last_slash == NULL) {  // Unnamed package
     prepend_host_package_name(_unsafe_anonymous_host, CHECK);
   } else {
@@ -6560,7 +6561,7 @@
   if (class_name != NULL) {
     ResourceMark rm;
     char* name = class_name->as_C_string();
-    return strchr(name, '.') == NULL;
+    return strchr(name, JVM_SIGNATURE_DOT) == NULL;
   } else {
     return true;
   }
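
The constants swapped in above do not change which names these checks accept. Illustratively:

    "java/lang/String"    - legal class name ('/' separators allowed)
    "java//lang", "/Foo"  - rejected (empty component around a '/')
    "a.b", "a;b", "a[b"   - rejected ('.', ';' and '[' are never permitted)
    "<init>", "<clinit>"  - the only method names allowed to start with '<'
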
--- a/src/hotspot/share/classfile/classLoader.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/classLoader.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -187,7 +187,7 @@
     *bad_class_name = false;
   }
 
-  const char* const last_slash = strrchr(class_name, '/');
+  const char* const last_slash = strrchr(class_name, JVM_SIGNATURE_SLASH);
   if (last_slash == NULL) {
     // No package name
     return NULL;
@@ -195,16 +195,16 @@
 
   char* class_name_ptr = (char*) class_name;
   // Skip over '['s
-  if (*class_name_ptr == '[') {
+  if (*class_name_ptr == JVM_SIGNATURE_ARRAY) {
     do {
       class_name_ptr++;
-    } while (*class_name_ptr == '[');
+    } while (*class_name_ptr == JVM_SIGNATURE_ARRAY);
 
     // Fully qualified class names should not contain a 'L'.
     // Set bad_class_name to true to indicate that the package name
     // could not be obtained due to an error condition.
     // In this situation, is_same_class_package returns false.
-    if (*class_name_ptr == 'L') {
+    if (*class_name_ptr == JVM_SIGNATURE_CLASS) {
       if (bad_class_name != NULL) {
         *bad_class_name = true;
       }
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1012,7 +1012,7 @@
       klass->class_loader_data(), new_size, NULL, CHECK);
 
   // original_ordering might be empty if this class has no methods of its own
-  if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
+  if (JvmtiExport::can_maintain_original_method_order() || Arguments::is_dumping_archive()) {
     merged_ordering = MetadataFactory::new_array<int>(
         klass->class_loader_data(), new_size, CHECK);
   }
--- a/src/hotspot/share/classfile/javaAssertions.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/javaAssertions.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -79,7 +79,7 @@
   // JVM_DesiredAssertionStatus pass the external_name() to
   // JavaAssertion::enabled(), but that is done once per loaded class.
   for (int i = 0; i < len; ++i) {
-    if (name_copy[i] == '.') name_copy[i] = '/';
+    if (name_copy[i] == JVM_SIGNATURE_DOT) name_copy[i] = JVM_SIGNATURE_SLASH;
   }
 
   if (TraceJavaAssertions) {
@@ -135,7 +135,7 @@
   for (index = len - 1; p != 0; p = p->next(), --index) {
     assert(index >= 0, "length does not match list");
     Handle s = java_lang_String::create_from_str(p->name(), CHECK);
-    s = java_lang_String::char_converter(s, '/', '.', CHECK);
+    s = java_lang_String::char_converter(s, JVM_SIGNATURE_SLASH, JVM_SIGNATURE_DOT, CHECK);
     names->obj_at_put(index, s());
     enabled->bool_at_put(index, p->enabled());
   }
@@ -163,10 +163,10 @@
   // does not include a package, length will be 0 which will match items for the
   // default package (from options "-ea:..."  or "-da:...").
   size_t len = strlen(classname);
-  for (/* empty */; len > 0 && classname[len] != '/'; --len) /* empty */;
+  for (/* empty */; len > 0 && classname[len] != JVM_SIGNATURE_SLASH; --len) /* empty */;
 
   do {
-    assert(len == 0 || classname[len] == '/', "not a package name");
+    assert(len == 0 || classname[len] == JVM_SIGNATURE_SLASH, "not a package name");
     for (OptionList* p = _packages; p != 0; p = p->next()) {
       if (strncmp(p->name(), classname, len) == 0 && p->name()[len] == '\0') {
         return p;
@@ -175,7 +175,7 @@
 
     // Find the length of the next package, taking care to avoid decrementing
     // past 0 (len is unsigned).
-    while (len > 0 && classname[--len] != '/') /* empty */;
+    while (len > 0 && classname[--len] != JVM_SIGNATURE_SLASH) /* empty */;
   } while (len > 0);
 
   return 0;
--- a/src/hotspot/share/classfile/javaClasses.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -378,7 +378,7 @@
 
   if (_to_java_string_fn == NULL) {
     void *lib_handle = os::native_java_library();
-    _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "JNU_NewStringPlatform"));
+    _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "NewStringPlatform"));
     if (_to_java_string_fn == NULL) {
       fatal("NewStringPlatform missing");
     }
@@ -1546,7 +1546,7 @@
 int  java_lang_Class::classRedefinedCount_offset = -1;
 
 #define CLASS_FIELDS_DO(macro) \
-  macro(classRedefinedCount_offset, k, "classRedefinedCount", int_signature,         false) ; \
+  macro(classRedefinedCount_offset, k, "classRedefinedCount", int_signature,         false); \
   macro(_class_loader_offset,       k, "classLoader",         classloader_signature, false); \
   macro(_component_mirror_offset,   k, "componentType",       class_signature,       false); \
   macro(_module_offset,             k, "module",              module_signature,      false); \
@@ -1939,7 +1939,6 @@
   return method != NULL && (method->constants()->version() == version);
 }
 
-
 // This class provides a simple wrapper over the internal structure of
 // exception backtrace to insulate users of the backtrace from needing
 // to know what it looks like.
@@ -1951,7 +1950,11 @@
   typeArrayOop    _methods;
   typeArrayOop    _bcis;
   objArrayOop     _mirrors;
-  typeArrayOop    _names; // needed to insulate method name against redefinition
+  typeArrayOop    _names; // Needed to insulate method name against redefinition.
+  // This is set to a java.lang.Boolean(true) if the top frame
+  // of the backtrace is omitted because it shall be hidden.
+  // Else it is null.
+  oop             _has_hidden_top_frame;
   int             _index;
   NoSafepointVerifier _nsv;
 
@@ -1961,6 +1964,7 @@
     trace_mirrors_offset = java_lang_Throwable::trace_mirrors_offset,
     trace_names_offset   = java_lang_Throwable::trace_names_offset,
     trace_next_offset    = java_lang_Throwable::trace_next_offset,
+    trace_hidden_offset  = java_lang_Throwable::trace_hidden_offset,
     trace_size           = java_lang_Throwable::trace_size,
     trace_chunk_size     = java_lang_Throwable::trace_chunk_size
   };
@@ -1986,11 +1990,15 @@
     assert(names != NULL, "names array should be initialized in backtrace");
     return names;
   }
+  static oop get_has_hidden_top_frame(objArrayHandle chunk) {
+    oop hidden = chunk->obj_at(trace_hidden_offset);
+    return hidden;
+  }
 
  public:
 
   // constructor for new backtrace
-  BacktraceBuilder(TRAPS): _head(NULL), _methods(NULL), _bcis(NULL), _mirrors(NULL), _names(NULL) {
+  BacktraceBuilder(TRAPS): _head(NULL), _methods(NULL), _bcis(NULL), _mirrors(NULL), _names(NULL), _has_hidden_top_frame(NULL) {
     expand(CHECK);
     _backtrace = Handle(THREAD, _head);
     _index = 0;
@@ -2001,6 +2009,7 @@
     _bcis = get_bcis(backtrace);
     _mirrors = get_mirrors(backtrace);
     _names = get_names(backtrace);
+    _has_hidden_top_frame = get_has_hidden_top_frame(backtrace);
     assert(_methods->length() == _bcis->length() &&
            _methods->length() == _mirrors->length() &&
            _mirrors->length() == _names->length(),
@@ -2038,6 +2047,7 @@
     new_head->obj_at_put(trace_bcis_offset, new_bcis());
     new_head->obj_at_put(trace_mirrors_offset, new_mirrors());
     new_head->obj_at_put(trace_names_offset, new_names());
+    new_head->obj_at_put(trace_hidden_offset, NULL);
 
     _head    = new_head();
     _methods = new_methods();
@@ -2078,6 +2088,16 @@
     _index++;
   }
 
+  void set_has_hidden_top_frame(TRAPS) {
+    if (_has_hidden_top_frame == NULL) {
+      jvalue prim;
+      prim.z = 1;
+      PauseNoSafepointVerifier pnsv(&_nsv);
+      _has_hidden_top_frame = java_lang_boxing_object::create(T_BOOLEAN, &prim, CHECK);
+      _head->obj_at_put(trace_hidden_offset, _has_hidden_top_frame);
+    }
+  }
+
 };
 
 struct BacktraceElement : public StackObj {
@@ -2407,7 +2427,13 @@
       }
     }
     if (method->is_hidden()) {
-      if (skip_hidden)  continue;
+      if (skip_hidden) {
+        if (total_count == 0) {
+          // The top frame will be hidden from the stack trace.
+          bt.set_has_hidden_top_frame(CHECK);
+        }
+        continue;
+      }
     }
     bt.push(method, bci, CHECK);
     total_count++;
@@ -2524,6 +2550,37 @@
   }
 }
 
+bool java_lang_Throwable::get_top_method_and_bci(oop throwable, Method** method, int* bci) {
+  Thread* THREAD = Thread::current();
+  objArrayHandle result(THREAD, objArrayOop(backtrace(throwable)));
+  BacktraceIterator iter(result, THREAD);
+  // No backtrace available.
+  if (!iter.repeat()) return false;
+
+  // If the exception happened in a frame that has been hidden, i.e.,
+  // omitted from the back trace, we can not compute the message.
+  oop hidden = ((objArrayOop)backtrace(throwable))->obj_at(trace_hidden_offset);
+  if (hidden != NULL) {
+    return false;
+  }
+
+  // Get first backtrace element.
+  BacktraceElement bte = iter.next(THREAD);
+
+  InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(bte._mirror()));
+  assert(holder != NULL, "first element should be non-null");
+  Method* m = holder->method_with_orig_idnum(bte._method_id, bte._version);
+
+  // Original version is no longer available.
+  if (m == NULL || !version_matches(m, bte._version)) {
+    return false;
+  }
+
+  *method = m;
+  *bci = bte._bci;
+  return true;
+}
+
 oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) {
   // Allocate java.lang.StackTraceElement instance
   InstanceKlass* k = SystemDictionary::StackTraceElement_klass();
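
The new java_lang_Throwable::get_top_method_and_bci entry point exposes the method and bytecode index of the frame that raised the exception, and deliberately reports failure when that frame was hidden (recorded in the new trace_hidden_offset slot above). A hypothetical caller could look like this sketch; only get_top_method_and_bci itself comes from this change, the surrounding names are placeholders:

    Method* method = NULL;
    int bci = 0;
    if (java_lang_Throwable::get_top_method_and_bci(throwable /* oop */, &method, &bci)) {
      // method and bci identify the throwing bytecode, e.g. as input for
      // building a more detailed exception message.
    }
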
--- a/src/hotspot/share/classfile/javaClasses.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -203,7 +203,9 @@
   static inline bool value_equals(typeArrayOop str_value1, typeArrayOop str_value2);
 
   // Conversion between '.' and '/' formats
-  static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
+  static Handle externalize_classname(Handle java_string, TRAPS) {
+    return char_converter(java_string, JVM_SIGNATURE_SLASH, JVM_SIGNATURE_DOT, THREAD);
+  }
 
   // Conversion
   static Symbol* as_symbol(oop java_string);
@@ -520,7 +522,8 @@
     trace_mirrors_offset = 2,
     trace_names_offset   = 3,
     trace_next_offset    = 4,
-    trace_size           = 5,
+    trace_hidden_offset  = 5,
+    trace_size           = 6,
     trace_chunk_size     = 32
   };
 
@@ -570,6 +573,8 @@
   static void java_printStackTrace(Handle throwable, TRAPS);
   // Debugging
   friend class JavaClasses;
+  // Gets the method and bci of the top frame (TOS). Returns false if this failed.
+  static bool get_top_method_and_bci(oop throwable, Method** method, int* bci);
 };
 
 
--- a/src/hotspot/share/classfile/modules.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/modules.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -331,7 +331,7 @@
     if (!h_loader.is_null() &&
         !SystemDictionary::is_platform_class_loader(h_loader()) &&
         (strncmp(package_name, JAVAPKG, JAVAPKG_LEN) == 0 &&
-          (package_name[JAVAPKG_LEN] == '/' || package_name[JAVAPKG_LEN] == '\0'))) {
+          (package_name[JAVAPKG_LEN] == JVM_SIGNATURE_SLASH || package_name[JAVAPKG_LEN] == '\0'))) {
       const char* class_loader_name = loader_data->loader_name_and_id();
       size_t pkg_len = strlen(package_name);
       char* pkg_name = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, pkg_len + 1);
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -2533,7 +2533,7 @@
 
     // It's a primitive.  (Void has a primitive mirror too.)
     char ch = type->char_at(0);
-    assert(is_java_primitive(char2type(ch)) || ch == 'V', "");
+    assert(is_java_primitive(char2type(ch)) || ch == JVM_SIGNATURE_VOID, "");
     return Handle(THREAD, find_java_mirror_for_type(ch));
 
   } else if (FieldType::is_obj(type) || FieldType::is_array(type)) {
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -156,6 +156,7 @@
   do_klass(reflect_ConstantPool_klass,                  reflect_ConstantPool                                  ) \
   do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, reflect_UnsafeStaticFieldAccessorImpl                 ) \
   do_klass(reflect_CallerSensitive_klass,               reflect_CallerSensitive                               ) \
+  do_klass(reflect_NativeConstructorAccessorImpl_klass, reflect_NativeConstructorAccessorImpl                 ) \
                                                                                                                 \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */                                   \
   do_klass(DirectMethodHandle_klass,                    java_lang_invoke_DirectMethodHandle                   ) \
--- a/src/hotspot/share/classfile/verificationType.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/verificationType.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -122,19 +122,19 @@
   assert(is_array() && name()->utf8_length() >= 2, "Must be a valid array");
   Symbol* component;
   switch (name()->char_at(1)) {
-    case 'Z': return VerificationType(Boolean);
-    case 'B': return VerificationType(Byte);
-    case 'C': return VerificationType(Char);
-    case 'S': return VerificationType(Short);
-    case 'I': return VerificationType(Integer);
-    case 'J': return VerificationType(Long);
-    case 'F': return VerificationType(Float);
-    case 'D': return VerificationType(Double);
-    case '[':
+    case JVM_SIGNATURE_BOOLEAN: return VerificationType(Boolean);
+    case JVM_SIGNATURE_BYTE:    return VerificationType(Byte);
+    case JVM_SIGNATURE_CHAR:    return VerificationType(Char);
+    case JVM_SIGNATURE_SHORT:   return VerificationType(Short);
+    case JVM_SIGNATURE_INT:     return VerificationType(Integer);
+    case JVM_SIGNATURE_LONG:    return VerificationType(Long);
+    case JVM_SIGNATURE_FLOAT:   return VerificationType(Float);
+    case JVM_SIGNATURE_DOUBLE:  return VerificationType(Double);
+    case JVM_SIGNATURE_ARRAY:
       component = context->create_temporary_symbol(
         name(), 1, name()->utf8_length());
       return VerificationType::reference_type(component);
-    case 'L':
+    case JVM_SIGNATURE_CLASS:
       component = context->create_temporary_symbol(
         name(), 2, name()->utf8_length() - 1);
       return VerificationType::reference_type(component);
--- a/src/hotspot/share/classfile/verificationType.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/verificationType.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -209,24 +209,24 @@
   bool is_x_array(char sig) const {
     return is_null() || (is_array() && (name()->char_at(1) == sig));
   }
-  bool is_int_array() const { return is_x_array('I'); }
-  bool is_byte_array() const { return is_x_array('B'); }
-  bool is_bool_array() const { return is_x_array('Z'); }
-  bool is_char_array() const { return is_x_array('C'); }
-  bool is_short_array() const { return is_x_array('S'); }
-  bool is_long_array() const { return is_x_array('J'); }
-  bool is_float_array() const { return is_x_array('F'); }
-  bool is_double_array() const { return is_x_array('D'); }
-  bool is_object_array() const { return is_x_array('L'); }
-  bool is_array_array() const { return is_x_array('['); }
+  bool is_int_array() const { return is_x_array(JVM_SIGNATURE_INT); }
+  bool is_byte_array() const { return is_x_array(JVM_SIGNATURE_BYTE); }
+  bool is_bool_array() const { return is_x_array(JVM_SIGNATURE_BOOLEAN); }
+  bool is_char_array() const { return is_x_array(JVM_SIGNATURE_CHAR); }
+  bool is_short_array() const { return is_x_array(JVM_SIGNATURE_SHORT); }
+  bool is_long_array() const { return is_x_array(JVM_SIGNATURE_LONG); }
+  bool is_float_array() const { return is_x_array(JVM_SIGNATURE_FLOAT); }
+  bool is_double_array() const { return is_x_array(JVM_SIGNATURE_DOUBLE); }
+  bool is_object_array() const { return is_x_array(JVM_SIGNATURE_CLASS); }
+  bool is_array_array() const { return is_x_array(JVM_SIGNATURE_ARRAY); }
   bool is_reference_array() const
     { return is_object_array() || is_array_array(); }
   bool is_object() const
     { return (is_reference() && !is_null() && name()->utf8_length() >= 1 &&
-              name()->char_at(0) != '['); }
+              name()->char_at(0) != JVM_SIGNATURE_ARRAY); }
   bool is_array() const
     { return (is_reference() && !is_null() && name()->utf8_length() >= 2 &&
-              name()->char_at(0) == '['); }
+              name()->char_at(0) == JVM_SIGNATURE_ARRAY); }
   bool is_uninitialized() const
     { return ((_u._data & Uninitialized) == Uninitialized); }
   bool is_uninitialized_this() const
@@ -322,7 +322,7 @@
   int dimensions() const {
     assert(is_array(), "Must be an array");
     int index = 0;
-    while (name()->char_at(index) == '[') index++;
+    while (name()->char_at(index) == JVM_SIGNATURE_ARRAY) index++;
     return index;
   }
 
--- a/src/hotspot/share/classfile/verifier.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/verifier.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -2852,7 +2852,7 @@
     }
   }
 
-  if (method_name->char_at(0) == '<') {
+  if (method_name->char_at(0) == JVM_SIGNATURE_SPECIAL) {
     // Make sure <init> can only be invoked by invokespecial
     if (opcode != Bytecodes::_invokespecial ||
         method_name != vmSymbols::object_initializer_name()) {
@@ -3028,21 +3028,23 @@
     // Check for more than MAX_ARRAY_DIMENSIONS
     length = (int)strlen(component_name);
     if (length > MAX_ARRAY_DIMENSIONS &&
-        component_name[MAX_ARRAY_DIMENSIONS - 1] == '[') {
+        component_name[MAX_ARRAY_DIMENSIONS - 1] == JVM_SIGNATURE_ARRAY) {
       verify_error(ErrorContext::bad_code(bci),
         "Illegal anewarray instruction, array has more than 255 dimensions");
     }
     // add one dimension to component
     length++;
     arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length + 1);
-    int n = os::snprintf(arr_sig_str, length + 1, "[%s", component_name);
+    int n = os::snprintf(arr_sig_str, length + 1, "%c%s",
+                         JVM_SIGNATURE_ARRAY, component_name);
     assert(n == length, "Unexpected number of characters in string");
   } else {         // it's an object or interface
     const char* component_name = component_type.name()->as_utf8();
     // add one dimension to component with 'L' prepended and ';' postpended.
     length = (int)strlen(component_name) + 3;
     arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length + 1);
-    int n = os::snprintf(arr_sig_str, length + 1, "[L%s;", component_name);
+    int n = os::snprintf(arr_sig_str, length + 1, "%c%c%s;",
+                         JVM_SIGNATURE_ARRAY, JVM_SIGNATURE_CLASS, component_name);
     assert(n == length, "Unexpected number of characters in string");
   }
   Symbol* arr_sig = create_temporary_symbol(arr_sig_str, length);
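
The reworked snprintf calls spell the leading characters via JVM_SIGNATURE_ARRAY and JVM_SIGNATURE_CLASS but still produce the same array descriptors, for example:

    component "[I"               ->  "[[I"                   (one more array dimension)
    component "java/lang/String" ->  "[Ljava/lang/String;"   ('[' + 'L' + name + ';')
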
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -970,7 +970,7 @@
   case F_RNY:fname = "native synchronized "; break;
   default:   break;
   }
-  const char* kptr = strrchr(kname, '/');
+  const char* kptr = strrchr(kname, JVM_SIGNATURE_SLASH);
   if (kptr != NULL)  kname = kptr + 1;
   int len = jio_snprintf(buf, buflen, "%s: %s%s.%s%s",
                          str, fname, kname, mname, sname);
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -246,6 +246,7 @@
   template(reflect_Reflection,                        "jdk/internal/reflect/Reflection")              \
   template(reflect_CallerSensitive,                   "jdk/internal/reflect/CallerSensitive")         \
   template(reflect_CallerSensitive_signature,         "Ljdk/internal/reflect/CallerSensitive;")       \
+  template(reflect_NativeConstructorAccessorImpl,     "jdk/internal/reflect/NativeConstructorAccessorImpl")\
   template(checkedExceptions_name,                    "checkedExceptions")                        \
   template(clazz_name,                                "clazz")                                    \
   template(exceptionTypes_name,                       "exceptionTypes")                           \
--- a/src/hotspot/share/compiler/methodMatcher.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/compiler/methodMatcher.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -264,11 +264,13 @@
     c_match = check_mode(class_name, error_msg);
     m_match = check_mode(method_name, error_msg);
 
-    if ((strchr(class_name, '<') != NULL) || (strchr(class_name, '>') != NULL)) {
+    if ((strchr(class_name, JVM_SIGNATURE_SPECIAL) != NULL) ||
+        (strchr(class_name, JVM_SIGNATURE_ENDSPECIAL) != NULL)) {
       error_msg = "Chars '<' and '>' not allowed in class name";
       return;
     }
-    if ((strchr(method_name, '<') != NULL) || (strchr(method_name, '>') != NULL)) {
+    if ((strchr(method_name, JVM_SIGNATURE_SPECIAL) != NULL) ||
+        (strchr(method_name, JVM_SIGNATURE_ENDSPECIAL) != NULL)) {
       if ((strncmp("<init>", method_name, 255) != 0) && (strncmp("<clinit>", method_name, 255) != 0)) {
         error_msg = "Chars '<' and '>' only allowed in <init> and <clinit>";
         return;
--- a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -45,13 +45,25 @@
     FLAG_SET_DEFAULT(ExitOnOutOfMemoryError, true);
   }
 
+  // Warn users that non-resizable heap might be better for some configurations.
+  // We are not adjusting the heap size by ourselves, because it affects startup time.
+  if (InitialHeapSize != MaxHeapSize) {
+    log_warning(gc)("Consider setting -Xms equal to -Xmx to avoid resizing hiccups");
+  }
+
+  // Warn users that AlwaysPreTouch might be better for some configurations.
+  // We are not turning this on by ourselves, because it affects startup time.
+  if (FLAG_IS_DEFAULT(AlwaysPreTouch) && !AlwaysPreTouch) {
+    log_warning(gc)("Consider enabling -XX:+AlwaysPreTouch to avoid memory commit hiccups");
+  }
+
   if (EpsilonMaxTLABSize < MinTLABSize) {
-    warning("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize);
+    log_warning(gc)("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize);
     EpsilonMaxTLABSize = MinTLABSize;
   }
 
   if (!EpsilonElasticTLAB && EpsilonElasticTLABDecay) {
-    warning("Disabling EpsilonElasticTLABDecay because EpsilonElasticTLAB is disabled");
+    log_warning(gc)("Disabling EpsilonElasticTLABDecay because EpsilonElasticTLAB is disabled");
     FLAG_SET_DEFAULT(EpsilonElasticTLABDecay, false);
   }
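
Both new warnings steer users toward a fixed, pre-touched heap. An illustrative invocation that avoids them (heap size chosen arbitrarily here) would be:

    java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xms4g -Xmx4g -XX:+AlwaysPreTouch ...
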
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1630,7 +1630,6 @@
 }
 
 jint G1CollectedHeap::initialize() {
-  os::enable_vtime();
 
   // Necessary to satisfy locking discipline assertions.
 
@@ -4076,7 +4075,7 @@
     Atomic::add(r->rem_set()->occupied_locked(), &_rs_length);
 
     if (!is_young) {
-      g1h->_hot_card_cache->reset_card_counts(r);
+      g1h->hot_card_cache()->reset_card_counts(r);
     }
 
     if (!evacuation_failed) {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -129,7 +129,6 @@
 };
 
 class G1CollectedHeap : public CollectedHeap {
-  friend class G1FreeCollectionSetTask;
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
@@ -1138,7 +1137,7 @@
     return _reserved.contains(addr);
   }
 
-  G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
+  G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
 
   G1CardTable* card_table() const {
     return _card_table;
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -114,8 +114,9 @@
   hr->rem_set()->clear();
   hr->clear_cardtable();
 
-  if (_g1h->g1_hot_card_cache()->use_cache()) {
-    _g1h->g1_hot_card_cache()->reset_card_counts(hr);
+  G1HotCardCache* hcc = _g1h->hot_card_cache();
+  if (hcc->use_cache()) {
+    hcc->reset_card_counts(hr);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -487,7 +487,7 @@
                    G1CardTable* ct,
                    G1HotCardCache* hot_card_cache) :
   _scan_state(new G1RemSetScanState()),
-  _prev_period_summary(),
+  _prev_period_summary(false),
   _g1h(g1h),
   _ct(ct),
   _g1p(_g1h->policy()),
@@ -1404,7 +1404,7 @@
   if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
       (period_count % G1SummarizeRSetStatsPeriod == 0)) {
 
-    G1RemSetSummary current(this);
+    G1RemSetSummary current;
     _prev_period_summary.subtract_from(&current);
 
     Log(gc, remset) log;
@@ -1421,7 +1421,7 @@
   Log(gc, remset, exit) log;
   if (log.is_trace()) {
     log.trace(" Cumulative RS summary");
-    G1RemSetSummary current(this);
+    G1RemSetSummary current;
     ResourceMark rm;
     LogStream ls(log.trace());
     current.print_on(&ls);
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -81,8 +81,7 @@
   return _rs_threads_vtimes[thread];
 }
 
-G1RemSetSummary::G1RemSetSummary() :
-  _rem_set(NULL),
+G1RemSetSummary::G1RemSetSummary(bool should_update) :
   _total_mutator_refined_cards(0),
   _total_concurrent_refined_cards(0),
   _num_coarsenings(0),
@@ -91,17 +90,10 @@
   _sampling_thread_vtime(0.0f) {
 
   memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
-}
 
-G1RemSetSummary::G1RemSetSummary(G1RemSet* rem_set) :
-  _rem_set(rem_set),
-  _total_mutator_refined_cards(0),
-  _total_concurrent_refined_cards(0),
-  _num_coarsenings(0),
-  _num_vtimes(G1ConcurrentRefine::max_num_threads()),
-  _rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
-  _sampling_thread_vtime(0.0f) {
-  update();
+  if (should_update) {
+    update();
+  }
 }
 
 G1RemSetSummary::~G1RemSetSummary() {
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -36,8 +36,6 @@
 private:
   friend class GetRSThreadVTimeClosure;
 
-  G1RemSet* _rem_set;
-
   size_t _total_mutator_refined_cards;
   size_t _total_concurrent_refined_cards;
 
@@ -57,8 +55,7 @@
   void update();
 
 public:
-  G1RemSetSummary();
-  G1RemSetSummary(G1RemSet* remset);
+  G1RemSetSummary(bool should_update = true);
 
   ~G1RemSetSummary();
 
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -211,34 +211,41 @@
   LIRGenerator* gen = access.gen();
 
   DecoratorSet decorators = access.decorators();
-  if ((decorators & IN_NATIVE) != 0) {
+  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
+
+  if ((decorators & IN_NATIVE) != 0 && !is_traversal_mode) {
     assert(access.is_oop(), "IN_NATIVE access only for oop values");
     BarrierSetC1::load_at_resolved(access, result);
     LIR_OprList* args = new LIR_OprList();
+    LIR_Opr addr = access.resolved_addr();
+    addr = ensure_in_register(gen, addr);
     args->append(result);
+    args->append(addr);
     BasicTypeList signature;
     signature.append(T_OBJECT);
+    signature.append(T_ADDRESS);
     LIR_Opr call_result = gen->call_runtime(&signature, args,
                                             CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native),
                                             objectType, NULL);
     __ move(call_result, result);
-    return;
-  }
-
-  if (ShenandoahLoadRefBarrier) {
-    LIR_Opr tmp = gen->new_register(T_OBJECT);
-    BarrierSetC1::load_at_resolved(access, tmp);
-    tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr());
-    __ move(tmp, result);
   } else {
-    BarrierSetC1::load_at_resolved(access, result);
+    if (ShenandoahLoadRefBarrier) {
+      LIR_Opr tmp = gen->new_register(T_OBJECT);
+      BarrierSetC1::load_at_resolved(access, tmp);
+      tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr());
+      __ move(tmp, result);
+    } else {
+      BarrierSetC1::load_at_resolved(access, result);
+    }
   }
 
   if (ShenandoahKeepAliveBarrier) {
     bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
     bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
     bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
-    if (is_weak || is_phantom || is_anonymous) {
+    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
+
+    if ((is_weak || is_phantom || is_anonymous) && keep_alive) {
       // Register the value in the referent field with the pre-barrier
       LabelObj *Lcont_anonymous;
       if (is_anonymous) {
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -536,9 +536,12 @@
   bool mismatched = (decorators & C2_MISMATCHED) != 0;
   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
   bool on_heap = (decorators & IN_HEAP) != 0;
-  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
   bool is_unordered = (decorators & MO_UNORDERED) != 0;
   bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;
+  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
+  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
+  bool in_native = (decorators & IN_NATIVE) != 0;
 
   Node* top = Compile::current()->top();
 
@@ -547,7 +550,7 @@
 
   if (access.is_oop()) {
     if (ShenandoahLoadRefBarrier) {
-      load = new ShenandoahLoadReferenceBarrierNode(NULL, load, (decorators & IN_NATIVE) != 0);
+      load = new ShenandoahLoadReferenceBarrierNode(NULL, load, in_native && !is_traversal_mode);
       if (access.is_parse_access()) {
         load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
       } else {
@@ -563,7 +566,7 @@
   // Also we need to add memory barrier to prevent commoning reads
   // from this field across safepoint since GC can change its value.
   bool need_read_barrier = ShenandoahKeepAliveBarrier &&
-    (on_heap && (on_weak || (unknown && offset != top && obj != top)));
+    (on_weak_ref || (unknown && offset != top && obj != top));
 
   if (!access.is_oop() || !need_read_barrier) {
     return load;
@@ -573,7 +576,7 @@
   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
   GraphKit* kit = parse_access.kit();
 
-  if (on_weak) {
+  if (on_weak_ref && keep_alive) {
     // Use the pre-barrier to record the value in the referent field
     satb_write_barrier_pre(kit, false /* do_load */,
                            NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1033,7 +1033,7 @@
 
   address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
                                : target;
-  const char* name = is_native ? "oop_load_from_native_barrier" : "load_reference_barrier";
+  const char* name = is_native ? "load_reference_barrier_native" : "load_reference_barrier";
   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
 
   call->init_req(TypeFunc::Control, ctrl);
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -29,6 +29,11 @@
 #include "logging/log.hpp"
 #include "logging/logTag.hpp"
 
+ShenandoahPassiveHeuristics::ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
+  // Passive runs with max speed for allocation, because GC is always STW
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing);
+}
+
 bool ShenandoahPassiveHeuristics::should_start_gc() const {
   // Never do concurrent GCs.
   return false;
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -28,7 +28,9 @@
 
 class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
 public:
-   virtual bool should_start_gc() const;
+  ShenandoahPassiveHeuristics();
+
+  virtual bool should_start_gc() const;
 
   virtual bool should_process_references();
 
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalAggressiveHeuristics.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalAggressiveHeuristics.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -75,6 +75,9 @@
   RegionData *data = get_region_data_cache(heap->num_regions());
   size_t cnt = 0;
 
+  // About to choose the collection set, make sure we have pinned regions in correct state
+  heap->assert_pinned_region_status();
+
   // Step 0. Prepare all regions
   for (size_t i = 0; i < heap->num_regions(); i++) {
     ShenandoahHeapRegion* r = heap->get_region(i);
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -59,6 +59,9 @@
   RegionData *data = get_region_data_cache(heap->num_regions());
   size_t cnt = 0;
 
+  // About to choose the collection set, make sure we have pinned regions in correct state
+  heap->assert_pinned_region_status();
+
   // Step 0. Prepare all regions
 
   for (size_t i = 0; i < heap->num_regions(); i++) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -262,27 +262,39 @@
   }
 }
 
-oop ShenandoahBarrierSet::oop_load_from_native_barrier(oop obj) {
+oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, oop* load_addr) {
+  return load_reference_barrier_native_impl(obj, load_addr);
+}
+
+oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, narrowOop* load_addr) {
+  // Assumption: narrow oop version should not be used anywhere.
+  ShouldNotReachHere();
+  return NULL;
+}
+
+template <class T>
+oop ShenandoahBarrierSet::load_reference_barrier_native_impl(oop obj, T* load_addr) {
   if (CompressedOops::is_null(obj)) {
     return NULL;
   }
 
   ShenandoahMarkingContext* const marking_context = _heap->marking_context();
-
-  if (_heap->is_evacuation_in_progress()) {
-    // Normal GC
-    if (!marking_context->is_marked(obj)) {
+  if (_heap->is_evacuation_in_progress() && !marking_context->is_marked(obj)) {
+    Thread* thr = Thread::current();
+    if (thr->is_Java_thread()) {
       return NULL;
-    }
-  } else if (_heap->is_concurrent_traversal_in_progress()) {
-    // Traversal GC
-    if (marking_context->is_complete() &&
-        !marking_context->is_marked(resolve_forwarded_not_null(obj))) {
-      return NULL;
+    } else {
+      return obj;
     }
   }
 
-  return load_reference_barrier_not_null(obj);
+  oop fwd = load_reference_barrier_not_null(obj);
+  if (load_addr != NULL && fwd != obj) {
+    // Since we are here and we know the load address, update the reference.
+    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
+  }
+
+  return fwd;
 }
 
 void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -84,8 +84,6 @@
 
   void write_ref_field_work(void* v, oop o, bool release = false);
 
-  oop oop_load_from_native_barrier(oop obj);
-
   virtual void on_thread_create(Thread* thread);
   virtual void on_thread_destroy(Thread* thread);
   virtual void on_thread_attach(Thread* thread);
@@ -106,6 +104,9 @@
   template <class T>
   oop load_reference_barrier_mutator_work(oop obj, T* load_addr);
 
+  oop load_reference_barrier_native(oop obj, oop* load_addr);
+  oop load_reference_barrier_native(oop obj, narrowOop* load_addr);
+
   void enqueue(oop obj);
 
 private:
@@ -118,6 +119,9 @@
 
   oop load_reference_barrier_impl(oop obj);
 
+  template <class T>
+  oop load_reference_barrier_native_impl(oop obj, T* load_addr);
+
   static void keep_alive_if_weak(DecoratorSet decorators, oop value) {
     assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
     const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -69,7 +69,7 @@
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
   oop value = Raw::oop_load_not_in_heap(addr);
-  value = ShenandoahBarrierSet::barrier_set()->oop_load_from_native_barrier(value);
+  value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_native(value, addr);
   keep_alive_if_weak(decorators, value);
   return value;
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -27,6 +27,7 @@
 
 class ShenandoahHeap;
 class ShenandoahMarkingContext;
+class ShenandoahHeapRegionSet;
 class Thread;
 
 class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {
@@ -65,6 +66,20 @@
   inline void do_oop_work(T* p);
 };
 
+class ShenandoahTraversalUpdateRefsClosure: public OopClosure {
+private:
+  ShenandoahHeap* const           _heap;
+  ShenandoahHeapRegionSet* const  _traversal_set;
+
+public:
+  inline ShenandoahTraversalUpdateRefsClosure();
+  inline void do_oop(oop* p);
+  inline void do_oop(narrowOop* p);
+private:
+  template <class T>
+  inline void do_oop_work(T* p);
+};
+
 class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
 private:
   ShenandoahHeap* _heap;
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -26,6 +26,7 @@
 #include "gc/shenandoah/shenandoahAsserts.hpp"
 #include "gc/shenandoah/shenandoahClosures.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "runtime/thread.hpp"
 
@@ -78,6 +79,29 @@
 void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
 void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
 
+ShenandoahTraversalUpdateRefsClosure::ShenandoahTraversalUpdateRefsClosure() :
+  _heap(ShenandoahHeap::heap()),
+  _traversal_set(ShenandoahHeap::heap()->traversal_gc()->traversal_set()) {
+  assert(_heap->is_traversal_mode(), "Why are we here?");
+}
+
+template <class T>
+void ShenandoahTraversalUpdateRefsClosure::do_oop_work(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    if (_heap->in_collection_set(obj) || _traversal_set->is_in((HeapWord*)obj)) {
+      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      RawAccess<IS_NOT_NULL>::oop_store(p, obj);
+    } else {
+      shenandoah_assert_not_forwarded(p, obj);
+    }
+  }
+}
+
+void ShenandoahTraversalUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
+void ShenandoahTraversalUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
+
 ShenandoahEvacuateUpdateRootsClosure::ShenandoahEvacuateUpdateRootsClosure() :
   _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -319,13 +319,20 @@
 };
 
 void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
-  WorkGang* workers = _heap->workers();
-  bool is_par = workers->active_workers() > 1;
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+  ShenandoahGCPhase phase(root_phase);
+
 #if COMPILER2_OR_JVMCI
   DerivedPointerTable::clear();
 #endif
+
+  WorkGang* workers = _heap->workers();
+  bool is_par = workers->active_workers() > 1;
+
   ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
   workers->run_task(&task);
+
 #if COMPILER2_OR_JVMCI
   DerivedPointerTable::update_pointers();
 #endif
@@ -435,8 +442,6 @@
     weak_refs_work(full_gc);
   }
 
-  _heap->parallel_cleaning(full_gc);
-
   assert(task_queues()->is_empty(), "Should be empty");
   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -964,7 +964,6 @@
     ShenandoahHeapRegion* r;
     while ((r =_cs->claim_next()) != NULL) {
       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->region_number());
-      assert(r->is_conc_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->region_number());
       _sh->marked_object_iterate(r, &cl);
 
       if (ShenandoahPacing) {
@@ -1479,6 +1478,12 @@
   if (!cancelled_gc()) {
     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
 
+    // Marking is completed, deactivate SATB barrier
+    set_concurrent_mark_in_progress(false);
+    mark_complete_marking_context();
+
+    parallel_cleaning(false /* full_gc */);
+
     if (has_forwarded_objects()) {
       // Degen may be caused by failed evacuation of roots
       if (is_degenerated_gc_in_progress()) {
@@ -1486,39 +1491,50 @@
       } else {
         concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::update_roots);
       }
+      set_has_forwarded_objects(false);
    }
 
     if (ShenandoahVerify) {
       verifier()->verify_roots_no_forwarded();
     }
-
-    stop_concurrent_marking();
-
+    // All allocations past TAMS are implicitly live, adjust the region data.
+    // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
     {
       ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
-
-      // All allocations past TAMS are implicitly live, adjust the region data.
-      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
       ShenandoahCompleteLivenessClosure cl;
       parallel_heap_region_iterate(&cl);
     }
 
+    // Force the threads to reacquire their TLABs outside the collection set.
     {
-      ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
-
+      ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);
       make_parsable(true);
-
+    }
+
+    // We are about to select the collection set; make sure it knows about the
+    // current pinning status. This also allows trashing more regions whose
+    // pinning status has since been dropped.
+    {
+      ShenandoahGCPhase phase(ShenandoahPhaseTimings::sync_pinned);
+      sync_pinned_region_status();
+    }
+
+    // Trash the collection set left over from previous cycle, if any.
+    {
+      ShenandoahGCPhase phase(ShenandoahPhaseTimings::trash_cset);
       trash_cset_regions();
-
-      {
-        ShenandoahHeapLocker locker(lock());
-        _collection_set->clear();
-        _free_set->clear();
-
-        heuristics()->choose_collection_set(_collection_set);
-
-        _free_set->rebuild();
-      }
+    }
+
+    {
+      ShenandoahGCPhase phase(ShenandoahPhaseTimings::prepare_evac);
+
+      ShenandoahHeapLocker locker(lock());
+      _collection_set->clear();
+      _free_set->clear();
+
+      heuristics()->choose_collection_set(_collection_set);
+
+      _free_set->rebuild();
     }
 
     // If collection set has candidates, start evacuation.
@@ -1563,8 +1579,10 @@
     }
 
   } else {
+    // If this cycle was updating references, we need to keep the has_forwarded_objects
+    // flag on, so that subsequent phases can deal with it.
     concurrent_mark()->cancel();
-    stop_concurrent_marking();
+    set_concurrent_mark_in_progress(false);
 
     if (process_references()) {
       // Abandon reference processing right away: pre-cleaning must have failed.
@@ -1581,7 +1599,10 @@
 
   set_evacuation_in_progress(false);
 
-  retire_and_reset_gclabs();
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_retire_gclabs);
+    retire_and_reset_gclabs();
+  }
 
   if (ShenandoahVerify) {
     verifier()->verify_after_evacuation();
@@ -1777,6 +1798,7 @@
         // it, we fail degeneration right away and slide into Full GC to recover.
 
         {
+          sync_pinned_region_status();
           collection_set()->clear_current_index();
 
           ShenandoahHeapRegion* r;
@@ -1856,17 +1878,6 @@
   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
 }
 
-void ShenandoahHeap::stop_concurrent_marking() {
-  assert(is_concurrent_mark_in_progress(), "How else could we get here?");
-  set_concurrent_mark_in_progress(false);
-  if (!cancelled_gc()) {
-    // If we needed to update refs, and concurrent marking has been cancelled,
-    // we need to finish updating references.
-    set_has_forwarded_objects(false);
-    mark_complete_marking_context();
-  }
-}
-
 void ShenandoahHeap::force_satb_flush_all_threads() {
   if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
     // No need to flush SATBs
@@ -1903,7 +1914,7 @@
 }
 
 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
-   set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
+   set_gc_state_mask(TRAVERSAL, in_progress);
    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
 }
 
@@ -2035,11 +2046,19 @@
   // Cleanup weak roots
   ShenandoahGCPhase phase(timing_phase);
   if (has_forwarded_objects()) {
-    ShenandoahForwardedIsAliveClosure is_alive;
-    ShenandoahUpdateRefsClosure keep_alive;
-    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
-      cleaning_task(&is_alive, &keep_alive, num_workers);
-    _workers->run_task(&cleaning_task);
+    if (is_traversal_mode()) {
+      ShenandoahForwardedIsAliveClosure is_alive;
+      ShenandoahTraversalUpdateRefsClosure keep_alive;
+      ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalUpdateRefsClosure>
+        cleaning_task(&is_alive, &keep_alive, num_workers);
+      _workers->run_task(&cleaning_task);
+    } else {
+      ShenandoahForwardedIsAliveClosure is_alive;
+      ShenandoahUpdateRefsClosure keep_alive;
+      ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
+        cleaning_task(&is_alive, &keep_alive, num_workers);
+      _workers->run_task(&cleaning_task);
+    }
   } else {
     ShenandoahIsAliveClosure is_alive;
 #ifdef ASSERT
@@ -2061,7 +2080,12 @@
 }
 
 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
-  set_gc_state_mask(HAS_FORWARDED, cond);
+  if (is_traversal_mode()) {
+    set_gc_state_mask(HAS_FORWARDED | UPDATEREFS, cond);
+  } else {
+    set_gc_state_mask(HAS_FORWARDED, cond);
+  }
+
 }
 
 void ShenandoahHeap::set_process_references(bool pr) {
@@ -2128,16 +2152,45 @@
 }
 
 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
-  ShenandoahHeapLocker locker(lock());
-  heap_region_containing(o)->make_pinned();
+  heap_region_containing(o)->record_pin();
   return o;
 }
 
 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
+  heap_region_containing(o)->record_unpin();
+}
+
+void ShenandoahHeap::sync_pinned_region_status() {
   ShenandoahHeapLocker locker(lock());
-  heap_region_containing(o)->make_unpinned();
+
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion *r = get_region(i);
+    if (r->is_active()) {
+      if (r->is_pinned()) {
+        if (r->pin_count() == 0) {
+          r->make_unpinned();
+        }
+      } else {
+        if (r->pin_count() > 0) {
+          r->make_pinned();
+        }
+      }
+    }
+  }
+
+  assert_pinned_region_status();
 }
 
+#ifdef ASSERT
+void ShenandoahHeap::assert_pinned_region_status() {
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* r = get_region(i);
+    assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
+           "Region " SIZE_FORMAT " pinning status is inconsistent", i);
+  }
+}
+#endif
+
 GCTimer* ShenandoahHeap::gc_timer() const {
   return _gc_timer;
 }
@@ -2230,7 +2283,10 @@
 
   set_evacuation_in_progress(false);
 
-  retire_and_reset_gclabs();
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
+    retire_and_reset_gclabs();
+  }
 
   if (ShenandoahVerify) {
     if (!is_degenerated_gc_in_progress()) {
@@ -2240,15 +2296,20 @@
   }
 
   set_update_refs_in_progress(true);
-  make_parsable(true);
-  for (uint i = 0; i < num_regions(); i++) {
-    ShenandoahHeapRegion* r = get_region(i);
-    r->set_concurrent_iteration_safe_limit(r->top());
+
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare);
+
+    make_parsable(true);
+    for (uint i = 0; i < num_regions(); i++) {
+      ShenandoahHeapRegion* r = get_region(i);
+      r->set_concurrent_iteration_safe_limit(r->top());
+    }
+
+    // Reset iterator.
+    _update_refs_iterator.reset();
   }
 
-  // Reset iterator.
-  _update_refs_iterator.reset();
-
   if (ShenandoahPacing) {
     pacer()->setup_for_updaterefs();
   }
@@ -2259,7 +2320,7 @@
 
   // Check if there is left-over work, and finish it
   if (_update_refs_iterator.has_next()) {
-    ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
 
     // Finish updating references where we left off.
     clear_cancelled_gc();
@@ -2288,9 +2349,18 @@
     verifier()->verify_roots_in_to_space();
   }
 
-  ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
-
-  trash_cset_regions();
+  // Drop the unnecessary "pinned" state from regions that no longer have CP marks,
+  // which allows trashing them below.
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_sync_pinned);
+    sync_pinned_region_status();
+  }
+
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
+    trash_cset_regions();
+  }
+
   set_has_forwarded_objects(false);
   set_update_refs_in_progress(false);
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -43,6 +43,7 @@
 class ShenandoahGCStateResetter;
 class ShenandoahHeuristics;
 class ShenandoahMarkingContext;
+class ShenandoahMarkCompact;
 class ShenandoahMode;
 class ShenandoahPhaseTimings;
 class ShenandoahHeap;
@@ -574,6 +575,9 @@
   oop pin_object(JavaThread* thread, oop obj);
   void unpin_object(JavaThread* thread, oop obj);
 
+  void sync_pinned_region_status();
+  void assert_pinned_region_status() NOT_DEBUG_RETURN;
+
 // ---------- Allocation support
 //
 private:
@@ -713,8 +717,6 @@
 
   void deduplicate_string(oop str);
 
-  void stop_concurrent_marking();
-
 private:
   void trash_cset_regions();
   void update_heap_references(bool concurrent);
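The new declarations split pinning into a cheap mutator-side part (pin_object/unpin_object, which only count) and a safepoint-side part (sync_pinned_region_status, which reconciles region states under the heap lock). The counts are typically driven by JNI critical sections; a hedged example of native code that would exercise this path, assuming Shenandoah's object pinning backs GetPrimitiveArrayCritical here, as the pin_object/unpin_object entry points suggest:

#include <jni.h>

// Hypothetical native method: while the critical section is open, the region
// holding 'arr' keeps a non-zero pin count and will not be chosen for moving.
extern "C" JNIEXPORT jlong JNICALL
Java_Example_sumCritical(JNIEnv* env, jclass, jintArray arr) {
  jlong sum = 0;
  jint* data = static_cast<jint*>(env->GetPrimitiveArrayCritical(arr, nullptr));
  if (data != nullptr) {
    const jsize len = env->GetArrayLength(arr);
    for (jsize i = 0; i < len; i++) {
      sum += data[i];
    }
    // JNI_ABORT: read-only access, nothing needs to be copied back.
    env->ReleasePrimitiveArrayCritical(arr, data, JNI_ABORT);
  }
  return sum;
}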
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -130,15 +130,18 @@
 }
 
 inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
+  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
   return (oop) Atomic::cmpxchg(n, addr, c);
 }
 
 inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
+  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
   narrowOop val = CompressedOops::encode(n);
   return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, c));
 }
 
 inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
+  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
   narrowOop cmp = CompressedOops::encode(c);
   narrowOop val = CompressedOops::encode(n);
   return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -59,7 +59,6 @@
   _reserved(MemRegion(start, size_words)),
   _region_number(index),
   _new_top(NULL),
-  _critical_pins(0),
   _empty_time(os::elapsedTime()),
   _state(committed ? _empty_committed : _empty_uncommitted),
   _tlab_allocs(0),
@@ -69,7 +68,8 @@
   _seqnum_first_alloc_gc(0),
   _seqnum_last_alloc_mutator(0),
   _seqnum_last_alloc_gc(0),
-  _live_data(0) {
+  _live_data(0),
+  _critical_pins(0) {
 
   ContiguousSpace::initialize(_reserved, true, committed);
 }
@@ -187,25 +187,20 @@
 
 void ShenandoahHeapRegion::make_pinned() {
   _heap->assert_heaplock_owned_by_current_thread();
+  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());
+
   switch (_state) {
     case _regular:
-      assert (_critical_pins == 0, "sanity");
       set_state(_pinned);
     case _pinned_cset:
     case _pinned:
-      _critical_pins++;
       return;
     case _humongous_start:
-      assert (_critical_pins == 0, "sanity");
       set_state(_pinned_humongous_start);
     case _pinned_humongous_start:
-      _critical_pins++;
       return;
     case _cset:
-      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
-      assert (_critical_pins == 0, "sanity");
       _state = _pinned_cset;
-      _critical_pins++;
       return;
     default:
       report_illegal_transition("pinning");
@@ -214,32 +209,20 @@
 
 void ShenandoahHeapRegion::make_unpinned() {
   _heap->assert_heaplock_owned_by_current_thread();
+  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());
+
   switch (_state) {
     case _pinned:
-      assert (_critical_pins > 0, "sanity");
-      _critical_pins--;
-      if (_critical_pins == 0) {
-        set_state(_regular);
-      }
+      set_state(_regular);
       return;
     case _regular:
     case _humongous_start:
-      assert (_critical_pins == 0, "sanity");
       return;
     case _pinned_cset:
-      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
-      assert (_critical_pins > 0, "sanity");
-      _critical_pins--;
-      if (_critical_pins == 0) {
-        set_state(_cset);
-      }
+      set_state(_cset);
       return;
     case _pinned_humongous_start:
-      assert (_critical_pins > 0, "sanity");
-      _critical_pins--;
-      if (_critical_pins == 0) {
-        set_state(_humongous_start);
-      }
+      set_state(_humongous_start);
       return;
     default:
       report_illegal_transition("unpinning");
@@ -434,7 +417,7 @@
   st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
   st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
   st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
-  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);
+  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
   st->print("|SN " UINT64_FORMAT_X_W(12) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8),
             seqnum_first_alloc_mutator(), seqnum_last_alloc_mutator(),
             seqnum_first_alloc_gc(), seqnum_last_alloc_gc());
@@ -702,3 +685,16 @@
   }
   _state = to;
 }
+
+void ShenandoahHeapRegion::record_pin() {
+  Atomic::add((size_t)1, &_critical_pins);
+}
+
+void ShenandoahHeapRegion::record_unpin() {
+  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", region_number());
+  Atomic::sub((size_t)1, &_critical_pins);
+}
+
+size_t ShenandoahHeapRegion::pin_count() const {
+  return Atomic::load(&_critical_pins);
+}
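record_pin/record_unpin turn the pin bookkeeping into plain atomic arithmetic with no heap lock on the hot path; the make_pinned/make_unpinned state machine above now only runs when sync_pinned_region_status() walks the regions under the lock. A simplified, self-contained sketch of that split, with std::atomic and std::mutex standing in for HotSpot's Atomic class and the Shenandoah heap lock:

#include <atomic>
#include <mutex>
#include <vector>

struct Region {
  std::atomic<size_t> pin_count{0};
  bool pinned = false;                 // region "state", only touched under the lock
};

std::mutex heap_lock;                  // stand-in for the Shenandoah heap lock
std::vector<Region> regions(8);

void record_pin(Region& r)   { r.pin_count.fetch_add(1, std::memory_order_relaxed); }
void record_unpin(Region& r) { r.pin_count.fetch_sub(1, std::memory_order_relaxed); }

// Called at a safepoint-like moment: fold the counters into the region state.
void sync_pinned_region_status() {
  std::lock_guard<std::mutex> guard(heap_lock);
  for (Region& r : regions) {
    const size_t pins = r.pin_count.load(std::memory_order_relaxed);
    if (r.pinned && pins == 0)  r.pinned = false;
    if (!r.pinned && pins > 0)  r.pinned = true;
  }
}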
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -198,12 +198,15 @@
 
   // Macro-properties:
   bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
-  bool is_conc_move_allowed()      const { return is_regular() || _state == _cset; }
-  bool is_stw_move_allowed()       const { return is_conc_move_allowed() || (ShenandoahHumongousMoves && _state == _humongous_start); }
+  bool is_stw_move_allowed()       const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }
 
   RegionState state()              const { return _state; }
   int  state_ordinal()             const { return region_state_to_ordinal(_state); }
 
+  void record_pin();
+  void record_unpin();
+  size_t pin_count() const;
+
 private:
   static size_t RegionCount;
   static size_t RegionSizeBytes;
@@ -239,7 +242,6 @@
 
   // Rarely updated fields
   HeapWord* _new_top;
-  size_t _critical_pins;
   double _empty_time;
 
   // Seldom updated fields
@@ -256,6 +258,7 @@
   uint64_t _seqnum_last_alloc_gc;
 
   volatile size_t _live_data;
+  volatile size_t _critical_pins;
 
   // Claim some space at the end to protect next region
   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -120,6 +120,9 @@
 
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
+  // Check all pinned regions have updated status before choosing the collection set.
+  heap->assert_pinned_region_status();
+
   // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.
 
   size_t num_regions = heap->num_regions();
--- a/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -41,6 +41,9 @@
   ShenandoahLock() : _state(unlocked), _owner(NULL) {};
 
   void lock() {
+#ifdef ASSERT
+    assert(_owner != Thread::current(), "reentrant locking attempt, would deadlock");
+#endif
     Thread::SpinAcquire(&_state, "Shenandoah Heap Lock");
 #ifdef ASSERT
     assert(_state == locked, "must be locked");
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -110,7 +110,7 @@
     // b. Cancel concurrent mark, if in progress
     if (heap->is_concurrent_mark_in_progress()) {
       heap->concurrent_mark()->cancel();
-      heap->stop_concurrent_marking();
+      heap->set_concurrent_mark_in_progress(false);
     }
     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 
@@ -128,6 +128,9 @@
     // e. Set back forwarded objects bit back, in case some steps above dropped it.
     heap->set_has_forwarded_objects(has_forwarded_objects);
 
+    // f. Sync pinned region status from the CP marks
+    heap->sync_pinned_region_status();
+
     // The rest of prologue:
     BiasedLocking::preserve_marks();
     _preserved_marks->init(heap->workers()->active_workers());
@@ -240,8 +243,8 @@
   cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
   cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
   cm->finish_mark_from_roots(/* full_gc = */ true);
-
   heap->mark_complete_marking_context();
+  heap->parallel_cleaning(true /* full_gc */);
 }
 
 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
@@ -505,6 +508,10 @@
 
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
+  // About to figure out which regions can be compacted; make sure the pinning status
+  // has been updated in the GC prologue.
+  heap->assert_pinned_region_status();
+
   {
     // Trash the immediately collectible regions before computing addresses
     ShenandoahTrashImmediateGarbageClosure tigcl;
--- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -214,15 +214,30 @@
     _mark_context(ShenandoahHeap::heap()->marking_context()) {
   }
 
-  template <class T, bool STRING_DEDUP, bool DEGEN>
+  template <class T, bool STRING_DEDUP, bool DEGEN, bool ATOMIC_UPDATE>
   void work(T* p);
 
 };
 
+class ShenandoahTraversalRootsClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, false, false, false>(p); }
+
+public:
+  ShenandoahTraversalRootsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return false; }
+};
+
 class ShenandoahTraversalClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, false, false>(p); }
+  inline void do_oop_work(T* p)     { work<T, false, false, true>(p); }
 
 public:
   ShenandoahTraversalClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
@@ -237,7 +252,7 @@
 class ShenandoahTraversalMetadataClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, false, false>(p); }
+  inline void do_oop_work(T* p)     { work<T, false, false, true>(p); }
 
 public:
   ShenandoahTraversalMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
@@ -252,7 +267,7 @@
 class ShenandoahTraversalDedupClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, true, false>(p); }
+  inline void do_oop_work(T* p)     { work<T, true, false, true>(p); }
 
 public:
   ShenandoahTraversalDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
@@ -267,7 +282,7 @@
 class ShenandoahTraversalMetadataDedupClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, true, false>(p); }
+  inline void do_oop_work(T* p)     { work<T, true, false, true>(p); }
 
 public:
   ShenandoahTraversalMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
@@ -282,7 +297,7 @@
 class ShenandoahTraversalDegenClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, false, true>(p); }
+  inline void do_oop_work(T* p)     { work<T, false, true, false>(p); }
 
 public:
   ShenandoahTraversalDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
@@ -297,7 +312,7 @@
 class ShenandoahTraversalMetadataDegenClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, false, true>(p); }
+  inline void do_oop_work(T* p)     { work<T, false, true, false>(p); }
 
 public:
   ShenandoahTraversalMetadataDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
@@ -312,7 +327,7 @@
 class ShenandoahTraversalDedupDegenClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, true, true>(p); }
+  inline void do_oop_work(T* p)     { work<T, true, true, false>(p); }
 
 public:
   ShenandoahTraversalDedupDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
@@ -327,7 +342,7 @@
 class ShenandoahTraversalMetadataDedupDegenClosure : public ShenandoahTraversalSuperClosure {
 private:
   template <class T>
-  inline void do_oop_work(T* p)     { work<T, true, true>(p); }
+  inline void do_oop_work(T* p)     { work<T, true, true, false>(p); }
 
 public:
   ShenandoahTraversalMetadataDedupDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
--- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -38,9 +38,9 @@
   _heap->maybe_update_with_forwarded(p);
 }
 
-template <class T, bool STRING_DEDUP, bool DEGEN>
+template <class T, bool STRING_DEDUP, bool DEGEN, bool ATOMIC_UPDATE>
 inline void ShenandoahTraversalSuperClosure::work(T* p) {
-  _traversal_gc->process_oop<T, STRING_DEDUP, DEGEN>(p, _thread, _queue, _mark_context);
+  _traversal_gc->process_oop<T, STRING_DEDUP, DEGEN, ATOMIC_UPDATE>(p, _thread, _queue, _mark_context);
 }
 
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
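The extra ATOMIC_UPDATE parameter threaded through process_oop records whether other threads may race on the same slot: the concurrent closures pass true and update with a CAS, while the degenerated closures pass false and can use a plain store. A small stand-alone illustration of the choice, over std::atomic rather than heap oop slots (the names are illustrative):

#include <atomic>

// Illustrative only; the real code updates (narrow)oop slots via
// ShenandoahHeap::cas_oop or a RawAccess store, selected by ATOMIC_UPDATE.
template <bool ATOMIC_UPDATE, typename T>
void update_ref(std::atomic<T*>& slot, T* stale, T* forwarded) {
  if (ATOMIC_UPDATE) {
    // Concurrent path: only install the forwardee if the slot still holds the
    // stale value; losing the race means someone else already updated it.
    slot.compare_exchange_strong(stale, forwarded);
  } else {
    // Degenerated path: no races on this slot, a plain store suffices.
    slot.store(forwarded, std::memory_order_relaxed);
  }
}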
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -100,8 +100,10 @@
   f(purge_par,                                      "    Parallel Cleanup")             \
   f(purge_cldg,                                     "    CLDG")                         \
   f(complete_liveness,                              "  Complete Liveness")              \
+  f(retire_tlabs,                                   "  Retire TLABs")                   \
+  f(sync_pinned,                                    "  Sync Pinned")                    \
+  f(trash_cset,                                     "  Trash CSet")                     \
   f(prepare_evac,                                   "  Prepare Evacuation")             \
-  f(recycle_regions,                                "  Recycle regions")                \
                                                                                         \
   /* Per-thread timer block, should have "roots" counters in consistent order */        \
   f(init_evac,                                      "  Initial Evacuation")             \
@@ -127,9 +129,12 @@
                                                                                         \
   f(final_evac_gross,                               "Pause Final Evac (G)")             \
   f(final_evac,                                     "Pause Final Evac (N)")             \
+  f(final_evac_retire_gclabs,                       "  Retire GCLABs")                  \
                                                                                         \
   f(init_update_refs_gross,                         "Pause Init  Update Refs (G)")      \
   f(init_update_refs,                               "Pause Init  Update Refs (N)")      \
+  f(init_update_refs_retire_gclabs,                 "  Retire GCLABs")                  \
+  f(init_update_refs_prepare,                       "  Prepare")                        \
                                                                                         \
   f(final_update_refs_gross,                         "Pause Final Update Refs (G)")     \
   f(final_update_refs,                               "Pause Final Update Refs (N)")     \
@@ -157,7 +162,8 @@
   f(final_update_refs_string_dedup_queue_roots,      "    UR: Dedup Queue Roots")       \
   f(final_update_refs_finish_queues,                 "    UR: Finish Queues")           \
                                                                                         \
-  f(final_update_refs_recycle,                       "  Recycle")                       \
+  f(final_update_refs_sync_pinned,                   "  Sync Pinned")                   \
+  f(final_update_refs_trash_cset,                    "  Trash CSet")                    \
                                                                                         \
   f(degen_gc_gross,                                  "Pause Degenerated GC (G)")        \
   f(degen_gc,                                        "Pause Degenerated GC (N)")        \
@@ -189,6 +195,7 @@
   f(traversal_gc_prepare,                            "  Prepare")                       \
   f(traversal_gc_make_parsable,                      "    Make Parsable")               \
   f(traversal_gc_resize_tlabs,                       "    Resize TLABs")                \
+  f(traversal_gc_prepare_sync_pinned,                "    Sync Pinned")                 \
                                                                                         \
   /* Per-thread timer block, should have "roots" counters in consistent order */        \
   f(init_traversal_gc_work,                          "  Work")                          \
@@ -260,6 +267,7 @@
   f(final_traversal_update_string_dedup_queue_roots,    "    TU: Dedup Queue Roots")    \
   f(final_traversal_update_finish_queues,               "    TU: Finish Queues")        \
                                                                                         \
+  f(traversal_gc_sync_pinned,                        "  Sync Pinned")                   \
   f(traversal_gc_cleanup,                            "  Cleanup")                       \
                                                                                         \
   f(full_gc_gross,                                   "Pause Full GC (G)")               \
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -189,6 +189,18 @@
   _thread_roots(n_workers > 1) {
 }
 
+void ShenandoahRootUpdater::strong_roots_do(uint worker_id, OopClosure* oops_cl) {
+  CodeBlobToOopClosure update_blobs(oops_cl, CodeBlobToOopClosure::FixRelocations);
+  CLDToOopClosure clds(oops_cl, ClassLoaderData::_claim_strong);
+
+  _serial_roots.oops_do(oops_cl, worker_id);
+  _vm_roots.oops_do(oops_cl, worker_id);
+
+  _thread_roots.oops_do(oops_cl, NULL, worker_id);
+  _cld_roots.cld_do(&clds, worker_id);
+  _code_roots.code_blobs_do(&update_blobs, worker_id);
+}
+
 ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
   ShenandoahRootProcessor(phase),
   _thread_roots(n_workers > 1) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -304,6 +304,8 @@
 
   template<typename IsAlive, typename KeepAlive>
   void roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive);
+
+  void strong_roots_do(uint worker_id, OopClosure* oops_cl);
 };
 
 // Adjuster all roots at a safepoint during full gc
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -82,6 +82,6 @@
   ShenandoahBarrierSet::barrier_set()->clone_barrier(s);
 JRT_END
 
-JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_native(oopDesc * src))
-  return (oopDesc*) ShenandoahBarrierSet::barrier_set()->oop_load_from_native_barrier(oop(src));
+JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_native(oopDesc * src, oop* load_addr))
+  return (oopDesc*) ShenandoahBarrierSet::barrier_set()->load_reference_barrier_native(oop(src), load_addr);
 JRT_END
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -42,7 +42,7 @@
   static oopDesc* load_reference_barrier_fixup(oopDesc* src, oop* load_addr);
   static oopDesc* load_reference_barrier_fixup_narrow(oopDesc* src, narrowOop* load_addr);
 
-  static oopDesc* load_reference_barrier_native(oopDesc* src);
+  static oopDesc* load_reference_barrier_native(oopDesc* src, oop* load_addr);
 
   static void shenandoah_clone_barrier(oopDesc* src);
 };
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -186,7 +186,7 @@
 
     // Step 1: Process ordinary GC roots.
     {
-      ShenandoahTraversalClosure roots_cl(q, rp);
+      ShenandoahTraversalRootsClosure roots_cl(q, rp);
       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
       if (unload_classes) {
@@ -266,7 +266,7 @@
     // in similar way during nmethod-register process. Therefore, we don't need to rescan code
     // roots here.
     if (!_heap->is_degenerated_gc_in_progress()) {
-      ShenandoahTraversalClosure roots_cl(q, rp);
+      ShenandoahTraversalRootsClosure roots_cl(q, rp);
       ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
       if (unload_classes) {
         ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
@@ -340,9 +340,6 @@
 }
 
 void ShenandoahTraversalGC::prepare() {
-  _heap->collection_set()->clear();
-  assert(_heap->collection_set()->count() == 0, "collection set not clear");
-
   {
     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
     _heap->make_parsable(true);
@@ -356,15 +353,26 @@
   assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
   assert(!_heap->marking_context()->is_complete(), "should not be complete");
 
-  ShenandoahFreeSet* free_set = _heap->free_set();
+  // About to choose the collection set; make sure we know which regions are pinned.
+  {
+    ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_prepare_sync_pinned);
+    _heap->sync_pinned_region_status();
+  }
+
   ShenandoahCollectionSet* collection_set = _heap->collection_set();
+  {
+    ShenandoahHeapLocker lock(_heap->lock());
 
-  // Find collection set
-  _heap->heuristics()->choose_collection_set(collection_set);
-  prepare_regions();
+    collection_set->clear();
+    assert(collection_set->count() == 0, "collection set not clear");
 
-  // Rebuild free set
-  free_set->rebuild();
+    // Find collection set
+    _heap->heuristics()->choose_collection_set(collection_set);
+    prepare_regions();
+
+    // Rebuild free set
+    _heap->free_set()->rebuild();
+  }
 
   log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
                      byte_size_in_proper_unit(collection_set->garbage()),   proper_unit_for_byte_size(collection_set->garbage()),
@@ -385,11 +393,11 @@
 
   {
     ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
-    ShenandoahHeapLocker lock(_heap->lock());
     prepare();
   }
 
   _heap->set_concurrent_traversal_in_progress(true);
+  _heap->set_has_forwarded_objects(true);
 
   bool process_refs = _heap->process_references();
   if (process_refs) {
@@ -601,14 +609,24 @@
     TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
 
     // No more marking expected
+    _heap->set_concurrent_traversal_in_progress(false);
     _heap->mark_complete_marking_context();
 
     fixup_roots();
     _heap->parallel_cleaning(false);
 
+    _heap->set_has_forwarded_objects(false);
+
     // Resize metaspace
     MetaspaceGC::compute_new_size();
 
+    // Make sure the pinned region status is up to date: newly pinned regions must not
+    // be trashed, while newly unpinned regions should be.
+    {
+      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_sync_pinned);
+      _heap->sync_pinned_region_status();
+    }
+
     // Still good? We can now trash the cset, and make final verification
     {
       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
@@ -651,7 +669,6 @@
     }
 
     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
-    _heap->set_concurrent_traversal_in_progress(false);
     assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");
 
     if (ShenandoahVerify) {
@@ -697,8 +714,7 @@
   void work(uint worker_id) {
     ShenandoahParallelWorkerSession worker_session(worker_id);
     ShenandoahTraversalFixRootsClosure cl;
-    ShenandoahForwardedIsAliveClosure is_alive;
-    _rp->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalFixRootsClosure>(worker_id, &is_alive, &cl);
+    _rp->strong_roots_do(worker_id, &cl);
   }
 };
 
@@ -751,7 +767,7 @@
 
   template <class T>
   inline void do_oop_work(T* p) {
-    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
+    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
   }
 
 public:
@@ -773,7 +789,7 @@
 
   template <class T>
   inline void do_oop_work(T* p) {
-    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
+    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
   }
 
 public:
@@ -796,7 +812,7 @@
   template <class T>
   inline void do_oop_work(T* p) {
     ShenandoahEvacOOMScope evac_scope;
-    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
+    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
   }
 
 public:
@@ -819,7 +835,7 @@
   template <class T>
   inline void do_oop_work(T* p) {
     ShenandoahEvacOOMScope evac_scope;
-    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
+    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
   }
 
 public:
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -48,7 +48,7 @@
   void concurrent_traversal_collection();
   void final_traversal_collection();
 
-  template <class T, bool STRING_DEDUP, bool DEGEN>
+  template <class T, bool STRING_DEDUP, bool DEGEN, bool ATOMIC_UPDATE>
   inline void process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahMarkingContext* const mark_context);
 
   bool check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield);
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -35,12 +35,13 @@
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
-template <class T, bool STRING_DEDUP, bool DEGEN>
+template <class T, bool STRING_DEDUP, bool DEGEN, bool ATOMIC_UPDATE>
 void ShenandoahTraversalGC::process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahMarkingContext* const mark_context) {
   T o = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(o)) {
     oop obj = CompressedOops::decode_not_null(o);
     if (DEGEN) {
+      assert(!ATOMIC_UPDATE, "Degen path assumes non-atomic updates");
       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
       if (obj != forw) {
         // Update reference.
@@ -54,7 +55,11 @@
       }
       shenandoah_assert_forwarded_except(p, obj, _heap->cancelled_gc());
       // Update reference.
-      ShenandoahHeap::cas_oop(forw, p, obj);
+      if (ATOMIC_UPDATE) {
+        ShenandoahHeap::cas_oop(forw, p, obj);
+      } else {
+        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+      }
       obj = forw;
     }
 
--- a/src/hotspot/share/gc/z/zArray.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zArray.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,15 +74,13 @@
 template <typename T>
 class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
 public:
-  ZArrayIterator(ZArray<T>* array) :
-      ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
+  ZArrayIterator(ZArray<T>* array);
 };
 
 template <typename T>
 class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
 public:
-  ZArrayParallelIterator(ZArray<T>* array) :
-      ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
+  ZArrayParallelIterator(ZArray<T>* array);
 };
 
 #endif // SHARE_GC_Z_ZARRAY_HPP
--- a/src/hotspot/share/gc/z/zArray.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -117,4 +117,12 @@
   return false;
 }
 
+template <typename T>
+inline ZArrayIterator<T>::ZArrayIterator(ZArray<T>* array) :
+    ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
+
+template <typename T>
+inline ZArrayParallelIterator<T>::ZArrayParallelIterator(ZArray<T>* array) :
+    ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
+
 #endif // SHARE_GC_Z_ZARRAY_INLINE_HPP
--- a/src/hotspot/share/gc/z/zCPU.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zCPU.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,15 +22,15 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/z/zCPU.hpp"
+#include "gc/z/zCPU.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/padded.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/debug.hpp"
 
-#define ZCPU_UNKNOWN_AFFINITY (Thread*)-1;
-#define ZCPU_UNKNOWN_SELF     (Thread*)-2;
+#define ZCPU_UNKNOWN_AFFINITY ((Thread*)-1)
+#define ZCPU_UNKNOWN_SELF     ((Thread*)-2)
 
 PaddedEnd<ZCPU::ZCPUAffinity>* ZCPU::_affinity = NULL;
 THREAD_LOCAL Thread*           ZCPU::_self     = ZCPU_UNKNOWN_SELF;
@@ -51,20 +51,13 @@
                      os::initial_active_processor_count());
 }
 
-uint32_t ZCPU::count() {
-  return os::processor_count();
-}
-
-uint32_t ZCPU::id() {
-  assert(_affinity != NULL, "Not initialized");
-
-  // Fast path
-  if (_affinity[_cpu]._thread == _self) {
-    return _cpu;
+uint32_t ZCPU::id_slow() {
+  // Set current thread
+  if (_self == ZCPU_UNKNOWN_SELF) {
+    _self = Thread::current();
   }
 
-  // Slow path
-  _self = Thread::current();
+  // Set current CPU
   _cpu = os::processor_id();
 
   // Update affinity table
--- a/src/hotspot/share/gc/z/zCPU.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zCPU.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,8 @@
   static THREAD_LOCAL Thread*     _self;
   static THREAD_LOCAL uint32_t    _cpu;
 
+  static uint32_t id_slow();
+
 public:
   static void initialize();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCPU.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZCPU_INLINE_HPP
+#define SHARE_GC_Z_ZCPU_INLINE_HPP
+
+#include "gc/z/zCPU.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+
+inline uint32_t ZCPU::count() {
+  return os::processor_count();
+}
+
+inline uint32_t ZCPU::id() {
+  assert(_affinity != NULL, "Not initialized");
+
+  // Fast path
+  if (_affinity[_cpu]._thread == _self) {
+    return _cpu;
+  }
+
+  // Slow path
+  return id_slow();
+}
+
+#endif // SHARE_GC_Z_ZCPU_INLINE_HPP
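Moving count() and the fast path of id() into this new .inline.hpp keeps the common case (the cached affinity slot still belongs to the current thread) cheap and inlinable, while the OS query lives in the out-of-line id_slow(). A rough stand-alone analogue of the same fast/slow split, with sched_getcpu() as a Linux-specific stand-in for os::processor_id() and a trivial affinity table; none of this is HotSpot code:

#include <sched.h>    // sched_getcpu()
#include <cstdint>

namespace {

constexpr int kMaxCpus = 1024;
const void* affinity[kMaxCpus] = {};       // which thread last claimed each CPU slot
thread_local uint32_t cached_cpu = 0;

// Out-of-line slow path: ask the OS and refresh the affinity table.
uint32_t id_slow(const void* self) {
  cached_cpu = static_cast<uint32_t>(sched_getcpu());
  affinity[cached_cpu] = self;
  return cached_cpu;
}

// Inlinable fast path: reuse the cached id while this thread still owns it.
inline uint32_t id(const void* self) {
  if (affinity[cached_cpu] == self) {
    return cached_cpu;
  }
  return id_slow(self);
}

} // namespace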
--- a/src/hotspot/share/gc/z/zDirector.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zDirector.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -48,14 +48,6 @@
                        ZStatAllocRate::avg_sd() / M);
 }
 
-bool ZDirector::is_first() const {
-  return ZStatCycle::ncycles() == 0;
-}
-
-bool ZDirector::is_warm() const {
-  return ZStatCycle::ncycles() >= 3;
-}
-
 bool ZDirector::rule_timer() const {
   if (ZCollectionInterval == 0) {
     // Rule disabled
@@ -73,7 +65,7 @@
 }
 
 bool ZDirector::rule_warmup() const {
-  if (is_warm()) {
+  if (ZStatCycle::is_warm()) {
     // Rule disabled
     return false;
   }
@@ -93,7 +85,7 @@
 }
 
 bool ZDirector::rule_allocation_rate() const {
-  if (is_first()) {
+  if (ZStatCycle::is_first()) {
     // Rule disabled
     return false;
   }
@@ -140,7 +132,7 @@
 }
 
 bool ZDirector::rule_proactive() const {
-  if (!ZProactive || !is_warm()) {
+  if (!ZProactive || !ZStatCycle::is_warm()) {
     // Rule disabled
     return false;
   }
--- a/src/hotspot/share/gc/z/zDirector.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zDirector.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -36,9 +36,6 @@
 
   void sample_allocation_rate() const;
 
-  bool is_first() const;
-  bool is_warm() const;
-
   bool rule_timer() const;
   bool rule_warmup() const;
   bool rule_allocation_rate() const;
--- a/src/hotspot/share/gc/z/zDriver.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zDriver.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -250,11 +250,17 @@
   case GCCause::_z_allocation_stall:
   case GCCause::_z_proactive:
   case GCCause::_z_high_usage:
-  case GCCause::_metadata_GC_threshold:
     // Start asynchronous GC
     _gc_cycle_port.send_async(cause);
     break;
 
+  case GCCause::_metadata_GC_threshold:
+    // Start asynchronous GC, but only if the GC is warm
+    if (ZStatCycle::is_warm()) {
+      _gc_cycle_port.send_async(cause);
+    }
+    break;
+
   case GCCause::_gc_locker:
     // Restart VM operation previously blocked by the GC locker
     _gc_locker_port.signal();
--- a/src/hotspot/share/gc/z/zHeap.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -34,7 +34,7 @@
 #include "gc/z/zRelocationSetSelector.hpp"
 #include "gc/z/zResurrection.hpp"
 #include "gc/z/zStat.hpp"
-#include "gc/z/zThread.hpp"
+#include "gc/z/zThread.inline.hpp"
 #include "gc/z/zVerify.hpp"
 #include "gc/z/zWorkers.inline.hpp"
 #include "logging/log.hpp"
--- a/src/hotspot/share/gc/z/zList.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zList.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #define SHARE_GC_Z_ZLIST_HPP
 
 #include "memory/allocation.hpp"
-#include "utilities/debug.hpp"
 
 template <typename T> class ZList;
 
@@ -38,27 +37,15 @@
   ZListNode* _next;
   ZListNode* _prev;
 
-  ZListNode(ZListNode* next, ZListNode* prev) :
-      _next(next),
-      _prev(prev) {}
+  ZListNode(ZListNode* next, ZListNode* prev);
 
-  void set_unused() {
-    _next = NULL;
-    _prev = NULL;
-  }
+  void set_unused();
 
 public:
-  ZListNode() {
-    set_unused();
-  }
+  ZListNode();
+  ~ZListNode();
 
-  ~ZListNode() {
-    set_unused();
-  }
-
-  bool is_unused() const {
-    return _next == NULL && _prev == NULL;
-  }
+  bool is_unused() const;
 };
 
 // Doubly linked list
@@ -72,139 +59,34 @@
   ZList(const ZList<T>& list);
   ZList<T>& operator=(const ZList<T>& list);
 
-  void verify() const {
-    assert(_head._next->_prev == &_head, "List corrupt");
-    assert(_head._prev->_next == &_head, "List corrupt");
-  }
+  void verify() const;
 
-  void insert(ZListNode<T>* before, ZListNode<T>* node) {
-    verify();
+  void insert(ZListNode<T>* before, ZListNode<T>* node);
 
-    assert(node->is_unused(), "Already in a list");
-    node->_prev = before;
-    node->_next = before->_next;
-    before->_next = node;
-    node->_next->_prev = node;
-
-    _size++;
-  }
-
-  ZListNode<T>* cast_to_inner(T* elem) const {
-    return &elem->_node;
-  }
-
-  T* cast_to_outer(ZListNode<T>* node) const {
-    return (T*)((uintptr_t)node - offset_of(T, _node));
-  }
+  ZListNode<T>* cast_to_inner(T* elem) const;
+  T* cast_to_outer(ZListNode<T>* node) const;
 
 public:
-  ZList() :
-      _head(&_head, &_head),
-      _size(0) {
-    verify();
-  }
+  ZList();
 
-  size_t size() const {
-    verify();
-    return _size;
-  }
+  size_t size() const;
+  bool is_empty() const;
 
-  bool is_empty() const {
-    return _size == 0;
-  }
+  T* first() const;
+  T* last() const;
+  T* next(T* elem) const;
+  T* prev(T* elem) const;
 
-  T* first() const {
-    return is_empty() ? NULL : cast_to_outer(_head._next);
-  }
+  void insert_first(T* elem);
+  void insert_last(T* elem);
+  void insert_before(T* before, T* elem);
+  void insert_after(T* after, T* elem);
 
-  T* last() const {
-    return is_empty() ? NULL : cast_to_outer(_head._prev);
-  }
+  void remove(T* elem);
+  T* remove_first();
+  T* remove_last();
 
-  T* next(T* elem) const {
-    verify();
-    ZListNode<T>* next = cast_to_inner(elem)->_next;
-    return (next == &_head) ? NULL : cast_to_outer(next);
-  }
-
-  T* prev(T* elem) const {
-    verify();
-    ZListNode<T>* prev = cast_to_inner(elem)->_prev;
-    return (prev == &_head) ? NULL : cast_to_outer(prev);
-  }
-
-  void insert_first(T* elem) {
-    insert(&_head, cast_to_inner(elem));
-  }
-
-  void insert_last(T* elem) {
-    insert(_head._prev, cast_to_inner(elem));
-  }
-
-  void insert_before(T* before, T* elem) {
-    insert(cast_to_inner(before)->_prev, cast_to_inner(elem));
-  }
-
-  void insert_after(T* after, T* elem) {
-    insert(cast_to_inner(after), cast_to_inner(elem));
-  }
-
-  void remove(T* elem) {
-    verify();
-
-    ZListNode<T>* const node = cast_to_inner(elem);
-    assert(!node->is_unused(), "Not in a list");
-
-    ZListNode<T>* const next = node->_next;
-    ZListNode<T>* const prev = node->_prev;
-    assert(next->_prev == node, "List corrupt");
-    assert(prev->_next == node, "List corrupt");
-
-    prev->_next = next;
-    next->_prev = prev;
-    node->set_unused();
-
-    _size--;
-  }
-
-  T* remove_first() {
-    T* elem = first();
-    if (elem != NULL) {
-      remove(elem);
-    }
-
-    return elem;
-  }
-
-  T* remove_last() {
-    T* elem = last();
-    if (elem != NULL) {
-      remove(elem);
-    }
-
-    return elem;
-  }
-
-  void transfer(ZList<T>* list) {
-    verify();
-
-    if (!list->is_empty()) {
-      list->_head._next->_prev = _head._prev;
-      list->_head._prev->_next = _head._prev->_next;
-
-      _head._prev->_next = list->_head._next;
-      _head._prev = list->_head._prev;
-
-      list->_head._next = &list->_head;
-      list->_head._prev = &list->_head;
-
-      _size += list->_size;
-      list->_size = 0;
-
-      list->verify();
-      verify();
-    }
-  }
+  void transfer(ZList<T>* list);
 };
 
 template <typename T, bool forward>
@@ -226,15 +108,13 @@
 template <typename T>
 class ZListIterator : public ZListIteratorImpl<T, ZLIST_FORWARD> {
 public:
-  ZListIterator(const ZList<T>* list) :
-      ZListIteratorImpl<T, ZLIST_FORWARD>(list) {}
+  ZListIterator(const ZList<T>* list);
 };
 
 template <typename T>
 class ZListReverseIterator : public ZListIteratorImpl<T, ZLIST_REVERSE> {
 public:
-  ZListReverseIterator(const ZList<T>* list) :
-      ZListIteratorImpl<T, ZLIST_REVERSE>(list) {}
+  ZListReverseIterator(const ZList<T>* list);
 };
 
 #endif // SHARE_GC_Z_ZLIST_HPP
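
The hunks above follow HotSpot's header/inline-header convention: the class body in zList.hpp keeps only declarations, while the template definitions move to zList.inline.hpp. A minimal sketch of that split, using a made-up Foo type (not part of this changeset), might look like:

// foo.hpp -- declarations only (hypothetical example of the .hpp/.inline.hpp split)
template <typename T>
class Foo {
private:
  T _value;

public:
  Foo(const T& value);
  const T& value() const;
};

// foo.inline.hpp -- definitions, included only by the .cpp files that need them
// #include "foo.hpp"
template <typename T>
inline Foo<T>::Foo(const T& value) :
    _value(value) {}

template <typename T>
inline const T& Foo<T>::value() const {
  return _value;
}

Callers that only need the type can include foo.hpp; only translation units that instantiate the members pull in foo.inline.hpp, which keeps include dependencies (and rebuild times) down.
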
--- a/src/hotspot/share/gc/z/zList.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zList.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,193 @@
 #define SHARE_GC_Z_ZLIST_INLINE_HPP
 
 #include "gc/z/zList.hpp"
+#include "utilities/debug.hpp"
+
+template <typename T>
+inline ZListNode<T>::ZListNode(ZListNode* next, ZListNode* prev) :
+    _next(next),
+    _prev(prev) {}
+
+template <typename T>
+inline void ZListNode<T>::set_unused() {
+  _next = NULL;
+  _prev = NULL;
+}
+
+template <typename T>
+inline ZListNode<T>::ZListNode() {
+  set_unused();
+}
+
+template <typename T>
+inline ZListNode<T>::~ZListNode() {
+  set_unused();
+}
+
+template <typename T>
+inline bool ZListNode<T>::is_unused() const {
+  return _next == NULL && _prev == NULL;
+}
+
+template <typename T>
+inline void ZList<T>::verify() const {
+  assert(_head._next->_prev == &_head, "List corrupt");
+  assert(_head._prev->_next == &_head, "List corrupt");
+}
+
+template <typename T>
+inline void ZList<T>::insert(ZListNode<T>* before, ZListNode<T>* node) {
+  verify();
+
+  assert(node->is_unused(), "Already in a list");
+  node->_prev = before;
+  node->_next = before->_next;
+  before->_next = node;
+  node->_next->_prev = node;
+
+  _size++;
+}
+
+template <typename T>
+inline ZListNode<T>* ZList<T>::cast_to_inner(T* elem) const {
+  return &elem->_node;
+}
+
+template <typename T>
+inline T* ZList<T>::cast_to_outer(ZListNode<T>* node) const {
+  return (T*)((uintptr_t)node - offset_of(T, _node));
+}
+
+template <typename T>
+inline ZList<T>::ZList() :
+    _head(&_head, &_head),
+    _size(0) {
+  verify();
+}
+
+template <typename T>
+inline size_t ZList<T>::size() const {
+  verify();
+  return _size;
+}
+
+template <typename T>
+inline bool ZList<T>::is_empty() const {
+  return _size == 0;
+}
+
+template <typename T>
+inline T* ZList<T>::first() const {
+  return is_empty() ? NULL : cast_to_outer(_head._next);
+}
+
+template <typename T>
+inline T* ZList<T>::last() const {
+  return is_empty() ? NULL : cast_to_outer(_head._prev);
+}
+
+template <typename T>
+inline T* ZList<T>::next(T* elem) const {
+  verify();
+  ZListNode<T>* next = cast_to_inner(elem)->_next;
+  return (next == &_head) ? NULL : cast_to_outer(next);
+}
+
+template <typename T>
+inline T* ZList<T>::prev(T* elem) const {
+  verify();
+  ZListNode<T>* prev = cast_to_inner(elem)->_prev;
+  return (prev == &_head) ? NULL : cast_to_outer(prev);
+}
+
+template <typename T>
+inline void ZList<T>::insert_first(T* elem) {
+  insert(&_head, cast_to_inner(elem));
+}
+
+template <typename T>
+inline void ZList<T>::insert_last(T* elem) {
+  insert(_head._prev, cast_to_inner(elem));
+}
+
+template <typename T>
+inline void ZList<T>::insert_before(T* before, T* elem) {
+  insert(cast_to_inner(before)->_prev, cast_to_inner(elem));
+}
+
+template <typename T>
+inline void ZList<T>::insert_after(T* after, T* elem) {
+  insert(cast_to_inner(after), cast_to_inner(elem));
+}
+
+template <typename T>
+inline void ZList<T>::remove(T* elem) {
+  verify();
+
+  ZListNode<T>* const node = cast_to_inner(elem);
+  assert(!node->is_unused(), "Not in a list");
+
+  ZListNode<T>* const next = node->_next;
+  ZListNode<T>* const prev = node->_prev;
+  assert(next->_prev == node, "List corrupt");
+  assert(prev->_next == node, "List corrupt");
+
+  prev->_next = next;
+  next->_prev = prev;
+  node->set_unused();
+
+  _size--;
+}
+
+template <typename T>
+inline T* ZList<T>::remove_first() {
+  T* elem = first();
+  if (elem != NULL) {
+    remove(elem);
+  }
+
+  return elem;
+}
+
+template <typename T>
+inline T* ZList<T>::remove_last() {
+  T* elem = last();
+  if (elem != NULL) {
+    remove(elem);
+  }
+
+  return elem;
+}
+
+template <typename T>
+inline void ZList<T>::transfer(ZList<T>* list) {
+  verify();
+
+  if (!list->is_empty()) {
+    list->_head._next->_prev = _head._prev;
+    list->_head._prev->_next = _head._prev->_next;
+
+    _head._prev->_next = list->_head._next;
+    _head._prev = list->_head._prev;
+
+    list->_head._next = &list->_head;
+    list->_head._prev = &list->_head;
+
+    _size += list->_size;
+    list->_size = 0;
+
+    list->verify();
+    verify();
+  }
+}
 
 template <typename T, bool forward>
-ZListIteratorImpl<T, forward>::ZListIteratorImpl(const ZList<T>* list) :
+inline ZListIteratorImpl<T, forward>::ZListIteratorImpl(const ZList<T>* list) :
     _list(list),
     _next(forward ? list->first() : list->last()) {}
 
 template <typename T, bool forward>
-bool ZListIteratorImpl<T, forward>::next(T** elem) {
+inline bool ZListIteratorImpl<T, forward>::next(T** elem) {
   if (_next != NULL) {
     *elem = _next;
     _next = forward ? _list->next(_next) : _list->prev(_next);
@@ -43,4 +222,12 @@
   return false;
 }
 
+template <typename T>
+inline ZListIterator<T>::ZListIterator(const ZList<T>* list) :
+    ZListIteratorImpl<T, ZLIST_FORWARD>(list) {}
+
+template <typename T>
+inline ZListReverseIterator<T>::ZListReverseIterator(const ZList<T>* list) :
+    ZListIteratorImpl<T, ZLIST_REVERSE>(list) {}
+
 #endif // SHARE_GC_Z_ZLIST_INLINE_HPP
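
For context, ZList is an intrusive list: each element embeds a ZListNode, and cast_to_outer() recovers the element pointer by subtracting the node's offset. A hedged usage sketch follows; ZFoo and example() are made up for illustration, real users such as ZPage declare the embedded node the same way. It assumes gc/z/zList.inline.hpp is included.

// Hypothetical element type; the embedded node is what ZList links together.
class ZFoo {
  friend class ZList<ZFoo>;   // ZList needs access to _node via cast_to_inner()

private:
  ZListNode<ZFoo> _node;      // intrusive link, no per-element heap allocation
  int             _id;

public:
  ZFoo(int id) : _id(id) {}
  int id() const { return _id; }
};

void example() {
  ZFoo a(1), b(2);
  ZList<ZFoo> list;
  list.insert_last(&a);
  list.insert_last(&b);

  // Iterate in insertion order; next() returns NULL past the last element.
  for (ZFoo* f = list.first(); f != NULL; f = list.next(f)) {
    // use f->id()
  }

  list.remove(&a);            // unlinks the node, the element itself stays alive
}
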
--- a/src/hotspot/share/gc/z/zLiveMap.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zLiveMap.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -25,7 +25,7 @@
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zLiveMap.inline.hpp"
 #include "gc/z/zStat.hpp"
-#include "gc/z/zThread.hpp"
+#include "gc/z/zThread.inline.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
--- a/src/hotspot/share/gc/z/zMark.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zMark.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -34,7 +34,7 @@
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zTask.hpp"
-#include "gc/z/zThread.hpp"
+#include "gc/z/zThread.inline.hpp"
 #include "gc/z/zThreadLocalAllocBuffer.hpp"
 #include "gc/z/zUtils.inline.hpp"
 #include "gc/z/zWorkers.inline.hpp"
--- a/src/hotspot/share/gc/z/zMemory.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zMemory.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -24,6 +24,7 @@
 #ifndef SHARE_GC_Z_ZMEMORY_INLINE_HPP
 #define SHARE_GC_Z_ZMEMORY_INLINE_HPP
 
+#include "gc/z/zList.inline.hpp"
 #include "gc/z/zMemory.hpp"
 #include "utilities/debug.hpp"
 
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,9 @@
 #include "gc/z/zObjectAllocator.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zStat.hpp"
-#include "gc/z/zThread.hpp"
+#include "gc/z/zThread.inline.hpp"
 #include "gc/z/zUtils.inline.hpp"
+#include "gc/z/zValue.inline.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/safepoint.hpp"
--- a/src/hotspot/share/gc/z/zObjectAllocator.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zObjectAllocator.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/z/zPage.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zPage.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -22,6 +22,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/z/zList.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zPhysicalMemory.inline.hpp"
 #include "gc/z/zVirtualMemory.inline.hpp"
@@ -52,6 +53,8 @@
   assert_initialized();
 }
 
+ZPage::~ZPage() {}
+
 void ZPage::assert_initialized() const {
   assert(!_virtual.is_null(), "Should not be null");
   assert(!_physical.is_null(), "Should not be null");
--- a/src/hotspot/share/gc/z/zPage.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zPage.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -56,6 +56,7 @@
 public:
   ZPage(const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem);
   ZPage(uint8_t type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem);
+  ~ZPage();
 
   uint32_t object_max_count() const;
   size_t object_alignment_shift() const;
--- a/src/hotspot/share/gc/z/zPageCache.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageCache.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -27,6 +27,7 @@
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zPageCache.hpp"
 #include "gc/z/zStat.hpp"
+#include "gc/z/zValue.inline.hpp"
 #include "logging/log.hpp"
 
 static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
--- a/src/hotspot/share/gc/z/zPageCache.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageCache.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 
 #include "gc/z/zList.inline.hpp"
 #include "gc/z/zPageCache.hpp"
+#include "gc/z/zValue.inline.hpp"
 
 inline size_t ZPageCache::available() const {
   return _available;
--- a/src/hotspot/share/gc/z/zReferenceProcessor.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zReferenceProcessor.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -32,6 +32,7 @@
 #include "gc/z/zTask.hpp"
 #include "gc/z/zTracer.inline.hpp"
 #include "gc/z/zUtils.inline.hpp"
+#include "gc/z/zValue.inline.hpp"
 #include "memory/universe.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
--- a/src/hotspot/share/gc/z/zRelocate.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zRelocate.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -33,6 +33,7 @@
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zTask.hpp"
+#include "gc/z/zThread.inline.hpp"
 #include "gc/z/zThreadLocalAllocBuffer.hpp"
 #include "gc/z/zWorkers.hpp"
 #include "logging/log.hpp"
--- a/src/hotspot/share/gc/z/zStat.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zStat.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -23,7 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/z/zCollectedHeap.hpp"
-#include "gc/z/zCPU.hpp"
+#include "gc/z/zCPU.inline.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zLargePages.inline.hpp"
@@ -716,7 +716,7 @@
 }
 
 void ZStatSubPhase::register_end(const Ticks& start, const Ticks& end) const {
-  ZTracer::tracer()->report_thread_phase(*this, start, end);
+  ZTracer::tracer()->report_thread_phase(name(), start, end);
 
   const Tickspan duration = end - start;
   ZStatSample(_sampler, duration.value());
@@ -736,7 +736,7 @@
 }
 
 void ZStatCriticalPhase::register_end(const Ticks& start, const Ticks& end) const {
-  ZTracer::tracer()->report_thread_phase(*this, start, end);
+  ZTracer::tracer()->report_thread_phase(name(), start, end);
 
   const Tickspan duration = end - start;
   ZStatSample(_sampler, duration.value());
@@ -759,7 +759,7 @@
 //
 // Stat sample/inc
 //
-void ZStatSample(const ZStatSampler& sampler, uint64_t value, bool trace) {
+void ZStatSample(const ZStatSampler& sampler, uint64_t value) {
   ZStatSamplerData* const cpu_data = sampler.get();
   Atomic::add(1u, &cpu_data->_nsamples);
   Atomic::add(value, &cpu_data->_sum);
@@ -782,18 +782,14 @@
     max = prev_max;
   }
 
-  if (trace) {
-    ZTracer::tracer()->report_stat_sampler(sampler, value);
-  }
+  ZTracer::tracer()->report_stat_sampler(sampler, value);
 }
 
-void ZStatInc(const ZStatCounter& counter, uint64_t increment, bool trace) {
+void ZStatInc(const ZStatCounter& counter, uint64_t increment) {
   ZStatCounterData* const cpu_data = counter.get();
   const uint64_t value = Atomic::add(increment, &cpu_data->_counter);
 
-  if (trace) {
-    ZTracer::tracer()->report_stat_counter(counter, increment, value);
-  }
+  ZTracer::tracer()->report_stat_counter(counter, increment, value);
 }
 
 void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) {
@@ -1049,6 +1045,14 @@
   _normalized_duration.add(normalized_duration);
 }
 
+bool ZStatCycle::is_first() {
+  return _ncycles == 0;
+}
+
+bool ZStatCycle::is_warm() {
+  return _ncycles >= 3;
+}
+
 uint64_t ZStatCycle::ncycles() {
   return _ncycles;
 }
@@ -1215,6 +1219,20 @@
 ZStatHeap::ZAtRelocateStart ZStatHeap::_at_relocate_start;
 ZStatHeap::ZAtRelocateEnd ZStatHeap::_at_relocate_end;
 
+size_t ZStatHeap::capacity_high() {
+  return MAX4(_at_mark_start.capacity,
+              _at_mark_end.capacity,
+              _at_relocate_start.capacity,
+              _at_relocate_end.capacity);
+}
+
+size_t ZStatHeap::capacity_low() {
+  return MIN4(_at_mark_start.capacity,
+              _at_mark_end.capacity,
+              _at_relocate_start.capacity,
+              _at_relocate_end.capacity);
+}
+
 size_t ZStatHeap::available(size_t used) {
   return _at_initialize.max_capacity - used;
 }
@@ -1282,8 +1300,8 @@
                                     size_t used_high,
                                     size_t used_low) {
   _at_relocate_end.capacity = capacity;
-  _at_relocate_end.capacity_high = capacity;
-  _at_relocate_end.capacity_low = _at_mark_start.capacity;
+  _at_relocate_end.capacity_high = capacity_high();
+  _at_relocate_end.capacity_low = capacity_low();
   _at_relocate_end.reserve = reserve(used);
   _at_relocate_end.reserve_high = reserve(used_low);
   _at_relocate_end.reserve_low = reserve(used_high);
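
The relocate-end fix above derives capacity_high/capacity_low from all four sampling points (mark start, mark end, relocate start, relocate end) instead of the current capacity alone. A minimal standalone sketch of the same watermark idea, using std::min/std::max rather than HotSpot's MIN4/MAX4 macros:

#include <algorithm>
#include <cstddef>

// Track the high and low watermark of a value sampled at several GC phases.
struct Watermarks {
  size_t samples[4];   // e.g. mark start, mark end, relocate start, relocate end

  size_t high() const {
    return std::max(std::max(samples[0], samples[1]),
                    std::max(samples[2], samples[3]));
  }

  size_t low() const {
    return std::min(std::min(samples[0], samples[1]),
                    std::min(samples[2], samples[3]));
  }
};
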
--- a/src/hotspot/share/gc/z/zStat.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zStat.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -315,8 +315,8 @@
 //
 // Stat sample/increment
 //
-void ZStatSample(const ZStatSampler& sampler, uint64_t value, bool trace = ZStatisticsForceTrace);
-void ZStatInc(const ZStatCounter& counter, uint64_t increment = 1, bool trace = ZStatisticsForceTrace);
+void ZStatSample(const ZStatSampler& sampler, uint64_t value);
+void ZStatInc(const ZStatCounter& counter, uint64_t increment = 1);
 void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment = 1);
 
 //
@@ -374,6 +374,8 @@
   static void at_start();
   static void at_end(double boost_factor);
 
+  static bool is_first();
+  static bool is_warm();
   static uint64_t ncycles();
   static const AbsSeq& normalized_duration();
   static double time_since_last();
@@ -519,6 +521,8 @@
     size_t free_low;
   } _at_relocate_end;
 
+  static size_t capacity_high();
+  static size_t capacity_low();
   static size_t available(size_t used);
   static size_t reserve(size_t used);
   static size_t free(size_t used);
--- a/src/hotspot/share/gc/z/zThread.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zThread.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,7 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/z/zThread.hpp"
+#include "gc/z/zThread.inline.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/debug.hpp"
 
--- a/src/hotspot/share/gc/z/zThread.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zThread.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 
 #include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "utilities/debug.hpp"
 
 class ZThread : public AllStatic {
   friend class ZTask;
@@ -43,12 +42,7 @@
   static THREAD_LOCAL uint      _worker_id;
 
   static void initialize();
-
-  static void ensure_initialized() {
-    if (!_initialized) {
-      initialize();
-    }
-  }
+  static void ensure_initialized();
 
   static void set_worker();
   static void set_runtime_worker();
@@ -59,36 +53,12 @@
 
 public:
   static const char* name();
-
-  static uintptr_t id() {
-    ensure_initialized();
-    return _id;
-  }
-
-  static bool is_vm() {
-    ensure_initialized();
-    return _is_vm;
-  }
-
-  static bool is_java() {
-    ensure_initialized();
-    return _is_java;
-  }
-
-  static bool is_worker() {
-    ensure_initialized();
-    return _is_worker;
-  }
-
-  static bool is_runtime_worker() {
-    ensure_initialized();
-    return _is_runtime_worker;
-  }
-
-  static uint worker_id() {
-    assert(has_worker_id(), "Worker id not initialized");
-    return _worker_id;
-  }
+  static uintptr_t id();
+  static bool is_vm();
+  static bool is_java();
+  static bool is_worker();
+  static bool is_runtime_worker();
+  static uint worker_id();
 };
 
 #endif // SHARE_GC_Z_ZTHREAD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zThread.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZTHREAD_INLINE_HPP
+#define SHARE_GC_Z_ZTHREAD_INLINE_HPP
+
+#include "gc/z/zThread.hpp"
+#include "utilities/debug.hpp"
+
+inline void ZThread::ensure_initialized() {
+  if (!_initialized) {
+    initialize();
+  }
+}
+
+inline uintptr_t ZThread::id() {
+  ensure_initialized();
+  return _id;
+}
+
+inline bool ZThread::is_vm() {
+  ensure_initialized();
+  return _is_vm;
+}
+
+inline bool ZThread::is_java() {
+  ensure_initialized();
+  return _is_java;
+}
+
+inline bool ZThread::is_worker() {
+  ensure_initialized();
+  return _is_worker;
+}
+
+inline bool ZThread::is_runtime_worker() {
+  ensure_initialized();
+  return _is_runtime_worker;
+}
+
+inline uint ZThread::worker_id() {
+  assert(has_worker_id(), "Worker id not initialized");
+  return _worker_id;
+}
+
+#endif // SHARE_GC_Z_ZTHREAD_INLINE_HPP
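
The inline accessors above all funnel through ensure_initialized(), which lazily fills the THREAD_LOCAL fields the first time a given thread asks for them. A generic sketch of that lazy thread-local pattern, with illustrative names that are not HotSpot API:

// Illustrative lazy thread-local initialization, mirroring ZThread::ensure_initialized().
#include <cstdint>

class LazyThreadInfo {
private:
  static thread_local bool      _initialized;
  static thread_local uintptr_t _id;

  static void initialize() {
    // Per-thread setup, done at most once per thread.
    _id = reinterpret_cast<uintptr_t>(&_initialized);
    _initialized = true;
  }

  static void ensure_initialized() {
    if (!_initialized) {
      initialize();
    }
  }

public:
  static uintptr_t id() {
    ensure_initialized();   // every accessor guarantees initialization first
    return _id;
  }
};

thread_local bool      LazyThreadInfo::_initialized = false;
thread_local uintptr_t LazyThreadInfo::_id = 0;
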
--- a/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zThreadLocalAllocBuffer.hpp"
+#include "gc/z/zValue.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/thread.hpp"
 
--- a/src/hotspot/share/gc/z/zTracer.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zTracer.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -22,18 +22,19 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zTracer.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jfr/jfrEvents.hpp"
-#include "runtime/safepoint.hpp"
 #include "runtime/safepointVerifiers.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_JFR
 #include "jfr/metadata/jfrSerializer.hpp"
 #endif
 
 #if INCLUDE_JFR
+
 class ZStatisticsCounterTypeConstant : public JfrSerializer {
 public:
   virtual void serialize(JfrCheckpointWriter& writer) {
@@ -66,7 +67,8 @@
                                      true /* permit_cache */,
                                      new ZStatisticsSamplerTypeConstant());
 }
-#endif
+
+#endif // INCLUDE_JFR
 
 ZTracer* ZTracer::_tracer = NULL;
 
@@ -79,24 +81,24 @@
   JFR_ONLY(register_jfr_type_serializers());
 }
 
-void ZTracer::send_stat_counter(uint32_t counter_id, uint64_t increment, uint64_t value) {
+void ZTracer::send_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value) {
   NoSafepointVerifier nsv;
 
   EventZStatisticsCounter e;
   if (e.should_commit()) {
-    e.set_id(counter_id);
+    e.set_id(counter.id());
     e.set_increment(increment);
     e.set_value(value);
     e.commit();
   }
 }
 
-void ZTracer::send_stat_sampler(uint32_t sampler_id, uint64_t value) {
+void ZTracer::send_stat_sampler(const ZStatSampler& sampler, uint64_t value) {
   NoSafepointVerifier nsv;
 
   EventZStatisticsSampler e;
   if (e.should_commit()) {
-    e.set_id(sampler_id);
+    e.set_id(sampler.id());
     e.set_value(value);
     e.commit();
   }
@@ -115,7 +117,7 @@
   }
 }
 
-void ZTracer::send_page_alloc(size_t size, size_t used, size_t free, size_t cache, bool nonblocking, bool noreserve) {
+void ZTracer::send_page_alloc(size_t size, size_t used, size_t free, size_t cache, ZAllocationFlags flags) {
   NoSafepointVerifier nsv;
 
   EventZPageAllocation e;
@@ -124,28 +126,8 @@
     e.set_usedAfter(used);
     e.set_freeAfter(free);
     e.set_inCacheAfter(cache);
-    e.set_nonBlocking(nonblocking);
-    e.set_noReserve(noreserve);
+    e.set_nonBlocking(flags.non_blocking());
+    e.set_noReserve(flags.no_reserve());
     e.commit();
   }
 }
-
-void ZTracer::report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value) {
-  send_stat_counter(counter.id(), increment, value);
-}
-
-void ZTracer::report_stat_sampler(const ZStatSampler& sampler, uint64_t value) {
-  send_stat_sampler(sampler.id(), value);
-}
-
-void ZTracer::report_thread_phase(const ZStatPhase& phase, const Ticks& start, const Ticks& end) {
-  send_thread_phase(phase.name(), start, end);
-}
-
-void ZTracer::report_thread_phase(const char* name, const Ticks& start, const Ticks& end) {
-  send_thread_phase(name, start, end);
-}
-
-void ZTracer::report_page_alloc(size_t size, size_t used, size_t free, size_t cache, ZAllocationFlags flags) {
-  send_page_alloc(size, used, free, cache, flags.non_blocking(), flags.no_reserve());
-}
--- a/src/hotspot/share/gc/z/zTracer.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zTracer.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,10 +37,10 @@
 
   ZTracer();
 
-  void send_stat_counter(uint32_t counter_id, uint64_t increment, uint64_t value);
-  void send_stat_sampler(uint32_t sampler_id, uint64_t value);
+  void send_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value);
+  void send_stat_sampler(const ZStatSampler& sampler, uint64_t value);
   void send_thread_phase(const char* name, const Ticks& start, const Ticks& end);
-  void send_page_alloc(size_t size, size_t used, size_t free, size_t cache, bool nonblocking, bool noreserve);
+  void send_page_alloc(size_t size, size_t used, size_t free, size_t cache, ZAllocationFlags flags);
 
 public:
   static ZTracer* tracer();
@@ -48,7 +48,6 @@
 
   void report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value);
   void report_stat_sampler(const ZStatSampler& sampler, uint64_t value);
-  void report_thread_phase(const ZStatPhase& phase, const Ticks& start, const Ticks& end);
   void report_thread_phase(const char* name, const Ticks& start, const Ticks& end);
   void report_page_alloc(size_t size, size_t used, size_t free, size_t cache, ZAllocationFlags flags);
 };
--- a/src/hotspot/share/gc/z/zTracer.inline.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zTracer.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,12 +24,38 @@
 #ifndef SHARE_GC_Z_ZTRACER_INLINE_HPP
 #define SHARE_GC_Z_ZTRACER_INLINE_HPP
 
+#include "gc/z/zStat.hpp"
 #include "gc/z/zTracer.hpp"
+#include "jfr/jfrEvents.hpp"
 
 inline ZTracer* ZTracer::tracer() {
   return _tracer;
 }
 
+inline void ZTracer::report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value) {
+  if (EventZStatisticsCounter::is_enabled()) {
+    send_stat_counter(counter, increment, value);
+  }
+}
+
+inline void ZTracer::report_stat_sampler(const ZStatSampler& sampler, uint64_t value) {
+  if (EventZStatisticsSampler::is_enabled()) {
+    send_stat_sampler(sampler, value);
+  }
+}
+
+inline void ZTracer::report_thread_phase(const char* name, const Ticks& start, const Ticks& end) {
+  if (EventZThreadPhase::is_enabled()) {
+    send_thread_phase(name, start, end);
+  }
+}
+
+inline void ZTracer::report_page_alloc(size_t size, size_t used, size_t free, size_t cache, ZAllocationFlags flags) {
+  if (EventZPageAllocation::is_enabled()) {
+    send_page_alloc(size, used, free, cache, flags);
+  }
+}
+
 inline ZTraceThreadPhase::ZTraceThreadPhase(const char* name) :
     _start(Ticks::now()),
     _name(name) {}
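
The report_*/send_* split above keeps a cheap is_enabled() test inline and only takes the out-of-line send path when the corresponding JFR event is actually enabled. A generic sketch of that gating pattern; EventFoo, FooTracer and the payload are made up, not the JFR API:

// Illustrative event-gating pattern: the inline wrapper does a cheap enabled
// check; the out-of-line worker builds and commits the (more expensive) event.
#include <cstdio>

struct EventFoo {
  static bool enabled;               // stands in for a per-event enable flag
  static bool is_enabled() { return enabled; }
};
bool EventFoo::enabled = false;

class FooTracer {
private:
  void send_foo(int payload) {       // out-of-line in real code; heavy work here
    std::printf("foo event: %d\n", payload);
  }

public:
  void report_foo(int payload) {
    if (EventFoo::is_enabled()) {    // common case: disabled, send_foo never runs
      send_foo(payload);
    }
  }
};
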
--- a/src/hotspot/share/gc/z/zValue.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zValue.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,13 +25,11 @@
 #define SHARE_GC_Z_ZVALUE_HPP
 
 #include "memory/allocation.hpp"
-#include "gc/z/zCPU.hpp"
-#include "gc/z/zGlobals.hpp"
-#include "gc/z/zNUMA.hpp"
-#include "gc/z/zThread.hpp"
-#include "gc/z/zUtils.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+//
+// Storage
+//
 
 template <typename S>
 class ZValueStorage : public AllStatic {
@@ -42,202 +40,93 @@
 public:
   static const size_t offset = 4 * K;
 
-  static uintptr_t alloc(size_t size) {
-    guarantee(size <= offset, "Allocation too large");
-
-    // Allocate entry in existing memory block
-    const uintptr_t addr = align_up(_top, S::alignment());
-    _top = addr + size;
-
-    if (_top < _end) {
-      // Success
-      return addr;
-    }
-
-    // Allocate new block of memory
-    const size_t block_alignment = offset;
-    const size_t block_size = offset * S::count();
-    _top = ZUtils::alloc_aligned(block_alignment, block_size);
-    _end = _top + offset;
-
-    // Retry allocation
-    return alloc(size);
-  }
+  static uintptr_t alloc(size_t size);
 };
 
-template <typename T> uintptr_t ZValueStorage<T>::_end = 0;
-template <typename T> uintptr_t ZValueStorage<T>::_top = 0;
-
 class ZContendedStorage : public ZValueStorage<ZContendedStorage> {
 public:
-  static size_t alignment() {
-    return ZCacheLineSize;
-  }
-
-  static uint32_t count() {
-    return 1;
-  }
-
-  static uint32_t id() {
-    return 0;
-  }
+  static size_t alignment();
+  static uint32_t count();
+  static uint32_t id();
 };
 
 class ZPerCPUStorage : public ZValueStorage<ZPerCPUStorage> {
 public:
-  static size_t alignment() {
-    return sizeof(uintptr_t);
-  }
-
-  static uint32_t count() {
-    return ZCPU::count();
-  }
-
-  static uint32_t id() {
-    return ZCPU::id();
-  }
+  static size_t alignment();
+  static uint32_t count();
+  static uint32_t id();
 };
 
 class ZPerNUMAStorage : public ZValueStorage<ZPerNUMAStorage> {
 public:
-  static size_t alignment() {
-    return sizeof(uintptr_t);
-  }
-
-  static uint32_t count() {
-    return ZNUMA::count();
-  }
-
-  static uint32_t id() {
-    return ZNUMA::id();
-  }
+  static size_t alignment();
+  static uint32_t count();
+  static uint32_t id();
 };
 
 class ZPerWorkerStorage : public ZValueStorage<ZPerWorkerStorage> {
 public:
-  static size_t alignment() {
-    return sizeof(uintptr_t);
-  }
-
-  static uint32_t count() {
-    return MAX2(ParallelGCThreads, ConcGCThreads);
-  }
-
-  static uint32_t id() {
-    return ZThread::worker_id();
-  }
+  static size_t alignment();
+  static uint32_t count();
+  static uint32_t id();
 };
 
-template <typename S, typename T>
-class ZValueIterator;
+//
+// Value
+//
 
 template <typename S, typename T>
 class ZValue : public CHeapObj<mtGC> {
 private:
   const uintptr_t _addr;
 
-  uintptr_t value_addr(uint32_t value_id) const {
-    return _addr + (value_id * S::offset);
-  }
+  uintptr_t value_addr(uint32_t value_id) const;
 
 public:
-  ZValue() :
-      _addr(S::alloc(sizeof(T))) {
-    // Initialize all instances
-    ZValueIterator<S, T> iter(this);
-    for (T* addr; iter.next(&addr);) {
-      ::new (addr) T;
-    }
-  }
+  ZValue();
+  ZValue(const T& value);
 
-  ZValue(const T& value) :
-      _addr(S::alloc(sizeof(T))) {
-    // Initialize all instances
-    ZValueIterator<S, T> iter(this);
-    for (T* addr; iter.next(&addr);) {
-      ::new (addr) T(value);
-    }
-  }
+  const T* addr(uint32_t value_id = S::id()) const;
+  T* addr(uint32_t value_id = S::id());
 
-  // Not implemented
-  ZValue(const ZValue<S, T>& value);
-  ZValue<S, T>& operator=(const ZValue<S, T>& value);
+  const T& get(uint32_t value_id = S::id()) const;
+  T& get(uint32_t value_id = S::id());
 
-  const T* addr(uint32_t value_id = S::id()) const {
-    return reinterpret_cast<const T*>(value_addr(value_id));
-  }
-
-  T* addr(uint32_t value_id = S::id()) {
-    return reinterpret_cast<T*>(value_addr(value_id));
-  }
-
-  const T& get(uint32_t value_id = S::id()) const {
-    return *addr(value_id);
-  }
-
-  T& get(uint32_t value_id = S::id()) {
-    return *addr(value_id);
-  }
-
-  void set(const T& value, uint32_t value_id = S::id()) {
-    get(value_id) = value;
-  }
-
-  void set_all(const T& value) {
-    ZValueIterator<S, T> iter(this);
-    for (T* addr; iter.next(&addr);) {
-      *addr = value;
-    }
-  }
+  void set(const T& value, uint32_t value_id = S::id());
+  void set_all(const T& value);
 };
 
 template <typename T>
 class ZContended : public ZValue<ZContendedStorage, T> {
 public:
-  ZContended() :
-      ZValue<ZContendedStorage, T>() {}
-
-  ZContended(const T& value) :
-      ZValue<ZContendedStorage, T>(value) {}
-
-  using ZValue<ZContendedStorage, T>::operator=;
+  ZContended();
+  ZContended(const T& value);
 };
 
 template <typename T>
 class ZPerCPU : public ZValue<ZPerCPUStorage, T> {
 public:
-  ZPerCPU() :
-      ZValue<ZPerCPUStorage, T>() {}
-
-  ZPerCPU(const T& value) :
-      ZValue<ZPerCPUStorage, T>(value) {}
-
-  using ZValue<ZPerCPUStorage, T>::operator=;
+  ZPerCPU();
+  ZPerCPU(const T& value);
 };
 
 template <typename T>
 class ZPerNUMA : public ZValue<ZPerNUMAStorage, T> {
 public:
-  ZPerNUMA() :
-      ZValue<ZPerNUMAStorage, T>() {}
-
-  ZPerNUMA(const T& value) :
-      ZValue<ZPerNUMAStorage, T>(value) {}
-
-  using ZValue<ZPerNUMAStorage, T>::operator=;
+  ZPerNUMA();
+  ZPerNUMA(const T& value);
 };
 
 template <typename T>
 class ZPerWorker : public ZValue<ZPerWorkerStorage, T> {
 public:
-  ZPerWorker() :
-      ZValue<ZPerWorkerStorage, T>() {}
+  ZPerWorker();
+  ZPerWorker(const T& value);
+};
 
-  ZPerWorker(const T& value) :
-      ZValue<ZPerWorkerStorage, T>(value) {}
-
-  using ZValue<ZPerWorkerStorage, T>::operator=;
-};
+//
+// Iterator
+//
 
 template <typename S, typename T>
 class ZValueIterator {
@@ -246,38 +135,27 @@
   uint32_t            _value_id;
 
 public:
-  ZValueIterator(ZValue<S, T>* value) :
-      _value(value),
-      _value_id(0) {}
+  ZValueIterator(ZValue<S, T>* value);
 
-  bool next(T** value) {
-    if (_value_id < S::count()) {
-      *value = _value->addr(_value_id++);
-      return true;
-    }
-    return false;
-  }
+  bool next(T** value);
 };
 
 template <typename T>
 class ZPerCPUIterator : public ZValueIterator<ZPerCPUStorage, T> {
 public:
-  ZPerCPUIterator(ZPerCPU<T>* value) :
-      ZValueIterator<ZPerCPUStorage, T>(value) {}
+  ZPerCPUIterator(ZPerCPU<T>* value);
 };
 
 template <typename T>
 class ZPerNUMAIterator : public ZValueIterator<ZPerNUMAStorage, T> {
 public:
-  ZPerNUMAIterator(ZPerNUMA<T>* value) :
-      ZValueIterator<ZPerNUMAStorage, T>(value) {}
+  ZPerNUMAIterator(ZPerNUMA<T>* value);
 };
 
 template <typename T>
 class ZPerWorkerIterator : public ZValueIterator<ZPerWorkerStorage, T> {
 public:
-  ZPerWorkerIterator(ZPerWorker<T>* value) :
-      ZValueIterator<ZPerWorkerStorage, T>(value) {}
+  ZPerWorkerIterator(ZPerWorker<T>* value);
 };
 
 template <typename S, typename T>
@@ -287,38 +165,27 @@
   uint32_t                  _value_id;
 
 public:
-  ZValueConstIterator(const ZValue<S, T>* value) :
-      _value(value),
-      _value_id(0) {}
+  ZValueConstIterator(const ZValue<S, T>* value);
 
-  bool next(const T** value) {
-    if (_value_id < S::count()) {
-      *value = _value->addr(_value_id++);
-      return true;
-    }
-    return false;
-  }
+  bool next(const T** value);
 };
 
 template <typename T>
 class ZPerCPUConstIterator : public ZValueConstIterator<ZPerCPUStorage, T> {
 public:
-  ZPerCPUConstIterator(const ZPerCPU<T>* value) :
-      ZValueConstIterator<ZPerCPUStorage, T>(value) {}
+  ZPerCPUConstIterator(const ZPerCPU<T>* value);
 };
 
 template <typename T>
 class ZPerNUMAConstIterator : public ZValueConstIterator<ZPerNUMAStorage, T> {
 public:
-  ZPerNUMAConstIterator(const ZPerNUMA<T>* value) :
-      ZValueConstIterator<ZPerNUMAStorage, T>(value) {}
+  ZPerNUMAConstIterator(const ZPerNUMA<T>* value);
 };
 
 template <typename T>
 class ZPerWorkerConstIterator : public ZValueConstIterator<ZPerWorkerStorage, T> {
 public:
-  ZPerWorkerConstIterator(const ZPerWorker<T>* value) :
-      ZValueConstIterator<ZPerWorkerStorage, T>(value) {}
+  ZPerWorkerConstIterator(const ZPerWorker<T>* value);
 };
 
 #endif // SHARE_GC_Z_ZVALUE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zValue.inline.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZVALUE_INLINE_HPP
+#define SHARE_GC_Z_ZVALUE_INLINE_HPP
+
+#include "gc/z/zCPU.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zThread.inline.hpp"
+#include "gc/z/zUtils.hpp"
+#include "gc/z/zValue.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/align.hpp"
+
+//
+// Storage
+//
+
+template <typename T> uintptr_t ZValueStorage<T>::_end = 0;
+template <typename T> uintptr_t ZValueStorage<T>::_top = 0;
+
+template <typename S>
+uintptr_t ZValueStorage<S>::alloc(size_t size) {
+  assert(size <= offset, "Allocation too large");
+
+  // Allocate entry in existing memory block
+  const uintptr_t addr = align_up(_top, S::alignment());
+  _top = addr + size;
+
+  if (_top < _end) {
+    // Success
+    return addr;
+  }
+
+  // Allocate new block of memory
+  const size_t block_alignment = offset;
+  const size_t block_size = offset * S::count();
+  _top = ZUtils::alloc_aligned(block_alignment, block_size);
+  _end = _top + offset;
+
+  // Retry allocation
+  return alloc(size);
+}
+
+inline size_t ZContendedStorage::alignment() {
+  return ZCacheLineSize;
+}
+
+inline uint32_t ZContendedStorage::count() {
+  return 1;
+}
+
+inline uint32_t ZContendedStorage::id() {
+  return 0;
+}
+
+inline size_t ZPerCPUStorage::alignment() {
+  return sizeof(uintptr_t);
+}
+
+inline uint32_t ZPerCPUStorage::count() {
+  return ZCPU::count();
+}
+
+inline uint32_t ZPerCPUStorage::id() {
+  return ZCPU::id();
+}
+
+inline size_t ZPerNUMAStorage::alignment() {
+  return sizeof(uintptr_t);
+}
+
+inline uint32_t ZPerNUMAStorage::count() {
+  return ZNUMA::count();
+}
+
+inline uint32_t ZPerNUMAStorage::id() {
+  return ZNUMA::id();
+}
+
+inline size_t ZPerWorkerStorage::alignment() {
+  return sizeof(uintptr_t);
+}
+
+inline uint32_t ZPerWorkerStorage::count() {
+  return MAX2(ParallelGCThreads, ConcGCThreads);
+}
+
+inline uint32_t ZPerWorkerStorage::id() {
+  return ZThread::worker_id();
+}
+
+//
+// Value
+//
+
+template <typename S, typename T>
+inline uintptr_t ZValue<S, T>::value_addr(uint32_t value_id) const {
+  return _addr + (value_id * S::offset);
+}
+
+template <typename S, typename T>
+inline ZValue<S, T>::ZValue() :
+    _addr(S::alloc(sizeof(T))) {
+  // Initialize all instances
+  ZValueIterator<S, T> iter(this);
+  for (T* addr; iter.next(&addr);) {
+    ::new (addr) T;
+  }
+}
+
+template <typename S, typename T>
+inline ZValue<S, T>::ZValue(const T& value) :
+    _addr(S::alloc(sizeof(T))) {
+  // Initialize all instances
+  ZValueIterator<S, T> iter(this);
+  for (T* addr; iter.next(&addr);) {
+    ::new (addr) T(value);
+  }
+}
+
+template <typename S, typename T>
+inline const T* ZValue<S, T>::addr(uint32_t value_id) const {
+  return reinterpret_cast<const T*>(value_addr(value_id));
+}
+
+template <typename S, typename T>
+inline T* ZValue<S, T>::addr(uint32_t value_id) {
+  return reinterpret_cast<T*>(value_addr(value_id));
+}
+
+template <typename S, typename T>
+inline const T& ZValue<S, T>::get(uint32_t value_id) const {
+  return *addr(value_id);
+}
+
+template <typename S, typename T>
+inline T& ZValue<S, T>::get(uint32_t value_id) {
+  return *addr(value_id);
+}
+
+template <typename S, typename T>
+inline void ZValue<S, T>::set(const T& value, uint32_t value_id) {
+  get(value_id) = value;
+}
+
+template <typename S, typename T>
+inline void ZValue<S, T>::set_all(const T& value) {
+  ZValueIterator<S, T> iter(this);
+  for (T* addr; iter.next(&addr);) {
+    *addr = value;
+  }
+}
+
+template <typename T>
+inline ZContended<T>::ZContended() :
+    ZValue<ZContendedStorage, T>() {}
+
+template <typename T>
+inline ZContended<T>::ZContended(const T& value) :
+    ZValue<ZContendedStorage, T>(value) {}
+
+template <typename T>
+inline ZPerCPU<T>::ZPerCPU() :
+    ZValue<ZPerCPUStorage, T>() {}
+
+template <typename T>
+inline ZPerCPU<T>::ZPerCPU(const T& value) :
+    ZValue<ZPerCPUStorage, T>(value) {}
+
+template <typename T>
+inline ZPerNUMA<T>::ZPerNUMA() :
+    ZValue<ZPerNUMAStorage, T>() {}
+
+template <typename T>
+inline ZPerNUMA<T>::ZPerNUMA(const T& value) :
+    ZValue<ZPerNUMAStorage, T>(value) {}
+
+template <typename T>
+inline ZPerWorker<T>::ZPerWorker() :
+    ZValue<ZPerWorkerStorage, T>() {}
+
+template <typename T>
+inline ZPerWorker<T>::ZPerWorker(const T& value) :
+    ZValue<ZPerWorkerStorage, T>(value) {}
+
+//
+// Iterator
+//
+
+template <typename S, typename T>
+inline ZValueIterator<S, T>::ZValueIterator(ZValue<S, T>* value) :
+    _value(value),
+    _value_id(0) {}
+
+template <typename S, typename T>
+inline bool ZValueIterator<S, T>::next(T** value) {
+  if (_value_id < S::count()) {
+    *value = _value->addr(_value_id++);
+    return true;
+  }
+  return false;
+}
+
+template <typename T>
+inline ZPerCPUIterator<T>::ZPerCPUIterator(ZPerCPU<T>* value) :
+    ZValueIterator<ZPerCPUStorage, T>(value) {}
+
+template <typename T>
+inline ZPerNUMAIterator<T>::ZPerNUMAIterator(ZPerNUMA<T>* value) :
+    ZValueIterator<ZPerNUMAStorage, T>(value) {}
+
+template <typename T>
+inline ZPerWorkerIterator<T>::ZPerWorkerIterator(ZPerWorker<T>* value) :
+    ZValueIterator<ZPerWorkerStorage, T>(value) {}
+
+template <typename S, typename T>
+inline ZValueConstIterator<S, T>::ZValueConstIterator(const ZValue<S, T>* value) :
+    _value(value),
+    _value_id(0) {}
+
+template <typename S, typename T>
+inline bool ZValueConstIterator<S, T>::next(const T** value) {
+  if (_value_id < S::count()) {
+    *value = _value->addr(_value_id++);
+    return true;
+  }
+  return false;
+}
+
+template <typename T>
+inline ZPerCPUConstIterator<T>::ZPerCPUConstIterator(const ZPerCPU<T>* value) :
+    ZValueConstIterator<ZPerCPUStorage, T>(value) {}
+
+template <typename T>
+inline ZPerNUMAConstIterator<T>::ZPerNUMAConstIterator(const ZPerNUMA<T>* value) :
+    ZValueConstIterator<ZPerNUMAStorage, T>(value) {}
+
+template <typename T>
+inline ZPerWorkerConstIterator<T>::ZPerWorkerConstIterator(const ZPerWorker<T>* value) :
+    ZValueConstIterator<ZPerWorkerStorage, T>(value) {}
+
+#endif // SHARE_GC_Z_ZVALUE_INLINE_HPP
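
ZValue and its ZPerCPU/ZPerNUMA/ZPerWorker specializations place one suitably aligned instance of T per CPU, NUMA node or worker so that updates from different threads do not contend. A simplified, self-contained sketch of the per-CPU striping idea, with a fixed stripe count and std::atomic instead of HotSpot's Atomic:

#include <atomic>
#include <cstdint>

// Simplified striped counter: each "CPU" gets its own cache-line-aligned
// slot, so increments from different CPUs never touch the same line.
class StripedCounter {
private:
  static const int CacheLineSize = 64;
  static const int NumStripes    = 16;   // stand-in for ZCPU::count()

  struct alignas(CacheLineSize) Slot {
    std::atomic<uint64_t> value{0};
  };

  Slot _slots[NumStripes];

public:
  void inc(int cpu_id, uint64_t n = 1) {
    _slots[cpu_id % NumStripes].value.fetch_add(n, std::memory_order_relaxed);
  }

  // Reading sums all stripes, like iterating a ZPerCPU value.
  uint64_t sum() const {
    uint64_t total = 0;
    for (int i = 0; i < NumStripes; i++) {
      total += _slots[i].value.load(std::memory_order_relaxed);
    }
    return total;
  }
};
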
--- a/src/hotspot/share/gc/z/zWeakRootsProcessor.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zWeakRootsProcessor.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,6 @@
 #ifndef SHARE_GC_Z_ZWEAKROOTSPROCESSOR_HPP
 #define SHARE_GC_Z_ZWEAKROOTSPROCESSOR_HPP
 
-#include "gc/z/zValue.hpp"
-
 class ZWorkers;
 
 class ZWeakRootsProcessor {
--- a/src/hotspot/share/gc/z/z_globals.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/gc/z/z_globals.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -67,9 +67,6 @@
           "Time between statistics print outs (in seconds)")                \
           range(1, (uint)-1)                                                \
                                                                             \
-  diagnostic(bool, ZStatisticsForceTrace, false,                            \
-          "Force tracing of ZStats")                                        \
-                                                                            \
   diagnostic(bool, ZProactive, true,                                        \
           "Enable proactive GC cycles")                                     \
                                                                             \
--- a/src/hotspot/share/include/jvm.h	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/include/jvm.h	Tue Oct 22 20:49:40 2019 -0400
@@ -192,6 +192,13 @@
 JVM_InitStackTraceElement(JNIEnv* env, jobject element, jobject stackFrameInfo);
 
 /*
+ * java.lang.NullPointerException
+ */
+
+JNIEXPORT jstring JNICALL
+JVM_GetExtendedNPEMessage(JNIEnv *env, jthrowable throwable);
+
+/*
  * java.lang.StackWalker
  */
 enum {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/interpreter/bytecodeUtils.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -0,0 +1,1481 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "interpreter/bytecodeUtils.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "utilities/events.hpp"
+#include "utilities/ostream.hpp"
+
+class SimulatedOperandStack;
+class ExceptionMessageBuilder;
+
+// The entries of a SimulatedOperandStack. They carry the analysis
+// information gathered for the slot.
+class StackSlotAnalysisData {
+ private:
+
+  friend class SimulatedOperandStack;
+  friend class ExceptionMessageBuilder;
+
+  unsigned int _bci:17;    // The bci of the bytecode that pushed the current value on the operand stack.
+                           // INVALID if ambiguous, e.g. after a control flow merge.
+                           // 16 bits for bci (max bytecode size) and one for INVALID.
+  unsigned int _type:15;   // The BasicType of the value on the operand stack.
+
+  // Merges this slot data with the given one and returns the result. If
+  // the bcis of the two merged objects are different, the bci of the result
+  // will be undefined. If the types are different, the result type is T_CONFLICT.
+  // (Exception: if one type is an array and the other is an object, the
+  // result type is T_OBJECT.)
+  StackSlotAnalysisData merge(StackSlotAnalysisData other);
+
+ public:
+
+  // Creates a new object with an invalid bci and the given type.
+  StackSlotAnalysisData(BasicType type = T_CONFLICT);
+
+  // Creates a new object with the given bci and type.
+  StackSlotAnalysisData(int bci, BasicType type);
+
+  enum {
+    // An invalid bytecode index; chosen to be > 65535, the maximum valid bci.
+    INVALID = 0x1FFFF
+  };
+
+  // Returns the bci. If the bci is invalid, INVALID is returned.
+  unsigned int get_bci();
+
+  // Returns true if the bci is valid.
+  bool has_bci() { return get_bci() != INVALID; }
+
+  // Returns the type of the slot data.
+  BasicType get_type();
+};
+
+// A stack of StackSlotAnalysisData entries.
+// This represents the analysis information for the operand stack
+// for a given bytecode at a given bci.
+// It also holds an additional field that serves to collect
+// information about whether local slots were written.
+class SimulatedOperandStack: CHeapObj<mtInternal> {
+
+ private:
+
+  friend class ExceptionMessageBuilder;
+  friend class StackSlotAnalysisData;
+
+  // The stack.
+  GrowableArray<StackSlotAnalysisData> _stack;
+
+  // Optimized bytecode can reuse local variable slots for several
+  // local variables.
+  // If there is no variable name information, we print 'parameter<i>'
+  // if a parameter maps to a local slot. Once a local slot has been
+  // written, we don't know any more whether it was written as the
+  // corresponding parameter, or whether another local has been
+  // mapped to the slot. So we don't want to print 'parameter<i>' any
+  // more, but 'local<i>'. Similarly for 'this'.
+  // Therefore, during the analysis, we mark a bit for local slots that
+  // get written and propagate this information.
+  // We only run the analysis for 64 slots. If a method has more
+  // parameters, we print 'local<i>' in all cases.
+  uint64_t _written_local_slots;
+
+  SimulatedOperandStack(): _written_local_slots(0) { };
+  SimulatedOperandStack(const SimulatedOperandStack &copy);
+
+  // Pushes the given slot data.
+  void push_raw(StackSlotAnalysisData slotData);
+
+  // Like push_raw, but if the slotData has type long or double, we push two.
+  void push(StackSlotAnalysisData slotData);
+
+  // Like push(slotData), but using bci/type to create an instance of
+  // StackSlotAnalysisData first.
+  void push(int bci, BasicType type);
+
+  // Pops the given number of entries.
+  void pop(int slots);
+
+  // Merges this with the given stack by merging all entries. The
+  // size of the stacks must be the same.
+  void merge(SimulatedOperandStack const& other);
+
+ public:
+
+  // Returns the size of the stack.
+  int get_size() const;
+
+  // Returns the slot data at the given index. Slot 0 is top of stack.
+  StackSlotAnalysisData get_slot_data(int slot);
+
+  // Mark that local slot i was written.
+  void set_local_slot_written(int i);
+
+  // Check whether local slot i was written by this or a previous bytecode.
+  bool local_slot_was_written(int i);
+};
+
+// Helper class to build internal exception messages for exceptions
+// that are thrown because prerequisites to execute a bytecode
+// are not met.
+// E.g., if an NPE is thrown because an iaload cannot be executed
+// by the VM because the reference to load from is null.
+//
+// It analyses the bytecode to assemble Java-like message text
+// to give precise information about where in a larger expression the
+// exception occurred.
+//
+// To assemble this message text, we need to know how the
+// operand stack slot entries were pushed onto the operand stack.
+// This class contains an analysis over the bytecodes to compute
+// this information. The information is stored in a
+// SimulatedOperandStack for each bytecode.
+class ExceptionMessageBuilder : public StackObj {
+
+  // The stacks for each bytecode.
+  GrowableArray<SimulatedOperandStack*>* _stacks;
+
+  // The method.
+  Method* _method;
+
+  // The number of entries used (the sum of all entries of all stacks).
+  int _nr_of_entries;
+
+  // If true, we have added at least one new stack.
+  bool _added_one;
+
+  // If true, we have processed all bytecodes.
+  bool _all_processed;
+
+  // The maximum number of entries we want to use. This is used to
+  // limit the amount of memory we waste for insane methods (as they
+  // appear in JCK tests).
+  static const int _max_entries = 1000000;
+
+  static const int _max_cause_detail = 5;
+
+  // Merges the stack at the given bci with the given stack. If there
+  // is no stack at the bci, we just put the given stack there. This
+  // method doesn't take ownership of the stack.
+  void merge(int bci, SimulatedOperandStack* stack);
+
+  // Processes the instruction at the given bci in the method. Returns
+  // the size of the instruction.
+  int do_instruction(int bci);
+
+  bool print_NPE_cause0(outputStream *os, int bci, int slot, int max_detail,
+                        bool inner_expr = false, const char *prefix = NULL);
+
+ public:
+
+  // Creates an ExceptionMessageBuilder object and runs the analysis
+  // building SimulatedOperandStacks for each bytecode in the given
+  // method (the method must be rewritten already). Note that you're
+  // not allowed to use this object when crossing a safepoint! If the
+  // bci is != -1, we only create the stacks as far as needed to get a
+  // stack for the bci.
+  ExceptionMessageBuilder(Method* method, int bci = -1);
+
+  // Releases the resources.
+  ~ExceptionMessageBuilder();
+
+  // Returns the number of stacks (this is the size of the method).
+  int get_size() { return _stacks->length() - 1; }
+
+  // Assuming that a NullPointerException was thrown at the given bci,
+  // we return the number of the slot holding the null reference. If this
+  // NPE is created by hand, we return -2 as the slot. If there
+  // cannot be a NullPointerException at the bci, -1 is returned.
+  int get_NPE_null_slot(int bci);
+
+  // Prints a Java-like expression for the bytecode that pushed
+  // the value to the given slot being live at the given bci.
+  // It constructs the expression by recursing backwards over the
+  // bytecode using the results of the analysis done in the
+  // constructor of ExceptionMessageBuilder.
+  //  os:   The stream to print the message to.
+  //  bci:  The index of the bytecode that caused the NPE.
+  //  slot: The slot on the operand stack that contains null.
+  //        The slots are numbered from TOS downwards, i.e.,
+  //        TOS has slot number 0, the slot below it has 1, and so on.
+  //
+  // Returns false if nothing was printed, else true.
+  bool print_NPE_cause(outputStream *os, int bci, int slot);
+
+  // Prints a string describing the failed action.
+  void print_NPE_failed_action(outputStream *os, int bci);
+};
+
+// Replaces the following well-known class names:
+//   java.lang.Object -> Object
+//   java.lang.String -> String
+static char *trim_well_known_class_names_from_signature(char *signature) {
+  size_t len = strlen(signature);
+  size_t skip_len = strlen("java.lang.");
+  size_t min_pattern_len = strlen("java.lang.String");
+  if (len < min_pattern_len) return signature;
+
+  for (size_t isrc = 0, idst = 0; isrc <= len; isrc++, idst++) {
+    // We must be careful not to trim names like test.java.lang.String.
+    if ((isrc == 0 && strncmp(signature + isrc, "java.lang.Object", min_pattern_len) == 0) ||
+        (isrc == 0 && strncmp(signature + isrc, "java.lang.String", min_pattern_len) == 0) ||
+        (isrc > 1  && strncmp(signature + isrc-2, ", java.lang.Object", min_pattern_len+2) == 0) ||
+        (isrc > 1  && strncmp(signature + isrc-2, ", java.lang.String", min_pattern_len+2) == 0)   ) {
+      isrc += skip_len;
+    }
+    if (idst != isrc) {
+      signature[idst] = signature[isrc];
+    }
+  }
+  return signature;
+}
+
+// Replaces the following well-known class names:
+//   java.lang.Object -> Object
+//   java.lang.String -> String
+static void print_klass_name(outputStream *os, Symbol *klass) {
+  const char *name = klass->as_klass_external_name();
+  if (strcmp(name, "java.lang.Object") == 0) name = "Object";
+  if (strcmp(name, "java.lang.String") == 0) name = "String";
+  os->print("%s", name);
+}
+
+// Prints the name of the method that is described at constant pool
+// index cp_index in the constant pool of method 'method'.
+static void print_method_name(outputStream *os, Method* method, int cp_index) {
+  ResourceMark rm;
+  ConstantPool* cp  = method->constants();
+  Symbol* klass     = cp->klass_ref_at_noresolve(cp_index);
+  Symbol* name      = cp->name_ref_at(cp_index);
+  Symbol* signature = cp->signature_ref_at(cp_index);
+
+  print_klass_name(os, klass);
+  os->print(".%s(", name->as_C_string());
+  stringStream sig;
+  signature->print_as_signature_external_parameters(&sig);
+  os->print("%s)", trim_well_known_class_names_from_signature(sig.as_string()));
+}
+
+// Prints the name of the field that is described at constant pool
+// index cp_index in the constant pool of method 'method'.
+static void print_field_and_class(outputStream *os, Method* method, int cp_index) {
+  ResourceMark rm;
+  ConstantPool* cp = method->constants();
+  Symbol* klass    = cp->klass_ref_at_noresolve(cp_index);
+  Symbol *name     = cp->name_ref_at(cp_index);
+  print_klass_name(os, klass);
+  os->print(".%s", name->as_C_string());
+}
+
+// Returns the name of the field that is described at constant pool
+// index cp_index in the constant pool of method 'method'.
+static char const* get_field_name(Method* method, int cp_index) {
+  Symbol* name = method->constants()->name_ref_at(cp_index);
+  return name->as_C_string();
+}
+
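+// Prints a name for the local variable in the given slot that is live
+// at the given bci: the name from the local variable table if available,
+// otherwise "this", "<parameterN>" or "<localN>".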
+static void print_local_var(outputStream *os, unsigned int bci, Method* method, int slot, bool is_parameter) {
+  if (method->has_localvariable_table()) {
+    for (int i = 0; i < method->localvariable_table_length(); i++) {
+      LocalVariableTableElement* elem = method->localvariable_table_start() + i;
+      unsigned int start = elem->start_bci;
+      unsigned int end = start + elem->length;
+
+      if ((bci >= start) && (bci < end) && (elem->slot == slot)) {
+        ConstantPool* cp = method->constants();
+        char *var =  cp->symbol_at(elem->name_cp_index)->as_C_string();
+        os->print("%s", var);
+
+        return;
+      }
+    }
+  }
+
+  // Handle at least some cases we know.
+  if (!method->is_static() && (slot == 0) && is_parameter) {
+    os->print("this");
+  } else {
+    int curr = method->is_static() ? 0 : 1;
+    int param_index = 1;
+    bool found = false;
+
+    for (SignatureStream ss(method->signature()); !ss.is_done(); ss.next()) {
+      if (ss.at_return_type()) {
+        continue;
+      }
+      int size = type2size[ss.type()];
+      if ((slot >= curr) && (slot < curr + size)) {
+        found = true;
+        break;
+      }
+      param_index += 1;
+      curr += size;
+    }
+
+    if (found && is_parameter) {
+      os->print("<parameter%d>", param_index);
+    } else {
+      // This is the best we can do.
+      os->print("<local%d>", slot);
+    }
+  }
+}
+
+StackSlotAnalysisData::StackSlotAnalysisData(BasicType type) : _bci(INVALID), _type(type) {}
+
+StackSlotAnalysisData::StackSlotAnalysisData(int bci, BasicType type) : _bci(bci), _type(type) {
+  assert(bci >= 0, "BCI must be >= 0");
+  assert(bci < 65536, "BCI must be < 65536");
+}
+
+unsigned int StackSlotAnalysisData::get_bci() {
+  return _bci;
+}
+
+BasicType StackSlotAnalysisData::get_type() {
+  return (BasicType)_type;
+}
+
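+// Merges the analysis data of two control flows reaching the same bci.
+// If both flows pushed a reference but at different bcis, the type is
+// kept and the bci is dropped, so no source expression can be printed
+// for that slot later. Incompatible types merge to T_CONFLICT.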
+StackSlotAnalysisData StackSlotAnalysisData::merge(StackSlotAnalysisData other) {
+  if (get_type() != other.get_type()) {
+    if (((get_type() == T_OBJECT) || (get_type() == T_ARRAY)) &&
+        ((other.get_type() == T_OBJECT) || (other.get_type() == T_ARRAY))) {
+      if (get_bci() == other.get_bci()) {
+        return StackSlotAnalysisData(get_bci(), T_OBJECT);
+      } else {
+        return StackSlotAnalysisData(T_OBJECT);
+      }
+    } else {
+      return StackSlotAnalysisData(T_CONFLICT);
+    }
+  }
+
+  if (get_bci() == other.get_bci()) {
+    return *this;
+  } else {
+    return StackSlotAnalysisData(get_type());
+  }
+}
+
+SimulatedOperandStack::SimulatedOperandStack(const SimulatedOperandStack &copy) {
+  for (int i = 0; i < copy.get_size(); i++) {
+    push_raw(copy._stack.at(i));
+  }
+  _written_local_slots = copy._written_local_slots;
+}
+
+void SimulatedOperandStack::push_raw(StackSlotAnalysisData slotData) {
+  if (slotData.get_type() == T_VOID) {
+    return;
+  }
+
+  _stack.push(slotData);
+}
+
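+// Pushes the value; category-2 types (long, double) occupy two raw
+// slots so the simulated stack mirrors the JVM operand stack layout.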
+void SimulatedOperandStack::push(StackSlotAnalysisData slotData) {
+  if (type2size[slotData.get_type()] == 2) {
+    push_raw(slotData);
+    push_raw(slotData);
+  } else {
+    push_raw(slotData);
+  }
+}
+
+void SimulatedOperandStack::push(int bci, BasicType type) {
+  push(StackSlotAnalysisData(bci, type));
+}
+
+void SimulatedOperandStack::pop(int slots) {
+  for (int i = 0; i < slots; ++i) {
+    _stack.pop();
+  }
+
+  assert(get_size() >= 0, "Popped too many slots");
+}
+
+void SimulatedOperandStack::merge(SimulatedOperandStack const& other) {
+  assert(get_size() == other.get_size(), "Stacks not of same size");
+
+  for (int i = get_size() - 1; i >= 0; --i) {
+    _stack.at_put(i, _stack.at(i).merge(other._stack.at(i)));
+  }
+  _written_local_slots = _written_local_slots | other._written_local_slots;
+}
+
+int SimulatedOperandStack::get_size() const {
+  return _stack.length();
+}
+
+StackSlotAnalysisData SimulatedOperandStack::get_slot_data(int slot) {
+  assert(slot >= 0, "Slot=%d < 0", slot);
+  assert(slot < get_size(), "Slot=%d >= size=%d", slot, get_size());
+
+  return _stack.at(get_size() - slot - 1);
+}
+
+void SimulatedOperandStack::set_local_slot_written(int i) {
+  // Local slots > 63 are very unlikely. Consider these as always
+  // written; this saves the space and complexity of a dynamically
+  // sized data structure.
+  if (i > 63) return;
+  _written_local_slots = _written_local_slots | (1ULL << i);
+}
+
+bool SimulatedOperandStack::local_slot_was_written(int i) {
+  if (i > 63) return true;
+  return (_written_local_slots & (1ULL << i)) != 0;
+}
+
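+// Performs a simple forward data flow analysis over the bytecodes of
+// 'method': simulated operand stacks are propagated from instruction to
+// instruction and merged at join points until a fixed point is reached,
+// the stack for 'bci' is known, or the entry limit is exceeded.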
+ExceptionMessageBuilder::ExceptionMessageBuilder(Method* method, int bci) :
+                    _method(method), _nr_of_entries(0),
+                    _added_one(true), _all_processed(false) {
+
+  ConstMethod* const_method = method->constMethod();
+  const int len = const_method->code_size();
+
+  assert(bci >= 0, "BCI too low: %d", bci);
+  assert(bci < len, "BCI too large: %d size: %d", bci, len);
+
+  _stacks = new GrowableArray<SimulatedOperandStack*> (len + 1);
+
+  for (int i = 0; i <= len; ++i) {
+    _stacks->push(NULL);
+  }
+
+  // Initialize the stack at bci 0.
+  _stacks->at_put(0, new SimulatedOperandStack());
+
+  // And initialize the start of all exception handlers.
+  if (const_method->has_exception_handler()) {
+    ExceptionTableElement *et = const_method->exception_table_start();
+    for (int i = 0; i < const_method->exception_table_length(); ++i) {
+      u2 index = et[i].handler_pc;
+
+      if (_stacks->at(index) == NULL) {
+        _stacks->at_put(index, new SimulatedOperandStack());
+        _stacks->at(index)->push(index, T_OBJECT);
+      }
+    }
+  }
+
+  // Iterate until each bytecode has a stack or an iteration
+  // adds no new stack.
+  while (!_all_processed && _added_one) {
+    _all_processed = true;
+    _added_one = false;
+
+    for (int i = 0; i < len; ) {
+      // Analyse bytecode i. Step by its size to the next bytecode.
+      i += do_instruction(i);
+
+      // If we want the data only for a certain bci, we can possibly end early.
+      if ((bci == i) && (_stacks->at(i) != NULL)) {
+        _all_processed = true;
+        break;
+      }
+
+      if (_nr_of_entries > _max_entries) {
+        return;
+      }
+    }
+  }
+}
+
+ExceptionMessageBuilder::~ExceptionMessageBuilder() {
+  if (_stacks != NULL) {
+    for (int i = 0; i < _stacks->length(); ++i) {
+      delete _stacks->at(i);
+    }
+  }
+}
+
+void ExceptionMessageBuilder::merge(int bci, SimulatedOperandStack* stack) {
+  assert(stack != _stacks->at(bci), "Cannot merge itself");
+
+  if (_stacks->at(bci) != NULL) {
+    stack->merge(*_stacks->at(bci));
+  } else {
+    // Got a new stack, so count the entries.
+    _nr_of_entries += stack->get_size();
+  }
+
+  // Replace the stack at this bci with a copy of our new merged stack.
+  delete _stacks->at(bci);
+  _stacks->at_put(bci, new SimulatedOperandStack(*stack));
+}
+
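+// Simulates the effect of the bytecode at 'bci' on the operand stack and
+// propagates the resulting stack to the fall-through successor and to all
+// branch targets. Returns the length of the bytecode so the caller can
+// step to the next instruction.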
+int ExceptionMessageBuilder::do_instruction(int bci) {
+  ConstMethod* const_method = _method->constMethod();
+  address code_base = _method->constMethod()->code_base();
+
+  // We use the Java bytecodes, since we don't want to cope with all the fast variants.
+  int len = Bytecodes::java_length_at(_method, code_base + bci);
+
+  // If we have no stack for this bci, we cannot process the bytecode now.
+  if (_stacks->at(bci) == NULL) {
+    _all_processed = false;
+    return len;
+  }
+
+  // Make a local copy of the stack for this bci to work on.
+  SimulatedOperandStack* stack = new SimulatedOperandStack(*_stacks->at(bci));
+
+  // dest_bci is != -1 if we branch.
+  int dest_bci = -1;
+
+  // This is for table and lookup switch.
+  static const int initial_length = 2;
+  GrowableArray<int> dests(initial_length);
+
+  bool flow_ended = false;
+
+  // Get the bytecode.
+  bool is_wide = false;
+  Bytecodes::Code raw_code = Bytecodes::code_at(_method, code_base + bci);
+  Bytecodes::Code code = Bytecodes::java_code_at(_method, code_base + bci);
+  int pos = bci + 1;
+
+  if (code == Bytecodes::_wide) {
+    is_wide = true;
+    code = Bytecodes::java_code_at(_method, code_base + bci + 1);
+    pos += 1;
+  }
+
+  // Now simulate the action of each bytecode.
+  switch (code) {
+    case Bytecodes::_nop:
+    case Bytecodes::_aconst_null:
+    case Bytecodes::_iconst_m1:
+    case Bytecodes::_iconst_0:
+    case Bytecodes::_iconst_1:
+    case Bytecodes::_iconst_2:
+    case Bytecodes::_iconst_3:
+    case Bytecodes::_iconst_4:
+    case Bytecodes::_iconst_5:
+    case Bytecodes::_lconst_0:
+    case Bytecodes::_lconst_1:
+    case Bytecodes::_fconst_0:
+    case Bytecodes::_fconst_1:
+    case Bytecodes::_fconst_2:
+    case Bytecodes::_dconst_0:
+    case Bytecodes::_dconst_1:
+    case Bytecodes::_bipush:
+    case Bytecodes::_sipush:
+    case Bytecodes::_iload:
+    case Bytecodes::_lload:
+    case Bytecodes::_fload:
+    case Bytecodes::_dload:
+    case Bytecodes::_aload:
+    case Bytecodes::_iload_0:
+    case Bytecodes::_iload_1:
+    case Bytecodes::_iload_2:
+    case Bytecodes::_iload_3:
+    case Bytecodes::_lload_0:
+    case Bytecodes::_lload_1:
+    case Bytecodes::_lload_2:
+    case Bytecodes::_lload_3:
+    case Bytecodes::_fload_0:
+    case Bytecodes::_fload_1:
+    case Bytecodes::_fload_2:
+    case Bytecodes::_fload_3:
+    case Bytecodes::_dload_0:
+    case Bytecodes::_dload_1:
+    case Bytecodes::_dload_2:
+    case Bytecodes::_dload_3:
+    case Bytecodes::_aload_0:
+    case Bytecodes::_aload_1:
+    case Bytecodes::_aload_2:
+    case Bytecodes::_aload_3:
+    case Bytecodes::_iinc:
+    case Bytecodes::_new:
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_ldc:
+    case Bytecodes::_ldc_w:
+    case Bytecodes::_ldc2_w: {
+      int cp_index;
+      ConstantPool* cp = _method->constants();
+
+      if (code == Bytecodes::_ldc) {
+        cp_index = *(uint8_t*) (code_base + pos);
+
+        if (raw_code == Bytecodes::_fast_aldc) {
+          cp_index = cp->object_to_cp_index(cp_index);
+        }
+      } else {
+        if (raw_code == Bytecodes::_fast_aldc_w) {
+          cp_index = Bytes::get_native_u2(code_base + pos);
+          cp_index = cp->object_to_cp_index(cp_index);
+        }
+        else {
+          cp_index = Bytes::get_Java_u2(code_base + pos);
+        }
+      }
+
+      constantTag tag = cp->tag_at(cp_index);
+      if (tag.is_klass()  || tag.is_unresolved_klass() ||
+          tag.is_method() || tag.is_interface_method() ||
+          tag.is_field()  || tag.is_string()) {
+        stack->push(bci, T_OBJECT);
+      } else if (tag.is_int()) {
+        stack->push(bci, T_INT);
+      } else if (tag.is_long()) {
+        stack->push(bci, T_LONG);
+      } else if (tag.is_float()) {
+        stack->push(bci, T_FLOAT);
+      } else if (tag.is_double()) {
+        stack->push(bci, T_DOUBLE);
+      } else {
+        assert(false, "Unexpected tag");
+      }
+      break;
+    }
+
+    case Bytecodes::_iaload:
+    case Bytecodes::_faload:
+    case Bytecodes::_aaload:
+    case Bytecodes::_baload:
+    case Bytecodes::_caload:
+    case Bytecodes::_saload:
+    case Bytecodes::_laload:
+    case Bytecodes::_daload:
+      stack->pop(2);
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_istore:
+    case Bytecodes::_lstore:
+    case Bytecodes::_fstore:
+    case Bytecodes::_dstore:
+    case Bytecodes::_astore:
+      int index;
+      if (is_wide) {
+        index = Bytes::get_Java_u2(code_base + bci + 2);
+      } else {
+        index = *(uint8_t*) (code_base + bci + 1);
+      }
+      stack->set_local_slot_written(index);
+      stack->pop(-Bytecodes::depth(code));
+      break;
+    case Bytecodes::_istore_0:
+    case Bytecodes::_lstore_0:
+    case Bytecodes::_fstore_0:
+    case Bytecodes::_dstore_0:
+    case Bytecodes::_astore_0:
+      stack->set_local_slot_written(0);
+      stack->pop(-Bytecodes::depth(code));
+      break;
+    case Bytecodes::_istore_1:
+    case Bytecodes::_fstore_1:
+    case Bytecodes::_lstore_1:
+    case Bytecodes::_dstore_1:
+    case Bytecodes::_astore_1:
+      stack->set_local_slot_written(1);
+      stack->pop(-Bytecodes::depth(code));
+      break;
+    case Bytecodes::_istore_2:
+    case Bytecodes::_lstore_2:
+    case Bytecodes::_fstore_2:
+    case Bytecodes::_dstore_2:
+    case Bytecodes::_astore_2:
+      stack->set_local_slot_written(2);
+      stack->pop(-Bytecodes::depth(code));
+      break;
+    case Bytecodes::_istore_3:
+    case Bytecodes::_lstore_3:
+    case Bytecodes::_fstore_3:
+    case Bytecodes::_dstore_3:
+    case Bytecodes::_astore_3:
+      stack->set_local_slot_written(3);
+      stack->pop(-Bytecodes::depth(code));
+      break;
+    case Bytecodes::_iastore:
+    case Bytecodes::_lastore:
+    case Bytecodes::_fastore:
+    case Bytecodes::_dastore:
+    case Bytecodes::_aastore:
+    case Bytecodes::_bastore:
+    case Bytecodes::_castore:
+    case Bytecodes::_sastore:
+    case Bytecodes::_pop:
+    case Bytecodes::_pop2:
+    case Bytecodes::_monitorenter:
+    case Bytecodes::_monitorexit:
+    case Bytecodes::_breakpoint:
+      stack->pop(-Bytecodes::depth(code));
+      break;
+
+    case Bytecodes::_dup:
+      stack->push_raw(stack->get_slot_data(0));
+      break;
+
+    case Bytecodes::_dup_x1: {
+      StackSlotAnalysisData top1 = stack->get_slot_data(0);
+      StackSlotAnalysisData top2 = stack->get_slot_data(1);
+      stack->pop(2);
+      stack->push_raw(top1);
+      stack->push_raw(top2);
+      stack->push_raw(top1);
+      break;
+    }
+
+    case Bytecodes::_dup_x2: {
+      StackSlotAnalysisData top1 = stack->get_slot_data(0);
+      StackSlotAnalysisData top2 = stack->get_slot_data(1);
+      StackSlotAnalysisData top3 = stack->get_slot_data(2);
+      stack->pop(3);
+      stack->push_raw(top1);
+      stack->push_raw(top3);
+      stack->push_raw(top2);
+      stack->push_raw(top1);
+      break;
+    }
+
+    case Bytecodes::_dup2:
+      stack->push_raw(stack->get_slot_data(1));
+      // The former '0' entry is now at '1'.
+      stack->push_raw(stack->get_slot_data(1));
+      break;
+
+    case Bytecodes::_dup2_x1: {
+      StackSlotAnalysisData top1 = stack->get_slot_data(0);
+      StackSlotAnalysisData top2 = stack->get_slot_data(1);
+      StackSlotAnalysisData top3 = stack->get_slot_data(2);
+      stack->pop(3);
+      stack->push_raw(top2);
+      stack->push_raw(top1);
+      stack->push_raw(top3);
+      stack->push_raw(top2);
+      stack->push_raw(top1);
+      break;
+    }
+
+    case Bytecodes::_dup2_x2: {
+      StackSlotAnalysisData top1 = stack->get_slot_data(0);
+      StackSlotAnalysisData top2 = stack->get_slot_data(1);
+      StackSlotAnalysisData top3 = stack->get_slot_data(2);
+      StackSlotAnalysisData top4 = stack->get_slot_data(3);
+      stack->pop(4);
+      stack->push_raw(top2);
+      stack->push_raw(top1);
+      stack->push_raw(top4);
+      stack->push_raw(top3);
+      stack->push_raw(top2);
+      stack->push_raw(top1);
+      break;
+    }
+
+    case Bytecodes::_swap: {
+      StackSlotAnalysisData top1 = stack->get_slot_data(0);
+      StackSlotAnalysisData top2 = stack->get_slot_data(1);
+      stack->pop(2);
+      stack->push(top1);
+      stack->push(top2);
+      break;
+    }
+
+    case Bytecodes::_iadd:
+    case Bytecodes::_ladd:
+    case Bytecodes::_fadd:
+    case Bytecodes::_dadd:
+    case Bytecodes::_isub:
+    case Bytecodes::_lsub:
+    case Bytecodes::_fsub:
+    case Bytecodes::_dsub:
+    case Bytecodes::_imul:
+    case Bytecodes::_lmul:
+    case Bytecodes::_fmul:
+    case Bytecodes::_dmul:
+    case Bytecodes::_idiv:
+    case Bytecodes::_ldiv:
+    case Bytecodes::_fdiv:
+    case Bytecodes::_ddiv:
+    case Bytecodes::_irem:
+    case Bytecodes::_lrem:
+    case Bytecodes::_frem:
+    case Bytecodes::_drem:
+    case Bytecodes::_iand:
+    case Bytecodes::_land:
+    case Bytecodes::_ior:
+    case Bytecodes::_lor:
+    case Bytecodes::_ixor:
+    case Bytecodes::_lxor:
+      stack->pop(2 * type2size[Bytecodes::result_type(code)]);
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_ineg:
+    case Bytecodes::_lneg:
+    case Bytecodes::_fneg:
+    case Bytecodes::_dneg:
+      stack->pop(type2size[Bytecodes::result_type(code)]);
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_ishl:
+    case Bytecodes::_lshl:
+    case Bytecodes::_ishr:
+    case Bytecodes::_lshr:
+    case Bytecodes::_iushr:
+    case Bytecodes::_lushr:
+      stack->pop(1 + type2size[Bytecodes::result_type(code)]);
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_i2l:
+    case Bytecodes::_i2f:
+    case Bytecodes::_i2d:
+    case Bytecodes::_f2i:
+    case Bytecodes::_f2l:
+    case Bytecodes::_f2d:
+    case Bytecodes::_i2b:
+    case Bytecodes::_i2c:
+    case Bytecodes::_i2s:
+      stack->pop(1);
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_l2i:
+    case Bytecodes::_l2f:
+    case Bytecodes::_l2d:
+    case Bytecodes::_d2i:
+    case Bytecodes::_d2l:
+    case Bytecodes::_d2f:
+      stack->pop(2);
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_lcmp:
+    case Bytecodes::_fcmpl:
+    case Bytecodes::_fcmpg:
+    case Bytecodes::_dcmpl:
+    case Bytecodes::_dcmpg:
+      stack->pop(1 - Bytecodes::depth(code));
+      stack->push(bci, T_INT);
+      break;
+
+    case Bytecodes::_ifeq:
+    case Bytecodes::_ifne:
+    case Bytecodes::_iflt:
+    case Bytecodes::_ifge:
+    case Bytecodes::_ifgt:
+    case Bytecodes::_ifle:
+    case Bytecodes::_if_icmpeq:
+    case Bytecodes::_if_icmpne:
+    case Bytecodes::_if_icmplt:
+    case Bytecodes::_if_icmpge:
+    case Bytecodes::_if_icmpgt:
+    case Bytecodes::_if_icmple:
+    case Bytecodes::_if_acmpeq:
+    case Bytecodes::_if_acmpne:
+    case Bytecodes::_ifnull:
+    case Bytecodes::_ifnonnull:
+      stack->pop(-Bytecodes::depth(code));
+      dest_bci = bci + (int16_t) Bytes::get_Java_u2(code_base + pos);
+      break;
+
+    case Bytecodes::_jsr:
+      // NOTE: Bytecodes has the wrong depth for jsr.
+      stack->push(bci, T_ADDRESS);
+      dest_bci = bci + (int16_t) Bytes::get_Java_u2(code_base + pos);
+      flow_ended = true;
+      break;
+
+    case Bytecodes::_jsr_w: {
+      // NOTE: Bytecodes has the wrong depth for jsr.
+      stack->push(bci, T_ADDRESS);
+      dest_bci = bci + (int32_t) Bytes::get_Java_u4(code_base + pos);
+      flow_ended = true;
+      break;
+    }
+
+    case Bytecodes::_ret:
+      // We don't track local variables, so we cannot know where we
+      // return. This makes the stacks imprecise, but we have to
+      // live with that.
+      flow_ended = true;
+      break;
+
+    case Bytecodes::_tableswitch: {
+      stack->pop(1);
+      pos = (pos + 3) & ~3;
+      dest_bci = bci + (int32_t) Bytes::get_Java_u4(code_base + pos);
+      int low = (int32_t) Bytes::get_Java_u4(code_base + pos + 4);
+      int high = (int32_t) Bytes::get_Java_u4(code_base + pos + 8);
+
+      for (int64_t i = low; i <= high; ++i) {
+        dests.push(bci + (int32_t) Bytes::get_Java_u4(code_base + pos + 12 + 4 * (i - low)));
+      }
+
+      break;
+    }
+
+    case Bytecodes::_lookupswitch: {
+      stack->pop(1);
+      pos = (pos + 3) & ~3;
+      dest_bci = bci + (int32_t) Bytes::get_Java_u4(code_base + pos);
+      int nr_of_dests = (int32_t) Bytes::get_Java_u4(code_base + pos + 4);
+
+      for (int i = 0; i < nr_of_dests; ++i) {
+        dests.push(bci + (int32_t) Bytes::get_Java_u4(code_base + pos + 12 + 8 * i));
+      }
+
+      break;
+    }
+
+    case Bytecodes::_ireturn:
+    case Bytecodes::_lreturn:
+    case Bytecodes::_freturn:
+    case Bytecodes::_dreturn:
+    case Bytecodes::_areturn:
+    case Bytecodes::_return:
+    case Bytecodes::_athrow:
+      stack->pop(-Bytecodes::depth(code));
+      flow_ended = true;
+      break;
+
+    case Bytecodes::_getstatic:
+    case Bytecodes::_getfield: {
+      // Find out the type of the field accessed.
+      int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+      ConstantPool* cp = _method->constants();
+      int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
+      int type_index = cp->signature_ref_index_at(name_and_type_index);
+      Symbol* signature = cp->symbol_at(type_index);
+      // Simulate the bytecode: pop the object reference (for getfield),
+      // push the value loaded from the field.
+      stack->pop(1 - Bytecodes::depth(code));
+      stack->push(bci, char2type((char) signature->char_at(0)));
+      break;
+    }
+
+    case Bytecodes::_putstatic:
+    case Bytecodes::_putfield: {
+      int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+      ConstantPool* cp = _method->constants();
+      int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
+      int type_index = cp->signature_ref_index_at(name_and_type_index);
+      Symbol* signature = cp->symbol_at(type_index);
+      ResultTypeFinder result_type(signature);
+      stack->pop(type2size[char2type((char) signature->char_at(0))] - Bytecodes::depth(code) - 1);
+      break;
+    }
+
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokestatic:
+    case Bytecodes::_invokeinterface:
+    case Bytecodes::_invokedynamic: {
+      ConstantPool* cp = _method->constants();
+      int cp_index;
+
+      if (code == Bytecodes::_invokedynamic) {
+        cp_index = ((int) Bytes::get_native_u4(code_base + pos));
+      } else {
+        cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+      }
+
+      int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
+      int type_index = cp->signature_ref_index_at(name_and_type_index);
+      Symbol* signature = cp->symbol_at(type_index);
+
+      if ((code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic)) {
+        // Pop receiver.
+        stack->pop(1);
+      }
+
+      stack->pop(ArgumentSizeComputer(signature).size());
+      ResultTypeFinder result_type(signature);
+      stack->push(bci, result_type.type());
+      break;
+    }
+
+    case Bytecodes::_newarray:
+    case Bytecodes::_anewarray:
+    case Bytecodes::_instanceof:
+      stack->pop(1);
+      stack->push(bci, Bytecodes::result_type(code));
+      break;
+
+    case Bytecodes::_arraylength:
+      // The return type of arraylength is wrong in the bytecodes table (T_VOID).
+      stack->pop(1);
+      stack->push(bci, T_INT);
+      break;
+
+    case Bytecodes::_checkcast:
+      break;
+
+    case Bytecodes::_multianewarray:
+      stack->pop(*(uint8_t*) (code_base + pos + 2));
+      stack->push(bci, T_OBJECT);
+      break;
+
+    case Bytecodes::_goto:
+      stack->pop(-Bytecodes::depth(code));
+      dest_bci = bci + (int16_t) Bytes::get_Java_u2(code_base + pos);
+      flow_ended = true;
+      break;
+
+    case Bytecodes::_goto_w:
+      stack->pop(-Bytecodes::depth(code));
+      dest_bci = bci + (int32_t) Bytes::get_Java_u4(code_base + pos);
+      flow_ended = true;
+      break;
+
+    default:
+      // Allow at least the bcis which have stack info to work.
+      _all_processed = false;
+      _added_one = false;
+      delete stack;
+
+      return len;
+  }
+
+  // Propagate the new stack to the next instruction, if we might reach
+  // it from this bci.
+  if (!flow_ended) {
+    if (_stacks->at(bci + len) == NULL) {
+      _added_one = true;
+    }
+    merge(bci + len, stack);
+  }
+
+  // Propagate the stack to the branch target too.
+  if (dest_bci != -1) {
+    if (_stacks->at(dest_bci) == NULL) {
+      _added_one = true;
+    }
+    merge(dest_bci, stack);
+  }
+
+  // If we have more than one branch target, process these too.
+  for (int64_t i = 0; i < dests.length(); ++i) {
+    if (_stacks->at(dests.at(i)) == NULL) {
+      _added_one = true;
+    }
+    merge(dests.at(i), stack);
+  }
+
+  delete stack;
+
+  return len;
+}
+
+#define INVALID_BYTECODE_ENCOUNTERED -1
+#define NPE_EXPLICIT_CONSTRUCTED -2
+int ExceptionMessageBuilder::get_NPE_null_slot(int bci) {
+  // Get the bytecode.
+  address code_base = _method->constMethod()->code_base();
+  Bytecodes::Code code = Bytecodes::java_code_at(_method, code_base + bci);
+  int pos = bci + 1;  // Position of argument of the bytecode.
+  if (code == Bytecodes::_wide) {
+    code = Bytecodes::java_code_at(_method, code_base + bci + 1);
+    pos += 1;
+  }
+
+  switch (code) {
+    case Bytecodes::_getfield:
+    case Bytecodes::_arraylength:
+    case Bytecodes::_athrow:
+    case Bytecodes::_monitorenter:
+    case Bytecodes::_monitorexit:
+      return 0;
+    case Bytecodes::_iaload:
+    case Bytecodes::_faload:
+    case Bytecodes::_aaload:
+    case Bytecodes::_baload:
+    case Bytecodes::_caload:
+    case Bytecodes::_saload:
+    case Bytecodes::_laload:
+    case Bytecodes::_daload:
+      return 1;
+    case Bytecodes::_iastore:
+    case Bytecodes::_fastore:
+    case Bytecodes::_aastore:
+    case Bytecodes::_bastore:
+    case Bytecodes::_castore:
+    case Bytecodes::_sastore:
+      return 2;
+    case Bytecodes::_lastore:
+    case Bytecodes::_dastore:
+      return 3;
+    case Bytecodes::_putfield: {
+        int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+        ConstantPool* cp = _method->constants();
+        int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
+        int type_index = cp->signature_ref_index_at(name_and_type_index);
+        Symbol* signature = cp->symbol_at(type_index);
+        return type2size[char2type((char) signature->char_at(0))];
+      }
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokeinterface: {
+        int cp_index = Bytes::get_native_u2(code_base+ pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+        ConstantPool* cp = _method->constants();
+        int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
+        int name_index = cp->name_ref_index_at(name_and_type_index);
+        Symbol* name = cp->symbol_at(name_index);
+
+        // Assume that the call of a constructor can never cause a NullPointerException
+        // (which is true in Java). This is mainly used to avoid generating wrong
+        // messages for NullPointerExceptions created explicitly by new in Java code.
+        if (name != vmSymbols::object_initializer_name()) {
+          int     type_index = cp->signature_ref_index_at(name_and_type_index);
+          Symbol* signature  = cp->symbol_at(type_index);
+          // The 'this' parameter was null. Return its slot.
+          return ArgumentSizeComputer(signature).size();
+        } else {
+          return NPE_EXPLICIT_CONSTRUCTED;
+        }
+      }
+
+    default:
+      break;
+  }
+
+  return INVALID_BYTECODE_ENCOUNTERED;
+}
+
+bool ExceptionMessageBuilder::print_NPE_cause(outputStream* os, int bci, int slot) {
+  if (print_NPE_cause0(os, bci, slot, _max_cause_detail, false, " because \"")) {
+    os->print("\" is null");
+    return true;
+  }
+  return false;
+}
+
+// Recursively print what was null.
+//
+// Go to the bytecode that pushed slot 'slot' on the operand stack
+// at bytecode 'bci'. Compute a message for that bytecode. If
+// necessary (array, field), recur further.
+// At most do max_detail recursions.
+// Prefix is used to print a proper beginning of the whole
+// sentence.
+// inner_expr is used to omit some text, like 'static' in
+// inner expressions like array subscripts.
+//
+// Returns true if something was printed.
+//
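+// For example, if evaluating 'a.b.c' throws the NPE when reading 'c',
+// the getfield that pushed 'a.b' is followed back to the aload of 'a',
+// and "a.b" is printed as the null expression.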
+bool ExceptionMessageBuilder::print_NPE_cause0(outputStream* os, int bci, int slot,
+                                               int max_detail,
+                                               bool inner_expr, const char *prefix) {
+  assert(bci >= 0, "BCI too low");
+  assert(bci < get_size(), "BCI too large");
+
+  if (max_detail <= 0) {
+    return false;
+  }
+
+  if (_stacks->at(bci) == NULL) {
+    return false;
+  }
+
+  SimulatedOperandStack* stack = _stacks->at(bci);
+  assert(slot >= 0, "Slot nr. too low");
+  assert(slot < stack->get_size(), "Slot nr. too large");
+
+  StackSlotAnalysisData slotData = stack->get_slot_data(slot);
+
+  if (!slotData.has_bci()) {
+    return false;
+  }
+
+  // Get the bytecode.
+  unsigned int source_bci = slotData.get_bci();
+  address code_base = _method->constMethod()->code_base();
+  Bytecodes::Code code = Bytecodes::java_code_at(_method, code_base + source_bci);
+  bool is_wide = false;
+  int pos = source_bci + 1;
+
+  if (code == Bytecodes::_wide) {
+    is_wide = true;
+    code = Bytecodes::java_code_at(_method, code_base + source_bci + 1);
+    pos += 1;
+  }
+
+  if (max_detail == _max_cause_detail &&
+      prefix != NULL &&
+      code != Bytecodes::_invokevirtual &&
+      code != Bytecodes::_invokespecial &&
+      code != Bytecodes::_invokestatic &&
+      code != Bytecodes::_invokeinterface) {
+    os->print("%s", prefix);
+  }
+
+  switch (code) {
+    case Bytecodes::_iload_0:
+    case Bytecodes::_aload_0:
+      print_local_var(os, source_bci, _method, 0, !stack->local_slot_was_written(0));
+      return true;
+
+    case Bytecodes::_iload_1:
+    case Bytecodes::_aload_1:
+      print_local_var(os, source_bci, _method, 1, !stack->local_slot_was_written(1));
+      return true;
+
+    case Bytecodes::_iload_2:
+    case Bytecodes::_aload_2:
+      print_local_var(os, source_bci, _method, 2, !stack->local_slot_was_written(2));
+      return true;
+
+    case Bytecodes::_iload_3:
+    case Bytecodes::_aload_3:
+      print_local_var(os, source_bci, _method, 3, !stack->local_slot_was_written(3));
+      return true;
+
+    case Bytecodes::_iload:
+    case Bytecodes::_aload: {
+      int index;
+      if (is_wide) {
+        index = Bytes::get_Java_u2(code_base + source_bci + 2);
+      } else {
+        index = *(uint8_t*) (code_base + source_bci + 1);
+      }
+      print_local_var(os, source_bci, _method, index, !stack->local_slot_was_written(index));
+      return true;
+    }
+
+    case Bytecodes::_aconst_null:
+      os->print("null");
+      return true;
+    case Bytecodes::_iconst_m1:
+      os->print("-1");
+      return true;
+    case Bytecodes::_iconst_0:
+      os->print("0");
+      return true;
+    case Bytecodes::_iconst_1:
+      os->print("1");
+      return true;
+    case Bytecodes::_iconst_2:
+      os->print("2");
+      return true;
+    case Bytecodes::_iconst_3:
+      os->print("3");
+      return true;
+    case Bytecodes::_iconst_4:
+      os->print("4");
+      return true;
+    case Bytecodes::_iconst_5:
+      os->print("5");
+      return true;
+    case Bytecodes::_bipush: {
+      jbyte con = *(jbyte*) (code_base + source_bci + 1);
+      os->print("%d", con);
+      return true;
+    }
+    case Bytecodes::_sipush: {
+      u2 con = Bytes::get_Java_u2(code_base + source_bci + 1);
+      os->print("%d", con);
+      return true;
+    }
+    case Bytecodes::_iaload:
+    case Bytecodes::_aaload: {
+      // Print the 'name' of the array. Go back to the bytecode that
+      // pushed the array reference on the operand stack.
+      if (!print_NPE_cause0(os, source_bci, 1, max_detail - 1, inner_expr)) {
+        // Returned false. Max recursion depth was reached. Print dummy.
+        os->print("<array>");
+      }
+      os->print("[");
+      // Print the index expression. Go back to the bytecode that
+      // pushed the index on the operand stack.
+      // inner_expr == true so we don't print unwanted strings like
+      // "the return value of". And don't decrement max_detail so we
+      // always get a value here and only stop at the dereference.
+      if (!print_NPE_cause0(os, source_bci, 0, max_detail, true)) {
+        // Returned false. We don't print complex array index expressions. Print placeholder.
+        os->print("...");
+      }
+      os->print("]");
+      return true;
+    }
+
+    case Bytecodes::_getstatic: {
+      int cp_index = Bytes::get_native_u2(code_base + pos) + ConstantPool::CPCACHE_INDEX_TAG;
+      print_field_and_class(os, _method, cp_index);
+      return true;
+    }
+
+    case Bytecodes::_getfield: {
+      // Print the object the field is read from. Go back to the
+      // bytecode that pushed that object on the operand stack.
+      if (print_NPE_cause0(os, source_bci, 0, max_detail - 1, inner_expr)) {
+        os->print(".");
+      }
+      int cp_index = Bytes::get_native_u2(code_base + pos) + ConstantPool::CPCACHE_INDEX_TAG;
+      os->print("%s", get_field_name(_method, cp_index));
+      return true;
+    }
+
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokestatic:
+    case Bytecodes::_invokeinterface: {
+      int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+      if (max_detail == _max_cause_detail && !inner_expr) {
+        os->print(" because the return value of \"");
+      }
+      print_method_name(os, _method, cp_index);
+      return true;
+    }
+
+    default: break;
+  }
+  return false;
+}
+
+void ExceptionMessageBuilder::print_NPE_failed_action(outputStream *os, int bci) {
+
+  // Get the bytecode.
+  address code_base = _method->constMethod()->code_base();
+  Bytecodes::Code code = Bytecodes::java_code_at(_method, code_base + bci);
+  int pos = bci + 1;
+  if (code == Bytecodes::_wide) {
+    code = Bytecodes::java_code_at(_method, code_base + bci + 1);
+    pos += 1;
+  }
+
+  switch (code) {
+    case Bytecodes::_iaload:
+      os->print("Cannot load from int array"); break;
+    case Bytecodes::_faload:
+      os->print("Cannot load from float array"); break;
+    case Bytecodes::_aaload:
+      os->print("Cannot load from object array"); break;
+    case Bytecodes::_baload:
+      os->print("Cannot load from byte/boolean array"); break;
+    case Bytecodes::_caload:
+      os->print("Cannot load from char array"); break;
+    case Bytecodes::_saload:
+      os->print("Cannot load from short array"); break;
+    case Bytecodes::_laload:
+      os->print("Cannot load from long array"); break;
+    case Bytecodes::_daload:
+      os->print("Cannot load from double array"); break;
+
+    case Bytecodes::_iastore:
+      os->print("Cannot store to int array"); break;
+    case Bytecodes::_fastore:
+      os->print("Cannot store to float array"); break;
+    case Bytecodes::_aastore:
+      os->print("Cannot store to object array"); break;
+    case Bytecodes::_bastore:
+      os->print("Cannot store to byte/boolean array"); break;
+    case Bytecodes::_castore:
+      os->print("Cannot store to char array"); break;
+    case Bytecodes::_sastore:
+      os->print("Cannot store to short array"); break;
+    case Bytecodes::_lastore:
+      os->print("Cannot store to long array"); break;
+    case Bytecodes::_dastore:
+      os->print("Cannot store to double array"); break;
+
+    case Bytecodes::_arraylength:
+      os->print("Cannot read the array length"); break;
+    case Bytecodes::_athrow:
+      os->print("Cannot throw exception"); break;
+    case Bytecodes::_monitorenter:
+      os->print("Cannot enter synchronized block"); break;
+    case Bytecodes::_monitorexit:
+      os->print("Cannot exit synchronized block"); break;
+    case Bytecodes::_getfield: {
+        int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+        ConstantPool* cp = _method->constants();
+        int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
+        int name_index = cp->name_ref_index_at(name_and_type_index);
+        Symbol* name = cp->symbol_at(name_index);
+        os->print("Cannot read field \"%s\"", name->as_C_string());
+      } break;
+    case Bytecodes::_putfield: {
+        int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+        os->print("Cannot assign field \"%s\"", get_field_name(_method, cp_index));
+      } break;
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokeinterface: {
+        int cp_index = Bytes::get_native_u2(code_base+ pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
+        os->print("Cannot invoke \"");
+        print_method_name(os, _method, cp_index);
+        os->print("\"");
+      } break;
+
+    default:
+      assert(0, "We should have checked this bytecode in get_NPE_null_slot().");
+      break;
+  }
+}
+
+// Main API
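+//
+// Builds an extended NullPointerException message such as
+//   Cannot invoke "String.length()" because "<local1>" is null
+// consisting of the failed action (print_NPE_failed_action) and, if it
+// can be determined, a description of what was null (print_NPE_cause).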
+bool BytecodeUtils::get_NPE_message_at(outputStream* ss, Method* method, int bci) {
+
+  NoSafepointVerifier _nsv;   // Cannot use this object over a safepoint.
+
+  // If this NPE was created via reflection, we have no real NPE.
+  if (method->method_holder() ==
+      SystemDictionary::reflect_NativeConstructorAccessorImpl_klass()) {
+    return false;
+  }
+
+  // Analyse the bytecodes.
+  ResourceMark rm;
+  ExceptionMessageBuilder emb(method, bci);
+
+  // Determine the slot of the operand stack that contains the null reference.
+  // Explicitly constructed NPEs are detected and reported as
+  // NPE_EXPLICIT_CONSTRUCTED.
+  int slot = emb.get_NPE_null_slot(bci);
+
+  // Build the message.
+  if (slot == NPE_EXPLICIT_CONSTRUCTED) {
+    // We don't want to print a message.
+    return false;
+  } else if (slot == INVALID_BYTECODE_ENCOUNTERED) {
+    // We encountered a bytecode that does not dereference a reference.
+    DEBUG_ONLY(ss->print("There cannot be a NullPointerException at bci %d of method %s",
+                         bci, method->external_name()));
+    NOT_DEBUG(return false);
+  } else {
+    // Print string describing which action (bytecode) could not be
+    // performed because of the null reference.
+    emb.print_NPE_failed_action(ss, bci);
+    // Print a description of what is null.
+    if (!emb.print_NPE_cause(ss, bci, slot)) {
+      // Nothing was printed. End the sentence without the 'because'
+      // subordinate clause.
+    }
+  }
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/interpreter/bytecodeUtils.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_INTERPRETER_BYTECODEUTILS_HPP
+#define SHARE_INTERPRETER_BYTECODEUTILS_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class Method;
+class outputStream;
+
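+// Static helper for assembling the extended NullPointerException
+// message; the bytecode analysis itself lives in bytecodeUtils.cpp.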
+class BytecodeUtils : public AllStatic {
+ public:
+  // NPE extended message. Return true if string is printed.
+  static bool get_NPE_message_at(outputStream* ss, Method* method, int bci);
+};
+
+#endif // SHARE_INTERPRETER_BYTECODEUTILS_HPP
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1249,15 +1249,15 @@
   char sig_type = '\0';
 
   switch(cp_entry->flag_state()) {
-    case btos: sig_type = 'B'; break;
-    case ztos: sig_type = 'Z'; break;
-    case ctos: sig_type = 'C'; break;
-    case stos: sig_type = 'S'; break;
-    case itos: sig_type = 'I'; break;
-    case ftos: sig_type = 'F'; break;
-    case atos: sig_type = 'L'; break;
-    case ltos: sig_type = 'J'; break;
-    case dtos: sig_type = 'D'; break;
+    case btos: sig_type = JVM_SIGNATURE_BYTE;    break;
+    case ztos: sig_type = JVM_SIGNATURE_BOOLEAN; break;
+    case ctos: sig_type = JVM_SIGNATURE_CHAR;    break;
+    case stos: sig_type = JVM_SIGNATURE_SHORT;   break;
+    case itos: sig_type = JVM_SIGNATURE_INT;     break;
+    case ftos: sig_type = JVM_SIGNATURE_FLOAT;   break;
+    case atos: sig_type = JVM_SIGNATURE_CLASS;   break;
+    case ltos: sig_type = JVM_SIGNATURE_LONG;    break;
+    case dtos: sig_type = JVM_SIGNATURE_DOUBLE;  break;
     default:  ShouldNotReachHere(); return;
   }
   bool is_static = (obj == NULL);
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -73,7 +73,7 @@
   Handle protection_domain(THREAD, caller->method_holder()->protection_domain());
 
   // Ignore wrapping L and ;
-  if (name[0] == 'L') {
+  if (name[0] == JVM_SIGNATURE_CLASS) {
     assert(len > 2, "small name %s", name);
     name++;
     len -= 2;
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -535,8 +535,8 @@
       JVMCI_THROW_MSG_NULL(ClassNotFoundException, str);
     }
   } else {
-    if (class_name->char_at(0) == 'L' &&
-      class_name->char_at(class_name->utf8_length()-1) == ';') {
+    if (class_name->char_at(0) == JVM_SIGNATURE_CLASS &&
+        class_name->char_at(class_name->utf8_length()-1) == JVM_SIGNATURE_ENDCLASS) {
       // This is a name from a signature.  Strip off the trimmings.
       // Call recursive to keep scope of strippedsym.
       TempNewSymbol strippedsym = SymbolTable::new_symbol(class_name->as_utf8()+1,
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -980,8 +980,8 @@
   JVMCI_EXCEPTION_CONTEXT;
 
   // Now we need to check the SystemDictionary
-  if (sym->char_at(0) == 'L' &&
-    sym->char_at(sym->utf8_length()-1) == ';') {
+  if (sym->char_at(0) == JVM_SIGNATURE_CLASS &&
+      sym->char_at(sym->utf8_length()-1) == JVM_SIGNATURE_ENDCLASS) {
     // This is a name from a signature.  Strip off the trimmings.
     // Call recursive to keep scope of strippedsym.
     TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
@@ -1013,8 +1013,8 @@
   // we must build an array type around it.  The CI requires array klasses
   // to be loaded if their element klasses are loaded, except when memory
   // is exhausted.
-  if (sym->char_at(0) == '[' &&
-      (sym->char_at(1) == '[' || sym->char_at(1) == 'L')) {
+  if (sym->char_at(0) == JVM_SIGNATURE_ARRAY &&
+      (sym->char_at(1) == JVM_SIGNATURE_ARRAY || sym->char_at(1) == JVM_SIGNATURE_CLASS)) {
     // We have an unloaded array.
     // Build it on the fly if the element class exists.
     TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
--- a/src/hotspot/share/memory/heapInspection.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/memory/heapInspection.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,8 @@
   ResourceMark rm;
   const char* name1 = e1->klass()->external_name();
   const char* name2 = e2->klass()->external_name();
-  bool d1 = (name1[0] == '[');
-  bool d2 = (name2[0] == '[');
+  bool d1 = (name1[0] == JVM_SIGNATURE_ARRAY);
+  bool d2 = (name2[0] == JVM_SIGNATURE_ARRAY);
   if (d1 && !d2) {
     return -1;
   } else if (d2 && !d1) {
--- a/src/hotspot/share/oops/generateOopMap.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/generateOopMap.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1993,14 +1993,14 @@
 // This is used to parse the signature for fields, since they are very simple...
 CellTypeState *GenerateOopMap::sigchar_to_effect(char sigch, int bci, CellTypeState *out) {
   // Object and array
-  if (sigch=='L' || sigch=='[') {
+  if (sigch==JVM_SIGNATURE_CLASS || sigch==JVM_SIGNATURE_ARRAY) {
     out[0] = CellTypeState::make_line_ref(bci);
     out[1] = CellTypeState::bottom;
     return out;
   }
-  if (sigch == 'J' || sigch == 'D' ) return vvCTS;  // Long and Double
-  if (sigch == 'V' ) return epsilonCTS;             // Void
-  return vCTS;                                      // Otherwise
+  if (sigch == JVM_SIGNATURE_LONG || sigch == JVM_SIGNATURE_DOUBLE) return vvCTS;  // Long and Double
+  if (sigch == JVM_SIGNATURE_VOID) return epsilonCTS; // Void
+  return vCTS;                                        // Otherwise
 }
 
 long GenerateOopMap::_total_byte_count = 0;
--- a/src/hotspot/share/oops/instanceKlass.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -2513,10 +2513,18 @@
 #endif
 }
 
+static void method_release_C_heap_structures(Method* m) {
+  m->release_C_heap_structures();
+}
+
 void InstanceKlass::release_C_heap_structures(InstanceKlass* ik) {
   // Clean up C heap
   ik->release_C_heap_structures();
   ik->constants()->release_C_heap_structures();
+
+  // Deallocate and call destructors for MDO mutexes
+  ik->methods_do(method_release_C_heap_structures);
+
 }
 
 void InstanceKlass::release_C_heap_structures() {
@@ -2602,7 +2610,7 @@
 
   // Add L as type indicator
   int dest_index = 0;
-  dest[dest_index++] = 'L';
+  dest[dest_index++] = JVM_SIGNATURE_CLASS;
 
   // Add the actual class name
   for (int src_index = 0; src_index < src_length; ) {
@@ -2615,7 +2623,7 @@
   }
 
   // Add the semicolon and the NULL
-  dest[dest_index++] = ';';
+  dest[dest_index++] = JVM_SIGNATURE_ENDCLASS;
   dest[dest_index] = '\0';
   return dest;
 }
--- a/src/hotspot/share/oops/klass.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/klass.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -340,19 +340,17 @@
   static ByteSize access_flags_offset()          { return in_ByteSize(offset_of(Klass, _access_flags)); }
 
   // Unpacking layout_helper:
-  enum {
-    _lh_neutral_value           = 0,  // neutral non-array non-instance value
-    _lh_instance_slow_path_bit  = 0x01,
-    _lh_log2_element_size_shift = BitsPerByte*0,
-    _lh_log2_element_size_mask  = BitsPerLong-1,
-    _lh_element_type_shift      = BitsPerByte*1,
-    _lh_element_type_mask       = right_n_bits(BitsPerByte),  // shifted mask
-    _lh_header_size_shift       = BitsPerByte*2,
-    _lh_header_size_mask        = right_n_bits(BitsPerByte),  // shifted mask
-    _lh_array_tag_bits          = 2,
-    _lh_array_tag_shift         = BitsPerInt - _lh_array_tag_bits,
-    _lh_array_tag_obj_value     = ~0x01   // 0x80000000 >> 30
-  };
+  static const int _lh_neutral_value           = 0;  // neutral non-array non-instance value
+  static const int _lh_instance_slow_path_bit  = 0x01;
+  static const int _lh_log2_element_size_shift = BitsPerByte*0;
+  static const int _lh_log2_element_size_mask  = BitsPerLong-1;
+  static const int _lh_element_type_shift      = BitsPerByte*1;
+  static const int _lh_element_type_mask       = right_n_bits(BitsPerByte);  // shifted mask
+  static const int _lh_header_size_shift       = BitsPerByte*2;
+  static const int _lh_header_size_mask        = right_n_bits(BitsPerByte);  // shifted mask
+  static const int _lh_array_tag_bits          = 2;
+  static const int _lh_array_tag_shift         = BitsPerInt - _lh_array_tag_bits;
+  static const int _lh_array_tag_obj_value     = ~0x01;   // 0x80000000 >> 30
 
   static const unsigned int _lh_array_tag_type_value = 0Xffffffff; // ~0x00,  // 0xC0000000 >> 30
 
--- a/src/hotspot/share/oops/method.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/method.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -118,11 +118,6 @@
 void Method::deallocate_contents(ClassLoaderData* loader_data) {
   MetadataFactory::free_metadata(loader_data, constMethod());
   set_constMethod(NULL);
-#if INCLUDE_JVMCI
-  if (method_data()) {
-    FailedSpeculation::free_failed_speculations(method_data()->get_failed_speculations_address());
-  }
-#endif
   MetadataFactory::free_metadata(loader_data, method_data());
   set_method_data(NULL);
   MetadataFactory::free_metadata(loader_data, method_counters());
@@ -131,6 +126,16 @@
   if (code() != NULL) _code = NULL;
 }
 
+void Method::release_C_heap_structures() {
+  if (method_data()) {
+#if INCLUDE_JVMCI
+    FailedSpeculation::free_failed_speculations(method_data()->get_failed_speculations_address());
+#endif
+    // Destroy MethodData
+    method_data()->~MethodData();
+  }
+}
+
 address Method::get_i2c_entry() {
   assert(adapter() != NULL, "must have");
   return adapter()->get_i2c_entry();
--- a/src/hotspot/share/oops/method.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/method.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1006,6 +1006,8 @@
   // Deallocation function for redefine classes or if an error occurs
   void deallocate_contents(ClassLoaderData* loader_data);
 
+  void release_C_heap_structures();
+
   Method* get_new_method() const {
     InstanceKlass* holder = method_holder();
     Method* new_method = holder->method_with_idnum(orig_method_idnum());
--- a/src/hotspot/share/oops/methodData.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/methodData.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -2445,7 +2445,7 @@
   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
   virtual MetaspaceObj::Type type() const { return MethodDataType; }
 
-  // Deallocation support - no pointer fields to deallocate
+  // Deallocation support - no metaspace pointer fields to deallocate
   void deallocate_contents(ClassLoaderData* loader_data) {}
 
   // GC support
--- a/src/hotspot/share/oops/objArrayKlass.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/objArrayKlass.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -106,14 +106,14 @@
     int len = element_klass->name()->utf8_length();
     char *new_str = NEW_RESOURCE_ARRAY(char, len + 4);
     int idx = 0;
-    new_str[idx++] = '[';
+    new_str[idx++] = JVM_SIGNATURE_ARRAY;
     if (element_klass->is_instance_klass()) { // it could be an array or simple type
-      new_str[idx++] = 'L';
+      new_str[idx++] = JVM_SIGNATURE_CLASS;
     }
     memcpy(&new_str[idx], name_str, len * sizeof(char));
     idx += len;
     if (element_klass->is_instance_klass()) {
-      new_str[idx++] = ';';
+      new_str[idx++] = JVM_SIGNATURE_ENDCLASS;
     }
     new_str[idx++] = '\0';
     name = SymbolTable::new_permanent_symbol(new_str);
--- a/src/hotspot/share/oops/symbol.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/oops/symbol.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -201,8 +201,8 @@
   int   length = (int)strlen(str);
   // Turn all '/'s into '.'s (also for array klasses)
   for (int index = 0; index < length; index++) {
-    if (str[index] == '/') {
-      str[index] = '.';
+    if (str[index] == JVM_SIGNATURE_SLASH) {
+      str[index] = JVM_SIGNATURE_DOT;
     }
   }
   return str;
@@ -210,8 +210,8 @@
 
 static void print_class(outputStream *os, char *class_str, int len) {
   for (int i = 0; i < len; ++i) {
-    if (class_str[i] == '/') {
-      os->put('.');
+    if (class_str[i] == JVM_SIGNATURE_SLASH) {
+      os->put(JVM_SIGNATURE_DOT);
     } else {
       os->put(class_str[i]);
     }
@@ -221,9 +221,9 @@
 static void print_array(outputStream *os, char *array_str, int len) {
   int dimensions = 0;
   for (int i = 0; i < len; ++i) {
-    if (array_str[i] == '[') {
+    if (array_str[i] == JVM_SIGNATURE_ARRAY) {
       dimensions++;
-    } else if (array_str[i] == 'L') {
+    } else if (array_str[i] == JVM_SIGNATURE_CLASS) {
       // Expected format: L<type name>;. Skip 'L' and ';' delimiting the type name.
       print_class(os, array_str+i+1, len-i-2);
       break;
--- a/src/hotspot/share/opto/loopTransform.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/opto/loopTransform.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -3129,6 +3129,13 @@
     // We also need to replace the original limit to collapse loop exit.
     Node* cmp = cl->loopexit()->cmp_node();
     assert(cl->limit() == cmp->in(2), "sanity");
+    // Duplicate cmp node if it has other users
+    if (cmp->outcnt() > 1) {
+      cmp = cmp->clone();
+      cmp = phase->_igvn.register_new_node_with_optimizer(cmp);
+      BoolNode *bol = cl->loopexit()->in(CountedLoopEndNode::TestValue)->as_Bool();
+      phase->_igvn.replace_input_of(bol, 1, cmp); // put bol on worklist
+    }
     phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
     phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
   }
--- a/src/hotspot/share/prims/jni.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/jni.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -2151,7 +2151,7 @@
   if (JvmtiExport::should_post_field_modification()) {
     jvalue field_value;
     field_value.l = value;
-    o = JvmtiExport::jni_SetField_probe_nh(thread, obj, o, k, fieldID, false, 'L', (jvalue *)&field_value);
+    o = JvmtiExport::jni_SetField_probe_nh(thread, obj, o, k, fieldID, false, JVM_SIGNATURE_CLASS, (jvalue *)&field_value);
   }
   HeapAccess<ON_UNKNOWN_OOP_REF>::oop_store_at(o, offset, JNIHandles::resolve(value));
   HOTSPOT_JNI_SETOBJECTFIELD_RETURN();
@@ -2177,34 +2177,34 @@
     field_value.unionType = value; \
     o = JvmtiExport::jni_SetField_probe_nh(thread, obj, o, k, fieldID, false, SigType, (jvalue *)&field_value); \
   } \
-  if (SigType == 'Z') { value = ((jboolean)value) & 1; } \
+  if (SigType == JVM_SIGNATURE_BOOLEAN) { value = ((jboolean)value) & 1; } \
   o->Fieldname##_field_put(offset, value); \
   ReturnProbe; \
 JNI_END
 
-DEFINE_SETFIELD(jboolean, bool,   Boolean, 'Z', z
+DEFINE_SETFIELD(jboolean, bool,   Boolean, JVM_SIGNATURE_BOOLEAN, z
                 , HOTSPOT_JNI_SETBOOLEANFIELD_ENTRY(env, obj, (uintptr_t)fieldID, value),
                 HOTSPOT_JNI_SETBOOLEANFIELD_RETURN())
-DEFINE_SETFIELD(jbyte,    byte,   Byte,    'B', b
+DEFINE_SETFIELD(jbyte,    byte,   Byte,    JVM_SIGNATURE_BYTE, b
                 , HOTSPOT_JNI_SETBYTEFIELD_ENTRY(env, obj, (uintptr_t)fieldID, value),
                 HOTSPOT_JNI_SETBYTEFIELD_RETURN())
-DEFINE_SETFIELD(jchar,    char,   Char,    'C', c
+DEFINE_SETFIELD(jchar,    char,   Char,    JVM_SIGNATURE_CHAR, c
                 , HOTSPOT_JNI_SETCHARFIELD_ENTRY(env, obj, (uintptr_t)fieldID, value),
                 HOTSPOT_JNI_SETCHARFIELD_RETURN())
-DEFINE_SETFIELD(jshort,   short,  Short,   'S', s
+DEFINE_SETFIELD(jshort,   short,  Short,   JVM_SIGNATURE_SHORT, s
                 , HOTSPOT_JNI_SETSHORTFIELD_ENTRY(env, obj, (uintptr_t)fieldID, value),
                 HOTSPOT_JNI_SETSHORTFIELD_RETURN())
-DEFINE_SETFIELD(jint,     int,    Int,     'I', i
+DEFINE_SETFIELD(jint,     int,    Int,     JVM_SIGNATURE_INT, i
                 , HOTSPOT_JNI_SETINTFIELD_ENTRY(env, obj, (uintptr_t)fieldID, value),
                 HOTSPOT_JNI_SETINTFIELD_RETURN())
-DEFINE_SETFIELD(jlong,    long,   Long,    'J', j
+DEFINE_SETFIELD(jlong,    long,   Long,    JVM_SIGNATURE_LONG, j
                 , HOTSPOT_JNI_SETLONGFIELD_ENTRY(env, obj, (uintptr_t)fieldID, value),
                 HOTSPOT_JNI_SETLONGFIELD_RETURN())
 // Float and double probes don't return value because dtrace doesn't currently support it
-DEFINE_SETFIELD(jfloat,   float,  Float,   'F', f
+DEFINE_SETFIELD(jfloat,   float,  Float,   JVM_SIGNATURE_FLOAT, f
                 , HOTSPOT_JNI_SETFLOATFIELD_ENTRY(env, obj, (uintptr_t)fieldID),
                 HOTSPOT_JNI_SETFLOATFIELD_RETURN())
-DEFINE_SETFIELD(jdouble,  double, Double,  'D', d
+DEFINE_SETFIELD(jdouble,  double, Double,  JVM_SIGNATURE_DOUBLE, d
                 , HOTSPOT_JNI_SETDOUBLEFIELD_ENTRY(env, obj, (uintptr_t)fieldID),
                 HOTSPOT_JNI_SETDOUBLEFIELD_RETURN())
 
@@ -2352,7 +2352,7 @@
   if (JvmtiExport::should_post_field_modification()) {
     jvalue field_value;
     field_value.l = value;
-    JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, 'L', (jvalue *)&field_value);
+    JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, JVM_SIGNATURE_CLASS, (jvalue *)&field_value);
   }
   id->holder()->java_mirror()->obj_field_put(id->offset(), JNIHandles::resolve(value));
   HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN();
@@ -2376,34 +2376,34 @@
     field_value.unionType = value; \
     JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, SigType, (jvalue *)&field_value); \
   } \
-  if (SigType == 'Z') { value = ((jboolean)value) & 1; } \
+  if (SigType == JVM_SIGNATURE_BOOLEAN) { value = ((jboolean)value) & 1; } \
   id->holder()->java_mirror()-> Fieldname##_field_put (id->offset(), value); \
   ReturnProbe;\
 JNI_END
 
-DEFINE_SETSTATICFIELD(jboolean, bool,   Boolean, 'Z', z
+DEFINE_SETSTATICFIELD(jboolean, bool,   Boolean, JVM_SIGNATURE_BOOLEAN, z
                       , HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY(env, clazz, (uintptr_t)fieldID, value),
                       HOTSPOT_JNI_SETSTATICBOOLEANFIELD_RETURN())
-DEFINE_SETSTATICFIELD(jbyte,    byte,   Byte,    'B', b
+DEFINE_SETSTATICFIELD(jbyte,    byte,   Byte,    JVM_SIGNATURE_BYTE, b
                       , HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
                       HOTSPOT_JNI_SETSTATICBYTEFIELD_RETURN())
-DEFINE_SETSTATICFIELD(jchar,    char,   Char,    'C', c
+DEFINE_SETSTATICFIELD(jchar,    char,   Char,    JVM_SIGNATURE_CHAR, c
                       , HOTSPOT_JNI_SETSTATICCHARFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
                       HOTSPOT_JNI_SETSTATICCHARFIELD_RETURN())
-DEFINE_SETSTATICFIELD(jshort,   short,  Short,   'S', s
+DEFINE_SETSTATICFIELD(jshort,   short,  Short,   JVM_SIGNATURE_SHORT, s
                       , HOTSPOT_JNI_SETSTATICSHORTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
                       HOTSPOT_JNI_SETSTATICSHORTFIELD_RETURN())
-DEFINE_SETSTATICFIELD(jint,     int,    Int,     'I', i
+DEFINE_SETSTATICFIELD(jint,     int,    Int,     JVM_SIGNATURE_INT, i
                       , HOTSPOT_JNI_SETSTATICINTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
                       HOTSPOT_JNI_SETSTATICINTFIELD_RETURN())
-DEFINE_SETSTATICFIELD(jlong,    long,   Long,    'J', j
+DEFINE_SETSTATICFIELD(jlong,    long,   Long,    JVM_SIGNATURE_LONG, j
                       , HOTSPOT_JNI_SETSTATICLONGFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
                       HOTSPOT_JNI_SETSTATICLONGFIELD_RETURN())
 // Float and double probes don't return value because dtrace doesn't currently support it
-DEFINE_SETSTATICFIELD(jfloat,   float,  Float,   'F', f
+DEFINE_SETSTATICFIELD(jfloat,   float,  Float,   JVM_SIGNATURE_FLOAT, f
                       , HOTSPOT_JNI_SETSTATICFLOATFIELD_ENTRY(env, clazz, (uintptr_t) fieldID),
                       HOTSPOT_JNI_SETSTATICFLOATFIELD_RETURN())
-DEFINE_SETSTATICFIELD(jdouble,  double, Double,  'D', d
+DEFINE_SETSTATICFIELD(jdouble,  double, Double,  JVM_SIGNATURE_DOUBLE, d
                       , HOTSPOT_JNI_SETSTATICDOUBLEFIELD_ENTRY(env, clazz, (uintptr_t) fieldID),
                       HOTSPOT_JNI_SETSTATICDOUBLEFIELD_RETURN())
 
--- a/src/hotspot/share/prims/jvm.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/jvm.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -38,6 +38,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/bytecode.hpp"
+#include "interpreter/bytecodeUtils.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "logging/log.hpp"
 #include "memory/heapShared.hpp"
@@ -532,13 +533,37 @@
 
 // java.lang.Throwable //////////////////////////////////////////////////////
 
-
 JVM_ENTRY(void, JVM_FillInStackTrace(JNIEnv *env, jobject receiver))
   JVMWrapper("JVM_FillInStackTrace");
   Handle exception(thread, JNIHandles::resolve_non_null(receiver));
   java_lang_Throwable::fill_in_stack_trace(exception);
 JVM_END
 
+// java.lang.NullPointerException ///////////////////////////////////////////
+
+JVM_ENTRY(jstring, JVM_GetExtendedNPEMessage(JNIEnv *env, jthrowable throwable))
+  if (!ShowCodeDetailsInExceptionMessages) return NULL;
+
+  oop exc = JNIHandles::resolve_non_null(throwable);
+
+  Method* method;
+  int bci;
+  if (!java_lang_Throwable::get_top_method_and_bci(exc, &method, &bci)) {
+    return NULL;
+  }
+  if (method->is_native()) {
+    return NULL;
+  }
+
+  stringStream ss;
+  bool ok = BytecodeUtils::get_NPE_message_at(&ss, method, bci);
+  if (ok) {
+    oop result = java_lang_String::create_oop_from_str(ss.base(), CHECK_0);
+    return (jstring) JNIHandles::make_local(env, result);
+  } else {
+    return NULL;
+  }
+JVM_END
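
The new entry is the VM half of the more detailed NullPointerException messages: it returns NULL unless the manageable flag ShowCodeDetailsInExceptionMessages (added to globals.hpp later in this changeset) is on and a method/bci pair can be recovered from the throwable's backtrace; native frames are skipped as well. A sketch of the export declaration such an entry is paired with in src/hotspot/share/include/jvm.h (exact wording and placement in the header are assumptions):

    JNIEXPORT jstring JNICALL
    JVM_GetExtendedNPEMessage(JNIEnv *env, jthrowable throwable);

On the library side the message is presumably surfaced lazily (e.g. through a private native accessor on java.lang.NullPointerException), so the analysis cost is only paid when a message is actually requested.
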
 
 // java.lang.StackTraceElement //////////////////////////////////////////////
 
@@ -2332,7 +2357,7 @@
     ConstantPool* cp = InstanceKlass::cast(k)->constants();
     for (int index = cp->length() - 1; index >= 0; index--) {
       constantTag tag = cp->tag_at(index);
-      types[index] = (tag.is_unresolved_klass()) ? JVM_CONSTANT_Class : tag.value();
+      types[index] = (tag.is_unresolved_klass()) ? (unsigned char) JVM_CONSTANT_Class : tag.value();
     }
   }
 JVM_END
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1361,7 +1361,7 @@
     NULL_CHECK(ob_k, JVMTI_ERROR_INVALID_OBJECT);
 
     // Method return type signature.
-    char* ty_sign = 1 + strchr(signature->as_C_string(), ')');
+    char* ty_sign = 1 + strchr(signature->as_C_string(), JVM_SIGNATURE_ENDFUNC);
 
     if (!VM_GetOrSetLocal::is_assignable(ty_sign, ob_k, current_thread)) {
       return JVMTI_ERROR_TYPE_MISMATCH;
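
Here JVM_SIGNATURE_ENDFUNC is the ')' closing the parameter list, so the pointer arithmetic lands on the return-type descriptor. A tiny illustration with a hypothetical method signature:

    const char* sig     = "(ILjava/lang/Object;)Ljava/lang/String;";
    const char* ty_sign = strchr(sig, JVM_SIGNATURE_ENDFUNC) + 1;  // -> "Ljava/lang/String;"
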
--- a/src/hotspot/share/prims/jvmtiExport.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiExport.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1994,7 +1994,9 @@
   address location, Klass* field_klass, Handle object, jfieldID field,
   char sig_type, jvalue *value) {
 
-  if (sig_type == 'I' || sig_type == 'Z' || sig_type == 'B' || sig_type == 'C' || sig_type == 'S') {
+  if (sig_type == JVM_SIGNATURE_INT || sig_type == JVM_SIGNATURE_BOOLEAN ||
+      sig_type == JVM_SIGNATURE_BYTE || sig_type == JVM_SIGNATURE_CHAR ||
+      sig_type == JVM_SIGNATURE_SHORT) {
     // 'I' instructions are used for byte, char, short and int.
     // determine which it really is, and convert
     fieldDescriptor fd;
@@ -2005,22 +2007,22 @@
       // convert value from int to appropriate type
       switch (fd.field_type()) {
       case T_BOOLEAN:
-        sig_type = 'Z';
+        sig_type = JVM_SIGNATURE_BOOLEAN;
         value->i = 0; // clear it
         value->z = (jboolean)ival;
         break;
       case T_BYTE:
-        sig_type = 'B';
+        sig_type = JVM_SIGNATURE_BYTE;
         value->i = 0; // clear it
         value->b = (jbyte)ival;
         break;
       case T_CHAR:
-        sig_type = 'C';
+        sig_type = JVM_SIGNATURE_CHAR;
         value->i = 0; // clear it
         value->c = (jchar)ival;
         break;
       case T_SHORT:
-        sig_type = 'S';
+        sig_type = JVM_SIGNATURE_SHORT;
         value->i = 0; // clear it
         value->s = (jshort)ival;
         break;
@@ -2035,11 +2037,11 @@
     }
   }
 
-  assert(sig_type != '[', "array should have sig_type == 'L'");
+  assert(sig_type != JVM_SIGNATURE_ARRAY, "array should have sig_type == 'L'");
   bool handle_created = false;
 
   // convert oop to JNI handle.
-  if (sig_type == 'L') {
+  if (sig_type == JVM_SIGNATURE_CLASS) {
     handle_created = true;
     value->l = (jobject)JNIHandles::make_local(thread, (oop)value->l);
   }
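
Background for the conversion above: subword field writes are reported with an int-sized jvalue, so the probe re-types the value according to the field's declared type before posting the event. A hypothetical before/after for a byte field:

    jvalue v;
    v.i = 0x1234;                        // as reported for the 'I'-typed access
    jint ival = v.i;                     // save before clearing the union
    v.i = 0;                             // clear the wider slot
    v.b = (jbyte) ival;                  // 0x34 after narrowing
    char sig_type = JVM_SIGNATURE_BYTE;  // the event now carries the real field type
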
--- a/src/hotspot/share/prims/jvmtiImpl.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -588,7 +588,8 @@
   assert(klass != NULL, "klass must not be NULL");
 
   int len = (int) strlen(ty_sign);
-  if (ty_sign[0] == 'L' && ty_sign[len-1] == ';') { // Need pure class/interface name
+  if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
+      ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
     ty_sign++;
     len -= 2;
   }
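
The trimming above turns a field descriptor into a plain class/interface name by skipping the leading 'L' and dropping the trailing ';'. For example (hypothetical input):

    const char* ty_sign = "Ljava/lang/String;";
    int len = (int) strlen(ty_sign);               // 18
    if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
        ty_sign[len - 1] == JVM_SIGNATURE_ENDCLASS) {
      ty_sign++;                                   // skip 'L'
      len -= 2;                                    // drop 'L' and ';'
    }
    // ty_sign/len now describe the 16 characters "java/lang/String"
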
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -2283,14 +2283,14 @@
 
   switch (tag) {
     // These BaseType tag values are from Table 4.2 in VM spec:
-    case 'B':  // byte
-    case 'C':  // char
-    case 'D':  // double
-    case 'F':  // float
-    case 'I':  // int
-    case 'J':  // long
-    case 'S':  // short
-    case 'Z':  // boolean
+    case JVM_SIGNATURE_BYTE:
+    case JVM_SIGNATURE_CHAR:
+    case JVM_SIGNATURE_DOUBLE:
+    case JVM_SIGNATURE_FLOAT:
+    case JVM_SIGNATURE_INT:
+    case JVM_SIGNATURE_LONG:
+    case JVM_SIGNATURE_SHORT:
+    case JVM_SIGNATURE_BOOLEAN:
 
     // The remaining tag values are from Table 4.8 in the 2nd-edition of
     // the VM spec:
@@ -2361,7 +2361,7 @@
       }
       break;
 
-    case '[':
+    case JVM_SIGNATURE_ARRAY:
     {
       if ((byte_i_ref + 2) > annotations_typeArray->length()) {
         // not enough room for a num_values field
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1032,7 +1032,7 @@
 
 // helper function to tell if a field is a primitive field or not
 static inline bool is_primitive_field_type(char type) {
-  return (type != 'L' && type != '[');
+  return (type != JVM_SIGNATURE_CLASS && type != JVM_SIGNATURE_ARRAY);
 }
 
 // helper function to copy the value from location addr to jvalue.
--- a/src/hotspot/share/prims/methodHandles.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/methodHandles.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -542,18 +542,22 @@
   const int len = sig->utf8_length();
   for (int i = 0; i < len; i++) {
     switch (sig->char_at(i)) {
-    case 'L':
+    case JVM_SIGNATURE_CLASS:
       // only java/lang/Object is valid here
       if (sig->index_of_at(i, OBJ_SIG, OBJ_SIG_LEN) != i)
         return false;
       i += OBJ_SIG_LEN-1;  //-1 because of i++ in loop
       continue;
-    case '(': case ')': case 'V':
-    case 'I': case 'J': case 'F': case 'D':
+    case JVM_SIGNATURE_FUNC:
+    case JVM_SIGNATURE_ENDFUNC:
+    case JVM_SIGNATURE_VOID:
+    case JVM_SIGNATURE_INT:
+    case JVM_SIGNATURE_LONG:
+    case JVM_SIGNATURE_FLOAT:
+    case JVM_SIGNATURE_DOUBLE:
       continue;
-    //case '[':
-    //case 'Z': case 'B': case 'C': case 'S':
     default:
+      // subword types (T_BYTE etc.), arrays
       return false;
     }
   }
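
The loop above accepts only erased "basic" signatures: the 32/64-bit primitives I, J, F and D, void, the structural characters, and the exact class reference Ljava/lang/Object;. Illustrative inputs, with results inferred from the cases shown (not taken from a test):

    // "(ILjava/lang/Object;)J"  -> accepted
    // "(Z)V"                    -> rejected (subword type 'Z')
    // "([I)V"                   -> rejected (array)
    // "(Ljava/lang/String;)V"   -> rejected (only java/lang/Object is allowed)
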
@@ -567,7 +571,7 @@
   } else if (is_basic_type_signature(sig)) {
     sig->increment_refcount();
     return sig;  // that was easy
-  } else if (sig->char_at(0) != '(') {
+  } else if (sig->char_at(0) != JVM_SIGNATURE_FUNC) {
     BasicType bt = char2type(sig->char_at(0));
     if (is_subword_type(bt)) {
       bsig = vmSymbols::int_signature();
@@ -578,7 +582,7 @@
   } else {
     ResourceMark rm;
     stringStream buffer(128);
-    buffer.put('(');
+    buffer.put(JVM_SIGNATURE_FUNC);
     int arg_pos = 0, keep_arg_pos = -1;
     if (keep_last_arg)
       keep_arg_pos = ArgumentCount(sig).size() - 1;
@@ -586,7 +590,7 @@
       BasicType bt = ss.type();
       size_t this_arg_pos = buffer.size();
       if (ss.at_return_type()) {
-        buffer.put(')');
+        buffer.put(JVM_SIGNATURE_ENDFUNC);
       }
       if (arg_pos == keep_arg_pos) {
         buffer.write((char*) ss.raw_bytes(),
@@ -621,25 +625,26 @@
   for (int i = 0; i < len; i++) {
     char ch = sig->char_at(i);
     switch (ch) {
-    case '(': case ')':
+    case JVM_SIGNATURE_FUNC:
+    case JVM_SIGNATURE_ENDFUNC:
       prev_type = false;
       st->put(ch);
       continue;
-    case '[':
+    case JVM_SIGNATURE_ARRAY:
       if (!keep_basic_names && keep_arrays)
         st->put(ch);
       array++;
       continue;
-    case 'L':
+    case JVM_SIGNATURE_CLASS:
       {
         if (prev_type)  st->put(',');
         int start = i+1, slash = start;
-        while (++i < len && (ch = sig->char_at(i)) != ';') {
-          if (ch == '/' || ch == '.' || ch == '$')  slash = i+1;
+        while (++i < len && (ch = sig->char_at(i)) != JVM_SIGNATURE_ENDCLASS) {
+          if (ch == JVM_SIGNATURE_SLASH || ch == JVM_SIGNATURE_DOT || ch == '$')  slash = i+1;
         }
         if (slash < i)  start = slash;
         if (!keep_basic_names) {
-          st->put('L');
+          st->put(JVM_SIGNATURE_CLASS);
         } else {
           for (int j = start; j < i; j++)
             st->put(sig->char_at(j));
@@ -650,7 +655,7 @@
     default:
       {
         if (array && char2type(ch) != T_ILLEGAL && !keep_arrays) {
-          ch = '[';
+          ch = JVM_SIGNATURE_ARRAY;
           array = 0;
         }
         if (prev_type)  st->put(',');
@@ -978,7 +983,7 @@
   }
   if (sig != NULL) {
     if (sig->utf8_length() == 0)  return 0; // a match is not possible
-    if (sig->char_at(0) == '(')
+    if (sig->char_at(0) == JVM_SIGNATURE_FUNC)
       match_flags &= ~(IS_FIELD | IS_TYPE);
     else
       match_flags &= ~(IS_CONSTRUCTOR | IS_METHOD);
@@ -1456,7 +1461,7 @@
           {
             Symbol* type = caller->constants()->signature_ref_at(bss_index_in_pool);
             Handle th;
-            if (type->char_at(0) == '(') {
+            if (type->char_at(0) == JVM_SIGNATURE_FUNC) {
               th = SystemDictionary::find_method_handle_type(type, caller, CHECK);
             } else {
               th = SystemDictionary::find_java_mirror_for_type(type, caller, SignatureStream::NCDFError, CHECK);
--- a/src/hotspot/share/prims/nativeLookup.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/prims/nativeLookup.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -106,7 +106,7 @@
   st.print("__");
   // find ')'
   int end;
-  for (end = 0; end < signature->utf8_length() && signature->char_at(end) != ')'; end++);
+  for (end = 0; end < signature->utf8_length() && signature->char_at(end) != JVM_SIGNATURE_ENDFUNC; end++);
   // skip first '('
   mangle_name_on(&st, signature, 1, end);
   return st.as_string();
--- a/src/hotspot/share/runtime/arguments.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/arguments.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -3968,6 +3968,11 @@
       "Shared spaces are not supported in this VM\n");
     return JNI_ERR;
   }
+  if (DumpLoadedClassList != NULL) {
+    jio_fprintf(defaultStream::error_stream(),
+      "DumpLoadedClassList is not supported in this VM\n");
+    return JNI_ERR;
+  }
   if ((UseSharedSpaces && FLAG_IS_CMDLINE(UseSharedSpaces)) ||
       log_is_enabled(Info, cds)) {
     warning("Shared spaces are not supported in this VM");
--- a/src/hotspot/share/runtime/deoptimization.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -295,7 +295,7 @@
 
   // Reallocate the non-escaping objects and restore their fields. Then
   // relock objects if synchronization on them was eliminated.
-  if (jvmci_enabled || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateAllocations)) {
+  if (jvmci_enabled || (DoEscapeAnalysis && EliminateAllocations)) {
     realloc_failures = eliminate_allocations(thread, exec_mode, cm, deoptee, map, chunk);
   }
 #endif // COMPILER2_OR_JVMCI
--- a/src/hotspot/share/runtime/fieldType.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/fieldType.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -39,28 +39,28 @@
 // Check if it is a valid array signature
 bool FieldType::is_valid_array_signature(Symbol* sig) {
   assert(sig->utf8_length() > 1, "this should already have been checked");
-  assert(sig->char_at(0) == '[', "this should already have been checked");
+  assert(sig->char_at(0) == JVM_SIGNATURE_ARRAY, "this should already have been checked");
   // The first character is already checked
   int i = 1;
   int len = sig->utf8_length();
   // First skip all '['s
-  while(i < len - 1 && sig->char_at(i) == '[') i++;
+  while(i < len - 1 && sig->char_at(i) == JVM_SIGNATURE_ARRAY) i++;
 
   // Check type
   switch(sig->char_at(i)) {
-    case 'B': // T_BYTE
-    case 'C': // T_CHAR
-    case 'D': // T_DOUBLE
-    case 'F': // T_FLOAT
-    case 'I': // T_INT
-    case 'J': // T_LONG
-    case 'S': // T_SHORT
-    case 'Z': // T_BOOLEAN
+    case JVM_SIGNATURE_BYTE:
+    case JVM_SIGNATURE_CHAR:
+    case JVM_SIGNATURE_DOUBLE:
+    case JVM_SIGNATURE_FLOAT:
+    case JVM_SIGNATURE_INT:
+    case JVM_SIGNATURE_LONG:
+    case JVM_SIGNATURE_SHORT:
+    case JVM_SIGNATURE_BOOLEAN:
       // If it is an array, the type is the last character
       return (i + 1 == len);
-    case 'L':
+    case JVM_SIGNATURE_CLASS:
       // If it is an object, the last character must be a ';'
-      return sig->char_at(len - 1) == ';';
+      return sig->char_at(len - 1) == JVM_SIGNATURE_ENDCLASS;
   }
 
   return false;
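
Examples of what the check above accepts and rejects (derived from the cases in the hunk):

    // "[I"                   -> valid   (primitive element, type is the last char)
    // "[[Ljava/lang/String;" -> valid   (object element terminated by ';')
    // "[Ljava/lang/String"   -> invalid (missing trailing ';')
    // "[X"                   -> invalid (unknown element type)
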
@@ -71,7 +71,7 @@
   assert(basic_type(signature) == T_ARRAY, "must be array");
   int index = 1;
   int dim   = 1;
-  while (signature->char_at(index) == '[') {
+  while (signature->char_at(index) == JVM_SIGNATURE_ARRAY) {
     index++;
     dim++;
   }
@@ -80,7 +80,7 @@
   BasicType element_type = char2type(element[0]);
   if (element_type == T_OBJECT) {
     int len = (int)strlen(element);
-    assert(element[len-1] == ';', "last char should be a semicolon");
+    assert(element[len-1] == JVM_SIGNATURE_ENDCLASS, "last char should be a semicolon");
     element[len-1] = '\0';        // chop off semicolon
     fd._object_key = SymbolTable::new_symbol(element + 1);
   }
--- a/src/hotspot/share/runtime/fieldType.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/fieldType.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -58,14 +58,16 @@
   static BasicType basic_type(Symbol* signature);
 
   // Testing
-  static bool is_array(Symbol* signature) { return signature->utf8_length() > 1 && signature->char_at(0) == '[' && is_valid_array_signature(signature); }
+  static bool is_array(Symbol* signature) { return signature->utf8_length() > 1 &&
+                                                   signature->char_at(0) == JVM_SIGNATURE_ARRAY &&
+                                                   is_valid_array_signature(signature); }
 
   static bool is_obj(Symbol* signature) {
      int sig_length = signature->utf8_length();
      // Must start with 'L' and end with ';'
      return (sig_length >= 2 &&
-             (signature->char_at(0) == 'L') &&
-             (signature->char_at(sig_length - 1) == ';'));
+             (signature->char_at(0) == JVM_SIGNATURE_CLASS) &&
+             (signature->char_at(sig_length - 1) == JVM_SIGNATURE_ENDCLASS));
   }
 
   // Parse field and extract array information. Works for T_ARRAY only.
--- a/src/hotspot/share/runtime/globals.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/globals.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -643,6 +643,10 @@
   product(bool, OmitStackTraceInFastThrow, true,                            \
           "Omit backtraces for some 'hot' exceptions in optimized code")    \
                                                                             \
+  manageable(bool, ShowCodeDetailsInExceptionMessages, false,               \
+          "Show exception messages from RuntimeExceptions that contain "    \
+          "snippets of the failing code. Disable this to improve privacy.") \
+                                                                            \
   product(bool, PrintWarnings, true,                                        \
           "Print JVM warnings to output stream")                            \
                                                                             \
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -334,12 +334,12 @@
 #if INCLUDE_JVMTI
   def(CDSClassFileStream_lock      , PaddedMutex  , max_nonleaf, false, _safepoint_check_always);
 #endif
+  def(DumpTimeTable_lock           , PaddedMutex  , leaf,        true,  _safepoint_check_never);
+#endif // INCLUDE_CDS
 
 #if INCLUDE_JVMCI
   def(JVMCI_lock                   , PaddedMonitor, nonleaf+2,   true,  _safepoint_check_always);
 #endif
-  def(DumpTimeTable_lock           , PaddedMutex  , leaf,        true,  _safepoint_check_never);
-#endif // INCLUDE_CDS
 }
 
 GCMutexLocker::GCMutexLocker(Mutex* mutex) {
--- a/src/hotspot/share/runtime/os.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/os.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -47,8 +47,6 @@
 // OS services (time, I/O) as well as other functionality with system-
 // dependent code.
 
-typedef void (*dll_func)(...);
-
 class Thread;
 class JavaThread;
 class NativeCallStack;
@@ -195,14 +193,9 @@
 
   // The "virtual time" of a thread is the amount of time a thread has
   // actually run.  The first function indicates whether the OS supports
-  // this functionality for the current thread, and if so:
-  //   * the second enables vtime tracking (if that is required).
-  //   * the third tells whether vtime is enabled.
-  //   * the fourth returns the elapsed virtual time for the current
-  //     thread.
+  // this functionality for the current thread, and if so the second
+  // returns the elapsed virtual time for the current thread.
   static bool supports_vtime();
-  static bool enable_vtime();
-  static bool vtime_enabled();
   static double elapsedVTime();
 
   // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
@@ -254,14 +247,6 @@
     return _initial_active_processor_count;
   }
 
-  // Bind processes to processors.
-  //     This is a two step procedure:
-  //     first you generate a distribution of processes to processors,
-  //     then you bind processes according to that distribution.
-  // Compute a distribution for number of processes to processors.
-  //    Stores the processor id's into the distribution array argument.
-  //    Returns true if it worked, false if it didn't.
-  static bool distribute_processes(uint length, uint* distribution);
   // Binds the current process to a processor.
   //    Returns true if it worked, false if it didn't.
   static bool bind_to_processor(uint processor_id);
@@ -496,7 +481,6 @@
   static void verify_stack_alignment() PRODUCT_RETURN;
 
   static bool message_box(const char* title, const char* message);
-  static char* do_you_want_to_debug(const char* message);
 
   // run cmd in a separate process and return its exit code; or -1 on failures
   static int fork_and_exec(char *cmd, bool use_vfork_if_available = false);
@@ -520,7 +504,6 @@
   static void die();
 
   // File i/o operations
-  static const int default_file_open_flags();
   static int open(const char *path, int oflag, int mode);
   static FILE* open(int fd, const char* mode);
   static FILE* fopen(const char* path, const char* mode);
@@ -668,9 +651,6 @@
   // Will not change the value of errno.
   static const char* errno_name(int e);
 
-  // Determines whether the calling process is being debugged by a user-mode debugger.
-  static bool is_debugger_attached();
-
   // wait for a key press if PauseAtExit is set
   static void wait_for_keypress_at_exit(void);
 
@@ -966,10 +946,6 @@
       return _state == SR_RUNNING;
     }
 
-    bool is_suspend_request() const {
-      return _state == SR_SUSPEND_REQUEST;
-    }
-
     bool is_suspended() const {
       return _state == SR_SUSPENDED;
     }
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -2990,28 +2990,28 @@
     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
   }
 
-  while (*s != ')') {          // Find closing right paren
-    switch (*s++) {            // Switch on signature character
-    case 'B': sig_bt[cnt++] = T_BYTE;    break;
-    case 'C': sig_bt[cnt++] = T_CHAR;    break;
-    case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
-    case 'F': sig_bt[cnt++] = T_FLOAT;   break;
-    case 'I': sig_bt[cnt++] = T_INT;     break;
-    case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
-    case 'S': sig_bt[cnt++] = T_SHORT;   break;
-    case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
-    case 'V': sig_bt[cnt++] = T_VOID;    break;
-    case 'L':                   // Oop
-      while (*s++ != ';');   // Skip signature
+  while (*s != JVM_SIGNATURE_ENDFUNC) { // Find closing right paren
+    switch (*s++) {                     // Switch on signature character
+    case JVM_SIGNATURE_BYTE:    sig_bt[cnt++] = T_BYTE;    break;
+    case JVM_SIGNATURE_CHAR:    sig_bt[cnt++] = T_CHAR;    break;
+    case JVM_SIGNATURE_DOUBLE:  sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
+    case JVM_SIGNATURE_FLOAT:   sig_bt[cnt++] = T_FLOAT;   break;
+    case JVM_SIGNATURE_INT:     sig_bt[cnt++] = T_INT;     break;
+    case JVM_SIGNATURE_LONG:    sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
+    case JVM_SIGNATURE_SHORT:   sig_bt[cnt++] = T_SHORT;   break;
+    case JVM_SIGNATURE_BOOLEAN: sig_bt[cnt++] = T_BOOLEAN; break;
+    case JVM_SIGNATURE_VOID:    sig_bt[cnt++] = T_VOID;    break;
+    case JVM_SIGNATURE_CLASS: // Oop
+      while (*s++ != JVM_SIGNATURE_ENDCLASS);   // Skip signature
       sig_bt[cnt++] = T_OBJECT;
       break;
-    case '[': {                 // Array
+    case JVM_SIGNATURE_ARRAY: { // Array
       do {                      // Skip optional size
         while (*s >= '0' && *s <= '9') s++;
-      } while (*s++ == '[');   // Nested arrays?
+      } while (*s++ == JVM_SIGNATURE_ARRAY);   // Nested arrays?
       // Skip element type
-      if (s[-1] == 'L')
-        while (*s++ != ';'); // Skip signature
+      if (s[-1] == JVM_SIGNATURE_CLASS)
+        while (*s++ != JVM_SIGNATURE_ENDCLASS); // Skip signature
       sig_bt[cnt++] = T_ARRAY;
       break;
     }
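
A worked example of the expansion above for a static method (so no receiver slot is prepended), with the two-slot types padded by T_VOID exactly as in the switch:

    // signature: "(IJ[Ljava/lang/String;D)V"
    // sig_bt:    T_INT,
    //            T_LONG,  T_VOID,   // long takes two slots
    //            T_ARRAY,           // the array reference is a single oop slot
    //            T_DOUBLE, T_VOID   // double takes two slots
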
--- a/src/hotspot/share/runtime/signature.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/signature.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -63,41 +63,41 @@
   //       compiler bug (was problem - gri 4/27/2000).
   int size = -1;
   switch(_signature->char_at(_index)) {
-    case 'B': do_byte  (); if (_parameter_index < 0 ) _return_type = T_BYTE;
-              _index++; size = T_BYTE_size   ; break;
-    case 'C': do_char  (); if (_parameter_index < 0 ) _return_type = T_CHAR;
-              _index++; size = T_CHAR_size   ; break;
-    case 'D': do_double(); if (_parameter_index < 0 ) _return_type = T_DOUBLE;
-              _index++; size = T_DOUBLE_size ; break;
-    case 'F': do_float (); if (_parameter_index < 0 ) _return_type = T_FLOAT;
-              _index++; size = T_FLOAT_size  ; break;
-    case 'I': do_int   (); if (_parameter_index < 0 ) _return_type = T_INT;
-              _index++; size = T_INT_size    ; break;
-    case 'J': do_long  (); if (_parameter_index < 0 ) _return_type = T_LONG;
-              _index++; size = T_LONG_size   ; break;
-    case 'S': do_short (); if (_parameter_index < 0 ) _return_type = T_SHORT;
-              _index++; size = T_SHORT_size  ; break;
-    case 'Z': do_bool  (); if (_parameter_index < 0 ) _return_type = T_BOOLEAN;
-              _index++; size = T_BOOLEAN_size; break;
-    case 'V': do_void  (); if (_parameter_index < 0 ) _return_type = T_VOID;
-              _index++; size = T_VOID_size;  ; break;
-    case 'L':
+    case JVM_SIGNATURE_BYTE:    do_byte(); if (_parameter_index < 0 ) _return_type = T_BYTE;
+                                  _index++; size = T_BYTE_size; break;
+    case JVM_SIGNATURE_CHAR:    do_char(); if (_parameter_index < 0 ) _return_type = T_CHAR;
+                                  _index++; size = T_CHAR_size; break;
+    case JVM_SIGNATURE_DOUBLE:  do_double(); if (_parameter_index < 0 ) _return_type = T_DOUBLE;
+                                  _index++; size = T_DOUBLE_size; break;
+    case JVM_SIGNATURE_FLOAT:   do_float(); if (_parameter_index < 0 ) _return_type = T_FLOAT;
+                                  _index++; size = T_FLOAT_size; break;
+    case JVM_SIGNATURE_INT:     do_int(); if (_parameter_index < 0 ) _return_type = T_INT;
+                                  _index++; size = T_INT_size; break;
+    case JVM_SIGNATURE_LONG:    do_long(); if (_parameter_index < 0 ) _return_type = T_LONG;
+                                  _index++; size = T_LONG_size; break;
+    case JVM_SIGNATURE_SHORT:   do_short(); if (_parameter_index < 0 ) _return_type = T_SHORT;
+                                  _index++; size = T_SHORT_size; break;
+    case JVM_SIGNATURE_BOOLEAN: do_bool(); if (_parameter_index < 0 ) _return_type = T_BOOLEAN;
+                                  _index++; size = T_BOOLEAN_size; break;
+    case JVM_SIGNATURE_VOID:    do_void(); if (_parameter_index < 0 ) _return_type = T_VOID;
+                                  _index++; size = T_VOID_size; break;
+    case JVM_SIGNATURE_CLASS:
       { int begin = ++_index;
         Symbol* sig = _signature;
-        while (sig->char_at(_index++) != ';') ;
+        while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
         do_object(begin, _index);
       }
       if (_parameter_index < 0 ) _return_type = T_OBJECT;
       size = T_OBJECT_size;
       break;
-    case '[':
+    case JVM_SIGNATURE_ARRAY:
       { int begin = ++_index;
         Symbol* sig = _signature;
-        while (sig->char_at(_index) == '[') {
+        while (sig->char_at(_index) == JVM_SIGNATURE_ARRAY) {
           _index++;
         }
-        if (sig->char_at(_index) == 'L') {
-          while (sig->char_at(_index++) != ';') ;
+        if (sig->char_at(_index) == JVM_SIGNATURE_CLASS) {
+          while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
         } else {
           _index++;
         }
@@ -128,9 +128,9 @@
   // Parse parameters
   _index = 0;
   _parameter_index = 0;
-  expect('(');
-  while (_signature->char_at(_index) != ')') _parameter_index += parse_type();
-  expect(')');
+  expect(JVM_SIGNATURE_FUNC);
+  while (_signature->char_at(_index) != JVM_SIGNATURE_ENDFUNC) _parameter_index += parse_type();
+  expect(JVM_SIGNATURE_ENDFUNC);
   _parameter_index = 0;
 }
 
@@ -202,36 +202,36 @@
 void SignatureIterator::iterate_returntype() {
   // Ignore parameters
   _index = 0;
-  expect('(');
+  expect(JVM_SIGNATURE_FUNC);
   Symbol* sig = _signature;
   // Need to skip over each type in the signature's argument list until a
   // closing ')' is found, then get the return type.  We cannot just scan
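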
   // for the first ')' because ')' is a legal character in a type name.
-  while (sig->char_at(_index) != ')') {
+  while (sig->char_at(_index) != JVM_SIGNATURE_ENDFUNC) {
     switch(sig->char_at(_index)) {
-      case 'B':
-      case 'C':
-      case 'D':
-      case 'F':
-      case 'I':
-      case 'J':
-      case 'S':
-      case 'Z':
-      case 'V':
+      case JVM_SIGNATURE_BYTE:
+      case JVM_SIGNATURE_CHAR:
+      case JVM_SIGNATURE_DOUBLE:
+      case JVM_SIGNATURE_FLOAT:
+      case JVM_SIGNATURE_INT:
+      case JVM_SIGNATURE_LONG:
+      case JVM_SIGNATURE_SHORT:
+      case JVM_SIGNATURE_BOOLEAN:
+      case JVM_SIGNATURE_VOID:
         {
           _index++;
         }
         break;
-      case 'L':
+      case JVM_SIGNATURE_CLASS:
         {
-          while (sig->char_at(_index++) != ';') ;
+          while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
         }
         break;
-      case '[':
+      case JVM_SIGNATURE_ARRAY:
         {
-          while (sig->char_at(++_index) == '[') ;
-          if (sig->char_at(_index) == 'L') {
-            while (sig->char_at(_index++) != ';') ;
+          while (sig->char_at(++_index) == JVM_SIGNATURE_ARRAY) ;
+          if (sig->char_at(_index) == JVM_SIGNATURE_CLASS) {
+            while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
           } else {
             _index++;
           }
@@ -242,7 +242,7 @@
         break;
     }
   }
-  expect(')');
+  expect(JVM_SIGNATURE_ENDFUNC);
   // Parse return type
   _parameter_index = -1;
   parse_type();
@@ -255,9 +255,9 @@
   // Parse parameters
   _parameter_index = 0;
   _index = 0;
-  expect('(');
-  while (_signature->char_at(_index) != ')') _parameter_index += parse_type();
-  expect(')');
+  expect(JVM_SIGNATURE_FUNC);
+  while (_signature->char_at(_index) != JVM_SIGNATURE_ENDFUNC) _parameter_index += parse_type();
+  expect(JVM_SIGNATURE_ENDFUNC);
   // Parse return type
   _parameter_index = -1;
   parse_type();
@@ -289,39 +289,39 @@
 
 void SignatureStream::next_non_primitive(int t) {
   switch (t) {
-    case 'L': {
+    case JVM_SIGNATURE_CLASS: {
       _type = T_OBJECT;
       Symbol* sig = _signature;
-      while (sig->char_at(_end++) != ';');
+      while (sig->char_at(_end++) != JVM_SIGNATURE_ENDCLASS);
       break;
     }
-    case '[': {
+    case JVM_SIGNATURE_ARRAY: {
       _type = T_ARRAY;
       Symbol* sig = _signature;
       char c = sig->char_at(_end);
       while ('0' <= c && c <= '9') c = sig->char_at(_end++);
-      while (sig->char_at(_end) == '[') {
+      while (sig->char_at(_end) == JVM_SIGNATURE_ARRAY) {
         _end++;
         c = sig->char_at(_end);
         while ('0' <= c && c <= '9') c = sig->char_at(_end++);
       }
       switch(sig->char_at(_end)) {
-        case 'B':
-        case 'C':
-        case 'D':
-        case 'F':
-        case 'I':
-        case 'J':
-        case 'S':
-        case 'Z':_end++; break;
+        case JVM_SIGNATURE_BYTE:
+        case JVM_SIGNATURE_CHAR:
+        case JVM_SIGNATURE_DOUBLE:
+        case JVM_SIGNATURE_FLOAT:
+        case JVM_SIGNATURE_INT:
+        case JVM_SIGNATURE_LONG:
+        case JVM_SIGNATURE_SHORT:
+        case JVM_SIGNATURE_BOOLEAN:_end++; break;
         default: {
-          while (sig->char_at(_end++) != ';');
+          while (sig->char_at(_end++) != JVM_SIGNATURE_ENDCLASS);
           break;
         }
       }
       break;
     }
-    case ')': _end++; next(); _at_return_type = true; break;
+    case JVM_SIGNATURE_ENDFUNC: _end++; next(); _at_return_type = true; break;
     default : ShouldNotReachHere();
   }
 }
@@ -341,8 +341,8 @@
   int begin = _begin;
   int end   = _end;
 
-  if (   _signature->char_at(_begin) == 'L'
-      && _signature->char_at(_end-1) == ';') {
+  if (   _signature->char_at(_begin) == JVM_SIGNATURE_CLASS
+      && _signature->char_at(_end-1) == JVM_SIGNATURE_ENDCLASS) {
     begin++;
     end--;
   }
@@ -407,8 +407,8 @@
   int begin = _begin;
   int end   = _end;
 
-  if (   _signature->char_at(_begin) == 'L'
-      && _signature->char_at(_end-1) == ';') {
+  if (   _signature->char_at(_begin) == JVM_SIGNATURE_CLASS
+      && _signature->char_at(_end-1) == JVM_SIGNATURE_ENDCLASS) {
     begin++;
     end--;
   }
@@ -436,9 +436,9 @@
   const char* method_sig = (const char*)sig->bytes();
   ssize_t len = sig->utf8_length();
   ssize_t index = 0;
-  if (method_sig != NULL && len > 1 && method_sig[index] == '(') {
+  if (method_sig != NULL && len > 1 && method_sig[index] == JVM_SIGNATURE_FUNC) {
     ++index;
-    while (index < len && method_sig[index] != ')') {
+    while (index < len && method_sig[index] != JVM_SIGNATURE_ENDFUNC) {
       ssize_t res = is_valid_type(&method_sig[index], len - index);
       if (res == -1) {
         return false;
@@ -446,7 +446,7 @@
         index += res;
       }
     }
-    if (index < len && method_sig[index] == ')') {
+    if (index < len && method_sig[index] == JVM_SIGNATURE_ENDFUNC) {
       // check the return type
       ++index;
       return (is_valid_type(&method_sig[index], len - index) == (len - index));
@@ -469,21 +469,28 @@
   ssize_t index = 0;
 
   // Iterate over any number of array dimensions
-  while (index < limit && type[index] == '[') ++index;
+  while (index < limit && type[index] == JVM_SIGNATURE_ARRAY) ++index;
   if (index >= limit) {
     return -1;
   }
   switch (type[index]) {
-    case 'B': case 'C': case 'D': case 'F': case 'I':
-    case 'J': case 'S': case 'Z': case 'V':
+    case JVM_SIGNATURE_BYTE:
+    case JVM_SIGNATURE_CHAR:
+    case JVM_SIGNATURE_DOUBLE:
+    case JVM_SIGNATURE_FLOAT:
+    case JVM_SIGNATURE_INT:
+    case JVM_SIGNATURE_LONG:
+    case JVM_SIGNATURE_SHORT:
+    case JVM_SIGNATURE_BOOLEAN:
+    case JVM_SIGNATURE_VOID:
       return index + 1;
-    case 'L':
+    case JVM_SIGNATURE_CLASS:
       for (index = index + 1; index < limit; ++index) {
         char c = type[index];
         switch (c) {
-          case ';':
+          case JVM_SIGNATURE_ENDCLASS:
             return index + 1;
-          case '\0': case '.': case '[':
+          case '\0': case JVM_SIGNATURE_DOT: case JVM_SIGNATURE_ARRAY:
             return -1;
           default: ; // fall through
         }
--- a/src/hotspot/share/runtime/signature.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/signature.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -379,15 +379,15 @@
     _begin = _end;
     int t = sig->char_at(_begin);
     switch (t) {
-      case 'B': _type = T_BYTE;    break;
-      case 'C': _type = T_CHAR;    break;
-      case 'D': _type = T_DOUBLE;  break;
-      case 'F': _type = T_FLOAT;   break;
-      case 'I': _type = T_INT;     break;
-      case 'J': _type = T_LONG;    break;
-      case 'S': _type = T_SHORT;   break;
-      case 'Z': _type = T_BOOLEAN; break;
-      case 'V': _type = T_VOID;    break;
+      case JVM_SIGNATURE_BYTE:    _type = T_BYTE;    break;
+      case JVM_SIGNATURE_CHAR:    _type = T_CHAR;    break;
+      case JVM_SIGNATURE_DOUBLE:  _type = T_DOUBLE;  break;
+      case JVM_SIGNATURE_FLOAT:   _type = T_FLOAT;   break;
+      case JVM_SIGNATURE_INT:     _type = T_INT;     break;
+      case JVM_SIGNATURE_LONG:    _type = T_LONG;    break;
+      case JVM_SIGNATURE_SHORT:   _type = T_SHORT;   break;
+      case JVM_SIGNATURE_BOOLEAN: _type = T_BOOLEAN; break;
+      case JVM_SIGNATURE_VOID:    _type = T_VOID;    break;
       default : next_non_primitive(t);
                 return;
     }
--- a/src/hotspot/share/runtime/threadSMR.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/runtime/threadSMR.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -528,6 +528,22 @@
     return;
   }
 
+  if ( _thread == VM_Exit::shutdown_thread()) {
+    // The shutdown thread has removed itself from the Threads
+    // list and is safe to have a waiver from this check because
+    // VM_Exit::_shutdown_thread is not set until after the VMThread
+    // has started the final safepoint which holds the Threads_lock
+    // for the remainder of the VM's life.
+    return;
+  }
+
+  if (VMError::is_error_reported() &&
+      VMError::get_first_error_tid() == os::current_thread_id()) {
+    // If there is an error reported by this thread it may use ThreadsList even
+    // if it's unsafe.
+    return;
+  }
+
   // The closure will attempt to verify that the calling thread can
   // be found by threads_do() on the specified ThreadsList. If it
   // is successful, then the specified ThreadsList was acquired as
@@ -540,12 +556,6 @@
   // ThreadsList is not a stable hazard ptr and can be freed by
   // another thread from the to-be-deleted list at any time.
   //
-  // Note: The shutdown thread has removed itself from the Threads
-  // list and is safe to have a waiver from this check because
-  // VM_Exit::_shutdown_thread is not set until after the VMThread
-  // has started the final safepoint which holds the Threads_lock
-  // for the remainder of the VM's life.
-  //
   VerifyHazardPtrThreadClosure cl(_thread);
   ThreadsSMRSupport::threads_do(&cl, _list);
 
@@ -555,7 +565,7 @@
   // In either case, we won't get past this point with a badly placed
   // ThreadsListHandle.
 
-  assert(cl.found() || _thread == VM_Exit::shutdown_thread(), "Acquired a ThreadsList snapshot from a thread not recognized by the Thread-SMR protocol.");
+  assert(cl.found(), "Acquired a ThreadsList snapshot from a thread not recognized by the Thread-SMR protocol.");
 #endif
 }
 
--- a/src/hotspot/share/services/diagnosticArgument.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/services/diagnosticArgument.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -151,7 +151,13 @@
       ResourceMark rm;
 
       char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+
+PRAGMA_DIAG_PUSH
+PRAGMA_STRINGOP_TRUNCATION_IGNORED
+      // This code can incorrectly cause a "stringop-truncation" warning with gcc
       strncpy(buf, str, len);
+PRAGMA_DIAG_POP
+
       buf[len] = '\0';
       Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
         "Boolean parsing error in command argument '%s'. Could not parse: %s.\n", _name, buf);
--- a/src/hotspot/share/utilities/compilerWarnings.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/utilities/compilerWarnings.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -62,4 +62,8 @@
 #define PRAGMA_FORMAT_IGNORED
 #endif
 
+#ifndef PRAGMA_STRINGOP_TRUNCATION_IGNORED
+#define PRAGMA_STRINGOP_TRUNCATION_IGNORED
+#endif
+
 #endif // SHARE_UTILITIES_COMPILERWARNINGS_HPP
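
The fallback definition keeps call sites unconditional: on compilers where the warning (or the pragma) does not exist, the macro expands to nothing, while the GCC version below narrows the suppression to the wrapped statement. Sketch of the intended usage pattern, mirroring the diagnosticArgument.cpp hunk above:

    PRAGMA_DIAG_PUSH                     // save the current diagnostic state
    PRAGMA_STRINGOP_TRUNCATION_IGNORED   // no-op unless the compiler knows -Wstringop-truncation
    strncpy(buf, str, len);              // intentionally not NUL-terminated here ...
    PRAGMA_DIAG_POP                      // restore diagnostics
    buf[len] = '\0';                     // ... the terminator is written explicitly
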
--- a/src/hotspot/share/utilities/compilerWarnings_gcc.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/utilities/compilerWarnings_gcc.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -50,6 +50,12 @@
 
 #define PRAGMA_FORMAT_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wformat")
 
+// Disable -Wstringop-truncation which is introduced in GCC 8.
+// https://gcc.gnu.org/gcc-8/changes.html
+#if !defined(__clang_major__) && (__GNUC__ >= 8)
+#define PRAGMA_STRINGOP_TRUNCATION_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wstringop-truncation")
+#endif
+
 #if defined(__clang_major__) && \
       (__clang_major__ >= 4 || \
       (__clang_major__ >= 3 && __clang_minor__ >= 1)) || \
--- a/src/hotspot/share/utilities/decoder.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/utilities/decoder.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -84,7 +84,7 @@
 }
 
 bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
-  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  bool error_handling_thread = os::current_thread_id() == VMError::get_first_error_tid();
   if (error_handling_thread) {
     return get_error_handler_instance()->decode(addr, buf, buflen, offset, modulepath, demangle);
   } else {
@@ -95,7 +95,7 @@
 }
 
 bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const void* base) {
-  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  bool error_handling_thread = os::current_thread_id() == VMError::get_first_error_tid();
   if (error_handling_thread) {
     return get_error_handler_instance()->decode(addr, buf, buflen, offset, base);
   } else {
@@ -106,7 +106,7 @@
 
 
 bool Decoder::demangle(const char* symbol, char* buf, int buflen) {
-  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  bool error_handling_thread = os::current_thread_id() == VMError::get_first_error_tid();
   if (error_handling_thread) {
     return get_error_handler_instance()->demangle(symbol, buf, buflen);
   } else {
--- a/src/hotspot/share/utilities/globalDefinitions.cpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/utilities/globalDefinitions.cpp	Tue Oct 22 20:49:40 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -178,7 +178,16 @@
 
 
 // Map BasicType to signature character
-char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0, 0, 0, 0};
+char type2char_tab[T_CONFLICT+1] = {
+  0, 0, 0, 0,
+  JVM_SIGNATURE_BOOLEAN, JVM_SIGNATURE_CHAR,
+  JVM_SIGNATURE_FLOAT,   JVM_SIGNATURE_DOUBLE,
+  JVM_SIGNATURE_BYTE,    JVM_SIGNATURE_SHORT,
+  JVM_SIGNATURE_INT,     JVM_SIGNATURE_LONG,
+  JVM_SIGNATURE_CLASS,   JVM_SIGNATURE_ARRAY,
+  JVM_SIGNATURE_VOID,    0,
+  0, 0, 0, 0
+};
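
The reshaped table is still indexed by BasicType value (hence the four zero slots ahead of T_BOOLEAN) and is what type2char() consults. Assuming the usual BasicType numbering, for example:

    // type2char(T_BOOLEAN) == JVM_SIGNATURE_BOOLEAN   // 'Z'
    // type2char(T_OBJECT)  == JVM_SIGNATURE_CLASS     // 'L'
    // type2char(T_ADDRESS) == 0                       // no descriptor character
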
 
 // Map BasicType to Java type name
 const char* type2name_tab[T_CONFLICT+1] = {
--- a/src/hotspot/share/utilities/globalDefinitions.hpp	Tue Oct 22 16:42:23 2019 -0400
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp	Tue Oct 22 20:49:40 2019 -0400
@@ -29,6 +29,9 @@
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
+// Get constants like JVM_T_CHAR and JVM_SIGNATURE_INT, before pulling in <jvm.h>.