changeset 55906:c0ea946e9316 patterns-stage-1

Merging recent default branch changes to the patterns-stage-1 branch
author jlahoda
date Tue, 30 Apr 2019 11:07:58 +0200
parents 97581c10465c 7689e1cc56fe
children 01c894e7fb2c 9bc584257b20 baed7f018b5e
files src/jdk.accessibility/windows/native/common/AccessBridgeStatusWindow.RC src/jdk.compiler/share/classes/com/sun/tools/javac/code/Flags.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Gen.java src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Pool.java src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/AbstractModuleIndexWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/AbstractPackageIndexWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/external/jquery/jquery.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-bg_glass_55_fbf9ee_1x400.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-bg_glass_65_dadada_1x400.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-bg_glass_75_dadada_1x400.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-bg_glass_75_e6e6e6_1x400.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-bg_glass_95_fef1ec_1x400.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-bg_highlight-soft_75_cccccc_1x100.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-icons_222222_256x240.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-icons_2e83ff_256x240.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-icons_454545_256x240.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-icons_888888_256x240.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/images/ui-icons_cd0a0a_256x240.png src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-3.3.1.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-migrate-3.0.1.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-ui.css src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-ui.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-ui.min.css src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-ui.min.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-ui.structure.css src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-ui.structure.min.css src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jszip-utils/dist/jszip-utils-ie.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jszip-utils/dist/jszip-utils-ie.min.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jszip-utils/dist/jszip-utils.js 
src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jszip-utils/dist/jszip-utils.min.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jszip/dist/jszip.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jszip/dist/jszip.min.js test/hotspot/jtreg/runtime/ErrorHandling/ExplicitArithmeticCheck.java test/hotspot/jtreg/runtime/Thread/MonitorCacheMaybeExpand_DeadLock.java test/hotspot/jtreg/runtime/containers/cgroup/PlainRead.java test/hotspot/jtreg/runtime/containers/docker/AttemptOOM.java test/hotspot/jtreg/runtime/containers/docker/CheckContainerized.java test/hotspot/jtreg/runtime/containers/docker/DockerBasicTest.java test/hotspot/jtreg/runtime/containers/docker/HelloDocker.java test/hotspot/jtreg/runtime/containers/docker/JfrReporter.java test/hotspot/jtreg/runtime/containers/docker/PrintContainerInfo.java test/hotspot/jtreg/runtime/containers/docker/TEST.properties test/hotspot/jtreg/runtime/containers/docker/TestCPUAwareness.java test/hotspot/jtreg/runtime/containers/docker/TestCPUSets.java test/hotspot/jtreg/runtime/containers/docker/TestJFREvents.java test/hotspot/jtreg/runtime/containers/docker/TestMemoryAwareness.java test/hotspot/jtreg/runtime/containers/docker/TestMisc.java test/hotspot/jtreg/runtime/interpreter/WideStrictInline.java test/hotspot/jtreg/serviceability/dcmd/framework/TestJavaProcess.java test/jdk/sun/security/tools/jarsigner/AlgOptions.sh test/jdk/sun/security/tools/jarsigner/PercentSign.sh test/jdk/sun/security/tools/jarsigner/certpolicy.sh test/jdk/sun/security/tools/jarsigner/checkusage.sh test/jdk/sun/security/tools/jarsigner/collator.sh test/jdk/sun/security/tools/jarsigner/concise_jarsigner.sh test/jdk/sun/security/tools/jarsigner/crl.sh test/jdk/sun/security/tools/jarsigner/default_options.sh test/jdk/sun/security/tools/jarsigner/diffend.sh test/jdk/sun/security/tools/jarsigner/ec.sh test/jdk/sun/security/tools/jarsigner/emptymanifest.sh test/jdk/sun/security/tools/jarsigner/jvindex.sh test/jdk/sun/security/tools/jarsigner/nameclash.sh test/jdk/sun/security/tools/jarsigner/newsize7.sh test/jdk/sun/security/tools/jarsigner/oldsig.sh test/jdk/sun/security/tools/jarsigner/onlymanifest.sh test/jdk/sun/security/tools/jarsigner/passtype.sh test/jdk/sun/security/tools/jarsigner/samename.sh test/jdk/sun/security/tools/jarsigner/weaksize.sh test/jdk/sun/security/tools/keytool/CloneKeyAskPassword.sh test/jdk/sun/security/tools/keytool/NoExtNPE.sh test/jdk/sun/security/tools/keytool/SecretKeyKS.sh test/jdk/sun/security/tools/keytool/StandardAlgName.sh test/jdk/sun/security/tools/keytool/StorePasswordsByShell.sh test/jdk/sun/security/tools/keytool/default_options.sh test/jdk/sun/security/tools/keytool/emptysubject.sh test/jdk/sun/security/tools/keytool/file-in-help.sh test/jdk/sun/security/tools/keytool/i18n.sh test/jdk/sun/security/tools/keytool/importreadall.sh test/jdk/sun/security/tools/keytool/keyalg.sh test/jdk/sun/security/tools/keytool/newhelp.sh test/jdk/sun/security/tools/keytool/resource.sh test/jdk/sun/security/tools/keytool/selfissued.sh test/jdk/sun/security/tools/keytool/trystore.sh
diffstat 650 files changed, 54887 insertions(+), 49056 deletions(-)
--- a/.hgtags	Mon Apr 08 13:41:48 2019 +0200
+++ b/.hgtags	Tue Apr 30 11:07:58 2019 +0200
@@ -553,3 +553,4 @@
 83cace4142c8563b6a921787db02388e1bc48d01 jdk-13+13
 46cf212cdccaf4fb064d913b12004007d3322b67 jdk-13+14
 f855ec13aa2501ae184c8b3e0626a8cec9966116 jdk-13+15
+9d0ae9508d5337b0dc7cc4684be42888c4023755 jdk-13+16
--- a/doc/testing.html	Mon Apr 08 13:41:48 2019 +0200
+++ b/doc/testing.html	Tue Apr 30 11:07:58 2019 +0200
@@ -1,19 +1,24 @@
 <!DOCTYPE html>
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml" lang="" xml:lang="">
 <head>
-  <meta charset="utf-8">
-  <meta name="generator" content="pandoc">
-  <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
+  <meta charset="utf-8" />
+  <meta name="generator" content="pandoc" />
+  <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
   <title>Testing the JDK</title>
-  <style type="text/css">code{white-space: pre;}</style>
-  <link rel="stylesheet" href="../make/data/docs-resources/resources/jdk-default.css">
+  <style type="text/css">
+      code{white-space: pre-wrap;}
+      span.smallcaps{font-variant: small-caps;}
+      span.underline{text-decoration: underline;}
+      div.column{display: inline-block; vertical-align: top; width: 50%;}
+  </style>
+  <link rel="stylesheet" href="../make/data/docs-resources/resources/jdk-default.css" />
   <!--[if lt IE 9]>
     <script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
   <![endif]-->
   <style type="text/css">pre, code, tt { color: #1d6ae5; }</style>
 </head>
 <body>
-<header>
+<header id="title-block-header">
 <h1 class="title">Testing the JDK</h1>
 </header>
 <nav id="TOC">
@@ -34,6 +39,9 @@
 <li><a href="#gtest-keywords">Gtest keywords</a></li>
 <li><a href="#microbenchmark-keywords">Microbenchmark keywords</a></li>
 </ul></li>
+<li><a href="#notes-for-specific-tests">Notes for Specific Tests</a><ul>
+<li><a href="#docker-tests">Docker Tests</a></li>
+</ul></li>
 </ul>
 </nav>
 <h2 id="using-make-test-the-run-test-framework">Using &quot;make test&quot; (the run-test framework)</h2>
@@ -180,5 +188,11 @@
 <p>Additional VM arguments to provide to forked off VMs. Same as <code>-jvmArgs &lt;args&gt;</code></p>
 <h4 id="options-2">OPTIONS</h4>
 <p>Additional arguments to send to JMH.</p>
+<h2 id="notes-for-specific-tests">Notes for Specific Tests</h2>
+<h3 id="docker-tests">Docker Tests</h3>
+<p>Docker tests with default parameters may fail on systems with glibc versions not compatible with the one used in the default docker image (e.g., Oracle Linux 7.6 for x86). For example, they pass on Ubuntu 16.04 but fail on Ubuntu 18.04 if run like this on x86:</p>
+<pre><code>$ make test TEST=&quot;jtreg:test/hotspot/jtreg/containers/docker&quot;</code></pre>
+<p>To run these tests correctly, additional parameters for the correct docker image are required on Ubuntu 18.04 by using <code>JAVA_OPTIONS</code>.</p>
+<pre><code>$ make test TEST=&quot;jtreg:test/hotspot/jtreg/containers/docker&quot; JTREG=&quot;JAVA_OPTIONS=-Djdk.test.docker.image.name=ubuntu -Djdk.test.docker.image.version=latest&quot;</code></pre>
 </body>
 </html>
--- a/doc/testing.md	Mon Apr 08 13:41:48 2019 +0200
+++ b/doc/testing.md	Tue Apr 30 11:07:58 2019 +0200
@@ -373,6 +373,21 @@
 #### OPTIONS
 Additional arguments to send to JMH.
 
+## Notes for Specific Tests
+
+### Docker Tests
+
+Docker tests with default parameters may fail on systems with glibc versions not
+compatible with the one used in the default docker image (e.g., Oracle Linux 7.6 for x86).
+For example, they pass on Ubuntu 16.04 but fail on Ubuntu 18.04 if run like this on x86:
+
+    $ make test TEST="jtreg:test/hotspot/jtreg/containers/docker"
+
+To run these tests correctly, additional parameters for the correct docker image are
+required on Ubuntu 18.04 by using `JAVA_OPTIONS`.
+
+    $ make test TEST="jtreg:test/hotspot/jtreg/containers/docker" JTREG="JAVA_OPTIONS=-Djdk.test.docker.image.name=ubuntu -Djdk.test.docker.image.version=latest"
+
 ---
 # Override some definitions in the global css file that are not optimal for
 # this document.
--- a/make/Coverage.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/Coverage.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -49,6 +49,7 @@
 	    -exclude 'java.lang.Object' \
 	    -exclude 'jdk.internal.org.objectweb.**' \
 	    -exclude jdk.test.Main -exclude '**\$Proxy*' \
+	    $(JCOV_FILTERS) \
 	    $(JCOV_TEMP)/$(JCOV_IMAGE_SUBDIR)
 	$(MV) $(JCOV_TEMP)/$(JCOV_IMAGE_SUBDIR) $(JCOV_IMAGE_DIR)
 	$(RMDIR) $(JCOV_TEMP)
--- a/make/Init.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/Init.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -279,7 +279,7 @@
         else
 	  $(ECHO) "Re-running configure using default settings"
         endif
-	( cd $(OUTPUTDIR) && PATH="$(ORIGINAL_PATH)" AUTOCONF="$(AUTOCONF)" \
+	( cd $(CONFIGURE_START_DIR) && PATH="$(ORIGINAL_PATH)" AUTOCONF="$(AUTOCONF)" \
 	    CUSTOM_ROOT="$(CUSTOM_ROOT)" \
 	    CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
 	    $(BASH) $(TOPDIR)/configure $(CONFIGURE_COMMAND_LINE) )
--- a/make/Main.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/Main.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -335,6 +335,7 @@
 bootcycle-images:
         ifneq ($(COMPILE_TYPE), cross)
 	  $(call LogWarn, Boot cycle build step 2: Building a new JDK image using previously built image)
+	  $(call MakeDir, $(OUTPUTDIR)/bootcycle-build)
 	  +$(MAKE) $(MAKE_ARGS) -f $(TOPDIR)/make/Init.gmk PARALLEL_TARGETS=$(BOOTCYCLE_TARGET) \
 	      JOBS= SPEC=$(dir $(SPEC))bootcycle-spec.gmk main
         else
--- a/make/RunTests.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/RunTests.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -1194,10 +1194,18 @@
 	$(call LogWarn, Stopping JCov Grabber...)
 	$(JAVA) -jar $(JCOV_HOME)/lib/jcov.jar GrabberManager -stop -stoptimeout 3600
 
+  JCOV_REPORT_TITLE := JDK code coverage report<br/>
+  ifneq ($(JCOV_FILTERS), )
+    JCOV_REPORT_TITLE += Code filters: $(JCOV_FILTERS)<br>
+  endif
+  JCOV_REPORT_TITLE += Tests: $(TEST)
+	
   jcov-gen-report: jcov-stop-grabber
 	$(call LogWarn, Generating JCov report ...)
 	$(JAVA) -Xmx4g -jar $(JCOV_HOME)/lib/jcov.jar RepGen -sourcepath \
 	    `$(ECHO) $(TOPDIR)/src/*/share/classes/ | $(TR) ' ' ':'` -fmt html \
+	    $(JCOV_FILTERS) \
+	    -mainReportTitle "$(JCOV_REPORT_TITLE)" \
 	    -o $(JCOV_REPORT) $(JCOV_RESULT_FILE)
 
   TARGETS += jcov-do-start-grabber jcov-start-grabber jcov-stop-grabber \
--- a/make/autoconf/basics.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/basics.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -627,7 +627,7 @@
 AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
 [
   # Save the current directory this script was started from
-  CURDIR="$PWD"
+  CONFIGURE_START_DIR="$PWD"
 
   # We might need to rewrite ORIGINAL_PATH, if it includes "#", to quote them
   # for make. We couldn't do this when we retrieved ORIGINAL_PATH, since SED
@@ -653,9 +653,10 @@
   AC_MSG_CHECKING([for top-level directory])
   AC_MSG_RESULT([$TOPDIR])
   AC_SUBST(TOPDIR)
+  AC_SUBST(CONFIGURE_START_DIR)
 
   # We can only call BASIC_FIXUP_PATH after BASIC_CHECK_PATHS_WINDOWS.
-  BASIC_FIXUP_PATH(CURDIR)
+  BASIC_FIXUP_PATH(CONFIGURE_START_DIR)
   BASIC_FIXUP_PATH(TOPDIR)
 
   # Locate the directory of this script.
@@ -868,9 +869,10 @@
 
   # Test from where we are running configure, in or outside of src root.
   AC_MSG_CHECKING([where to store configuration])
-  if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$CUSTOM_ROOT" \
-      || test "x$CURDIR" = "x$TOPDIR/make/autoconf" \
-      || test "x$CURDIR" = "x$TOPDIR/make" ; then
+  if test "x$CONFIGURE_START_DIR" = "x$TOPDIR" \
+      || test "x$CONFIGURE_START_DIR" = "x$CUSTOM_ROOT" \
+      || test "x$CONFIGURE_START_DIR" = "x$TOPDIR/make/autoconf" \
+      || test "x$CONFIGURE_START_DIR" = "x$TOPDIR/make" ; then
     # We are running configure from the src root.
     # Create a default ./build/target-variant-debuglevel output root.
     if test "x${CONF_NAME}" = x; then
@@ -881,10 +883,11 @@
     fi
 
     if test "x$CUSTOM_ROOT" != x; then
-      OUTPUTDIR="${CUSTOM_ROOT}/build/${CONF_NAME}"
+      WORKSPACE_ROOT="${CUSTOM_ROOT}"
     else
-      OUTPUTDIR="${TOPDIR}/build/${CONF_NAME}"
+      WORKSPACE_ROOT="${TOPDIR}"
     fi
+    OUTPUTDIR="${WORKSPACE_ROOT}/build/${CONF_NAME}"
     $MKDIR -p "$OUTPUTDIR"
     if test ! -d "$OUTPUTDIR"; then
       AC_MSG_ERROR([Could not create build directory $OUTPUTDIR])
@@ -895,9 +898,9 @@
     # If configuration is situated in normal build directory, just use the build
     # directory name as configuration name, otherwise use the complete path.
     if test "x${CONF_NAME}" = x; then
-      CONF_NAME=`$ECHO $CURDIR | $SED -e "s!^${TOPDIR}/build/!!"`
+      CONF_NAME=`$ECHO $CONFIGURE_START_DIR | $SED -e "s!^${TOPDIR}/build/!!"`
     fi
-    OUTPUTDIR="$CURDIR"
+    OUTPUTDIR="$CONFIGURE_START_DIR"
     AC_MSG_RESULT([in current directory])
 
     # WARNING: This might be a bad thing to do. You need to be sure you want to
@@ -917,14 +920,14 @@
               -e 's/ //g' \
           | $TR -d '\n'`
       if test "x$filtered_files" != x; then
-        AC_MSG_NOTICE([Current directory is $CURDIR.])
+        AC_MSG_NOTICE([Current directory is $CONFIGURE_START_DIR.])
         AC_MSG_NOTICE([Since this is not the source root, configure will output the configuration here])
         AC_MSG_NOTICE([(as opposed to creating a configuration in <src_root>/build/<conf-name>).])
         AC_MSG_NOTICE([However, this directory is not empty. This is not allowed, since it could])
         AC_MSG_NOTICE([seriously mess up just about everything.])
         AC_MSG_NOTICE([Try 'cd $TOPDIR' and restart configure])
         AC_MSG_NOTICE([(or create a new empty directory and cd to it).])
-        AC_MSG_ERROR([Will not continue creating configuration in $CURDIR])
+        AC_MSG_ERROR([Will not continue creating configuration in $CONFIGURE_START_DIR])
       fi
     fi
   fi
@@ -940,6 +943,7 @@
   AC_SUBST(SPEC)
   AC_SUBST(CONF_NAME)
   AC_SUBST(OUTPUTDIR)
+  AC_SUBST(WORKSPACE_ROOT)
   AC_SUBST(CONFIGURESUPPORT_OUTPUTDIR)
 
   # The spec.gmk file contains all variables for the make system.
--- a/make/autoconf/basics_windows.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/basics_windows.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -551,7 +551,7 @@
     $MKDIR -p $FIXPATH_DIR $CONFIGURESUPPORT_OUTPUTDIR/bin
     cd $FIXPATH_DIR
     $CC $FIXPATH_SRC_W -Fe$FIXPATH_BIN_W > $FIXPATH_DIR/fixpath1.log 2>&1
-    cd $CURDIR
+    cd $CONFIGURE_START_DIR
 
     if test ! -x $FIXPATH_BIN; then
       AC_MSG_RESULT([no])
@@ -574,7 +574,7 @@
     cd $FIXPATH_DIR
     $FIXPATH $CC $FIXPATH_SRC -Fe$FIXPATH_DIR/fixpath2.exe \
         > $FIXPATH_DIR/fixpath2.log 2>&1
-    cd $CURDIR
+    cd $CONFIGURE_START_DIR
     if test ! -x $FIXPATH_DIR/fixpath2.exe; then
       AC_MSG_RESULT([no])
       cat $FIXPATH_DIR/fixpath2.log
--- a/make/autoconf/flags-cflags.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/flags-cflags.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -799,15 +799,29 @@
     $1_WARNING_CFLAGS_JVM="-Wno-format-zero-length -Wtype-limits -Wuninitialized"
   fi
 
+  if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
+    # Check if compiler supports -fmacro-prefix-map. If so, use that to make
+    # the __FILE__ macro resolve to paths relative to the workspace root.
+    workspace_root_trailing_slash="${WORKSPACE_ROOT%/}/"
+    FILE_MACRO_CFLAGS="-fmacro-prefix-map=${workspace_root_trailing_slash}="
+    FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${FILE_MACRO_CFLAGS}],
+        PREFIX: $3,
+        IF_FALSE: [
+            FILE_MACRO_CFLAGS=
+        ]
+    )
+  fi
+
   # EXPORT to API
   CFLAGS_JVM_COMMON="$ALWAYS_CFLAGS_JVM $ALWAYS_DEFINES_JVM \
       $TOOLCHAIN_CFLAGS_JVM ${$1_TOOLCHAIN_CFLAGS_JVM} \
       $OS_CFLAGS $OS_CFLAGS_JVM $CFLAGS_OS_DEF_JVM $DEBUG_CFLAGS_JVM \
-      $WARNING_CFLAGS $WARNING_CFLAGS_JVM $JVM_PICFLAG"
+      $WARNING_CFLAGS $WARNING_CFLAGS_JVM $JVM_PICFLAG $FILE_MACRO_CFLAGS"
 
   CFLAGS_JDK_COMMON="$ALWAYS_CFLAGS_JDK $ALWAYS_DEFINES_JDK $TOOLCHAIN_CFLAGS_JDK \
       $OS_CFLAGS $CFLAGS_OS_DEF_JDK $DEBUG_CFLAGS_JDK $DEBUG_OPTIONS_FLAGS_JDK \
-      $WARNING_CFLAGS $WARNING_CFLAGS_JDK $DEBUG_SYMBOLS_CFLAGS_JDK"
+      $WARNING_CFLAGS $WARNING_CFLAGS_JDK $DEBUG_SYMBOLS_CFLAGS_JDK \
+      $FILE_MACRO_CFLAGS"
 
   # Use ${$2EXTRA_CFLAGS} to block EXTRA_CFLAGS to be added to build flags.
   # (Currently we don't have any OPENJDK_BUILD_EXTRA_CFLAGS, but that might
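
The hunk above makes `__FILE__` expand relative to the workspace root whenever the compiler understands `-fmacro-prefix-map` (hence the feature check; older gcc and clang releases lack the flag). A minimal sketch of the effect, assuming a gcc-style driver and an illustrative workspace path:

    // Editor's sketch, not part of the changeset. Compile as:
    //   g++ -fmacro-prefix-map=/home/me/jdk/= -c foo.cpp
    #include <cstdio>

    int main() {
      // With the flag, prints a workspace-relative path such as
      // "src/hotspot/share/foo.cpp"; without it, the absolute build path
      // leaks into the binary and differs from workspace to workspace.
      std::printf("%s\n", __FILE__);
      return 0;
    }
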
--- a/make/autoconf/flags-other.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/flags-other.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -81,21 +81,21 @@
     RC_FLAGS="$RC_FLAGS \
         -D\"JDK_VERSION_STRING=\$(VERSION_STRING)\" \
         -D\"JDK_COMPANY=\$(COMPANY_NAME)\" \
-        -D\"JDK_COMPONENT=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) binary\" \
+        -D\"JDK_FILEDESC=\$(JDK_RC_NAME) binary\" \
         -D\"JDK_VER=\$(VERSION_NUMBER)\" \
         -D\"JDK_COPYRIGHT=Copyright \xA9 $COPYRIGHT_YEAR\" \
-        -D\"JDK_NAME=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) \$(VERSION_FEATURE)\" \
+        -D\"JDK_NAME=\$(JDK_RC_NAME) \$(VERSION_FEATURE)\" \
         -D\"JDK_FVER=\$(subst .,\$(COMMA),\$(VERSION_NUMBER_FOUR_POSITIONS))\""
 
     JVM_RCFLAGS="$JVM_RCFLAGS \
-        -D\"HS_BUILD_ID=\$(VERSION_STRING)\" \
+        -D\"HS_VERSION_STRING=\$(VERSION_STRING)\" \
         -D\"HS_COMPANY=\$(COMPANY_NAME)\" \
-        -D\"JDK_DOTVER=\$(VERSION_NUMBER_FOUR_POSITIONS)\" \
+        -D\"HS_VER=\$(VERSION_NUMBER_FOUR_POSITIONS)\" \
+        -D\"HS_INTERNAL_NAME=jvm\" \
         -D\"HS_COPYRIGHT=Copyright $COPYRIGHT_YEAR\" \
+        -D\"HS_FNAME=jvm.dll\" \
         -D\"HS_NAME=\$(PRODUCT_NAME) \$(VERSION_SHORT)\" \
-        -D\"JDK_VER=\$(subst .,\$(COMMA),\$(VERSION_NUMBER_FOUR_POSITIONS))\" \
-        -D\"HS_FNAME=jvm.dll\" \
-        -D\"HS_INTERNAL_NAME=jvm\""
+        -D\"HS_FVER=\$(subst .,\$(COMMA),\$(VERSION_NUMBER_FOUR_POSITIONS))\""
   fi
   AC_SUBST(RC_FLAGS)
   AC_SUBST(JVM_RCFLAGS)
--- a/make/autoconf/jdk-options.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/jdk-options.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -404,9 +404,12 @@
       [jcov library location])])
   AC_ARG_WITH(jcov-input-jdk, [AS_HELP_STRING([--with-jcov-input-jdk],
       [jdk image to instrument])])
+  AC_ARG_WITH(jcov-filters, [AS_HELP_STRING([--with-jcov-filters],
+      [filters to limit code for jcov instrumentation and report generation])])
   JCOV_HOME=
   JCOV_INPUT_JDK=
   JCOV_ENABLED=
+  JCOV_FILTERS=
   if test "x$with_jcov" = "x" ; then
     JCOV_ENABLED="false"
   else
@@ -425,10 +428,14 @@
       fi
       BASIC_FIXUP_PATH(JCOV_INPUT_JDK)
     fi
+    if test "x$with_jcov_filters" != "x" ; then
+      JCOV_FILTERS="$with_jcov_filters"
+    fi
   fi
   AC_SUBST(JCOV_ENABLED)
   AC_SUBST(JCOV_HOME)
   AC_SUBST(JCOV_INPUT_JDK)
+  AC_SUBST(JCOV_FILTERS)
 ])
 
 ###############################################################################
--- a/make/autoconf/jdk-version.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/jdk-version.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -66,9 +66,52 @@
   AC_SUBST(PRODUCT_SUFFIX)
   AC_SUBST(JDK_RC_PLATFORM_NAME)
   AC_SUBST(HOTSPOT_VM_DISTRO)
+
+  # Set the MACOSX Bundle Name base
+  AC_ARG_WITH(macosx-bundle-name-base, [AS_HELP_STRING([--with-macosx-bundle-name-base],
+      [Set the MacOSX Bundle Name base. This is the base name for calculating MacOSX Bundle Names.
+      @<:@not specified@:>@])])
+  if test "x$with_macosx_bundle_name_base" = xyes; then
+    AC_MSG_ERROR([--with-macosx-bundle-name-base must have a value])
+  elif [ ! [[ $with_macosx_bundle_name_base =~ ^[[:print:]]*$ ]] ]; then
+    AC_MSG_ERROR([--with-macosx-bundle-name-base contains non-printing characters: $with_macosx_bundle_name_base])
+  elif test "x$with_macosx_bundle_name_base" != x; then
+    # Set MACOSX_BUNDLE_NAME_BASE to the configured value.
+    MACOSX_BUNDLE_NAME_BASE="$with_macosx_bundle_name_base"
+  fi
   AC_SUBST(MACOSX_BUNDLE_NAME_BASE)
+
+  # Set the MACOSX Bundle ID base
+  AC_ARG_WITH(macosx-bundle-id-base, [AS_HELP_STRING([--with-macosx-bundle-id-base],
+      [Set the MacOSX Bundle ID base. This is the base ID for calculating MacOSX Bundle IDs.
+      @<:@not specified@:>@])])
+  if test "x$with_macosx_bundle_id_base" = xyes; then
+    AC_MSG_ERROR([--with-macosx-bundle-id-base must have a value])
+  elif [ ! [[ $with_macosx_bundle_id_base =~ ^[[:print:]]*$ ]] ]; then
+    AC_MSG_ERROR([--with-macosx-bundle-id-base contains non-printing characters: $with_macosx_bundle_id_base])
+  elif test "x$with_macosx_bundle_id_base" != x; then
+    # Set MACOSX_BUNDLE_ID_BASE to the configured value.
+    MACOSX_BUNDLE_ID_BASE="$with_macosx_bundle_id_base"
+  fi
   AC_SUBST(MACOSX_BUNDLE_ID_BASE)
 
+  # Set the JDK RC name
+  AC_ARG_WITH(jdk-rc-name, [AS_HELP_STRING([--with-jdk-rc-name],
+      [Set JDK RC name. This is used for FileDescription and ProductName properties
+       of MS Windows binaries. @<:@not specified@:>@])])
+  if test "x$with_jdk_rc_name" = xyes; then
+    AC_MSG_ERROR([--with-jdk-rc-name must have a value])
+  elif [ ! [[ $with_jdk_rc_name =~ ^[[:print:]]*$ ]] ]; then
+    AC_MSG_ERROR([--with-jdk-rc-name contains non-printing characters: $with_jdk_rc_name])
+  elif test "x$with_jdk_rc_name" != x; then
+    # Set JDK_RC_NAME to a custom value if '--with-jdk-rc-name' was used and is not empty.
+    JDK_RC_NAME="$with_jdk_rc_name"
+  else
+    # Otherwise calculate from "version-numbers" included above.
+    JDK_RC_NAME="$PRODUCT_NAME $JDK_RC_PLATFORM_NAME"
+  fi
+  AC_SUBST(JDK_RC_NAME)
+
   # The vendor name, if any
   AC_ARG_WITH(vendor-name, [AS_HELP_STRING([--with-vendor-name],
       [Set vendor name. Among others, used to set the 'java.vendor'
--- a/make/autoconf/spec.gmk.in	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/spec.gmk.in	Tue Apr 30 11:07:58 2019 +0200
@@ -32,6 +32,8 @@
 
 # The command line given to configure.
 CONFIGURE_COMMAND_LINE:=@CONFIGURE_COMMAND_LINE@
+# The current directory when configure was run
+CONFIGURE_START_DIR:=@CONFIGURE_START_DIR@
 
 # A self-referential reference to this file.
 SPEC:=@SPEC@
@@ -138,8 +140,9 @@
 
 # The top-level directory of the source repository
 TOPDIR:=@TOPDIR@
-
-
+# Usually the top level directory, but could be something else if a custom
+# root is defined.
+WORKSPACE_ROOT:=@WORKSPACE_ROOT@
 IMPORT_MODULES_CLASSES:=@IMPORT_MODULES_CLASSES@
 IMPORT_MODULES_CMDS:=@IMPORT_MODULES_CMDS@
 IMPORT_MODULES_LIBS:=@IMPORT_MODULES_LIBS@
@@ -156,6 +159,7 @@
 PRODUCT_NAME:=@PRODUCT_NAME@
 PRODUCT_SUFFIX:=@PRODUCT_SUFFIX@
 JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
+JDK_RC_NAME:=@JDK_RC_NAME@
 COMPANY_NAME:=@COMPANY_NAME@
 HOTSPOT_VM_DISTRO:=@HOTSPOT_VM_DISTRO@
 MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
@@ -385,6 +389,7 @@
 JCOV_ENABLED=@JCOV_ENABLED@
 JCOV_HOME=@JCOV_HOME@
 JCOV_INPUT_JDK=@JCOV_INPUT_JDK@
+JCOV_FILTERS=@JCOV_FILTERS@
 
 # AddressSanitizer
 export ASAN_ENABLED:=@ASAN_ENABLED@
--- a/make/autoconf/toolchain_windows.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/autoconf/toolchain_windows.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -472,7 +472,7 @@
       # Change directory so we don't need to mess with Windows paths in redirects.
       cd $VS_ENV_TMP_DIR
       $CMD /c extract-vs-env.bat | $CAT
-      cd $CURDIR
+      cd $CONFIGURE_START_DIR
 
       if test ! -s $VS_ENV_TMP_DIR/set-vs-env.sh; then
         AC_MSG_NOTICE([Could not succesfully extract the environment variables needed for the VS setup.])
--- a/make/common/NativeCompilation.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/common/NativeCompilation.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -236,8 +236,10 @@
     # This is the definite source file to use for $1_FILENAME.
     $1_SRC_FILE := $$($1_FILE)
 
-    ifneq ($$($1_DISABLE_THIS_FILE_DEFINE), true)
-      $1_THIS_FILE = -DTHIS_FILE='"$$($1_FILENAME)"'
+    ifneq ($$($1_DEFINE_THIS_FILE), false)
+      ifneq ($$($$($1_BASE)_DEFINE_THIS_FILE), false)
+        $1_THIS_FILE = -DTHIS_FILE='"$$($1_FILENAME)"'
+      endif
     endif
 
     ifeq ($$($1_OPTIMIZATION), )
@@ -370,6 +372,7 @@
 	  $(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \
 	  $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \
 	      | $(SORT) -u >> $$($1_DEPS_FILE) ; \
+	  $(ECHO) >> $$($1_DEPS_FILE) ; \
 	  $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
         endif
   endif
@@ -426,6 +429,7 @@
 #   STRIPFLAGS Optionally change the flags given to the strip command
 #   PRECOMPILED_HEADER Header file to use as precompiled header
 #   PRECOMPILED_HEADER_EXCLUDE List of source files that should not use PCH
+#   DEFINE_THIS_FILE Set to false to not set the THIS_FILE preprocessor macro
 #
 # After being called, some variables are exported from this macro, all prefixed
 # with parameter 1 followed by a '_':
@@ -703,7 +707,7 @@
             FILE := $$($1_GENERATED_PCH_SRC), \
             BASE := $1, \
             EXTRA_CXXFLAGS := -Fp$$($1_PCH_FILE) -Yc$$(notdir $$($1_PRECOMPILED_HEADER)), \
-            DISABLE_THIS_FILE_DEFINE := true, \
+            DEFINE_THIS_FILE := false, \
         ))
 
         $1_USE_PCH_FLAGS := \
@@ -840,6 +844,7 @@
 		$(ECHO) $$($1_RES): \\ > $$($1_RES_DEPS_FILE) ; \
 		$(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEPS_FILE).obj.log \
 		    >> $$($1_RES_DEPS_FILE) ; \
+		$(ECHO) >> $$($1_RES_DEPS_FILE) ;\
 		$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEPS_FILE) \
 		    > $$($1_RES_DEPS_TARGETS_FILE)
     endif
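
The rename from `DISABLE_THIS_FILE_DEFINE := true` to `DEFINE_THIS_FILE := false` also lets the flag be set once per `SetupNativeCompilation` call (the `$1_BASE` lookup) instead of per file. A hedged sketch of how a command-line define like `-DTHIS_FILE='"foo.c"'` is typically consumed; the fallback macro is an assumption for illustration, not code from this changeset:

    // Editor's illustration only.
    #include <cstdio>

    #ifndef THIS_FILE
      #define THIS_FILE __FILE__  // assumed fallback when DEFINE_THIS_FILE is false
    #endif

    static void report(int line) {
      // A basename-only THIS_FILE keeps build paths out of diagnostics.
      std::fprintf(stderr, "at %s:%d\n", THIS_FILE, line);
    }

    int main() { report(__LINE__); return 0; }
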
--- a/make/hotspot/gensrc/GensrcAdlc.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -76,6 +76,7 @@
       DEBUG_SYMBOLS := false, \
       DISABLED_WARNINGS_clang := tautological-compare, \
       DISABLED_WARNINGS_solstudio := notemsource, \
+      DEFINE_THIS_FILE := false, \
   ))
 
   ADLC_TOOL := $(BUILD_ADLC_TARGET)
--- a/make/hotspot/gensrc/GensrcDtrace.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/hotspot/gensrc/GensrcDtrace.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -80,6 +80,7 @@
         EXTRA_DEPS := $(JVMTI_H) $(JFR_FILES), \
         OBJECT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/dtrace-gen-offsets/objs, \
         OUTPUT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/dtrace-gen-offsets, \
+        DEFINE_THIS_FILE := false, \
     ))
 
     DTRACE_GEN_OFFSETS_TOOL := $(BUILD_DTRACE_GEN_OFFSETS_TARGET)
--- a/make/hotspot/lib/CompileDtraceLibraries.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/hotspot/lib/CompileDtraceLibraries.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -42,6 +42,7 @@
         LDFLAGS := -m64 -mt -xnolib $(SHARED_LIBRARY_FLAGS), \
         LIBS := $(LIBDL) -lthread -ldoor, \
         OBJECT_DIR := $(LIBJVM_DTRACE_OUTPUTDIR)/objs, \
+        DEFINE_THIS_FILE := false, \
     ))
 
     # Note that libjvm_db.c has tests for COMPILER2, but this was never set by
@@ -54,6 +55,7 @@
         CFLAGS := -I$(DTRACE_GENSRC_DIR) $(JNI_INCLUDE_FLAGS) -m64 -G -mt -KPIC -xldscope=hidden, \
         LDFLAGS := -m64 -mt -xnolib $(SHARED_LIBRARY_FLAGS), \
         OBJECT_DIR := $(LIBJVM_DB_OUTPUTDIR)/objs, \
+        DEFINE_THIS_FILE := false, \
     ))
 
     TARGETS += $(BUILD_LIBJVM_DTRACE) $(BUILD_LIBJVM_DB)
--- a/make/hotspot/lib/CompileGtest.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/hotspot/lib/CompileGtest.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -92,6 +92,7 @@
     STRIP_SYMBOLS := false, \
     PRECOMPILED_HEADER := $(JVM_PRECOMPILED_HEADER), \
     PRECOMPILED_HEADER_EXCLUDE := gtest-all.cc gtestMain.cpp, \
+    DEFINE_THIS_FILE := false, \
 ))
 
 TARGETS += $(BUILD_GTEST_LIBJVM)
@@ -115,6 +116,7 @@
     LIBS_windows := $(JVM_OUTPUTDIR)/gtest/objs/jvm.lib, \
     COPY_DEBUG_SYMBOLS := $(GTEST_COPY_DEBUG_SYMBOLS), \
     ZIP_EXTERNAL_DEBUG_SYMBOLS := false, \
+    DEFINE_THIS_FILE := false, \
 ))
 
 $(BUILD_GTEST_LAUNCHER): $(BUILD_GTEST_LIBJVM)
--- a/make/hotspot/lib/CompileJvm.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/hotspot/lib/CompileJvm.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -202,6 +202,7 @@
     VERSIONINFO_RESOURCE := $(TOPDIR)/src/hotspot/os/windows/version.rc, \
     PRECOMPILED_HEADER := $(JVM_PRECOMPILED_HEADER), \
     PRECOMPILED_HEADER_EXCLUDE := $(JVM_PRECOMPILED_HEADER_EXCLUDE), \
+    DEFINE_THIS_FILE := false, \
 ))
 
 # Always recompile vm_version.cpp if libjvm needs to be relinked. This ensures
--- a/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,13 +67,24 @@
               .forEach(System.out::println);
 
         // Common concatenation patterns
-        String const_I = "string" + args.length;
-        String const_S = "string" + String.valueOf(args.length);
-        String S_const = String.valueOf(args.length) + "string";
-        String S_S     = String.valueOf(args.length) + String.valueOf(args.length);
-        String const_J = "string" + System.currentTimeMillis();
-        String I_const = args.length + "string";
-        String J_const = System.currentTimeMillis() + "string";
+        String SS     = String.valueOf(args.length) + String.valueOf(args.length);
+        String CS     = "string" + String.valueOf(args.length);
+        String SC     = String.valueOf(args.length) + "string";
+        String SCS    = String.valueOf(args.length) + "string" + String.valueOf(args.length);
+        String CSS    = "string" + String.valueOf(args.length) + String.valueOf(args.length);
+        String CSCS   = "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length);
+        String SCSC   = String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string";
+        String CSCSC  = "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string";
+        String SCSCS  = String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length);
+        String CI     = "string" + args.length;
+        String IC     = args.length + "string";
+        String CIC    = "string" + args.length + "string";
+        String CICI   = "string" + args.length + "string" + args.length;
+        String CJ     = "string" + System.currentTimeMillis();
+        String JC     = System.currentTimeMillis() + "string";
+        String CJC    = "string" + System.currentTimeMillis() + "string";
+        String CJCJ   = "string" + System.currentTimeMillis() + "string" + System.currentTimeMillis();
+        String CJCJC  = "string" + System.currentTimeMillis() + "string" + System.currentTimeMillis() + "string";
 
         String newDate = DateTimeFormatter.ISO_LOCAL_DATE_TIME.format(
                 LocalDateTime.now(ZoneId.of("GMT")));
--- a/make/launcher/Launcher-java.base.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/launcher/Launcher-java.base.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -39,7 +39,6 @@
 # overwritten.
 $(eval $(call SetupBuildLauncher, java, \
     CFLAGS := -DEXPAND_CLASSPATH_WILDCARDS -DENABLE_ARG_FILES, \
-    LDFLAGS_solaris := -R$(OPENWIN_HOME)/lib$(OPENJDK_TARGET_CPU_ISADIR), \
     EXTRA_RC_FLAGS := $(JAVA_RC_FLAGS), \
     VERSION_INFO_RESOURCE := $(JAVA_VERSION_INFO_RESOURCE), \
     OUTPUT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/java_objs, \
--- a/make/launcher/Launcher-jdk.accessibility.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/launcher/Launcher-jdk.accessibility.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@
       DISABLED_WARNINGS_microsoft := 4267 4996, \
       LDFLAGS := $(LDFLAGS_JDKEXE), \
       LIBS := advapi32.lib version.lib user32.lib, \
-      VERSIONINFO_RESOURCE := $(ACCESSBRIDGE_SRC)/AccessBridgeStatusWindow.RC, \
+      VERSIONINFO_RESOURCE := $(ACCESSBRIDGE_SRC)/AccessBridgeStatusWindow.rc, \
       MANIFEST := $(JABSWITCH_SRC)/jabswitch.manifest, \
       MANIFEST_VERSION := $(VERSION_NUMBER_FOUR_POSITIONS), \
   ))
--- a/make/launcher/LauncherCommon.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/launcher/LauncherCommon.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -73,8 +73,7 @@
 #     compile time defines exceeding Visual Studio 2013 limitations.
 # CFLAGS   Additional CFLAGS
 # CFLAGS_windows   Additional CFLAGS_windows
-# LDFLAGS_solaris Additional LDFLAGS_solaris
-# RC_FLAGS   Additional RC_FLAGS
+# EXTRA_RC_FLAGS   Additional EXTRA_RC_FLAGS
 # MACOSX_SIGNED   On macosx, sign this binary
 # OPTIMIZATION   Override default optimization level (LOW)
 # OUTPUT_DIR   Override default output directory
@@ -139,7 +138,7 @@
       NAME := $1, \
       EXTRA_FILES := $(LAUNCHER_SRC)/main.c, \
       OPTIMIZATION := $$($1_OPTIMIZATION), \
-      CFLAGS := $$(CFLAGS_JDKEXE) $$($1_CFLAGS) \
+      CFLAGS := $$(CFLAGS_JDKEXE) \
           $(LAUNCHER_CFLAGS) \
           $(VERSION_CFLAGS) \
           -DLAUNCHER_NAME='"$(LAUNCHER_NAME)"' \
--- a/make/test/JtregNativeJdk.gmk	Mon Apr 08 13:41:48 2019 +0200
+++ b/make/test/JtregNativeJdk.gmk	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,7 @@
   BUILD_JDK_JTREG_LIBRARIES_LIBS_libstringPlatformChars := $(WIN_LIB_JAVA)
   WIN_LIB_JLI := $(SUPPORT_OUTPUTDIR)/native/java.base/libjli/jli.lib
   BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeJliLaunchTest := $(WIN_LIB_JLI)
+  BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeCallerAccessTest := jvm.lib
 else
   BUILD_JDK_JTREG_LIBRARIES_LIBS_libstringPlatformChars := -ljava
   BUILD_JDK_JTREG_LIBRARIES_LIBS_libDirectIO := -ljava
@@ -70,6 +71,7 @@
     BUILD_JDK_JTREG_LIBRARIES_LIBS_libInheritedChannel := -ljava -lsocket -lnsl
   endif
   BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeJliLaunchTest := -ljli
+  BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeCallerAccessTest := -ljvm
 endif
 
 ifeq ($(call isTargetOs, macosx), true)
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Tue Apr 30 11:07:58 2019 +0200
@@ -3932,7 +3932,8 @@
 
 operand immL_bitmask()
 %{
-  predicate(((n->get_long() & 0xc000000000000000l) == 0)
+  predicate((n->get_long() != 0)
+            && ((n->get_long() & 0xc000000000000000l) == 0)
             && is_power_of_2(n->get_long() + 1));
   match(ConL);
 
@@ -3943,7 +3944,8 @@
 
 operand immI_bitmask()
 %{
-  predicate(((n->get_int() & 0xc0000000) == 0)
+  predicate((n->get_int() != 0)
+            && ((n->get_int() & 0xc0000000) == 0)
             && is_power_of_2(n->get_int() + 1));
   match(ConI);
 
@@ -11432,11 +11434,13 @@
 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
   match(Set dst (AndI (URShiftI src rshift) mask));
+  // Make sure we are not going to exceed what ubfxw can do.
+  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
   ins_cost(INSN_COST);
   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant;
+    int rshift = $rshift$$constant & 31;
     long mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfxw(as_Register($dst$$reg),
@@ -11447,13 +11451,15 @@
 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
 %{
   match(Set dst (AndL (URShiftL src rshift) mask));
+  // Make sure we are not going to exceed what ubfx can do.
+  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
 
   ins_cost(INSN_COST);
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant;
+    int rshift = $rshift$$constant & 63;
     long mask = $mask$$constant;
-    int width = exact_log2(mask+1);
+    int width = exact_log2_long(mask+1);
     __ ubfx(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
   %}
@@ -11465,11 +11471,13 @@
 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
+  // Make sure we are not going to exceed what ubfxw can do.
+  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
   ins_cost(INSN_COST * 2);
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant;
+    int rshift = $rshift$$constant & 31;
     long mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfx(as_Register($dst$$reg),
@@ -11510,7 +11518,7 @@
   ins_encode %{
     int lshift = $lshift$$constant;
     long mask = $mask$$constant;
-    int width = exact_log2(mask+1);
+    int width = exact_log2_long(mask+1);
     __ ubfiz(as_Register($dst$$reg),
           as_Register($src$$reg), lshift, width);
   %}
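
The predicates added above guard one invariant from two sides: a zero mask satisfies `is_power_of_2(mask + 1)` yet would encode a zero-width field, and a wide mask combined with a large right shift would ask ubfx/ubfxw to read bits past the register (`exact_log2_long` being the 64-bit-safe variant for long masks). A self-contained sketch of the fit check, with the register widths and the shift masking (`& 31` / `& 63`) assumed from the aarch64 encoding:

    // Editor's sketch of the invariant; not HotSpot code.
    #include <cassert>
    #include <cstdint>

    static bool ubfx_operands_ok(uint64_t mask, int rshift, int reg_bits) {
      if (mask == 0) return false;                 // width would be 0
      if ((mask & (mask + 1)) != 0) return false;  // not of the form 2^k - 1
      int width = 64 - __builtin_clzll(mask);      // == exact_log2(mask + 1)
      rshift &= reg_bits - 1;                      // shifts wrap at register size
      return width + rshift <= reg_bits;           // field must fit the register
    }

    int main() {
      assert( ubfx_operands_ok(0xFFu,   8, 32));   // extract bits [8, 15]
      assert(!ubfx_operands_ok(0x0u,    0, 32));   // zero mask: now rejected
      assert(!ubfx_operands_ok(0xFFFFu, 24, 32));  // 16 + 24 > 32: now rejected
      return 0;
    }
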
--- a/src/hotspot/cpu/aarch64/aarch64_ad.m4	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64_ad.m4	Tue Apr 30 11:07:58 2019 +0200
@@ -181,31 +181,35 @@
 `instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
 %{
   match(Set dst (And$1 ($2$1 src rshift) mask));
+  // Make sure we are not going to exceed what $3 can do.
+  predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));
 
   ins_cost(INSN_COST);
   format %{ "$3 $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant;
+    int rshift = $rshift$$constant & $4;
     long mask = $mask$$constant;
-    int width = exact_log2(mask+1);
+    int width = exact_log2$6(mask+1);
     __ $3(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
   %}
   ins_pipe(ialu_reg_shift);
 %}')
-BFX_INSN(I,URShift,ubfxw)
-BFX_INSN(L,URShift,ubfx)
+BFX_INSN(I, URShift, ubfxw, 31, int)
+BFX_INSN(L, URShift, ubfx,  63, long, _long)
 
 // We can use ubfx when extending an And with a mask when we know mask
 // is positive.  We know that because immI_bitmask guarantees it.
 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
+  // Make sure we are not going to exceed what ubfxw can do.
+  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
   ins_cost(INSN_COST * 2);
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
-    int rshift = $rshift$$constant;
+    int rshift = $rshift$$constant & 31;
     long mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfx(as_Register($dst$$reg),
@@ -228,7 +232,7 @@
   ins_encode %{
     int lshift = $lshift$$constant;
     long mask = $mask$$constant;
-    int width = exact_log2(mask+1);
+    int width = exact_log2$5(mask+1);
     __ $2(as_Register($dst$$reg),
           as_Register($src$$reg), lshift, width);
   %}
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1211,8 +1211,8 @@
       /* The size bit is in bit 30, not 31 */
       sz = (operand_size)(sz == word ? 0b00:0b01);
     }
-    f(sz, 31, 30), f(0b001000, 29, 24), f(1, 23), f(a, 22), f(1, 21);
-    rf(Rs, 16), f(r, 15), f(0b11111, 14, 10), rf(Rn, 5), rf(Rt, 0);
+    f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21);
+    zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0);
   }
 
   // CAS
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -34,6 +34,7 @@
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
+#include "code/compiledIC.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
@@ -2063,11 +2064,10 @@
   int start = __ offset();
 
   __ relocate(static_stub_Relocation::spec(call_pc));
-  __ mov_metadata(rmethod, (Metadata*)NULL);
-  __ movptr(rscratch1, 0);
-  __ br(rscratch1);
-
-  assert(__ offset() - start <= call_stub_size(), "stub too big");
+  __ emit_static_call_stub();
+
+  assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
+        <= call_stub_size(), "stub too big");
   __ end_a_stub();
 }
 
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -69,7 +69,9 @@
   void deoptimize_trap(CodeEmitInfo *info);
 
   enum {
-    _call_stub_size = 12 * NativeInstruction::instruction_size,
+    // call stub: CompiledStaticCall::to_interp_stub_size() +
+    //            CompiledStaticCall::to_trampoline_stub_size()
+    _call_stub_size = 13 * NativeInstruction::instruction_size,
     _call_aot_stub_size = 0,
     _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
     _deopt_handler_size = 7 * NativeInstruction::instruction_size
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -61,14 +61,14 @@
   // Don't create a Metadata reloc if we're generating immutable PIC.
   if (cbuf.immutable_PIC()) {
     __ movptr(rmethod, 0);
-  } else {
-    __ mov_metadata(rmethod, (Metadata*)NULL);
+    __ movptr(rscratch1, 0);
+    __ br(rscratch1);
+
+  } else
+#endif
+  {
+    __ emit_static_call_stub();
   }
-#else
-  __ mov_metadata(rmethod, (Metadata*)NULL);
-#endif
-  __ movptr(rscratch1, 0);
-  __ br(rscratch1);
 
   assert((__ offset() - offset) <= (int)to_interp_stub_size(), "stub too big");
   __ end_a_stub();
@@ -77,7 +77,8 @@
 #undef __
 
 int CompiledStaticCall::to_interp_stub_size() {
-  return 7 * NativeInstruction::instruction_size;
+  // isb; movk; movz; movz; movk; movz; movz; br
+  return 8 * NativeInstruction::instruction_size;
 }
 
 int CompiledStaticCall::to_trampoline_stub_size() {
@@ -159,7 +160,8 @@
   }
 
   // Creation also verifies the object.
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeMovConstReg* method_holder
+    = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
 #ifndef PRODUCT
   NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
 
@@ -184,7 +186,8 @@
   assert(stub != NULL, "stub not found");
   assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
   // Creation also verifies the object.
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeMovConstReg* method_holder
+    = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
   method_holder->set_data(0);
 }
 
@@ -201,8 +204,9 @@
   address stub = find_stub(false /* is_aot */);
   assert(stub != NULL, "no stub found for static call");
   // Creation also verifies the object.
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
-  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+  NativeMovConstReg* method_holder
+    = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
+  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
 
   // Verify state.
   assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
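
Two size constants change in step with the new stub layout: the to-interp stub grows from 7 to 8 instructions (the added `isb`), and C1's `_call_stub_size` of 13 instructions must now also cover the trampoline stub. The arithmetic, with the trampoline size assumed from the aarch64 port (4-byte instructions, 20-byte trampoline):

    // Editor's sketch of the size bookkeeping behind the new asserts.
    #include <cassert>

    int main() {
      const int insn = 4;                        // NativeInstruction::instruction_size
      const int to_interp_stub     = 8 * insn;   // isb; mov_metadata (3); movptr (3); br
      const int to_trampoline_stub = 5 * insn;   // assumed: 20 bytes on aarch64
      assert(to_interp_stub + to_trampoline_stub == 13 * insn);  // _call_stub_size
      return 0;
    }
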
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -559,7 +559,7 @@
 
   // validate constantPoolCache*
   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
-  if (cp == NULL || !cp->is_metaspace_object()) return false;
+  if (MetaspaceObj::is_valid(cp) == false) return false;
 
   // validate locals
 
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -418,7 +418,7 @@
 };
 
 
-IRT_ENTRY(address,
+JRT_ENTRY(address,
           InterpreterRuntime::slow_signature_handler(JavaThread* thread,
                                                      Method* method,
                                                      intptr_t* from,
@@ -435,4 +435,4 @@
 
   // return result handler
   return Interpreter::result_handler(m->result_type());
-IRT_END
+JRT_END
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -812,6 +812,18 @@
   return stub_start_addr;
 }
 
+void MacroAssembler::emit_static_call_stub() {
+  // CompiledDirectStaticCall::set_to_interpreted knows the
+  // exact layout of this stub.
+
+  isb();
+  mov_metadata(rmethod, (Metadata*)NULL);
+
+  // Jump to the entry point of the i2c stub.
+  movptr(rscratch1, 0);
+  br(rscratch1);
+}
+
 void MacroAssembler::c2bool(Register x) {
   // implements x == 0 ? 0 : 1
   // note: must only look at least-significant byte of x
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -607,6 +607,7 @@
   static int patch_narrow_klass(address insn_addr, narrowKlass n);
 
   address emit_trampoline_stub(int insts_call_instruction_offset, address target);
+  void emit_static_call_stub();
 
   // The following 4 methods return the offset of the appropriate move instruction
 
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -232,7 +232,11 @@
 //-------------------------------------------------------------------
 
 void NativeMovConstReg::verify() {
-  // make sure code pattern is actually mov reg64, imm64 instructions
+  if (! (nativeInstruction_at(instruction_address())->is_movz() ||
+        is_adrp_at(instruction_address()) ||
+        is_ldr_literal_at(instruction_address())) ) {
+    fatal("should be MOVZ or ADRP or LDR (literal)");
+  }
 }
 
 
--- a/src/hotspot/cpu/arm/frame_arm.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/arm/frame_arm.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -494,7 +494,7 @@
 
   // validate ConstantPoolCache*
   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
-  if (cp == NULL || !cp->is_metaspace_object()) return false;
+  if (MetaspaceObj::is_valid(cp) == false) return false;
 
   // validate locals
 
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -369,9 +369,9 @@
   }
 };
 
-IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, Method* method, intptr_t* from, intptr_t* to))
+JRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, Method* method, intptr_t* from, intptr_t* to))
   methodHandle m(thread, (Method*)method);
   assert(m->is_native(), "sanity check");
   SlowSignatureHandler(m, (address)from, to).iterate(UCONST64(-1));
   return Interpreter::result_handler(m->result_type());
-IRT_END
+JRT_END
--- a/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -148,15 +148,15 @@
 
 
 // Access function to get the signature.
-IRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
+JRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
   methodHandle m(thread, method);
   assert(m->is_native(), "sanity check");
   Symbol *s = m->signature();
   return (address) s->base();
-IRT_END
+JRT_END
 
-IRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
+JRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
   methodHandle m(thread, method);
   assert(m->is_native(), "sanity check");
   return AbstractInterpreter::result_handler(m->result_type());
-IRT_END
+JRT_END
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -381,6 +381,27 @@
   }
 }
 
+void VM_Version::print_platform_virtualization_info(outputStream* st) {
+  const char* info_file = "/proc/ppc64/lparcfg";
+  const char* kw[] = { "system_type=", // qemu indicates PowerKVM
+                       "partition_entitled_capacity=", // entitled processor capacity percentage
+                       "partition_max_entitled_capacity=",
+                       "capacity_weight=", // partition CPU weight
+                       "partition_active_processors=",
+                       "partition_potential_processors=",
+                       "entitled_proc_capacity_available=",
+                       "capped=", // 0 - uncapped, 1 - vcpus capped at entitled processor capacity percentage
+                       "shared_processor_mode=", // (non)dedicated partition
+                       "system_potential_processors=",
+                       "pool=", // CPU-pool number
+                       "pool_capacity=",
+                       "NumLpars=", // on non-KVM machines, NumLpars is not found for full partition mode machines
+                       NULL };
+  if (!print_matching_lines_from_file(info_file, st, kw)) {
+    st->print_cr("  <%s Not Available>", info_file);
+  }
+}
+
 bool VM_Version::use_biased_locking() {
 #if INCLUDE_RTM_OPT
   // RTM locking is most useful when there is high lock contention and
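
`print_platform_virtualization_info` leans on `print_matching_lines_from_file`, which is defined elsewhere in HotSpot and not shown in this hunk. A hedged sketch of the behavior the new code relies on: scan `/proc/ppc64/lparcfg`, echo only lines starting with one of the keywords in the NULL-terminated array, and return false when the file cannot be read (triggering the "<... Not Available>" fallback):

    // Editor's sketch of the assumed helper behavior; not the HotSpot source.
    #include <cstdio>
    #include <cstring>

    static bool print_matching_lines(const char* path, const char* const kw[]) {
      FILE* f = std::fopen(path, "r");
      if (f == nullptr) return false;  // caller prints the "Not Available" note
      char line[512];
      while (std::fgets(line, sizeof(line), f) != nullptr) {
        for (int i = 0; kw[i] != nullptr; i++) {
          if (std::strncmp(line, kw[i], std::strlen(kw[i])) == 0) {
            std::fputs(line, stdout);
            break;
          }
        }
      }
      std::fclose(f);
      return true;
    }

    int main() {
      const char* kw[] = { "system_type=", "capped=", NULL };
      if (!print_matching_lines("/proc/ppc64/lparcfg", kw))
        std::puts("  </proc/ppc64/lparcfg Not Available>");
      return 0;
    }
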
--- a/src/hotspot/cpu/ppc/vm_version_ppc.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -89,6 +89,9 @@
   static void initialize();
 
   // Override Abstract_VM_Version implementation
+  static void print_platform_virtualization_info(outputStream*);
+
+  // Override Abstract_VM_Version implementation
   static bool use_biased_locking();
 
   static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
--- a/src/hotspot/cpu/s390/interpreterRT_s390.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/s390/interpreterRT_s390.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -151,15 +151,15 @@
 
 void SignatureHandlerLibrary::pd_set_handler(address handler) {}
 
-IRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
+JRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
   methodHandle m(thread, method);
   assert(m->is_native(), "sanity check");
   Symbol *s = m->signature();
   return (address) s->base();
-IRT_END
+JRT_END
 
-IRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
+JRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
   methodHandle m(thread, method);
   assert(m->is_native(), "sanity check");
   return AbstractInterpreter::result_handler(m->result_type());
-IRT_END
+JRT_END
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -4355,12 +4355,9 @@
 // Emitter does not KILL cnt and base arguments, since they need to be copied to
 // work registers anyway.
 // Actually, only r0, r1, and r5 are killed.
-unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
-  // Src_addr is evenReg.
-  // Src_len is odd_Reg.
+unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg) {
 
   int      block_start = offset();
-  Register tmp_reg  = src_len; // Holds target instr addr for EX.
   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
 
@@ -4369,7 +4366,7 @@
   BLOCK_COMMENT("Clear_Array {");
 
   // Check for zero len and convert to long.
-  z_ltgfr(src_len, cnt_arg);      // Remember casted value for doSTG case.
+  z_ltgfr(odd_tmp_reg, cnt_arg);
   z_bre(done);                    // Nothing to do if len == 0.
 
   // Prefetch data to be cleared.
@@ -4378,16 +4375,17 @@
     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
   }
 
-  z_sllg(dst_len, src_len, 3);    // #bytes to clear.
-  z_cghi(src_len, 32);            // Check for len <= 256 bytes (<=32 DW).
-  z_brnh(doXC);                   // If so, use executed XC to clear.
+  z_sllg(dst_len, odd_tmp_reg, 3); // #bytes to clear.
+  z_cghi(odd_tmp_reg, 32);         // Check for len <= 256 bytes (<=32 DW).
+  z_brnh(doXC);                    // If so, use executed XC to clear.
 
   // MVCLE: initialize long arrays (general case).
   bind(doMVCLE);
   z_lgr(dst_addr, base_pointer_arg);
-  clear_reg(src_len, true, false); // Src len of MVCLE is zero.
-
-  MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
+  // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
+  // The even register of the register pair is not killed.
+  clear_reg(odd_tmp_reg, true, false);
+  MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding()-1), 0);
   z_bru(done);
 
   // XC: initialize short arrays.
@@ -4396,12 +4394,12 @@
     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
 
   bind(doXC);
-    add2reg(dst_len, -1);             // Get #bytes-1 for EXECUTE.
+    add2reg(dst_len, -1);               // Get #bytes-1 for EXECUTE.
     if (VM_Version::has_ExecuteExtensions()) {
-      z_exrl(dst_len, XC_template);   // Execute XC with var. len.
+      z_exrl(dst_len, XC_template);     // Execute XC with var. len.
     } else {
-      z_larl(tmp_reg, XC_template);
-      z_ex(dst_len,0,Z_R0,tmp_reg);   // Execute XC with var. len.
+      z_larl(odd_tmp_reg, XC_template);
+      z_ex(dst_len,0,Z_R0,odd_tmp_reg); // Execute XC with var. len.
     }
     // z_bru(done);      // fallthru
 
@@ -4463,7 +4461,7 @@
 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
 // Emitter does not KILL cnt and base arguments, since they need to be copied to
 // work registers anyway.
-// Actually, only r0, r1, r4, and r5 (which are work registers) are killed.
+// Actually, only r0 and r1 (which are work registers) and odd_tmp_reg are killed.
 //
 // For very large arrays, exploit MVCLE H/W support.
 // MVCLE instruction automatically exploits H/W-optimized page mover.
@@ -4471,9 +4469,7 @@
 // - All full pages are cleared with the page mover H/W assist.
 // - Remaining bytes are again cleared by a series of XC to self.
 //
-unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
-  // Src_addr is evenReg.
-  // Src_len is odd_Reg.
+unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg) {
 
   int      block_start = offset();
   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
@@ -4486,11 +4482,10 @@
 
   // Prepare other args to MVCLE.
   z_lgr(dst_addr, base_pointer_arg);
-  // Indicate unused result.
-  (void) clear_reg(src_len, true, false);  // Src len of MVCLE is zero.
-
-  // Clear.
-  MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
+  // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
+  // The even register of the register pair is not killed.
+  (void) clear_reg(odd_tmp_reg, true, false);  // Src len of MVCLE is zero.
+  MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding() - 1), 0);
   BLOCK_COMMENT("} Clear_Array_Const_Big");
 
   int block_end = offset();
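
The Clear_Array rewrites above rest on the s390 MVCLE convention: each
MVCLE operand is an even/odd register pair, address in the even
register and length in the odd one. Constraining the register
allocator to hand out only the odd register lets the emitter derive
the even partner, which is why one temp argument disappears from both
signatures. The derivation, as used twice above:

    // Given the odd register of an even/odd MVCLE pair, compute the
    // even partner (mirrors the expression in the emitters).
    static Register even_partner(Register odd_reg) {
      assert((odd_reg->encoding() & 1) == 1, "must be an odd register");
      return as_Register(odd_reg->encoding() - 1);
    }
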
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -828,9 +828,9 @@
   //--------------------------
   //---  Operations on arrays.
   //--------------------------
-  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len);
+  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg);
   unsigned int Clear_Array_Const(long cnt, Register base);
-  unsigned int Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len);
+  unsigned int Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg);
   unsigned int CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                              Register cnt_reg,
                                              Register tmp1_reg, Register tmp2_reg);
--- a/src/hotspot/cpu/s390/s390.ad	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/s390/s390.ad	Tue Apr 30 11:07:58 2019 +0200
@@ -474,6 +474,19 @@
 /*Z_R15_H,Z_R15*/   // SP
 );
 
+// z_long_reg without even registers
+reg_class z_long_odd_reg(
+/*Z_R0_H,Z_R0*/     // R0
+/*Z_R1_H,Z_R1*/
+  Z_R3_H,Z_R3,
+  Z_R5_H,Z_R5,
+  Z_R7_H,Z_R7,
+  Z_R9_H,Z_R9,
+  Z_R11_H,Z_R11,
+  Z_R13_H,Z_R13
+/*Z_R14_H,Z_R14,*/  // return_pc
+/*Z_R15_H,Z_R15*/   // SP
+);
 
 // Special Class for Condition Code Flags Register
 
@@ -3378,6 +3391,7 @@
   match(RegL);
   match(revenRegL);
   match(roddRegL);
+  match(allRoddRegL);
   match(rarg1RegL);
   match(rarg5RegL);
   format %{ %}
@@ -3400,6 +3414,14 @@
   interface(REG_INTER);
 %}
 
+// available odd registers for iRegL
+operand allRoddRegL() %{
+  constraint(ALLOC_IN_RC(z_long_odd_reg));
+  match(iRegL);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand rarg1RegL() %{
   constraint(ALLOC_IN_RC(z_rarg1_long_reg));
   match(iRegL);
@@ -9899,23 +9921,23 @@
   ins_pipe(pipe_class_dummy);
 %}
 
-instruct inlineCallClearArrayConstBig(immL cnt, iRegP_N2P base, Universe dummy, revenRegL srcA, roddRegL srcL, flagsReg cr) %{
+instruct inlineCallClearArrayConstBig(immL cnt, iRegP_N2P base, Universe dummy, allRoddRegL tmpL, flagsReg cr) %{
   match(Set dummy (ClearArray cnt base));
-  effect(TEMP srcA, TEMP srcL, KILL cr); // R0, R1 are killed, too.
+  effect(TEMP tmpL, KILL cr); // R0, R1 are killed, too.
   ins_cost(200);
   // TODO: s390 port size(VARIABLE_SIZE);       // Variable in size due to optimized constant loader.
   format %{ "ClearArrayConstBig $cnt,$base" %}
-  ins_encode %{ __ Clear_Array_Const_Big($cnt$$constant, $base$$Register, $srcA$$Register, $srcL$$Register); %}
-  ins_pipe(pipe_class_dummy);
-%}
-
-instruct inlineCallClearArray(iRegL cnt, iRegP_N2P base, Universe dummy, revenRegL srcA, roddRegL srcL, flagsReg cr) %{
+  ins_encode %{ __ Clear_Array_Const_Big($cnt$$constant, $base$$Register, $tmpL$$Register); %}
+  ins_pipe(pipe_class_dummy);
+%}
+
+instruct inlineCallClearArray(iRegL cnt, iRegP_N2P base, Universe dummy, allRoddRegL tmpL, flagsReg cr) %{
   match(Set dummy (ClearArray cnt base));
-  effect(TEMP srcA, TEMP srcL, KILL cr); // R0, R1 are killed, too.
+  effect(TEMP tmpL, KILL cr); // R0, R1 are killed, too.
   ins_cost(300);
   // TODO: s390 port size(FIXED_SIZE);  // z/Architecture: emitted code depends on PreferLAoverADD being on/off.
   format %{ "ClearArrayVar $cnt,$base" %}
-  ins_encode %{ __ Clear_Array($cnt$$Register, $base$$Register, $srcA$$Register, $srcL$$Register); %}
+  ins_encode %{ __ Clear_Array($cnt$$Register, $base$$Register, $tmpL$$Register); %}
   ins_pipe(pipe_class_dummy);
 %}
 
--- a/src/hotspot/cpu/s390/vm_version_s390.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/s390/vm_version_s390.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -516,6 +516,19 @@
   }
 }
 
+void VM_Version::print_platform_virtualization_info(outputStream* st) {
+  // /proc/sysinfo contains interesting information about
+  // - LPAR
+  // - whole "Box" (CPUs )
+  // - z/VM / KVM (VM<nn>); this is not available in an LPAR-only setup
+  const char* kw[] = { "LPAR", "CPUs", "VM", NULL };
+  const char* info_file = "/proc/sysinfo";
+
+  if (!print_matching_lines_from_file(info_file, st, kw)) {
+    st->print_cr("  <%s Not Available>", info_file);
+  }
+}
+
 void VM_Version::print_features() {
   print_features_internal("Version:");
 }
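
For illustration, lines a z/VM guest's /proc/sysinfo might contain
that the keyword filter above would echo (hypothetical values):

    LPAR Number:           17
    LPAR CPUs Total:       8
    VM00 Name:             MYGUEST
    VM00 CPUs Total:       4
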
--- a/src/hotspot/cpu/s390/vm_version_s390.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/s390/vm_version_s390.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -346,6 +346,9 @@
   static void print_features();
   static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
 
+  // Override Abstract_VM_Version implementation
+  static void print_platform_virtualization_info(outputStream*);
+
   // CPU feature query functions
   static const char* get_model_string()       { return _model_string; }
   static bool has_StoreFacilityListExtended() { return  (_features[0] & StoreFacilityListExtendedMask) == StoreFacilityListExtendedMask; }
--- a/src/hotspot/cpu/sparc/frame_sparc.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/sparc/frame_sparc.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -665,7 +665,7 @@
 
   // validate ConstantPoolCache*
   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
-  if (cp == NULL || !cp->is_metaspace_object()) return false;
+  if (MetaspaceObj::is_valid(cp) == false) return false;
 
   // validate locals
 
--- a/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,7 +191,7 @@
 };
 
 
-IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(
+JRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(
                                                     JavaThread* thread,
                                                     Method* method,
                                                     intptr_t* from,
@@ -204,4 +204,4 @@
   SlowSignatureHandler(m, (address)from, m->is_static() ? to+2 : to+1, to).iterate((uint64_t)CONST64(-1));
   // return result handler
   return Interpreter::result_handler(m->result_type());
-IRT_END
+JRT_END
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -3099,7 +3099,7 @@
     }
     return;
   }
-  if (UseAddressNop && VM_Version::is_amd()) {
+  if (UseAddressNop && VM_Version::is_amd_family()) {
     //
     // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
     //  1: 0x90
--- a/src/hotspot/cpu/x86/frame_x86.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/frame_x86.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -546,7 +546,7 @@
 
   // validate ConstantPoolCache*
   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
-  if (cp == NULL || !cp->is_metaspace_object()) return false;
+  if (MetaspaceObj::is_valid(cp) == false) return false;
 
   // validate locals
 
--- a/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -147,11 +147,11 @@
   }
 };
 
-IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, Method* method, intptr_t* from, intptr_t* to))
+JRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, Method* method, intptr_t* from, intptr_t* to))
   methodHandle m(thread, (Method*)method);
   assert(m->is_native(), "sanity check");
   // handle arguments
   SlowSignatureHandler(m, (address)from, to + 1).iterate((uint64_t)CONST64(-1));
   // return result handler
   return Interpreter::result_handler(m->result_type());
-IRT_END
+JRT_END
--- a/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -496,7 +496,7 @@
 #endif
 
 
-IRT_ENTRY(address,
+JRT_ENTRY(address,
           InterpreterRuntime::slow_signature_handler(JavaThread* thread,
                                                      Method* method,
                                                      intptr_t* from,
@@ -509,4 +509,4 @@
 
   // return result handler
   return Interpreter::result_handler(m->result_type());
-IRT_END
+JRT_END
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -2968,9 +2968,8 @@
 
     __ enter();
     __ subptr(rsp, 8 * wordSize);
-    if (multi_block) {
-      __ push(limit);
-    }
+    handleSOERegisters(true /*saving*/);
+
     __ movptr(buf, buf_param);
     __ movptr(state, state_param);
     if (multi_block) {
@@ -2981,9 +2980,7 @@
     __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
       buf, state, ofs, limit, rsp, multi_block);
 
-    if (multi_block) {
-      __ pop(limit);
-    }
+    handleSOERegisters(false /*restoring*/);
     __ addptr(rsp, 8 * wordSize);
     __ leave();
     __ ret(0);
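
handleSOERegisters() replaces the ad-hoc push/pop of 'limit' so that
all save-on-entry registers the SHA-1 stub touches are preserved, not
just one. Presumably it is the helper already used by other stubs in
this file; a hypothetical shape, assuming it brackets rsi/rdi/rbx:

    // Hypothetical sketch: save or restore the save-on-entry registers.
    static void handleSOERegisters(bool saving) {
      if (saving) { __ push(rsi); __ push(rdi); __ push(rbx); }
      else        { __ pop(rbx);  __ pop(rdi);  __ pop(rsi);  }
    }
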
--- a/src/hotspot/cpu/x86/vm_version_ext_x86.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/vm_version_ext_x86.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,10 +43,10 @@
    CPU_FAMILY_PENTIUM_4  = 0xF
 } FamilyFlag;
 
- typedef enum {
-    RDTSCP_FLAG  = 0x08000000, // bit 27
-    INTEL64_FLAG = 0x20000000  // bit 29
-  } _featureExtendedEdxFlag;
+typedef enum {
+  RDTSCP_FLAG  = 0x08000000, // bit 27
+  INTEL64_FLAG = 0x20000000  // bit 29
+} _featureExtendedEdxFlag;
 
 #define CPUID_STANDARD_FN   0x0
 #define CPUID_STANDARD_FN_1 0x1
@@ -340,6 +340,10 @@
     return !is_amd_Barcelona();
   }
 
+  if (is_hygon()) {
+    return true;
+  }
+
   return false;
 }
 
@@ -399,13 +403,20 @@
 const char* VM_Version_Ext::cpu_family_description(void) {
   int cpu_family_id = extended_cpu_family();
   if (is_amd()) {
-    return _family_id_amd[cpu_family_id];
+    if (cpu_family_id < ExtendedFamilyIdLength_AMD) {
+      return _family_id_amd[cpu_family_id];
+    }
   }
   if (is_intel()) {
     if (cpu_family_id == CPU_FAMILY_PENTIUMPRO) {
       return cpu_model_description();
     }
-    return _family_id_intel[cpu_family_id];
+    if (cpu_family_id < ExtendedFamilyIdLength_INTEL) {
+      return _family_id_intel[cpu_family_id];
+    }
+  }
+  if (is_hygon()) {
+    return "Dhyana";
   }
   return "Unknown x86";
 }
@@ -423,6 +434,9 @@
   } else if (is_amd()) {
     cpu_type = "AMD";
     x64 = cpu_is_em64t() ? " AMD64" : "";
+  } else if (is_hygon()) {
+    cpu_type = "Hygon";
+    x64 = cpu_is_em64t() ? " AMD64" : "";
   } else {
     cpu_type = "Unknown x86";
     x64 = cpu_is_em64t() ? " x86_64" : "";
@@ -694,7 +708,7 @@
   return _max_qualified_cpu_frequency;
 }
 
-const char* const VM_Version_Ext::_family_id_intel[] = {
+const char* const VM_Version_Ext::_family_id_intel[ExtendedFamilyIdLength_INTEL] = {
   "8086/8088",
   "",
   "286",
@@ -713,7 +727,7 @@
   "Pentium 4"
 };
 
-const char* const VM_Version_Ext::_family_id_amd[] = {
+const char* const VM_Version_Ext::_family_id_amd[ExtendedFamilyIdLength_AMD] = {
   "",
   "",
   "",
@@ -731,6 +745,13 @@
   "",
   "Opteron/Athlon64",
   "Opteron QC/Phenom"  // Barcelona et.al.
+  "",
+  "",
+  "",
+  "",
+  "",
+  "",
+  "Zen"
 };
 // Partially from Intel 64 and IA-32 Architecture Software Developer's Manual,
 // September 2013, Vol 3C Table 35-1
--- a/src/hotspot/cpu/x86/vm_version_ext_x86.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/vm_version_ext_x86.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -29,14 +29,20 @@
 #include "vm_version_x86.hpp"
 
 class VM_Version_Ext : public VM_Version {
+
+  enum {
+    ExtendedFamilyIdLength_INTEL = 16,
+    ExtendedFamilyIdLength_AMD   = 24
+  };
+
  private:
   static const size_t      VENDOR_LENGTH;
   static const size_t      CPU_EBS_MAX_LENGTH;
   static const size_t      CPU_TYPE_DESC_BUF_SIZE;
   static const size_t      CPU_DETAILED_DESC_BUF_SIZE;
 
-  static const char* const _family_id_intel[];
-  static const char* const _family_id_amd[];
+  static const char* const _family_id_intel[ExtendedFamilyIdLength_INTEL];
+  static const char* const _family_id_amd[ExtendedFamilyIdLength_AMD];
   static const char* const _brand_id[];
   static const char* const _model_id_pentium_pro[];
 
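
The explicit table lengths close an out-of-bounds read. Extended
family is computed as the base family plus the extended family id, so
the guarded lookups work out as:

    // AMD Zen:      0xF + 0x8 = 0x17 = 23 -> last index of the
    //               24-entry AMD table ("Zen")
    // Hygon Dhyana: 0xF + 0x9 = 0x18 = 24 -> one past the end; the
    //               bounds check fails and is_hygon() answers instead
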
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -680,7 +680,7 @@
     _features &= ~CPU_HT;
   }
 
-  if( is_intel() ) { // Intel cpus specific settings
+  if (is_intel()) { // Intel cpus specific settings
     if (is_knights_family()) {
       _features &= ~CPU_VZEROUPPER;
     }
@@ -781,7 +781,7 @@
           FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
         }
       } else {
-        if(supports_sse4_1()) {
+        if (supports_sse4_1()) {
           if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
             FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
           }
@@ -1001,7 +1001,7 @@
   } else if (UseAVX == 1 || UseAVX == 2) {
     // 32 bytes vectors (in YMM) are only supported with AVX+
     max_vector_size = 32;
-  } else if (UseAVX > 2 ) {
+  } else if (UseAVX > 2) {
     // 64 bytes vectors (in ZMM) are only supported with AVX 3
     max_vector_size = 64;
   }
@@ -1165,38 +1165,38 @@
     }
   }
 
-  if( is_amd() ) { // AMD cpus specific settings
-    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
+  if (is_amd_family()) { // AMD cpus specific settings
+    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
       // Use it on new AMD cpus starting from Opteron.
       UseAddressNop = true;
     }
-    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
+    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
       // Use it on new AMD cpus starting from Opteron.
       UseNewLongLShift = true;
     }
-    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
+    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
       if (supports_sse4a()) {
         UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
       } else {
         UseXmmLoadAndClearUpper = false;
       }
     }
-    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
-      if( supports_sse4a() ) {
+    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
+      if (supports_sse4a()) {
         UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
       } else {
         UseXmmRegToRegMoveAll = false;
       }
     }
-    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
-      if( supports_sse4a() ) {
+    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
+      if (supports_sse4a()) {
         UseXmmI2F = true;
       } else {
         UseXmmI2F = false;
       }
     }
-    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
-      if( supports_sse4a() ) {
+    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
+      if (supports_sse4a()) {
         UseXmmI2D = true;
       } else {
         UseXmmI2D = false;
@@ -1214,7 +1214,7 @@
     }
 
     // some defaults for AMD family 15h
-    if ( cpu_family() == 0x15 ) {
+    if (cpu_family() == 0x15) {
       // On family 15h processors default is no sw prefetch
       if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
         FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
@@ -1239,8 +1239,8 @@
     }
 #endif // COMPILER2
 
-    // Some defaults for AMD family 17h
-    if ( cpu_family() == 0x17 ) {
+    // Some defaults for AMD family 17h || Hygon family 18h
+    if (cpu_family() == 0x17 || cpu_family() == 0x18) {
       // On family 17h processors use XMM and UnalignedLoadStores for Array Copy
       if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
         FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
@@ -1256,29 +1256,29 @@
     }
   }
 
-  if( is_intel() ) { // Intel cpus specific settings
-    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
+  if (is_intel()) { // Intel cpus specific settings
+    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
       UseStoreImmI16 = false; // don't use it on Intel cpus
     }
-    if( cpu_family() == 6 || cpu_family() == 15 ) {
-      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
+    if (cpu_family() == 6 || cpu_family() == 15) {
+      if (FLAG_IS_DEFAULT(UseAddressNop)) {
         // Use it on all Intel cpus starting from PentiumPro
         UseAddressNop = true;
       }
     }
-    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
+    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
       UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
     }
-    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
-      if( supports_sse3() ) {
+    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
+      if (supports_sse3()) {
         UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
       } else {
         UseXmmRegToRegMoveAll = false;
       }
     }
-    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
+    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
 #ifdef COMPILER2
-      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
+      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
         // For new Intel cpus do the next optimization:
         // don't align the beginning of a loop if there are enough instructions
         // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
@@ -1324,7 +1324,7 @@
         FLAG_SET_DEFAULT(UseIncDec, false);
       }
     }
-    if(FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
+    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
     }
   }
@@ -1573,6 +1573,65 @@
 #endif // !PRODUCT
 }
 
+void VM_Version::print_platform_virtualization_info(outputStream* st) {
+  VirtualizationType vrt = VM_Version::get_detected_virtualization();
+  if (vrt == XenHVM) {
+    st->print_cr("Xen hardware-assisted virtualization detected");
+  } else if (vrt == KVM) {
+    st->print_cr("KVM virtualization detected");
+  } else if (vrt == VMWare) {
+    st->print_cr("VMWare virtualization detected");
+  } else if (vrt == HyperV) {
+    st->print_cr("HyperV virtualization detected");
+  }
+}
+
+void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
+// TODO support 32 bit
+#if defined(_LP64)
+#if defined(_MSC_VER)
+  // Allocate space for the code
+  const int code_size = 100;
+  ResourceMark rm;
+  CodeBuffer cb("detect_virt", code_size, 0);
+  MacroAssembler* a = new MacroAssembler(&cb);
+  address code = a->pc();
+  void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;
+
+  a->movq(r9, rbx); // save nonvolatile register
+
+  // next line would not work on 32-bit
+  a->movq(rax, c_rarg0 /* rcx */);
+  a->movq(r8, c_rarg1 /* rdx */);
+  a->cpuid();
+  a->movl(Address(r8,  0), rax);
+  a->movl(Address(r8,  4), rbx);
+  a->movl(Address(r8,  8), rcx);
+  a->movl(Address(r8, 12), rdx);
+
+  a->movq(rbx, r9); // restore nonvolatile register
+  a->ret(0);
+
+  uint32_t *code_end = (uint32_t *)a->pc();
+  a->flush();
+
+  // execute code
+  (*test)(idx, regs);
+#elif defined(__GNUC__)
+  __asm__ volatile (
+     "        cpuid;"
+     "        mov %%eax,(%1);"
+     "        mov %%ebx,4(%1);"
+     "        mov %%ecx,8(%1);"
+     "        mov %%edx,12(%1);"
+     : "+a" (idx)
+     : "S" (regs)
+     : "ebx", "ecx", "edx", "memory" );
+#endif
+#endif
+}
+
+
 bool VM_Version::use_biased_locking() {
 #if INCLUDE_RTM_OPT
   // RTM locking is most useful when there is high lock contention and
@@ -1594,6 +1653,54 @@
   return UseBiasedLocking;
 }
 
+// On Xen, the cpuid instruction returns
+//  eax / registers[0]: Version of Xen
+//  ebx / registers[1]: chars 'XenV'
+//  ecx / registers[2]: chars 'MMXe'
+//  edx / registers[3]: chars 'nVMM'
+//
+// On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
+//  ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
+//  ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
+//  edx / registers[3]: chars 'M'    / 'ware' / 't Hv'
+//
+// more information:
+// https://kb.vmware.com/s/article/1009458
+//
+void VM_Version::check_virtualizations() {
+#if defined(_LP64)
+  uint32_t registers[4];
+  char signature[13];
+  uint32_t base;
+  signature[12] = '\0';
+  memset((void*)registers, 0, 4*sizeof(uint32_t));
+
+  for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+    check_virt_cpuid(base, registers);
+
+    *(uint32_t *)(signature + 0) = registers[1];
+    *(uint32_t *)(signature + 4) = registers[2];
+    *(uint32_t *)(signature + 8) = registers[3];
+
+    if (strncmp("VMwareVMware", signature, 12) == 0) {
+      Abstract_VM_Version::_detected_virtualization = VMWare;
+    }
+
+    if (strncmp("Microsoft Hv", signature, 12) == 0) {
+      Abstract_VM_Version::_detected_virtualization = HyperV;
+    }
+
+    if (strncmp("KVMKVMKVM", signature, 9) == 0) {
+      Abstract_VM_Version::_detected_virtualization = KVM;
+    }
+
+    if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
+      Abstract_VM_Version::_detected_virtualization = XenHVM;
+    }
+  }
+#endif
+}
+
 void VM_Version::initialize() {
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
@@ -1608,4 +1715,7 @@
                                      g.generate_get_cpu_info());
 
   get_processor_features();
+  if (cpu_family() > 4) { // it supports CPUID
+    check_virtualizations();
+  }
 }
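
The probe can be reproduced outside the VM with the compiler's cpuid
intrinsics. A standalone sketch, not part of the patch (__cpuid here
is the <cpuid.h> macro, which does not validate the leaf the way
__get_cpuid would):

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      unsigned int eax, ebx, ecx, edx;
      char sig[13] = {0};
      __cpuid(0x40000000, eax, ebx, ecx, edx);  // hypervisor vendor leaf
      memcpy(sig + 0, &ebx, 4);
      memcpy(sig + 4, &ecx, 4);
      memcpy(sig + 8, &edx, 4);
      printf("hypervisor signature: '%s'\n", sig);  // e.g. "KVMKVMKVM"
      return 0;
    }
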
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -495,13 +495,13 @@
       result |= CPU_CX8;
     if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
       result |= CPU_CMOV;
-    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd() &&
+    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
         _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
       result |= CPU_FXSR;
     // HT flag is set for multi-core processors also.
     if (threads_per_core() > 1)
       result |= CPU_HT;
-    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd() &&
+    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
         _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
       result |= CPU_MMX;
     if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
@@ -553,7 +553,7 @@
           result |= CPU_VNNI;
       }
     }
-    if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
+    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
       result |= CPU_BMI1;
     if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
       result |= CPU_TSC;
@@ -567,17 +567,17 @@
       result |= CPU_CLMUL;
     if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
       result |= CPU_RTM;
-    if(_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
+    if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
        result |= CPU_ADX;
-    if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
+    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
       result |= CPU_BMI2;
     if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
       result |= CPU_SHA;
     if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
       result |= CPU_FMA;
 
-    // AMD features.
-    if (is_amd()) {
+    // AMD|Hygon features.
+    if (is_amd_family()) {
       if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
           (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
         result |= CPU_3DNOW_PREFETCH;
@@ -587,8 +587,8 @@
         result |= CPU_SSE4A;
     }
     // Intel features.
-    if(is_intel()) {
-      if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
+    if (is_intel()) {
+      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
         result |= CPU_LZCNT;
       // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
       if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
@@ -686,6 +686,9 @@
   static void initialize();
 
   // Override Abstract_VM_Version implementation
+  static void print_platform_virtualization_info(outputStream*);
+
+  // Override Abstract_VM_Version implementation
   static bool use_biased_locking();
 
   // Asserts
@@ -711,6 +714,8 @@
   static int  cpu_family()        { return _cpu;}
   static bool is_P6()             { return cpu_family() >= 6; }
   static bool is_amd()            { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
+  static bool is_hygon()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x6F677948; } // 'ogyH'
+  static bool is_amd_family()     { return is_amd() || is_hygon(); }
   static bool is_intel()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
   static bool is_zx()             { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS  '
   static bool is_atom_family()    { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
@@ -734,7 +739,7 @@
       if (!supports_topology || result == 0) {
         result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
       }
-    } else if (is_amd()) {
+    } else if (is_amd_family()) {
       result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
     } else if (is_zx()) {
       bool supports_topology = supports_processor_topology();
@@ -770,7 +775,7 @@
     intx result = 0;
     if (is_intel()) {
       result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
-    } else if (is_amd()) {
+    } else if (is_amd_family()) {
       result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
     } else if (is_zx()) {
       result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
@@ -857,7 +862,7 @@
 
   // AMD features
   static bool supports_3dnow_prefetch()    { return (_features & CPU_3DNOW_PREFETCH) != 0; }
-  static bool supports_mmx_ext()  { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; }
+  static bool supports_mmx_ext()  { return is_amd_family() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; }
   static bool supports_lzcnt()    { return (_features & CPU_LZCNT) != 0; }
   static bool supports_sse4a()    { return (_features & CPU_SSE4A) != 0; }
 
@@ -870,8 +875,8 @@
   }
   static bool supports_tscinv() {
     return supports_tscinv_bit() &&
-           ( (is_amd() && !is_amd_Barcelona()) ||
-             is_intel_tsc_synched_at_init() );
+      ((is_amd_family() && !is_amd_Barcelona()) ||
+        is_intel_tsc_synched_at_init());
   }
 
   // Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
@@ -896,7 +901,7 @@
     // Core      - 256 / prefetchnta
     // It will be used only when AllocatePrefetchStyle > 0
 
-    if (is_amd()) { // AMD
+    if (is_amd_family()) { // AMD | Hygon
       if (supports_sse2()) {
         return 256; // Opteron
       } else {
@@ -930,6 +935,11 @@
   // that can be used for efficient implementation of
   // the intrinsic for java.lang.Thread.onSpinWait()
   static bool supports_on_spin_wait() { return supports_sse2(); }
+
+  // support functions for virtualization detection
+ private:
+  static void check_virt_cpuid(uint32_t idx, uint32_t *regs);
+  static void check_virtualizations();
 };
 
 #endif // CPU_X86_VM_VERSION_X86_HPP
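
The 'ogyH' comment follows the file's existing convention: the
constant is the first four bytes of the CPUID vendor string read as a
little-endian 32-bit value. A quick standalone check (illustrative
only); Hygon parts report the vendor string "HygonGenuine":

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      uint32_t v;
      memcpy(&v, "HygonGenuine", 4);  // 'H' 'y' 'g' 'o'
      printf("0x%08X\n", v);          // 0x6F677948 on little-endian
      return 0;
    }
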
--- a/src/hotspot/cpu/x86/x86_32.ad	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/x86/x86_32.ad	Tue Apr 30 11:07:58 2019 +0200
@@ -1309,7 +1309,7 @@
 }
 
 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
-  return implementation( NULL, ra_, true, NULL );
+  return MachNode::size(ra_);
 }
 
 
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -371,14 +371,12 @@
   intptr_t result[4 - LogBytesPerWord];
   ffi_call(handler->cif(), (void (*)()) function, result, arguments);
 
-  // Change the thread state back to _thread_in_Java.
+  // Change the thread state back to _thread_in_Java and ensure it
+  // is seen by the GC thread.
   // ThreadStateTransition::transition_from_native() cannot be used
   // here because it does not check for asynchronous exceptions.
   // We have to manage the transition ourself.
-  thread->set_thread_state(_thread_in_native_trans);
-
-  // Make sure new state is visible in the GC thread
-  InterfaceSupport::serialize_thread_state(thread);
+  thread->set_thread_state_fence(_thread_in_native_trans);
 
   // Handle safepoint operations, pending suspend requests,
   // and pending asynchronous exceptions.
@@ -701,11 +699,11 @@
   return stack->sp() + argument_slots;
 }
 
-IRT_ENTRY(void, CppInterpreter::throw_exception(JavaThread* thread,
+JRT_ENTRY(void, CppInterpreter::throw_exception(JavaThread* thread,
                                                 Symbol*     name,
                                                 char*       message))
   THROW_MSG(name, message);
-IRT_END
+JRT_END
 
 InterpreterFrame *InterpreterFrame::build(Method* const method, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
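
The zero-interpreter change folds the two removed steps (the state
store plus the explicit serialization) into a single call. Presumably
set_thread_state_fence() is shaped roughly like this sketch; the
actual helper lives in the Thread class and is not shown in this
changeset:

    void JavaThread::set_thread_state_fence(JavaThreadState state) {
      set_thread_state(state);
      OrderAccess::fence();  // publish the new state to the GC thread
    }
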
--- a/src/hotspot/cpu/zero/interpreterRT_zero.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/cpu/zero/interpreterRT_zero.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -141,7 +141,7 @@
   assert(status == FFI_OK, "should be");
 }
 
-IRT_ENTRY(address,
+JRT_ENTRY(address,
           InterpreterRuntime::slow_signature_handler(JavaThread* thread,
                                                      Method*     method,
                                                      intptr_t*   unused1,
@@ -162,7 +162,7 @@
   handler->finalize();
 
   return (address) handler;
-IRT_END
+JRT_END
 
 void SignatureHandlerLibrary::pd_set_handler(address handlerAddr) {
   InterpreterRuntime::SignatureHandler *handler =
--- a/src/hotspot/os/aix/os_perf_aix.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os/aix/os_perf_aix.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -273,110 +273,12 @@
   return n;
 }
 
-static FILE* open_statfile(void) {
-  FILE *f;
-
-  if ((f = fopen("/proc/stat", "r")) == NULL) {
-    static int haveWarned = 0;
-    if (!haveWarned) {
-      haveWarned = 1;
-    }
-  }
-  return f;
-}
-
-static void
-next_line(FILE *f) {
-  int c;
-  do {
-    c = fgetc(f);
-  } while (c != '\n' && c != EOF);
-}
-
 /**
- * Return the total number of ticks since the system was booted.
- * If the usedTicks parameter is not NULL, it will be filled with
- * the number of ticks spent on actual processes (user, system or
- * nice processes) since system boot. Note that this is the total number
- * of "executed" ticks on _all_ CPU:s, that is on a n-way system it is
- * n times the number of ticks that has passed in clock time.
- *
- * Returns a negative value if the reading of the ticks failed.
+ * On Linux, tick information is read from /proc/stat. That file does
+ * not exist on AIX; libperfstat might be an alternative.
  */
 static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {
-  FILE*         fh;
-  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
-  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks= 0;
-  int           logical_cpu = -1;
-  const int     expected_assign_count = (-1 == which_logical_cpu) ? 4 : 5;
-  int           n;
-
-  if ((fh = open_statfile()) == NULL) {
-    return OS_ERR;
-  }
-  if (-1 == which_logical_cpu) {
-    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
-            UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
-            &userTicks, &niceTicks, &systemTicks, &idleTicks,
-            &iowTicks, &irqTicks, &sirqTicks);
-  } else {
-    // Move to next line
-    next_line(fh);
-
-    // find the line for requested cpu faster to just iterate linefeeds?
-    for (int i = 0; i < which_logical_cpu; i++) {
-      next_line(fh);
-    }
-
-    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
-               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
-               &logical_cpu, &userTicks, &niceTicks,
-               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks);
-  }
-
-  fclose(fh);
-  if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
-#ifdef DEBUG_LINUX_PROC_STAT
-    vm_fprintf(stderr, "[stat] read failed");
-#endif
-    return OS_ERR;
-  }
-
-#ifdef DEBUG_LINUX_PROC_STAT
-  vm_fprintf(stderr, "[stat] read "
-          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
-          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
-          userTicks, niceTicks, systemTicks, idleTicks,
-          iowTicks, irqTicks, sirqTicks);
-#endif
-
-  pticks->used       = userTicks + niceTicks;
-  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
-  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
-                       iowTicks + irqTicks + sirqTicks;
-
-  return OS_OK;
-}
-
-
-static int get_systemtype(void) {
-  static int procEntriesType = UNDETECTED;
-  DIR *taskDir;
-
-  if (procEntriesType != UNDETECTED) {
-    return procEntriesType;
-  }
-
-  // Check whether we have a task subdirectory
-  if ((taskDir = opendir("/proc/self/task")) == NULL) {
-    procEntriesType = UNDETECTABLE;
-  } else {
-    // The task subdirectory exists; we're on a Linux >= 2.6 system
-    closedir(taskDir);
-    procEntriesType = LINUX26_NPTL;
-  }
-
-  return procEntriesType;
+  return OS_ERR;
 }
 
 /** read user and system ticks from a named procfile, assumed to be in 'stat' format then. */
@@ -390,26 +292,7 @@
  * to the JVM on any CPU.
  */
 static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
-  uint64_t userTicks;
-  uint64_t systemTicks;
-
-  if (get_systemtype() != LINUX26_NPTL) {
-    return OS_ERR;
-  }
-
-  if (read_ticks("/proc/self/stat", &userTicks, &systemTicks) != 2) {
-    return OS_ERR;
-  }
-
-  // get the total
-  if (get_total_ticks(-1, pticks) != OS_OK) {
-    return OS_ERR;
-  }
-
-  pticks->used       = userTicks;
-  pticks->usedKernel = systemTicks;
-
-  return OS_OK;
+  return OS_ERR;
 }
 
 /**
@@ -473,29 +356,7 @@
 }
 
 static int SCANF_ARGS(1, 2) parse_stat(_SCANFMT_ const char* fmt, ...) {
-  FILE *f;
-  va_list args;
-
-  va_start(args, fmt);
-
-  if ((f = open_statfile()) == NULL) {
-    va_end(args);
-    return OS_ERR;
-  }
-  for (;;) {
-    char line[80];
-    if (fgets(line, sizeof(line), f) != NULL) {
-      if (vsscanf(line, fmt, args) == 1) {
-        fclose(f);
-        va_end(args);
-        return OS_OK;
-      }
-    } else {
-        fclose(f);
-        va_end(args);
-        return OS_ERR;
-    }
-  }
+  return OS_ERR;
 }
 
 static int get_noof_context_switches(uint64_t* switches) {
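
The comment left in get_total_ticks() points at libperfstat as the
likely AIX replacement. A hypothetical sketch of that route, with
field names taken from the AIX perfstat documentation and untested
here:

    #include <libperfstat.h>

    static OSReturn get_total_ticks_aix(CPUPerfTicks* pticks) {
      perfstat_cpu_total_t t;
      if (perfstat_cpu_total(NULL, &t, sizeof(t), 1) != 1) {
        return OS_ERR;
      }
      pticks->used       = t.user;
      pticks->usedKernel = t.sys;
      pticks->total      = t.user + t.sys + t.idle + t.wait;
      return OS_OK;
    }
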
--- a/src/hotspot/os/linux/os_linux.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os/linux/os_linux.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -63,6 +63,7 @@
 #include "runtime/threadCritical.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/timer.hpp"
+#include "runtime/vm_version.hpp"
 #include "semaphore_posix.hpp"
 #include "services/attachListener.hpp"
 #include "services/memTracker.hpp"
@@ -227,6 +228,82 @@
   return phys_mem;
 }
 
+static uint64_t initial_total_ticks = 0;
+static uint64_t initial_steal_ticks = 0;
+static bool     has_initial_tick_info = false;
+
+static void next_line(FILE *f) {
+  int c;
+  do {
+    c = fgetc(f);
+  } while (c != '\n' && c != EOF);
+}
+
+bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
+  FILE*         fh;
+  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
+  // since at least kernel 2.6: iowait: time waiting for I/O to complete;
+  // irq: time servicing interrupts; softirq: time servicing softirqs
+  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks = 0;
+  // steal (since kernel 2.6.11): time spent in other operating systems when running in a virtualized environment
+  uint64_t      stealTicks = 0;
+  // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the
+  // control of the Linux kernel
+  uint64_t      guestNiceTicks = 0;
+  int           logical_cpu = -1;
+  const int     required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;
+  int           n;
+
+  memset(pticks, 0, sizeof(CPUPerfTicks));
+
+  if ((fh = fopen("/proc/stat", "r")) == NULL) {
+    return false;
+  }
+
+  if (which_logical_cpu == -1) {
+    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+            UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+            UINT64_FORMAT " " UINT64_FORMAT " ",
+            &userTicks, &niceTicks, &systemTicks, &idleTicks,
+            &iowTicks, &irqTicks, &sirqTicks,
+            &stealTicks, &guestNiceTicks);
+  } else {
+    // Move to next line
+    next_line(fh);
+
+    // Find the line for the requested cpu. Faster to just iterate linefeeds?
+    for (int i = 0; i < which_logical_cpu; i++) {
+      next_line(fh);
+    }
+
+    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+               UINT64_FORMAT " " UINT64_FORMAT " ",
+               &logical_cpu, &userTicks, &niceTicks,
+               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,
+               &stealTicks, &guestNiceTicks);
+  }
+
+  fclose(fh);
+  if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {
+    return false;
+  }
+  pticks->used       = userTicks + niceTicks;
+  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
+  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
+                       iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;
+
+  if (n > required_tickinfo_count + 3) {
+    pticks->steal = stealTicks;
+    pticks->has_steal_ticks = true;
+  } else {
+    pticks->steal = 0;
+    pticks->has_steal_ticks = false;
+  }
+
+  return true;
+}
+
 // Return true if user is running as root.
 
 bool os::have_special_privileges() {
@@ -1863,35 +1940,6 @@
   return true;
 }
 
-#if defined(S390) || defined(PPC64)
-// keywords_to_match - NULL terminated array of keywords
-static bool print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]) {
-  char* line = NULL;
-  size_t length = 0;
-  FILE* fp = fopen(filename, "r");
-  if (fp == NULL) {
-    return false;
-  }
-
-  st->print_cr("Virtualization information:");
-  while (getline(&line, &length, fp) != -1) {
-    int i = 0;
-    while (keywords_to_match[i] != NULL) {
-      if (strncmp(line, keywords_to_match[i], strlen(keywords_to_match[i])) == 0) {
-        st->print("%s", line);
-        break;
-      }
-      i++;
-    }
-  }
-
-  free(line);
-  fclose(fp);
-
-  return true;
-}
-#endif
-
 void os::print_dll_info(outputStream *st) {
   st->print_cr("Dynamic libraries:");
 
@@ -1976,7 +2024,9 @@
 
   os::Linux::print_container_info(st);
 
-  os::Linux::print_virtualization_info(st);
+  VM_Version::print_platform_virtualization_info(st);
+
+  os::Linux::print_steal_info(st);
 }
 
 // Try to identify popular distros.
@@ -2231,38 +2281,22 @@
   st->cr();
 }
 
-void os::Linux::print_virtualization_info(outputStream* st) {
-#if defined(S390)
-  // /proc/sysinfo contains interesting information about
-  // - LPAR
-  // - whole "Box" (CPUs )
-  // - z/VM / KVM (VM<nn>); this is not available in an LPAR-only setup
-  const char* kw[] = { "LPAR", "CPUs", "VM", NULL };
-  const char* info_file = "/proc/sysinfo";
-
-  if (!print_matching_lines_from_file(info_file, st, kw)) {
-    st->print_cr("  <%s Not Available>", info_file);
-  }
-#elif defined(PPC64)
-  const char* info_file = "/proc/ppc64/lparcfg";
-  const char* kw[] = { "system_type=", // qemu indicates PowerKVM
-                       "partition_entitled_capacity=", // entitled processor capacity percentage
-                       "partition_max_entitled_capacity=",
-                       "capacity_weight=", // partition CPU weight
-                       "partition_active_processors=",
-                       "partition_potential_processors=",
-                       "entitled_proc_capacity_available=",
-                       "capped=", // 0 - uncapped, 1 - vcpus capped at entitled processor capacity percentage
-                       "shared_processor_mode=", // (non)dedicated partition
-                       "system_potential_processors=",
-                       "pool=", // CPU-pool number
-                       "pool_capacity=",
-                       "NumLpars=", // on non-KVM machines, NumLpars is not found for full partition mode machines
-                       NULL };
-  if (!print_matching_lines_from_file(info_file, st, kw)) {
-    st->print_cr("  <%s Not Available>", info_file);
-  }
-#endif
+void os::Linux::print_steal_info(outputStream* st) {
+  if (has_initial_tick_info) {
+    CPUPerfTicks pticks;
+    bool res = os::Linux::get_tick_information(&pticks, -1);
+
+    if (res && pticks.has_steal_ticks) {
+      uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks;
+      uint64_t total_ticks_difference = pticks.total - initial_total_ticks;
+      double steal_ticks_perc = 0.0;
+      if (total_ticks_difference != 0) {
+        steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference;
+      }
+      st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference);
+      st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc);
+    }
+  }
 }
 
 void os::print_memory_info(outputStream* st) {
@@ -4989,6 +5023,15 @@
 
   Linux::initialize_os_info();
 
+  os::Linux::CPUPerfTicks pticks;
+  bool res = os::Linux::get_tick_information(&pticks, -1);
+
+  if (res && pticks.has_steal_ticks) {
+    has_initial_tick_info = true;
+    initial_total_ticks = pticks.total;
+    initial_steal_ticks = pticks.steal;
+  }
+
   // _main_thread points to the thread that created/loaded the JVM.
   Linux::_main_thread = pthread_self();
 
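
A worked example of the new hs_err output, with hypothetical numbers:
if /proc/stat showed total=1000 and steal=10 ticks at VM start, and
total=5000 and steal=210 when the report is written, the deltas are
200 steal ticks out of 4000 total ticks and the report reads:

    Steal ticks since vm start: 200
    Steal ticks percentage since vm start:  0.050

Note that the second figure is the 0-to-1 ratio of the two deltas,
despite the "percentage" label.
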
--- a/src/hotspot/os/linux/os_linux.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os/linux/os_linux.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -108,13 +108,23 @@
 
   static void print_full_memory_info(outputStream* st);
   static void print_container_info(outputStream* st);
-  static void print_virtualization_info(outputStream* st);
+  static void print_steal_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
   static void print_proc_sys_info(outputStream* st);
   static void print_ld_preload_file(outputStream* st);
 
  public:
+  struct CPUPerfTicks {
+    uint64_t used;
+    uint64_t usedKernel;
+    uint64_t total;
+    uint64_t steal;
+    bool     has_steal_ticks;
+  };
+
+  // which_logical_cpu=-1 returns accumulated ticks for all cpus.
+  static bool get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu);
   static bool _stack_is_executable;
   static void *dlopen_helper(const char *name, char *ebuf, int ebuflen);
   static void *dll_load_in_vmthread(const char *name, char *ebuf, int ebuflen);
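
Callers of get_tick_information() are expected to sample twice and
work with deltas, as print_steal_info() does above. A minimal usage
sketch (illustrative):

    os::Linux::CPUPerfTicks t0, t1;
    if (os::Linux::get_tick_information(&t0, -1 /* all cpus */)) {
      // ... measurement interval ...
      if (os::Linux::get_tick_information(&t1, -1)) {
        double load = (double)(t1.used - t0.used)
                    / (double)(t1.total - t0.total);
      }
    }
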
--- a/src/hotspot/os/linux/os_perf_linux.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os/linux/os_perf_linux.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -206,13 +206,6 @@
 #  define _SCANFMT_
 #endif
 
-
-struct CPUPerfTicks {
-  uint64_t  used;
-  uint64_t  usedKernel;
-  uint64_t  total;
-};
-
 typedef enum {
   CPU_LOAD_VM_ONLY,
   CPU_LOAD_GLOBAL,
@@ -227,8 +220,8 @@
 
 struct CPUPerfCounters {
   int   nProcs;
-  CPUPerfTicks jvmTicks;
-  CPUPerfTicks* cpus;
+  os::Linux::CPUPerfTicks jvmTicks;
+  os::Linux::CPUPerfTicks* cpus;
 };
 
 static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target);
@@ -287,80 +280,6 @@
   return f;
 }
 
-static void
-next_line(FILE *f) {
-  int c;
-  do {
-    c = fgetc(f);
-  } while (c != '\n' && c != EOF);
-}
-
-/**
- * Return the total number of ticks since the system was booted.
- * If the usedTicks parameter is not NULL, it will be filled with
- * the number of ticks spent on actual processes (user, system or
- * nice processes) since system boot. Note that this is the total number
- * of "executed" ticks on _all_ CPU:s, that is on a n-way system it is
- * n times the number of ticks that has passed in clock time.
- *
- * Returns a negative value if the reading of the ticks failed.
- */
-static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {
-  FILE*         fh;
-  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
-  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks= 0;
-  int           logical_cpu = -1;
-  const int     expected_assign_count = (-1 == which_logical_cpu) ? 4 : 5;
-  int           n;
-
-  if ((fh = open_statfile()) == NULL) {
-    return OS_ERR;
-  }
-  if (-1 == which_logical_cpu) {
-    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
-            UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
-            &userTicks, &niceTicks, &systemTicks, &idleTicks,
-            &iowTicks, &irqTicks, &sirqTicks);
-  } else {
-    // Move to next line
-    next_line(fh);
-
-    // find the line for requested cpu faster to just iterate linefeeds?
-    for (int i = 0; i < which_logical_cpu; i++) {
-      next_line(fh);
-    }
-
-    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
-               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
-               &logical_cpu, &userTicks, &niceTicks,
-               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks);
-  }
-
-  fclose(fh);
-  if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
-#ifdef DEBUG_LINUX_PROC_STAT
-    vm_fprintf(stderr, "[stat] read failed");
-#endif
-    return OS_ERR;
-  }
-
-#ifdef DEBUG_LINUX_PROC_STAT
-  vm_fprintf(stderr, "[stat] read "
-          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
-          UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
-          userTicks, niceTicks, systemTicks, idleTicks,
-          iowTicks, irqTicks, sirqTicks);
-#endif
-
-  pticks->used       = userTicks + niceTicks;
-  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
-  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
-                       iowTicks + irqTicks + sirqTicks;
-
-  return OS_OK;
-}
-
-
 static int get_systemtype(void) {
   static int procEntriesType = UNDETECTED;
   DIR *taskDir;
@@ -391,7 +310,7 @@
  * Return the number of ticks spent in any of the processes belonging
  * to the JVM on any CPU.
  */
-static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
+static OSReturn get_jvm_ticks(os::Linux::CPUPerfTicks* pticks) {
   uint64_t userTicks;
   uint64_t systemTicks;
 
@@ -404,7 +323,7 @@
   }
 
   // get the total
-  if (get_total_ticks(-1, pticks) != OS_OK) {
+  if (! os::Linux::get_tick_information(pticks, -1)) {
     return OS_ERR;
   }
 
@@ -423,8 +342,8 @@
  */
 static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target) {
   uint64_t udiff, kdiff, tdiff;
-  CPUPerfTicks* pticks;
-  CPUPerfTicks  tmp;
+  os::Linux::CPUPerfTicks* pticks;
+  os::Linux::CPUPerfTicks  tmp;
   double user_load;
 
   *pkernelLoad = 0.0;
@@ -443,7 +362,7 @@
     if (get_jvm_ticks(pticks) != OS_OK) {
       return -1.0;
     }
-  } else if (get_total_ticks(which_logical_cpu, pticks) != OS_OK) {
+  } else if (! os::Linux::get_tick_information(pticks, which_logical_cpu)) {
     return -1.0;
   }
 
@@ -584,19 +503,19 @@
 }
 
 bool CPUPerformanceInterface::CPUPerformance::initialize() {
-  size_t tick_array_size = (_counters.nProcs +1) * sizeof(CPUPerfTicks);
-  _counters.cpus = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
+  size_t tick_array_size = (_counters.nProcs +1) * sizeof(os::Linux::CPUPerfTicks);
+  _counters.cpus = (os::Linux::CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
   if (NULL == _counters.cpus) {
     return false;
   }
   memset(_counters.cpus, 0, tick_array_size);
 
   // For the CPU load total
-  get_total_ticks(-1, &_counters.cpus[_counters.nProcs]);
+  os::Linux::get_tick_information(&_counters.cpus[_counters.nProcs], -1);
 
   // For each CPU
   for (int i = 0; i < _counters.nProcs; i++) {
-    get_total_ticks(i, &_counters.cpus[i]);
+    os::Linux::get_tick_information(&_counters.cpus[i], i);
   }
   // For JVM load
   get_jvm_ticks(&_counters.jvmTicks);
--- a/src/hotspot/os/windows/os_windows.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os/windows/os_windows.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1601,6 +1601,10 @@
 #endif
   st->print("OS:");
   os::win32::print_windows_version(st);
+
+#ifdef _LP64
+  VM_Version::print_platform_virtualization_info(st);
+#endif
 }
 
 void os::win32::print_windows_version(outputStream* st) {
--- a/src/hotspot/os/windows/version.rc	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os/windows/version.rc	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,8 @@
 //
 
 VS_VERSION_INFO VERSIONINFO
- FILEVERSION    JDK_VER
- PRODUCTVERSION JDK_VER
+ FILEVERSION    HS_FVER
+ PRODUCTVERSION HS_FVER
  FILEFLAGSMASK 0x3fL
 #ifdef _DEBUG
  FILEFLAGS 0x1L
@@ -54,15 +54,15 @@
     BEGIN
         BLOCK "000004b0"
         BEGIN
-            VALUE "CompanyName",      XSTR(HS_COMPANY)       "\0"
-            VALUE "FileDescription",  XSTR(HS_FILEDESC)      "\0"
-            VALUE "FileVersion",      XSTR(JDK_DOTVER)        "\0"
-            VALUE "Full Version",     XSTR(HS_BUILD_ID)      "\0"
-	    VALUE "InternalName",     XSTR(HS_INTERNAL_NAME) "\0"
-            VALUE "LegalCopyright",   XSTR(HS_COPYRIGHT)     "\0"
-            VALUE "OriginalFilename", XSTR(HS_FNAME)         "\0"
-            VALUE "ProductName",      XSTR(HS_NAME)          "\0"
-            VALUE "ProductVersion",   XSTR(JDK_DOTVER)       "\0"
+            VALUE "CompanyName",      XSTR(HS_COMPANY)        "\0"
+            VALUE "FileDescription",  XSTR(HS_FILEDESC)       "\0"
+            VALUE "FileVersion",      XSTR(HS_VER)            "\0"
+            VALUE "Full Version",     XSTR(HS_VERSION_STRING) "\0"
+            VALUE "InternalName",     XSTR(HS_INTERNAL_NAME)  "\0"
+            VALUE "LegalCopyright",   XSTR(HS_COPYRIGHT)      "\0"
+            VALUE "OriginalFilename", XSTR(HS_FNAME)          "\0"
+            VALUE "ProductName",      XSTR(HS_NAME)           "\0"
+            VALUE "ProductVersion",   XSTR(HS_VER)            "\0"
         END
     END
     BLOCK "VarFileInfo"
--- a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -66,7 +66,7 @@
 
     if (ret_frame.is_interpreted_frame()) {
        frame::ijava_state* istate = ret_frame.get_ijava_state();
-       if (!((Method*)(istate->method))->is_metaspace_object()) {
+       if (MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
          return false;
        }
        uint64_t reg_bcp = uc->uc_mcontext.regs->gpr[14/*R14_bcp*/];
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -63,7 +63,8 @@
 
     if (ret_frame.is_interpreted_frame()) {
       frame::z_ijava_state* istate = ret_frame.ijava_state_unchecked();
-       if ((stack_base() >= (address)istate && (address)istate > stack_end()) || !((Method*)(istate->method))->is_metaspace_object()) {
+       if ((stack_base() >= (address)istate && (address)istate > stack_end()) ||
+           MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
          return false;
        }
        uint64_t reg_bcp = uc->uc_mcontext.gregs[13/*Z_BCP*/];
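
Both this hunk and the linux_ppc one above make the same substitution: the method pointer recovered from a possibly inconsistent interpreter frame may be arbitrary, so invoking a member function through it, as the old ((Method*)...)->is_metaspace_object() call did, operates on an unvalidated 'this'. A static check decides validity from the pointer's value alone. An illustrative, self-contained sketch (MetaspaceObj::is_valid() plays this role in HotSpot; lo/hi stand in for the real metaspace bounds):

    #include <cstdint>
    #include <cstddef>

    // Pure range test: never dereferences p, so it is safe on garbage values.
    static bool looks_like_metaspace_object(const void* p,
                                            uintptr_t lo, uintptr_t hi) {
      uintptr_t v = (uintptr_t)p;
      return p != NULL && v >= lo && v < hi;
    }
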
--- a/src/hotspot/share/adlc/formssel.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/adlc/formssel.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -774,10 +774,16 @@
        !strcmp(_matrule->_rChild->_opType,"CheckCastPP")  ||
        !strcmp(_matrule->_rChild->_opType,"GetAndSetP")   ||
        !strcmp(_matrule->_rChild->_opType,"GetAndSetN")   ||
+#if INCLUDE_ZGC
+       !strcmp(_matrule->_rChild->_opType,"LoadBarrierSlowReg") ||
+       !strcmp(_matrule->_rChild->_opType,"LoadBarrierWeakSlowReg") ||
+#endif
+#if INCLUDE_SHENANDOAHGC
+       !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
+       !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
+#endif
        !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
-       !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") ||
-       !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
-       !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN"))) return true;
+       !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN"))) return true;
   else if ( is_ideal_load() == Form::idealP )                return true;
   else if ( is_ideal_store() != Form::none  )                return true;
 
--- a/src/hotspot/share/classfile/classFileParser.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -5975,9 +5975,9 @@
   _minor_version = stream->get_u2_fast();
   _major_version = stream->get_u2_fast();
 
-  if (DumpSharedSpaces && _major_version < JAVA_1_5_VERSION) {
+  if (DumpSharedSpaces && _major_version < JAVA_6_VERSION) {
     ResourceMark rm;
-    warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
+    warning("Pre JDK 6 class not supported by CDS: %u.%u %s",
             _major_version,  _minor_version, _class_name->as_C_string());
     Exceptions::fthrow(
       THREAD_AND_LOCATION,
--- a/src/hotspot/share/classfile/javaClasses.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -160,6 +160,7 @@
 
 int java_lang_String::value_offset  = 0;
 int java_lang_String::hash_offset   = 0;
+int java_lang_String::hashIsZero_offset = 0;
 int java_lang_String::coder_offset  = 0;
 
 bool java_lang_String::initialized  = false;
@@ -179,7 +180,8 @@
 #define STRING_FIELDS_DO(macro) \
   macro(value_offset, k, vmSymbols::value_name(), byte_array_signature, false); \
   macro(hash_offset,  k, "hash",                  int_signature,        false); \
-  macro(coder_offset, k, "coder",                 byte_signature,       false)
+  macro(hashIsZero_offset, k, "hashIsZero",       bool_signature,       false); \
+  macro(coder_offset, k, "coder",                 byte_signature,       false);
 
 void java_lang_String::compute_offsets() {
   if (initialized) {
@@ -218,7 +220,7 @@
 
 void java_lang_String::set_compact_strings(bool value) {
   CompactStringsFixup fix(value);
-  InstanceKlass::cast(SystemDictionary::String_klass())->do_local_static_fields(&fix);
+  SystemDictionary::String_klass()->do_local_static_fields(&fix);
 }
 
 Handle java_lang_String::basic_create(int length, bool is_latin1, TRAPS) {
@@ -507,18 +509,38 @@
 }
 
 unsigned int java_lang_String::hash_code(oop java_string) {
-  typeArrayOop value  = java_lang_String::value(java_string);
-  int          length = java_lang_String::length(java_string, value);
-  // Zero length string will hash to zero with String.hashCode() function.
-  if (length == 0) return 0;
-
-  bool      is_latin1 = java_lang_String::is_latin1(java_string);
-
-  if (is_latin1) {
-    return java_lang_String::hash_code(value->byte_at_addr(0), length);
+  // The hash and hashIsZero fields are subject to a benign data race,
+  // making it crucial to ensure that any observable result of the
+  // calculation in this method stays correct under any possible read of
+  // these fields. The necessary restrictions that make this correct
+  // without explicit memory fences or similar concurrency primitives are
+  // that we only ever write to one of these two fields for a given
+  // String instance, and that the computation is idempotent and derived
+  // from immutable state.
+  assert(initialized && (hash_offset > 0) && (hashIsZero_offset > 0), "Must be initialized");
+  if (java_lang_String::hash_is_set(java_string)) {
+    return java_string->int_field(hash_offset);
+  }
+
+  typeArrayOop value = java_lang_String::value(java_string);
+  int         length = java_lang_String::length(java_string, value);
+  bool     is_latin1 = java_lang_String::is_latin1(java_string);
+
+  unsigned int hash = 0;
+  if (length > 0) {
+    if (is_latin1) {
+      hash = java_lang_String::hash_code(value->byte_at_addr(0), length);
+    } else {
+      hash = java_lang_String::hash_code(value->char_at_addr(0), length);
+    }
+  }
+
+  if (hash != 0) {
+    java_string->int_field_put(hash_offset, hash);
   } else {
-    return java_lang_String::hash_code(value->char_at_addr(0), length);
+    java_string->bool_field_put(hashIsZero_offset, true);
   }
+  return hash;
 }
 
 char* java_lang_String::as_quoted_ascii(oop java_string) {
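
The comment's invariant, that at most one of the two fields is ever written and the value written is a pure function of immutable input, is exactly what makes the race benign: a racy reader either sees a published cache or recomputes the identical value. A compressed, self-contained sketch of the idiom outside the oop machinery (LazyHash is illustrative, not the HotSpot code):

    #include <cstddef>

    struct LazyHash {
      unsigned int hash;          // set once, only if the true hash is nonzero
      bool         hash_is_zero;  // set once instead, if the true hash is 0

      // Same polynomial as String.hashCode(); idempotent over immutable input.
      static unsigned int compute(const char* data, size_t len) {
        unsigned int h = 0;
        for (size_t i = 0; i < len; i++) {
          h = 31 * h + (unsigned char)data[i];
        }
        return h;
      }

      unsigned int get(const char* data, size_t len) {
        if (hash != 0 || hash_is_zero) {
          return hash;                        // fast path: already published
        }
        unsigned int h = compute(data, len);
        if (h != 0) {
          hash = h;                           // publish via exactly one field
        } else {
          hash_is_zero = true;
        }
        return h;
      }
    };
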
@@ -1692,20 +1714,13 @@
 
 
 jlong java_lang_Thread::stackSize(oop java_thread) {
-  if (_stackSize_offset > 0) {
-    return java_thread->long_field(_stackSize_offset);
-  } else {
-    return 0;
-  }
+  return java_thread->long_field(_stackSize_offset);
 }
 
 // Write the thread status value to threadStatus field in java.lang.Thread java class.
 void java_lang_Thread::set_thread_status(oop java_thread,
                                          java_lang_Thread::ThreadStatus status) {
-  // The threadStatus is only present starting in 1.5
-  if (_thread_status_offset > 0) {
-    java_thread->int_field_put(_thread_status_offset, status);
-  }
+  java_thread->int_field_put(_thread_status_offset, status);
 }
 
 // Read thread status value from threadStatus field in java.lang.Thread java class.
@@ -1715,62 +1730,31 @@
   assert(Threads_lock->owned_by_self() || Thread::current()->is_VM_thread() ||
          JavaThread::current()->thread_state() == _thread_in_vm,
          "Java Thread is not running in vm");
-  // The threadStatus is only present starting in 1.5
-  if (_thread_status_offset > 0) {
-    return (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
-  } else {
-    // All we can easily figure out is if it is alive, but that is
-    // enough info for a valid unknown status.
-    // These aren't restricted to valid set ThreadStatus values, so
-    // use JVMTI values and cast.
-    JavaThread* thr = java_lang_Thread::thread(java_thread);
-    if (thr == NULL) {
-      // the thread hasn't run yet or is in the process of exiting
-      return NEW;
-    }
-    return (java_lang_Thread::ThreadStatus)JVMTI_THREAD_STATE_ALIVE;
-  }
+  return (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
 }
 
 
 jlong java_lang_Thread::thread_id(oop java_thread) {
-  // The thread ID field is only present starting in 1.5
-  if (_tid_offset > 0) {
-    return java_thread->long_field(_tid_offset);
-  } else {
-    return 0;
-  }
+  return java_thread->long_field(_tid_offset);
 }
 
 oop java_lang_Thread::park_blocker(oop java_thread) {
-  assert(JDK_Version::current().supports_thread_park_blocker() &&
-         _park_blocker_offset != 0, "Must support parkBlocker field");
-
-  if (_park_blocker_offset > 0) {
-    return java_thread->obj_field(_park_blocker_offset);
-  }
-
-  return NULL;
+  assert(JDK_Version::current().supports_thread_park_blocker(),
+         "Must support parkBlocker field");
+
+  return java_thread->obj_field(_park_blocker_offset);
 }
 
 jlong java_lang_Thread::park_event(oop java_thread) {
-  if (_park_event_offset > 0) {
-    return java_thread->long_field(_park_event_offset);
-  }
-  return 0;
+  return java_thread->long_field(_park_event_offset);
 }
 
 bool java_lang_Thread::set_park_event(oop java_thread, jlong ptr) {
-  if (_park_event_offset > 0) {
-    java_thread->long_field_put(_park_event_offset, ptr);
-    return true;
-  }
-  return false;
-}
-
+  java_thread->long_field_put(_park_event_offset, ptr);
+  return true;
+}
 
 const char* java_lang_Thread::thread_status_name(oop java_thread) {
-  assert(_thread_status_offset != 0, "Must have thread status");
   ThreadStatus status = (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
   switch (status) {
     case NEW                      : return "NEW";
@@ -3613,23 +3597,48 @@
   resolved_method->address_field_put(_vmtarget_offset, (address)m);
 }
 
+void java_lang_invoke_ResolvedMethodName::set_vmholder(oop resolved_method, oop holder) {
+  assert(is_instance(resolved_method), "wrong type");
+  resolved_method->obj_field_put(_vmholder_offset, holder);
+}
+
 oop java_lang_invoke_ResolvedMethodName::find_resolved_method(const methodHandle& m, TRAPS) {
+  const Method* method = m();
+
   // lookup ResolvedMethod oop in the table, or create a new one and intern it
-  oop resolved_method = ResolvedMethodTable::find_method(m());
-  if (resolved_method == NULL) {
-    InstanceKlass* k = SystemDictionary::ResolvedMethodName_klass();
-    if (!k->is_initialized()) {
-      k->initialize(CHECK_NULL);
-    }
-    oop new_resolved_method = k->allocate_instance(CHECK_NULL);
-    new_resolved_method->address_field_put(_vmtarget_offset, (address)m());
-    // Add a reference to the loader (actually mirror because unsafe anonymous classes will not have
-    // distinct loaders) to ensure the metadata is kept alive.
-    // This mirror may be different than the one in clazz field.
-    new_resolved_method->obj_field_put(_vmholder_offset, m->method_holder()->java_mirror());
-    resolved_method = ResolvedMethodTable::add_method(m, Handle(THREAD, new_resolved_method));
+  oop resolved_method = ResolvedMethodTable::find_method(method);
+  if (resolved_method != NULL) {
+    return resolved_method;
   }
-  return resolved_method;
+
+  InstanceKlass* k = SystemDictionary::ResolvedMethodName_klass();
+  if (!k->is_initialized()) {
+    k->initialize(CHECK_NULL);
+  }
+
+  oop new_resolved_method = k->allocate_instance(CHECK_NULL);
+
+  NoSafepointVerifier nsv;
+
+  if (method->is_old()) {
+    method = (method->is_deleted()) ? Universe::throw_no_such_method_error() :
+                                      method->get_new_method();
+  }
+
+  InstanceKlass* holder = method->method_holder();
+
+  set_vmtarget(new_resolved_method, const_cast<Method*>(method));
+  // Add a reference to the loader (actually the mirror, because unsafe anonymous
+  // classes will not have distinct loaders) to ensure the metadata is kept alive.
+  // This mirror may be different from the one in the clazz field.
+  set_vmholder(new_resolved_method, holder->java_mirror());
+
+  // Set flag in class to indicate this InstanceKlass has entries in the table
+  // to avoid walking table during redefinition if none of the redefined classes
+  // have any membernames in the table.
+  holder->set_has_resolved_methods();
+
+  return ResolvedMethodTable::add_method(method, Handle(THREAD, new_resolved_method));
 }
 
 oop java_lang_invoke_LambdaForm::vmentry(oop lform) {
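
The rewrite flattens the nested find-or-create logic into early returns, and the NoSafepointVerifier taken right after the allocation bounds the window in which the chosen Method* must stay stable. Reduced to its skeleton, the pattern looks like the sketch below (std::map and a mutex stand in for ResolvedMethodTable, which is concurrent and GC-aware; all names are illustrative):

    #include <map>
    #include <mutex>

    template <typename K, typename V>
    class InternTable {
      std::map<K, V> _table;
      std::mutex     _lock;
     public:
      V find_or_intern(const K& key, V (*create)(const K&)) {
        {
          std::lock_guard<std::mutex> g(_lock);
          auto it = _table.find(key);
          if (it != _table.end()) {
            return it->second;          // fast path: no allocation
          }
        }
        V fresh = create(key);          // allocation happens outside the lock
        std::lock_guard<std::mutex> g(_lock);
        // emplace keeps the existing entry if another thread won the race,
        // mirroring add_method() returning the winning oop.
        return _table.emplace(key, fresh).first->second;
      }
    };
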
@@ -3991,6 +4000,48 @@
 int java_lang_System::out_offset_in_bytes() { return static_out_offset; }
 int java_lang_System::err_offset_in_bytes() { return static_err_offset; }
 
+// Support for jdk_internal_misc_UnsafeConstants
+//
+class UnsafeConstantsFixup : public FieldClosure {
+private:
+  int _address_size;
+  int _page_size;
+  bool _big_endian;
+  bool _use_unaligned_access;
+public:
+  UnsafeConstantsFixup() {
+    // gather the values for all the static final fields
+    _address_size = sizeof(void*);
+    _page_size = os::vm_page_size();
+    _big_endian = LITTLE_ENDIAN_ONLY(false) BIG_ENDIAN_ONLY(true);
+    _use_unaligned_access = UseUnalignedAccesses;
+  }
+
+  void do_field(fieldDescriptor* fd) {
+    oop mirror = fd->field_holder()->java_mirror();
+    assert(mirror != NULL, "UnsafeConstants must have mirror already");
+    assert(fd->field_holder() == SystemDictionary::UnsafeConstants_klass(), "Should be UnsafeConstants");
+    assert(fd->is_final(), "fields of UnsafeConstants must be final");
+    assert(fd->is_static(), "fields of UnsafeConstants must be static");
+    if (fd->name() == vmSymbols::address_size_name()) {
+      mirror->int_field_put(fd->offset(), _address_size);
+    } else if (fd->name() == vmSymbols::page_size_name()) {
+      mirror->int_field_put(fd->offset(), _page_size);
+    } else if (fd->name() == vmSymbols::big_endian_name()) {
+      mirror->bool_field_put(fd->offset(), _big_endian);
+    } else if (fd->name() == vmSymbols::use_unaligned_access_name()) {
+      mirror->bool_field_put(fd->offset(), _use_unaligned_access);
+    } else {
+      assert(false, "unexpected UnsafeConstants field");
+    }
+  }
+};
+
+void jdk_internal_misc_UnsafeConstants::set_unsafe_constants() {
+  UnsafeConstantsFixup fixup;
+  SystemDictionary::UnsafeConstants_klass()->do_local_static_fields(&fixup);
+}
+
 int java_lang_Class::_klass_offset;
 int java_lang_Class::_array_klass_offset;
 int java_lang_Class::_oop_size_offset;
--- a/src/hotspot/share/classfile/javaClasses.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -81,6 +81,7 @@
   f(java_lang_StackFrameInfo) \
   f(java_lang_LiveStackFrameInfo) \
   f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
+  f(jdk_internal_misc_UnsafeConstants) \
   //end
 
 #define BASIC_JAVA_CLASSES_DO(f) \
@@ -93,6 +94,7 @@
  private:
   static int value_offset;
   static int hash_offset;
+  static int hashIsZero_offset;
   static int coder_offset;
 
   static bool initialized;
@@ -131,6 +133,10 @@
     assert(initialized && (hash_offset > 0), "Must be initialized");
     return hash_offset;
   }
+  static int hashIsZero_offset_in_bytes()   {
+    assert(initialized && (hashIsZero_offset > 0), "Must be initialized");
+    return hashIsZero_offset;
+  }
   static int coder_offset_in_bytes()   {
     assert(initialized && (coder_offset > 0), "Must be initialized");
     return coder_offset;
@@ -138,12 +144,11 @@
 
   static inline void set_value_raw(oop string, typeArrayOop buffer);
   static inline void set_value(oop string, typeArrayOop buffer);
-  static inline void set_hash(oop string, unsigned int hash);
 
   // Accessors
   static inline typeArrayOop value(oop java_string);
   static inline typeArrayOop value_no_keepalive(oop java_string);
-  static inline unsigned int hash(oop java_string);
+  static inline bool hash_is_set(oop string);
   static inline bool is_latin1(oop java_string);
   static inline int length(oop java_string);
   static inline int length(oop java_string, typeArrayOop string_value);
@@ -1058,6 +1063,8 @@
   static Method* vmtarget(oop resolved_method);
   static void set_vmtarget(oop resolved_method, Method* method);
 
+  static void set_vmholder(oop resolved_method, oop holder);
+
   // find or create resolved member name
   static oop find_resolved_method(const methodHandle& m, TRAPS);
 
@@ -1483,6 +1490,15 @@
   static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
+ // Interface to jdk.internal.misc.UnsafeConstants
+
+class jdk_internal_misc_UnsafeConstants : AllStatic {
+ public:
+  static void set_unsafe_constants();
+  static void compute_offsets() { }
+  static void serialize_offsets(SerializeClosure* f) { }
+};
+
 // Use to declare fields that need to be injected into Java classes
 // for the JVM to use.  The name_index and signature_index are
 // declared in vmSymbols.  The may_be_java flag is used to declare
--- a/src/hotspot/share/classfile/javaClasses.inline.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/javaClasses.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -45,9 +45,9 @@
   string->obj_field_put(value_offset, (oop)buffer);
 }
 
-void java_lang_String::set_hash(oop string, unsigned int hash) {
-  assert(initialized && (hash_offset > 0), "Must be initialized");
-  string->int_field_put(hash_offset, hash);
+bool java_lang_String::hash_is_set(oop java_string) {
+  assert(initialized && (hash_offset > 0) && (hashIsZero_offset > 0), "Must be initialized");
+  return java_string->int_field(hash_offset) != 0 || java_string->bool_field(hashIsZero_offset) != 0;
 }
 
 // Accessors
@@ -71,12 +71,6 @@
   return (typeArrayOop) java_string->obj_field_access<AS_NO_KEEPALIVE>(value_offset);
 }
 
-unsigned int java_lang_String::hash(oop java_string) {
-  assert(initialized && (hash_offset > 0), "Must be initialized");
-  assert(is_instance(java_string), "must be java_string");
-  return java_string->int_field(hash_offset);
-}
-
 bool java_lang_String::is_latin1(oop java_string) {
   assert(initialized && (coder_offset > 0), "Must be initialized");
   assert(is_instance(java_string), "must be java_string");
--- a/src/hotspot/share/classfile/stringTable.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/stringTable.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -761,8 +761,6 @@
       return true;
     }
     unsigned int hash = java_lang_String::hash_code(s);
-
-    java_lang_String::set_hash(s, hash);
     oop new_s = StringTable::create_archived_string(s, Thread::current());
     if (new_s == NULL) {
       return true;
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -68,7 +68,6 @@
 #include "oops/symbol.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "prims/resolvedMethodTable.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -1836,8 +1835,6 @@
   }
 
   GCTraceTime(Debug, gc, phases) t("Trigger cleanups", gc_timer);
-  // Trigger cleaning the ResolvedMethodTable even if no unloading occurred.
-  ResolvedMethodTable::trigger_cleanup();
 
   if (unloading_occurred) {
     SymbolTable::trigger_cleanup();
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -177,6 +177,7 @@
   do_klass(AssertionStatusDirectives_klass,             java_lang_AssertionStatusDirectives                   ) \
   do_klass(StringBuffer_klass,                          java_lang_StringBuffer                                ) \
   do_klass(StringBuilder_klass,                         java_lang_StringBuilder                               ) \
+  do_klass(UnsafeConstants_klass,                       jdk_internal_misc_UnsafeConstants                     ) \
   do_klass(internal_Unsafe_klass,                       jdk_internal_misc_Unsafe                              ) \
   do_klass(module_Modules_klass,                        jdk_internal_module_Modules                           ) \
                                                                                                                 \
--- a/src/hotspot/share/classfile/verifier.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/verifier.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -2060,7 +2060,9 @@
   ss.vprint(msg, va);
   va_end(va);
   if (!_method.is_null()) {
-    ss.print(" in method %s", _method->name_and_sig_as_C_string());
+    ss.print(" in method '");
+    _method->print_external_name(&ss);
+    ss.print("'");
   }
   _message = ss.as_string();
 }
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -446,6 +446,10 @@
   template(module_entry_name,                         "module_entry")                             \
   template(resolved_references_name,                  "<resolved_references>")                    \
   template(init_lock_name,                            "<init_lock>")                              \
+  template(address_size_name,                         "ADDRESS_SIZE0")                            \
+  template(page_size_name,                            "PAGE_SIZE")                                \
+  template(big_endian_name,                           "BIG_ENDIAN")                               \
+  template(use_unaligned_access_name,                 "UNALIGNED_ACCESS")                         \
                                                                                                   \
   /* name symbols needed by intrinsics */                                                         \
   VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@@ -1070,6 +1074,9 @@
   do_intrinsic(_updateByteBufferAdler32,  java_util_zip_Adler32,  updateByteBuffer_A_name,  updateByteBuffer_signature,  F_SN) \
    do_name(     updateByteBuffer_A_name,                          "updateByteBuffer")                                   \
                                                                                                                         \
+  /* support for UnsafeConstants */                                                                                     \
+  do_class(jdk_internal_misc_UnsafeConstants,      "jdk/internal/misc/UnsafeConstants")                                 \
+                                                                                                                        \
   /* support for Unsafe */                                                                                              \
   do_class(jdk_internal_misc_Unsafe,               "jdk/internal/misc/Unsafe")                                          \
                                                                                                                         \
--- a/src/hotspot/share/code/codeCache.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/code/codeCache.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -780,13 +780,14 @@
 CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
   : _is_unloading_behaviour(is_alive)
 {
+  _saved_behaviour = IsUnloadingBehaviour::current();
   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
   increment_unloading_cycle();
   DependencyContext::cleaning_start();
 }
 
 CodeCache::UnloadingScope::~UnloadingScope() {
-  IsUnloadingBehaviour::set_current(NULL);
+  IsUnloadingBehaviour::set_current(_saved_behaviour);
   DependencyContext::cleaning_end();
 }
 
--- a/src/hotspot/share/code/codeCache.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/code/codeCache.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -170,6 +170,7 @@
   // "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
   class UnloadingScope: StackObj {
     ClosureIsUnloadingBehaviour _is_unloading_behaviour;
+    IsUnloadingBehaviour*       _saved_behaviour;
 
   public:
     UnloadingScope(BoolObjectClosure* is_alive);
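
The UnloadingScope change is the standard scoped save/restore idiom: capture the previous setting in the constructor and reinstate it in the destructor, so nested scopes compose instead of the inner one clobbering the outer setting with NULL on exit. As a generic, self-contained sketch (HotSpot writes this out by hand in UnloadingScope rather than using a template like this):

    template <typename T>
    class SaveRestore {
      T* _slot;
      T  _saved;
     public:
      SaveRestore(T* slot, T new_value) : _slot(slot), _saved(*slot) {
        *_slot = new_value;   // install the new setting...
      }
      ~SaveRestore() {
        *_slot = _saved;      // ...and put the previous one back on exit
      }
    };
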
--- a/src/hotspot/share/compiler/compileBroker.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -135,11 +135,6 @@
 volatile jint CompileBroker::_compilation_id     = 0;
 volatile jint CompileBroker::_osr_compilation_id = 0;
 
-// Debugging information
-int  CompileBroker::_last_compile_type     = no_compile;
-int  CompileBroker::_last_compile_level    = CompLevel_none;
-char CompileBroker::_last_method_compiled[CompileBroker::name_buffer_length];
-
 // Performance counters
 PerfCounter* CompileBroker::_perf_total_compilation = NULL;
 PerfCounter* CompileBroker::_perf_osr_compilation = NULL;
@@ -577,8 +572,6 @@
 //
 // Initialize the Compilation object
 void CompileBroker::compilation_init_phase1(TRAPS) {
-  _last_method_compiled[0] = '\0';
-
   // No need to initialize compilation system if we do not use it.
   if (!UseCompiler) {
     return;
@@ -2032,8 +2025,10 @@
     // Look up matching directives
     directive = DirectivesStack::getMatchingDirective(method, comp);
 
-    // Save information about this method in case of failure.
-    set_last_compile(thread, method, is_osr, task_level);
+    // Update compile information when using perfdata.
+    if (UsePerfData) {
+      update_compile_perf_data(thread, method, is_osr);
+    }
 
     DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
   }
@@ -2264,58 +2259,49 @@
 }
 
 // ------------------------------------------------------------------
-// CompileBroker::set_last_compile
+// CompileBroker::update_compile_perf_data
 //
 // Record this compilation for debugging purposes.
-void CompileBroker::set_last_compile(CompilerThread* thread, const methodHandle& method, bool is_osr, int comp_level) {
+void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) {
   ResourceMark rm;
   char* method_name = method->name()->as_C_string();
-  strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length);
-  _last_method_compiled[CompileBroker::name_buffer_length - 1] = '\0'; // ensure null terminated
   char current_method[CompilerCounters::cmname_buffer_length];
   size_t maxLen = CompilerCounters::cmname_buffer_length;
 
-  if (UsePerfData) {
-    const char* class_name = method->method_holder()->name()->as_C_string();
+  const char* class_name = method->method_holder()->name()->as_C_string();
 
-    size_t s1len = strlen(class_name);
-    size_t s2len = strlen(method_name);
+  size_t s1len = strlen(class_name);
+  size_t s2len = strlen(method_name);
 
-    // check if we need to truncate the string
-    if (s1len + s2len + 2 > maxLen) {
+  // check if we need to truncate the string
+  if (s1len + s2len + 2 > maxLen) {
 
-      // the strategy is to lop off the leading characters of the
-      // class name and the trailing characters of the method name.
+    // the strategy is to lop off the leading characters of the
+    // class name and the trailing characters of the method name.
 
-      if (s2len + 2 > maxLen) {
-        // lop of the entire class name string, let snprintf handle
-        // truncation of the method name.
-        class_name += s1len; // null string
-      }
-      else {
-        // lop off the extra characters from the front of the class name
-        class_name += ((s1len + s2len + 2) - maxLen);
-      }
+    if (s2len + 2 > maxLen) {
+      // lop off the entire class name string, let snprintf handle
+      // truncation of the method name.
+      class_name += s1len; // null string
     }
-
-    jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
+    else {
+      // lop off the extra characters from the front of the class name
+      class_name += ((s1len + s2len + 2) - maxLen);
+    }
   }
 
+  jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
+
+  int last_compile_type = normal_compile;
   if (CICountOSR && is_osr) {
-    _last_compile_type = osr_compile;
-  } else {
-    _last_compile_type = normal_compile;
+    last_compile_type = osr_compile;
   }
-  _last_compile_level = comp_level;
 
-  if (UsePerfData) {
-    CompilerCounters* counters = thread->counters();
-    counters->set_current_method(current_method);
-    counters->set_compile_type((jlong)_last_compile_type);
-  }
+  CompilerCounters* counters = thread->counters();
+  counters->set_current_method(current_method);
+  counters->set_compile_type((jlong) last_compile_type);
 }
 
-
 // ------------------------------------------------------------------
 // CompileBroker::push_jni_handle_block
 //
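
To make the truncation arithmetic above concrete: with a 16-byte buffer, a 29-character class name and a 3-character method name overflow the budget by 18, so the class name loses its first 18 characters and the result fills the buffer exactly. A standalone rendering (plain snprintf instead of jio_snprintf; the names are made up):

    #include <cstdio>
    #include <cstring>

    int main() {
      char buf[16];                                               // maxLen == 16
      const char* class_name  = "com/example/VeryLongClassName";  // 29 chars
      const char* method_name = "run";                            //  3 chars
      size_t maxLen = sizeof(buf);
      size_t s1len = strlen(class_name);
      size_t s2len = strlen(method_name);
      if (s1len + s2len + 2 > maxLen) {              // 29 + 3 + 2 = 34 > 16
        class_name += (s1len + s2len + 2) - maxLen;  // drop 18 leading chars
      }
      snprintf(buf, maxLen, "%s %s", class_name, method_name);
      printf("%s\n", buf);                           // prints "ngClassName run"
      return 0;
    }
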
@@ -2618,21 +2604,6 @@
   tty->print_cr("  nmethod total size        : %8d bytes", nmethods_size);
 }
 
-// Debugging output for failure
-void CompileBroker::print_last_compile() {
-  if (_last_compile_level != CompLevel_none &&
-      compiler(_last_compile_level) != NULL &&
-      _last_compile_type != no_compile) {
-    if (_last_compile_type == osr_compile) {
-      tty->print_cr("Last parse:  [osr]%d+++(%d) %s",
-                    _osr_compilation_id, _last_compile_level, _last_method_compiled);
-    } else {
-      tty->print_cr("Last parse:  %d+++(%d) %s",
-                    _compilation_id, _last_compile_level, _last_method_compiled);
-    }
-  }
-}
-
 // Print general/accumulated JIT information.
 void CompileBroker::print_info(outputStream *out) {
   if (out == NULL) out = tty;
--- a/src/hotspot/share/compiler/compileBroker.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -173,10 +173,6 @@
   static volatile jint _compilation_id;
   static volatile jint _osr_compilation_id;
 
-  static int  _last_compile_type;
-  static int  _last_compile_level;
-  static char _last_method_compiled[name_buffer_length];
-
   static CompileQueue* _c2_compile_queue;
   static CompileQueue* _c1_compile_queue;
 
@@ -254,7 +250,8 @@
   static void invoke_compiler_on_method(CompileTask* task);
   static void post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env,
                            int compilable, const char* failure_reason);
-  static void set_last_compile(CompilerThread *thread, const methodHandle& method, bool is_osr, int comp_level);
+  static void update_compile_perf_data(CompilerThread *thread, const methodHandle& method, bool is_osr);
+
   static void push_jni_handle_block();
   static void pop_jni_handle_block();
   static void collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task);
@@ -382,9 +379,6 @@
   // Print a detailed accounting of compilation time
   static void print_times(bool per_compiler = true, bool aggregate = true);
 
-  // Debugging output for failure
-  static void print_last_compile();
-
   // compiler name for debugging
   static const char* compiler_name(int comp_level);
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -154,6 +154,11 @@
   reset_from_card_cache(start_idx, num_regions);
 }
 
+Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
+  Ticks start = Ticks::now();
+  workers()->run_task(task, workers()->active_workers());
+  return Ticks::now() - start;
+}
 
 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                              MemRegion mr) {
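
run_task() deliberately times only the WorkGang execution, so a caller that also wants to charge scope teardown (for example the G1RootProcessor destructor) measures the enclosing scope and subtracts the task's own time, as evacuate_initial_collection_set() does later in this patch. A self-contained sketch of the pattern using std::chrono in place of Ticks/Tickspan (run_timed and the callback are illustrative):

    #include <chrono>

    using Clock = std::chrono::steady_clock;

    static Clock::duration run_timed(void (*task)()) {
      Clock::time_point start = Clock::now();
      task();                              // the work being attributed
      return Clock::now() - start;
    }

    static Clock::duration scope_overhead(void (*task)()) {
      Clock::time_point start_processing = Clock::now();
      Clock::duration task_time;
      {
        // resources whose destructors should not count as task time
        task_time = run_timed(task);
      }                                    // destructors run here
      return (Clock::now() - start_processing) - task_time;
    }
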
@@ -2242,12 +2247,12 @@
   _hrm->par_iterate(cl, hrclaimer, 0);
 }
 
-void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
+void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
   _collection_set.iterate(cl);
 }
 
-void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
-  _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
+void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, uint worker_id) {
+  _collection_set.iterate_incremental_part_from(cl, worker_id, workers()->active_workers());
 }
 
 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
@@ -2484,7 +2489,7 @@
 
 void G1CollectedHeap::print_cset_rsets() {
   PrintRSetsClosure cl("Printing CSet RSets");
-  collection_set_iterate(&cl);
+  collection_set_iterate_all(&cl);
 }
 
 void G1CollectedHeap::print_all_rsets() {
@@ -2495,8 +2500,8 @@
 
 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
 
-  size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
-  size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
+  size_t eden_used_bytes = _eden.used_bytes();
+  size_t survivor_used_bytes = _survivor.used_bytes();
   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
 
   size_t eden_capacity_bytes =
@@ -2880,15 +2885,18 @@
   phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
 }
 
-void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms){
-  policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
-  evacuation_info.set_collectionset_regions(collection_set()->region_length());
+void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
+
+  _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
+  evacuation_info.set_collectionset_regions(collection_set()->region_length() +
+                                            collection_set()->optional_region_length());
 
   _cm->verify_no_collection_set_oops();
 
   if (_hr_printer.is_active()) {
     G1PrintCollectionSetClosure cl(&_hr_printer);
     _collection_set.iterate(&cl);
+    _collection_set.iterate_optional(&cl);
   }
 }
 
@@ -3060,9 +3068,10 @@
         pre_evacuate_collection_set(evacuation_info);
 
         // Actually do the work...
-        evacuate_collection_set(&per_thread_states);
-        evacuate_optional_collection_set(&per_thread_states);
-
+        evacuate_initial_collection_set(&per_thread_states);
+        if (_collection_set.optional_region_length() != 0) {
+          evacuate_optional_collection_set(&per_thread_states);
+        }
         post_evacuate_collection_set(evacuation_info, &per_thread_states);
 
         start_new_collection_set();
@@ -3088,7 +3097,8 @@
 
         double sample_end_time_sec = os::elapsedTime();
         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-        size_t total_cards_scanned = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
+        size_t total_cards_scanned = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards) +
+                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::ScanRSScannedCards);
         policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
       }
 
@@ -3192,86 +3202,6 @@
   } while (!offer_termination());
 }
 
-class G1ParTask : public AbstractGangTask {
-protected:
-  G1CollectedHeap*         _g1h;
-  G1ParScanThreadStateSet* _pss;
-  RefToScanQueueSet*       _queues;
-  G1RootProcessor*         _root_processor;
-  TaskTerminator           _terminator;
-  uint                     _n_workers;
-
-public:
-  G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
-    : AbstractGangTask("G1 collection"),
-      _g1h(g1h),
-      _pss(per_thread_states),
-      _queues(task_queues),
-      _root_processor(root_processor),
-      _terminator(n_workers, _queues),
-      _n_workers(n_workers)
-  {}
-
-  void work(uint worker_id) {
-    if (worker_id >= _n_workers) return;  // no work needed this round
-
-    double start_sec = os::elapsedTime();
-    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
-
-    {
-      ResourceMark rm;
-      HandleMark   hm;
-
-      ReferenceProcessor*             rp = _g1h->ref_processor_stw();
-
-      G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
-      pss->set_ref_discoverer(rp);
-
-      double start_strong_roots_sec = os::elapsedTime();
-
-      _root_processor->evacuate_roots(pss, worker_id);
-
-      _g1h->rem_set()->oops_into_collection_set_do(pss, worker_id);
-
-      double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
-
-      double term_sec = 0.0;
-      size_t evac_term_attempts = 0;
-      {
-        double start = os::elapsedTime();
-        G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, _terminator.terminator(), G1GCPhaseTimes::ObjCopy);
-        evac.do_void();
-
-        evac_term_attempts = evac.term_attempts();
-        term_sec = evac.term_time();
-        double elapsed_sec = os::elapsedTime() - start;
-
-        G1GCPhaseTimes* p = _g1h->phase_times();
-        p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
-
-        p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
-                                          worker_id,
-                                          pss->lab_waste_words() * HeapWordSize,
-                                          G1GCPhaseTimes::ObjCopyLABWaste);
-        p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
-                                          worker_id,
-                                          pss->lab_undo_waste_words() * HeapWordSize,
-                                          G1GCPhaseTimes::ObjCopyLABUndoWaste);
-
-        p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
-        p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
-      }
-
-      assert(pss->queue_is_empty(), "should be empty");
-
-      // Close the inner scope so that the ResourceMark and HandleMark
-      // destructors are executed here and are included as part of the
-      // "GC Worker Time".
-    }
-    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
-  }
-};
-
 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
                                         bool class_unloading_occurred) {
   uint num_workers = workers()->active_workers();
@@ -3675,176 +3605,196 @@
     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
     phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
   }
-}
-
-void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {
+
   // Should G1EvacuationFailureALot be in effect for this GC?
   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
 
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
-
-  double start_par_time_sec = os::elapsedTime();
-  double end_par_time_sec;
-
-  {
-    const uint n_workers = workers()->active_workers();
-    G1RootProcessor root_processor(this, n_workers);
-    G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
-
-    workers()->run_task(&g1_par_task);
-    end_par_time_sec = os::elapsedTime();
-
-    // Closing the inner scope will execute the destructor
-    // for the G1RootProcessor object. We record the current
-    // elapsed time before closing the scope so that time
-    // taken for the destructor is NOT included in the
-    // reported parallel time.
-  }
-
-  double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
-  phase_times()->record_par_time(par_time_ms);
-
-  double code_root_fixup_time_ms =
-        (os::elapsedTime() - end_par_time_sec) * 1000.0;
-  phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
 }
 
-class G1EvacuateOptionalRegionTask : public AbstractGangTask {
+class G1EvacuateRegionsBaseTask : public AbstractGangTask {
+protected:
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _per_thread_states;
-  G1OptionalCSet* _optional;
-  RefToScanQueueSet* _queues;
-  ParallelTaskTerminator _terminator;
-
-  Tickspan trim_ticks(G1ParScanThreadState* pss) {
-    Tickspan copy_time = pss->trim_ticks();
-    pss->reset_trim_ticks();
-    return copy_time;
+  RefToScanQueueSet* _task_queues;
+  TaskTerminator _terminator;
+  uint _num_workers;
+
+  void evacuate_live_objects(G1ParScanThreadState* pss,
+                             uint worker_id,
+                             G1GCPhaseTimes::GCParPhases objcopy_phase,
+                             G1GCPhaseTimes::GCParPhases termination_phase) {
+    G1GCPhaseTimes* p = _g1h->phase_times();
+
+    Ticks start = Ticks::now();
+    G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, _terminator.terminator(), objcopy_phase);
+    cl.do_void();
+
+    assert(pss->queue_is_empty(), "should be empty");
+
+    Tickspan evac_time = (Ticks::now() - start);
+    p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
+
+    p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABWaste);
+    p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_undo_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABUndoWaste);
+
+    if (termination_phase == G1GCPhaseTimes::Termination) {
+      p->record_time_secs(termination_phase, worker_id, cl.term_time());
+      p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
+    } else {
+      p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
+      p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
+    }
+    assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation");
   }
 
-  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
-    G1EvacuationRootClosures* root_cls = pss->closures();
-    G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);
-
-    size_t scanned = 0;
-    size_t claimed = 0;
-    size_t skipped = 0;
-    size_t used_memory = 0;
-
-    Ticks    start = Ticks::now();
-    Tickspan copy_time;
-
-    for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
-      HeapRegion* hr = _optional->region_at(i);
-      G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
-      pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
-      copy_time += trim_ticks(pss);
-
-      G1ScanRSForRegionClosure scan_rs_cl(_g1h->rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
-      scan_rs_cl.do_heap_region(hr);
-      copy_time += trim_ticks(pss);
-      scanned += scan_rs_cl.cards_scanned();
-      claimed += scan_rs_cl.cards_claimed();
-      skipped += scan_rs_cl.cards_skipped();
-
-      // Chunk lists for this region is no longer needed.
-      used_memory += pss->oops_into_optional_region(hr)->used_memory();
+  virtual void start_work(uint worker_id) { }
+
+  virtual void end_work(uint worker_id) { }
+
+  virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
+
+  virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
+
+public:
+  G1EvacuateRegionsBaseTask(const char* name, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet* task_queues, uint num_workers) :
+    AbstractGangTask(name),
+    _g1h(G1CollectedHeap::heap()),
+    _per_thread_states(per_thread_states),
+    _task_queues(task_queues),
+    _terminator(num_workers, _task_queues),
+    _num_workers(num_workers)
+  { }
+
+  void work(uint worker_id) {
+    start_work(worker_id);
+
+    {
+      ResourceMark rm;
+      HandleMark   hm;
+
+      G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
+      pss->set_ref_discoverer(_g1h->ref_processor_stw());
+
+      scan_roots(pss, worker_id);
+      evacuate_live_objects(pss, worker_id);
     }
 
-    Tickspan scan_time = (Ticks::now() - start) - copy_time;
-    G1GCPhaseTimes* p = _g1h->phase_times();
-    p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
-    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());
-
-    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
-    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
-    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
-    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
-  }
-
-  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
-    Ticks start = Ticks::now();
-    G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
-    cl.do_void();
-
-    Tickspan evac_time = (Ticks::now() - start);
-    G1GCPhaseTimes* p = _g1h->phase_times();
-    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
-    assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
-  }
-
- public:
-  G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
-                               G1ParScanThreadStateSet* per_thread_states,
-                               G1OptionalCSet* cset,
-                               RefToScanQueueSet* queues,
-                               uint n_workers) :
-    AbstractGangTask("G1 Evacuation Optional Region Task"),
-    _g1h(g1h),
-    _per_thread_states(per_thread_states),
-    _optional(cset),
-    _queues(queues),
-    _terminator(n_workers, _queues) {
-  }
-
-  void work(uint worker_id) {
-    ResourceMark rm;
-    HandleMark  hm;
-
-    G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
-    pss->set_ref_discoverer(_g1h->ref_processor_stw());
-
-    scan_roots(pss, worker_id);
-    evacuate_live_objects(pss, worker_id);
+    end_work(worker_id);
   }
 };
 
-void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
-  class G1MarkScope : public MarkScope {};
-  G1MarkScope code_mark_scope;
-
-  G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
-  workers()->run_task(&task);
+class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
+  G1RootProcessor* _root_processor;
+
+  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
+    _root_processor->evacuate_roots(pss, worker_id);
+    _g1h->rem_set()->update_rem_set(pss, worker_id);
+    _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::CodeRoots);
+  }
+
+  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
+    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
+  }
+
+  void start_work(uint worker_id) {
+    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
+  }
+
+  void end_work(uint worker_id) {
+    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
+  }
+
+public:
+  G1EvacuateRegionsTask(G1CollectedHeap* g1h,
+                        G1ParScanThreadStateSet* per_thread_states,
+                        RefToScanQueueSet* task_queues,
+                        G1RootProcessor* root_processor,
+                        uint num_workers) :
+    G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
+    _root_processor(root_processor)
+  { }
+};
+
+void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
+  Tickspan task_time;
+  const uint num_workers = workers()->active_workers();
+
+  Ticks start_processing = Ticks::now();
+  {
+    G1RootProcessor root_processor(this, num_workers);
+    G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
+    task_time = run_task(&g1_par_task);
+    // Closing the inner scope will execute the destructor for the G1RootProcessor object.
+    // To extract its code root fixup time we measure the total time of this
+    // scope and subtract the WorkGang task's own time from it.
+  }
+  Tickspan total_processing = Ticks::now() - start_processing;
+
+  G1GCPhaseTimes* p = phase_times();
+  p->record_initial_evac_time(task_time.seconds() * 1000.0);
+  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
 }
 
+class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
+
+  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
+    _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptCodeRoots);
+  }
+
+  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
+    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
+  }
+
+public:
+  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
+                                RefToScanQueueSet* queues,
+                                uint num_workers) :
+    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
+  }
+};
+
+void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
+  class G1MarkScope : public MarkScope { };
+
+  Tickspan task_time;
+
+  Ticks start_processing = Ticks::now();
+  {
+    G1MarkScope code_mark_scope;
+    G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
+    task_time = run_task(&task);
+    // See the comment in evacuate_initial_collection_set() for the reason for this scope.
+  }
+  Tickspan total_processing = Ticks::now() - start_processing;
+
+  G1GCPhaseTimes* p = phase_times();
+  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
+}
+
 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
-  G1OptionalCSet optional_cset(&_collection_set, per_thread_states);
-  if (optional_cset.is_empty()) {
-    return;
-  }
-
-  if (evacuation_failed()) {
-    return;
-  }
-
   const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
 
-  double start_time_sec = os::elapsedTime();
-
-  do {
+  Ticks start = Ticks::now();
+
+  while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
+
     double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
     double time_left_ms = MaxGCPauseMillis - time_used_ms;
 
-    if (time_left_ms < 0) {
-      log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
+    if (time_left_ms < 0 ||
+        !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
+      log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
+                                _collection_set.optional_region_length(), time_left_ms);
       break;
     }
 
-    optional_cset.prepare_evacuation(time_left_ms * _policy->optional_evacuation_fraction());
-    if (optional_cset.prepare_failed()) {
-      log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
-      break;
-    }
-
-    evacuate_optional_regions(per_thread_states, &optional_cset);
-
-    optional_cset.complete_evacuation();
-    if (optional_cset.evacuation_failed()) {
-      break;
-    }
-  } while (!optional_cset.is_empty());
-
-  phase_times()->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
+    evacuate_next_optional_regions(per_thread_states);
+  }
+
+  _collection_set.abandon_optional_collection_set(per_thread_states);
+
+  phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
 }
 
 void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
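
The reworked optional-evacuation driver above is a pause-budget loop: keep finalizing and evacuating optional batches while nothing has failed, regions remain, and the leftover budget, scaled by the optional-evacuation fraction, still admits another batch; whatever remains afterwards is abandoned. A self-contained sketch of that control flow (all names are illustrative stand-ins for the G1CollectionSet and G1Policy calls above):

    struct OptionalEvacDriver {
      bool   evacuation_failed = false;
      int    optional_regions  = 3;
      double max_pause_ms      = 200.0;
      double fraction          = 0.2;  // stands in for optional_evacuation_fraction()

      // Pretend a batch is only worth preparing with >10ms of budget left.
      bool finalize_next_batch(double budget_ms) { return budget_ms > 10.0; }
      void evacuate_next_batch()                 { optional_regions--; }

      void run(double (*elapsed_ms)()) {
        while (!evacuation_failed && optional_regions > 0) {
          double left_ms = max_pause_ms - elapsed_ms();
          if (left_ms < 0 || !finalize_next_batch(left_ms * fraction)) {
            break;                 // budget exhausted, or no batch fits
          }
          evacuate_next_batch();   // may set evacuation_failed
        }
        // remaining optional regions are abandoned, mirroring
        // abandon_optional_collection_set() above
      }
    };
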
@@ -4259,15 +4209,14 @@
   double free_cset_start_time = os::elapsedTime();
 
   {
-    uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
+    uint const num_regions = _collection_set.region_length();
+    uint const num_chunks = MAX2(num_regions / G1FreeCollectionSetTask::chunk_size(), 1U);
     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
 
     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
 
     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
-                        cl.name(),
-                        num_workers,
-                        _collection_set.region_length());
+                        cl.name(), num_workers, num_regions);
     workers()->run_task(&cl, num_workers);
   }
   phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
@@ -4436,7 +4385,7 @@
 
 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
   G1AbandonCollectionSetClosure cl;
-  collection_set->iterate(&cl);
+  collection_set_iterate_all(&cl);
 
   collection_set->clear();
   collection_set->stop_incremental_building();
@@ -4636,7 +4585,9 @@
 
   collection_set()->add_eden_region(alloc_region);
   increase_used(allocated_bytes);
+  _eden.add_used_bytes(allocated_bytes);
   _hr_printer.retire(alloc_region);
+
   // We update the eden sizes here, when the region is retired,
   // instead of when it's allocated, since this is the point that its
   // used space has been recorded in _summary_bytes_used.
@@ -4693,6 +4644,9 @@
   policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (dest.is_old()) {
     old_set_add(alloc_region);
+  } else {
+    assert(dest.is_young(), "Retiring alloc region should be young(%d)", dest.value());
+    _survivor.add_used_bytes(allocated_bytes);
   }
 
   bool const during_im = collector_state()->in_initial_mark_gc();
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -143,9 +143,8 @@
   // Closures used in implementation.
   friend class G1ParScanThreadState;
   friend class G1ParScanThreadStateSet;
-  friend class G1ParTask;
+  friend class G1EvacuateRegionsTask;
   friend class G1PLABAllocator;
-  friend class G1PrepareCompactClosure;
 
   // Other related classes.
   friend class HeapRegionClaimer;
@@ -206,7 +205,7 @@
 
   // Outside of GC pauses, the number of bytes used in all regions other
   // than the current allocation region(s).
-  size_t _summary_bytes_used;
+  volatile size_t _summary_bytes_used;
 
   void increase_used(size_t bytes);
   void decrease_used(size_t bytes);
@@ -519,6 +518,10 @@
 
   WorkGang* workers() const { return _workers; }
 
+  // Runs the given AbstractGangTask with the current active workers, returning the
+  // total time taken.
+  Tickspan run_task(AbstractGangTask* task);
+
   G1Allocator* allocator() {
     return _allocator;
   }
@@ -738,11 +741,14 @@
 
   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 
-  // Actually do the work of evacuating the collection set.
-  void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
+  // Actually do the work of evacuating the parts of the collection set.
+  void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
-  void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);
+private:
+  // Evacuate the next set of optional regions.
+  void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 
+public:
   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 
@@ -1165,14 +1171,14 @@
   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                           HeapRegionClaimer* hrclaimer) const;
 
-  // Iterate over the regions (if any) in the current collection set.
-  void collection_set_iterate(HeapRegionClosure* blk);
+  // Iterate over all regions currently in the current collection set.
+  void collection_set_iterate_all(HeapRegionClosure* blk);
 
-  // Iterate over the regions (if any) in the current collection set. Starts the
-  // iteration over the entire collection set so that the start regions of a given
-  // worker id over the set active_workers are evenly spread across the set of
-  // collection set regions.
-  void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
+  // Iterate over the regions in the current increment of the collection set.
+  // Starts the iteration so that the starting regions of the active_workers
+  // worker ids are evenly spread across the set of collection set regions to
+  // be iterated.
+  void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id);
 
   // Returns the HeapRegion that contains addr. addr must not be NULL.
   template <class T>
@@ -1252,6 +1258,8 @@
 
   uint eden_regions_count() const { return _eden.length(); }
   uint survivor_regions_count() const { return _survivor.length(); }
+  size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
+  size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
   uint old_regions_count() const { return _old_set.length(); }
   uint archive_regions_count() const { return _archive_set.length(); }
@@ -1420,7 +1428,7 @@
   size_t _term_attempts;
 
   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
-  void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
+  void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
 protected:
   G1CollectedHeap*              _g1h;
   G1ParScanThreadState*         _par_scan_state;
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -59,12 +59,11 @@
   _collection_set_regions(NULL),
   _collection_set_cur_length(0),
   _collection_set_max_length(0),
-  _optional_regions(NULL),
-  _optional_region_length(0),
-  _optional_region_max_length(0),
+  _num_optional_regions(0),
   _bytes_used_before(0),
   _recorded_rs_lengths(0),
   _inc_build_state(Inactive),
+  _inc_part_start(0),
   _inc_bytes_used_before(0),
   _inc_recorded_rs_lengths(0),
   _inc_recorded_rs_lengths_diffs(0),
@@ -90,8 +89,8 @@
   assert((size_t) young_region_length() == _collection_set_cur_length,
          "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
 
-  _old_region_length      = 0;
-  _optional_region_length = 0;
+  _old_region_length = 0;
+  free_optional_regions();
 }
 
 void G1CollectionSet::initialize(uint max_region_length) {
@@ -100,21 +99,8 @@
   _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
 }
 
-void G1CollectionSet::initialize_optional(uint max_length) {
-  assert(_optional_regions == NULL, "Already initialized");
-  assert(_optional_region_length == 0, "Already initialized");
-  assert(_optional_region_max_length == 0, "Already initialized");
-  _optional_region_max_length = max_length;
-  _optional_regions = NEW_C_HEAP_ARRAY(HeapRegion*, _optional_region_max_length, mtGC);
-}
-
 void G1CollectionSet::free_optional_regions() {
-  _optional_region_length = 0;
-  _optional_region_max_length = 0;
-  if (_optional_regions != NULL) {
-    FREE_C_HEAP_ARRAY(HeapRegion*, _optional_regions);
-    _optional_regions = NULL;
-  }
+  _num_optional_regions = 0;
 }
 
 void G1CollectionSet::clear_candidates() {
@@ -130,39 +116,32 @@
 void G1CollectionSet::add_old_region(HeapRegion* hr) {
   assert_at_safepoint_on_vm_thread();
 
-  assert(_inc_build_state == Active || hr->index_in_opt_cset() != G1OptionalCSet::InvalidCSetIndex,
+  assert(_inc_build_state == Active,
          "Precondition, actively building cset or adding optional later on");
   assert(hr->is_old(), "the region should be old");
 
-  assert(!hr->in_collection_set(), "should not already be in the CSet");
+  assert(!hr->in_collection_set(), "should not already be in the collection set");
   _g1h->register_old_region_with_cset(hr);
 
   _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
   assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
 
   _bytes_used_before += hr->used();
-  size_t rs_length = hr->rem_set()->occupied();
-  _recorded_rs_lengths += rs_length;
-  _old_region_length += 1;
+  _recorded_rs_lengths += hr->rem_set()->occupied();
+  _old_region_length++;
 
-  log_trace(gc, cset)("Added old region %d to collection set", hr->hrm_index());
+  _g1h->old_set_remove(hr);
 }
 
 void G1CollectionSet::add_optional_region(HeapRegion* hr) {
-  assert(!optional_is_full(), "Precondition, must have room left for this region");
   assert(hr->is_old(), "the region should be old");
   assert(!hr->in_collection_set(), "should not already be in the CSet");
 
   _g1h->register_optional_region_with_cset(hr);
 
-  _optional_regions[_optional_region_length] = hr;
-  uint index = _optional_region_length++;
-  hr->set_index_in_opt_cset(index);
-
-  log_trace(gc, cset)("Added region %d to optional collection set (%u)", hr->hrm_index(), _optional_region_length);
+  hr->set_index_in_opt_cset(_num_optional_regions++);
 }
 
-// Initialize the per-collection-set information
 void G1CollectionSet::start_incremental_building() {
   assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
   assert(_inc_build_state == Inactive, "Precondition");
@@ -173,7 +152,8 @@
   _inc_recorded_rs_lengths_diffs = 0;
   _inc_predicted_elapsed_time_ms = 0.0;
   _inc_predicted_elapsed_time_ms_diffs = 0.0;
-  _inc_build_state = Active;
+
+  update_incremental_marker();
 }
 
 void G1CollectionSet::finalize_incremental_building() {
@@ -211,29 +191,48 @@
 void G1CollectionSet::clear() {
   assert_at_safepoint_on_vm_thread();
   _collection_set_cur_length = 0;
-  _optional_region_length = 0;
 }
 
 void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
-  iterate_from(cl, 0, 1);
-}
-
-void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
   size_t len = _collection_set_cur_length;
   OrderAccess::loadload();
-  if (len == 0) {
-    return;
-  }
-  size_t start_pos = (worker_id * len) / total_workers;
-  size_t cur_pos = start_pos;
 
-  do {
-    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos]);
+  for (uint i = 0; i < len; i++) {
+    HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
     bool result = cl->do_heap_region(r);
     if (result) {
       cl->set_incomplete();
       return;
     }
+  }
+}
+
+void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
+  assert_at_safepoint();
+
+  for (uint i = 0; i < _num_optional_regions; i++) {
+    HeapRegion* r = _candidates->at(i);
+    bool result = cl->do_heap_region(r);
+    guarantee(!result, "Must not cancel iteration");
+  }
+}
+
+void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
+  assert_at_safepoint();
+
+  size_t len = _collection_set_cur_length - _inc_part_start;
+  if (len == 0) {
+    return;
+  }
+
+  size_t start_pos = (worker_id * len) / total_workers;
+  size_t cur_pos = start_pos;
+
+  do {
+    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos + _inc_part_start]);
+    bool result = cl->do_heap_region(r);
+    guarantee(!result, "Must not cancel iteration");
+
     cur_pos++;
     if (cur_pos == len) {
       cur_pos = 0;
@@ -440,30 +439,6 @@
   return time_remaining_ms;
 }
 
-void G1CollectionSet::add_as_old(HeapRegion* hr) {
-  candidates()->pop_front(); // already have region via peek()
-  _g1h->old_set_remove(hr);
-  add_old_region(hr);
-}
-
-void G1CollectionSet::add_as_optional(HeapRegion* hr) {
-  assert(_optional_regions != NULL, "Must not be called before array is allocated");
-  candidates()->pop_front(); // already have region via peek()
-  _g1h->old_set_remove(hr);
-  add_optional_region(hr);
-}
-
-bool G1CollectionSet::optional_is_full() {
-  assert(_optional_region_length <= _optional_region_max_length, "Invariant");
-  return _optional_region_length == _optional_region_max_length;
-}
-
-void G1CollectionSet::clear_optional_region(const HeapRegion* hr) {
-  assert(_optional_regions != NULL, "Must not be called before array is allocated");
-  uint index = hr->index_in_opt_cset();
-  _optional_regions[index] = NULL;
-}
-
 static int compare_region_idx(const uint a, const uint b) {
   if (a > b) {
     return 1;
@@ -476,87 +451,25 @@
 
 void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
   double non_young_start_time_sec = os::elapsedTime();
-  double predicted_old_time_ms = 0.0;
-  double predicted_optional_time_ms = 0.0;
-  double optional_threshold_ms = time_remaining_ms * _policy->optional_prediction_fraction();
-  uint expensive_region_num = 0;
 
   if (collector_state()->in_mixed_phase()) {
     candidates()->verify();
-    const uint min_old_cset_length = _policy->calc_min_old_cset_length();
-    const uint max_old_cset_length = MAX2(min_old_cset_length, _policy->calc_max_old_cset_length());
-    bool check_time_remaining = _policy->adaptive_young_list_length();
 
-    initialize_optional(max_old_cset_length - min_old_cset_length);
-    log_debug(gc, ergo, cset)("Start adding old regions for mixed gc. min %u regions, max %u regions, "
-                              "time remaining %1.2fms, optional threshold %1.2fms",
-                              min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
+    uint num_initial_old_regions;
+    uint num_optional_old_regions;
 
-    HeapRegion* hr = candidates()->peek_front();
-    while (hr != NULL) {
-      if (old_region_length() + optional_region_length() >= max_old_cset_length) {
-        // Added maximum number of old regions to the CSet.
-        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). "
-                                  "old %u regions, optional %u regions",
-                                  old_region_length(), optional_region_length());
-        break;
-      }
+    _policy->calculate_old_collection_set_regions(candidates(),
+                                                  time_remaining_ms,
+                                                  num_initial_old_regions,
+                                                  num_optional_old_regions);
 
-      // Stop adding regions if the remaining reclaimable space is
-      // not above G1HeapWastePercent.
-      size_t reclaimable_bytes = candidates()->remaining_reclaimable_bytes();
-      double reclaimable_percent = _policy->reclaimable_bytes_percent(reclaimable_bytes);
-      double threshold = (double) G1HeapWastePercent;
-      if (reclaimable_percent <= threshold) {
-        // We've added enough old regions that the amount of uncollected
-        // reclaimable space is at or below the waste threshold. Stop
-        // adding old regions to the CSet.
-        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
-                                  "reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
-                                  byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
-                                  reclaimable_percent, G1HeapWastePercent);
-        break;
-      }
+    // Prepare initial old regions.
+    move_candidates_to_collection_set(num_initial_old_regions);
 
-      double predicted_time_ms = predict_region_elapsed_time_ms(hr);
-      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
-      // Add regions to old set until we reach minimum amount
-      if (old_region_length() < min_old_cset_length) {
-        predicted_old_time_ms += predicted_time_ms;
-        add_as_old(hr);
-        // Record the number of regions added when no time remaining
-        if (time_remaining_ms == 0.0) {
-          expensive_region_num++;
-        }
-      } else {
-        // In the non-auto-tuning case, we'll finish adding regions
-        // to the CSet if we reach the minimum.
-        if (!check_time_remaining) {
-          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min).");
-          break;
-        }
-        // Keep adding regions to old set until we reach optional threshold
-        if (time_remaining_ms > optional_threshold_ms) {
-          predicted_old_time_ms += predicted_time_ms;
-          add_as_old(hr);
-        } else if (time_remaining_ms > 0) {
-          // Keep adding optional regions until time is up
-          if (!optional_is_full()) {
-            predicted_optional_time_ms += predicted_time_ms;
-            add_as_optional(hr);
-          } else {
-            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (optional set full).");
-            break;
-          }
-        } else {
-          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high).");
-          break;
-        }
-      }
-      hr = candidates()->peek_front();
-    }
-    if (hr == NULL) {
-      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
+    // Prepare optional old regions for evacuation.
+    uint candidate_idx = candidates()->cur_idx();
+    for (uint i = 0; i < num_optional_old_regions; i++) {
+      add_optional_region(candidates()->at(candidate_idx + i));
     }
 
     candidates()->verify();
@@ -564,99 +477,59 @@
 
   stop_incremental_building();
 
-  log_debug(gc, ergo, cset)("Finish choosing CSet regions old: %u, optional: %u, "
-                            "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
-                            old_region_length(), optional_region_length(),
-                            predicted_old_time_ms, predicted_optional_time_ms, time_remaining_ms);
-  if (expensive_region_num > 0) {
-    log_debug(gc, ergo, cset)("CSet contains %u old regions that were added although the predicted time was too high.",
-                              expensive_region_num);
-  }
-
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
 
   QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
 }
 
-HeapRegion* G1OptionalCSet::region_at(uint index) {
-  return _cset->optional_region_at(index);
+void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
+  if (num_old_candidate_regions == 0) {
+    return;
+  }
+  uint candidate_idx = candidates()->cur_idx();
+  for (uint i = 0; i < num_old_candidate_regions; i++) {
+    HeapRegion* r = candidates()->at(candidate_idx + i);
+    // This potentially optional candidate region is going to be an actual collection
+    // set region. Clear cset marker.
+    _g1h->clear_in_cset(r);
+    add_old_region(r);
+  }
+  candidates()->remove(num_old_candidate_regions);
+
+  candidates()->verify();
 }
 
-void G1OptionalCSet::prepare_evacuation(double time_limit) {
-  assert(_current_index == _current_limit, "Before prepare no regions should be ready for evac");
-
-  uint prepared_regions = 0;
-  double prediction_ms = 0;
-
-  _prepare_failed = true;
-  for (uint i = _current_index; i < _cset->optional_region_length(); i++) {
-    HeapRegion* hr = region_at(i);
-    prediction_ms += _cset->predict_region_elapsed_time_ms(hr);
-    if (prediction_ms > time_limit) {
-      log_debug(gc, cset)("Prepared %u regions for optional evacuation. Predicted time: %.3fms", prepared_regions, prediction_ms);
-      return;
-    }
-
-    // This region will be included in the next optional evacuation.
-    prepare_to_evacuate_optional_region(hr);
-    prepared_regions++;
-    _current_limit++;
-    _prepare_failed = false;
-  }
-
-  log_debug(gc, cset)("Prepared all %u regions for optional evacuation. Predicted time: %.3fms",
-                      prepared_regions, prediction_ms);
+void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
+  double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
+  finalize_old_part(time_remaining_ms);
 }
 
-bool G1OptionalCSet::prepare_failed() {
-  return _prepare_failed;
+bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
+  update_incremental_marker();
+
+  uint num_selected_regions;
+  _policy->calculate_optional_collection_set_regions(candidates(),
+                                                     _num_optional_regions,
+                                                     remaining_pause_time,
+                                                     num_selected_regions);
+
+  move_candidates_to_collection_set(num_selected_regions);
+
+  _num_optional_regions -= num_selected_regions;
+
+  stop_incremental_building();
+  return num_selected_regions > 0;
 }
 
-void G1OptionalCSet::complete_evacuation() {
-  _evacuation_failed = false;
-  for (uint i = _current_index; i < _current_limit; i++) {
-    HeapRegion* hr = region_at(i);
-    _cset->clear_optional_region(hr);
-    if (hr->evacuation_failed()){
-      _evacuation_failed = true;
-    }
+void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
+  for (uint i = 0; i < _num_optional_regions; i++) {
+    HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
+    pss->record_unused_optional_region(r);
+    _g1h->clear_in_cset(r);
+    r->clear_index_in_opt_cset();
   }
-  _current_index = _current_limit;
-}
-
-bool G1OptionalCSet::evacuation_failed() {
-  return _evacuation_failed;
-}
-
-G1OptionalCSet::~G1OptionalCSet() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  while (!is_empty()) {
-    // We want to return regions not evacuated to the collection set candidates
-    // in reverse order to maintain the old order.
-    HeapRegion* hr = _cset->remove_last_optional_region();
-    assert(hr != NULL, "Should be valid region left");
-    _pset->record_unused_optional_region(hr);
-    g1h->old_set_add(hr);
-    g1h->clear_in_cset(hr);
-    hr->set_index_in_opt_cset(InvalidCSetIndex);
-    _cset->candidates()->push_front(hr);
-  }
-  _cset->free_optional_regions();
-}
-
-uint G1OptionalCSet::size() {
-  return _cset->optional_region_length() - _current_index;
-}
-
-bool G1OptionalCSet::is_empty() {
-  return size() == 0;
-}
-
-void G1OptionalCSet::prepare_to_evacuate_optional_region(HeapRegion* hr) {
-  log_trace(gc, cset)("Adding region %u for optional evacuation", hr->hrm_index());
-  G1CollectedHeap::heap()->clear_in_cset(hr);
-  _cset->add_old_region(hr);
+  free_optional_regions();
 }
 
 #ifdef ASSERT
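
For illustration, a minimal standalone C++ sketch of the striped iteration in
iterate_incremental_part_from() above: every worker walks the whole increment,
but starts at an offset spread evenly by worker id and wraps around, so workers
rarely begin on the same region (actual claiming of a region is left to the
closure). This is not HotSpot code; iterate_part and the region array are made
up for the example.

#include <cstddef>
#include <cstdio>

static void iterate_part(const unsigned* regions, size_t part_start,
                         size_t part_len, unsigned worker_id, unsigned total_workers) {
  if (part_len == 0) {
    return;
  }
  size_t start_pos = ((size_t)worker_id * part_len) / total_workers;
  size_t cur_pos = start_pos;
  do {
    printf("worker %u visits region %u\n", worker_id, regions[part_start + cur_pos]);
    cur_pos++;
    if (cur_pos == part_len) {
      cur_pos = 0; // wrap around to the beginning of the increment
    }
  } while (cur_pos != start_pos);
}

int main() {
  const unsigned regions[] = { 10, 11, 12, 13, 14, 15 };
  for (unsigned w = 0; w < 2; w++) {
    iterate_part(regions, 2, 4, w, 2); // iterate the increment [2, 6)
  }
  return 0;
}
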
--- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -38,11 +38,101 @@
 class HeapRegion;
 class HeapRegionClosure;
 
+// The collection set.
+//
+// The set of regions that are evacuated during an evacuation pause.
+//
+// At the end of a collection, before freeing the collection set, this set
+// contains all regions that were evacuated during this collection:
+//
+// - survivor regions from the last collection (if any)
+// - eden regions allocated by the mutator
+// - old gen regions evacuated during mixed gc
+//
+// This set is built incrementally at mutator time as regions are retired. If
+// this is a mixed gc, additional old regions from the collection set
+// candidates built during the concurrent cycle are incrementally added
+// during the gc.
+//
+// A more detailed overview of how the collection set changes over time follows:
+//
+// 0) at the end of GC the survivor regions are added to this collection set.
+// 1) the mutator incrementally adds eden regions as they retire
+//
+// ----- gc starts
+//
+// 2) prepare (finalize) young regions of the collection set for collection
+//    - relabel the survivors as eden
+//    - finish up the incremental building that happened at mutator time
+//
+// iff this is a young-only collection:
+//
+// a3) evacuate the current collection set in one "initial evacuation" phase
+//
+// iff this is a mixed collection:
+//
+// b3) calculate the set of old gen regions we may be able to collect in this
+//     collection from the list of collection set candidates.
+//     - one part is added to the current collection set
+//     - the remaining regions are labeled as optional, and NOT yet added to the
+//     collection set.
+// b4) evacuate the current collection set in the "initial evacuation" phase
+// b5) evacuate the optional regions in the "optional evacuation" phase. This is
+//     done in increments (or rounds).
+//     b5-1) add a few of the optional regions to the current collection set
+//     b5-2) evacuate only these newly added optional regions. For this mechanism we
+//     reuse the incremental collection set building infrastructure (used also at
+//     mutator time).
+//     b5-3) repeat from b5-1 until the policy determines we are done
+//
+// all collections:
+//
+// 6) free the collection set (contains all regions now; empties collection set
+//    afterwards)
+// 7) add survivors to this collection set
+//
+// ----- gc ends
+//
+// goto 1)
+//
+// Examples of how the collection set might look over time:
+//
+// Legend:
+// S = survivor, E = eden, O = old.
+// |xxxx| = increment (with increment markers), containing four regions
+//
+// |SSSS|                         ... after step 0), with four survivor regions
+// |SSSSEE|                       ... at step 1), after retiring two eden regions
+// |SSSSEEEE|                     ... after step 1), after retiring four eden regions
+// |EEEEEEEE|                     ... after step 2)
+//
+// iff this is a young-only collection
+//
+// EEEEEEEE||                      ... after step a3), after initial evacuation phase
+// ||                              ... after step 6)
+// |SS|                            ... after step 7), with two survivor regions
+//
+// iff this is a mixed collection
+//
+// |EEEEEEEEOOOO|                  ... after step b3), added four regions to be
+//                                     evacuated in the "initial evacuation" phase
+// EEEEEEEEOOOO||                  ... after step b4), incremental part is empty
+//                                     after evacuation
+// EEEEEEEEOOOO|OO|                ... after step b5-1), added two regions to be
+//                                     evacuated in the first round of the
+//                                     "optional evacuation" phase
+// EEEEEEEEOOOOOO|O|               ... after step b5-1), added one region to be
+//                                     evacuated in the second round of the
+//                                     "optional evacuation" phase
+// EEEEEEEEOOOOOOO||               ... after step b5), the complete collection set.
+// ||                              ... after step 6)
+// |SSS|                           ... after step 7), with three survivor regions
+//
 class G1CollectionSet {
   G1CollectedHeap* _g1h;
   G1Policy* _policy;
 
-  // All old gen collection set candidate regions for the current mixed gc phase.
+  // All old gen collection set candidate regions for the current mixed phase.
   G1CollectionSetCandidates* _candidates;
 
   uint _eden_region_length;
@@ -51,7 +141,7 @@
 
   // The actual collection set as a set of region indices.
   // All entries in _collection_set_regions below _collection_set_cur_length are
-  // assumed to be valid entries.
+  // assumed to be part of the collection set.
   // We assume that at any time there is at most only one writer and (one or more)
   // concurrent readers. This means we are good with using storestore and loadload
   // barriers on the writer and reader respectively only.
@@ -59,31 +149,33 @@
   volatile size_t _collection_set_cur_length;
   size_t _collection_set_max_length;
 
-  // When doing mixed collections we can add old regions to the collection, which
-  // can be collected if there is enough time. We call these optional regions and
-  // the pointer to these regions are stored in the array below.
-  HeapRegion** _optional_regions;
-  uint _optional_region_length;
-  uint _optional_region_max_length;
+  // When doing mixed collections we can add old regions to the collection set, which
+  // will be collected only if there is enough time. We call these optional regions.
+  // This member records the current number of such optional regions, which
+  // correspond to the first entries of the remaining collection set candidates.
+  uint _num_optional_regions;
 
   // The number of bytes in the collection set before the pause. Set from
   // the incrementally built collection set at the start of an evacuation
-  // pause, and incremented in finalize_old_part() when adding old regions
-  // (if any) to the collection set.
+  // pause, and updated as more regions are added to the collection set.
   size_t _bytes_used_before;
 
+  // The number of cards in the remembered set in the collection set. Set from
+  // the incrementally built collection set at the start of an evacuation
+  // pause, and updated as more regions are added to the collection set.
   size_t _recorded_rs_lengths;
 
-  // The associated information that is maintained while the incremental
-  // collection set is being built with young regions. Used to populate
-  // the recorded info for the evacuation pause.
-
   enum CSetBuildType {
     Active,             // We are actively building the collection set
     Inactive            // We are not actively building the collection set
   };
 
   CSetBuildType _inc_build_state;
+  size_t _inc_part_start;
+
+  // The associated information that is maintained while the incremental
+  // collection set is being built with *young* regions. Used to populate
+  // the recorded info for the evacuation pause.
 
   // The number of bytes in the incrementally built collection set.
   // Used to set _collection_set_bytes_used_before at the start of
@@ -113,22 +205,44 @@
   // See the comment for _inc_recorded_rs_lengths_diffs.
   double _inc_predicted_elapsed_time_ms_diffs;
 
+  void set_recorded_rs_lengths(size_t rs_lengths);
+
   G1CollectorState* collector_state();
   G1GCPhaseTimes* phase_times();
 
   void verify_young_cset_indices() const NOT_DEBUG_RETURN;
-  void add_as_optional(HeapRegion* hr);
-  void add_as_old(HeapRegion* hr);
-  bool optional_is_full();
 
+  double predict_region_elapsed_time_ms(HeapRegion* hr);
+
+  // Update the incremental collection set information when adding a region.
+  void add_young_region_common(HeapRegion* hr);
+
+  // Add old region "hr" to the collection set.
+  void add_old_region(HeapRegion* hr);
+  void free_optional_regions();
+
+  // Add old region "hr" to optional collection set.
+  void add_optional_region(HeapRegion* hr);
+
+  void move_candidates_to_collection_set(uint num_regions);
+
+  // Finalize the young part of the initial collection set. Relabel survivor regions
+  // as Eden and calculate a prediction on how long the evacuation of all young regions
+  // will take.
+  double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
+  // Perform any final calculations on the incremental collection set fields before we
+  // can use them.
+  void finalize_incremental_building();
+
+  // Select the old regions of the initial collection set and determine how many optional
+  // regions we might be able to evacuate in this pause.
+  void finalize_old_part(double time_remaining_ms);
 public:
   G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
   ~G1CollectionSet();
 
   // Initializes the collection set giving the maximum possible length of the collection set.
   void initialize(uint max_region_length);
-  void initialize_optional(uint max_length);
-  void free_optional_regions();
 
   void clear_candidates();
 
@@ -141,8 +255,6 @@
   void init_region_lengths(uint eden_cset_region_length,
                            uint survivor_cset_region_length);
 
-  void set_recorded_rs_lengths(size_t rs_lengths);
-
   uint region_length() const       { return young_region_length() +
                                             old_region_length(); }
   uint young_region_length() const { return eden_region_length() +
@@ -151,32 +263,29 @@
   uint eden_region_length() const     { return _eden_region_length;     }
   uint survivor_region_length() const { return _survivor_region_length; }
   uint old_region_length() const      { return _old_region_length;      }
-  uint optional_region_length() const { return _optional_region_length; }
+  uint optional_region_length() const { return _num_optional_regions; }
+
+  // Reset the contents of the collection set.
+  void clear();
 
   // Incremental collection set support
 
   // Initialize incremental collection set info.
   void start_incremental_building();
+  // Start a new collection set increment.
+  void update_incremental_marker() { _inc_build_state = Active; _inc_part_start = _collection_set_cur_length; }
+  // Stop adding regions to the current collection set increment.
+  void stop_incremental_building() { _inc_build_state = Inactive; }
 
-  // Perform any final calculations on the incremental collection set fields
-  // before we can use them.
-  void finalize_incremental_building();
+  // Iterate over the current collection set increment applying the given HeapRegionClosure
+  // from a starting position determined by the given worker id.
+  void iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
 
-  // Reset the contents of the collection set.
-  void clear();
-
-  // Iterate over the collection set, applying the given HeapRegionClosure on all of them.
-  // If may_be_aborted is true, iteration may be aborted using the return value of the
-  // called closure method.
+  // Iterate over the entire collection set (all increments calculated so far), applying
+  // the given HeapRegionClosure on all of them.
   void iterate(HeapRegionClosure* cl) const;
 
-  // Iterate over the collection set, applying the given HeapRegionClosure on all of them,
-  // trying to optimally spread out starting position of total_workers workers given the
-  // caller's worker_id.
-  void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
-
-  // Stop adding regions to the incremental collection set.
-  void stop_incremental_building() { _inc_build_state = Inactive; }
+  void iterate_optional(HeapRegionClosure* cl) const;
 
   size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
 
@@ -188,16 +297,14 @@
     _bytes_used_before = 0;
   }
 
-  // Choose a new collection set.  Marks the chosen regions as being
-  // "in_collection_set".
-  double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
-  void finalize_old_part(double time_remaining_ms);
-
-  // Add old region "hr" to the collection set.
-  void add_old_region(HeapRegion* hr);
-
-  // Add old region "hr" to optional collection set.
-  void add_optional_region(HeapRegion* hr);
+  // Finalize the initial collection set consisting of all young regions and
+  // potentially a few old gen regions.
+  void finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
+  // Finalize the next collection set from the set of available optional old gen regions.
+  bool finalize_optional_for_evacuation(double remaining_pause_time);
+  // Abandon (clean up) optional collection set regions that were not evacuated in this
+  // pause.
+  void abandon_optional_collection_set(G1ParScanThreadStateSet* pss);
 
   // Update information about hr in the aggregated information for
   // the incrementally built collection set.
@@ -214,73 +321,6 @@
 
   void print(outputStream* st);
 #endif // !PRODUCT
-
-  double predict_region_elapsed_time_ms(HeapRegion* hr);
-
-  void clear_optional_region(const HeapRegion* hr);
-
-  HeapRegion* optional_region_at(uint i) const {
-    assert(_optional_regions != NULL, "Not yet initialized");
-    assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
-    return _optional_regions[i];
-  }
-
-  HeapRegion* remove_last_optional_region() {
-    assert(_optional_regions != NULL, "Not yet initialized");
-    assert(_optional_region_length != 0, "No region to remove");
-    _optional_region_length--;
-    HeapRegion* removed = _optional_regions[_optional_region_length];
-    _optional_regions[_optional_region_length] = NULL;
-    return removed;
-  }
-
-private:
-  // Update the incremental collection set information when adding a region.
-  void add_young_region_common(HeapRegion* hr);
-};
-
-// Helper class to manage the optional regions in a Mixed collection.
-class G1OptionalCSet : public StackObj {
-private:
-  G1CollectionSet* _cset;
-  G1ParScanThreadStateSet* _pset;
-  uint _current_index;
-  uint _current_limit;
-  bool _prepare_failed;
-  bool _evacuation_failed;
-
-  void prepare_to_evacuate_optional_region(HeapRegion* hr);
-
-public:
-  static const uint InvalidCSetIndex = UINT_MAX;
-
-  G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
-    _cset(cset),
-    _pset(pset),
-    _current_index(0),
-    _current_limit(0),
-    _prepare_failed(false),
-    _evacuation_failed(false) { }
-  // The destructor returns regions to the collection set candidates set and
-  // frees the optional structure in the collection set.
-  ~G1OptionalCSet();
-
-  uint current_index() { return _current_index; }
-  uint current_limit() { return _current_limit; }
-
-  uint size();
-  bool is_empty();
-
-  HeapRegion* region_at(uint index);
-
-  // Prepare a set of regions for optional evacuation.
-  void prepare_evacuation(double time_left_ms);
-  bool prepare_failed();
-
-  // Complete the evacuation of the previously prepared
-  // regions by updating their state and check for failures.
-  void complete_evacuation();
-  bool evacuation_failed();
 };
 
 #endif // SHARE_GC_G1_G1COLLECTIONSET_HPP
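
For illustration, a minimal standalone C++ sketch of the increment bookkeeping
described in the class comment above: the collection set is a single growing
array, and a part-start marker records where the current increment begins, so
each optional round only evacuates the regions added since the marker was last
moved. This is not HotSpot code; IncrementalSet and its members are made up for
the example.

#include <cstddef>
#include <cstdio>
#include <vector>

struct IncrementalSet {
  std::vector<unsigned> regions;  // all collection set regions so far
  size_t part_start = 0;          // first index of the current increment

  // Corresponds to update_incremental_marker() in the change above.
  void start_new_increment() { part_start = regions.size(); }
  void add(unsigned region)  { regions.push_back(region); }

  void evacuate_current_increment() const {
    for (size_t i = part_start; i < regions.size(); i++) {
      printf("evacuate region %u\n", regions[i]);
    }
  }
};

int main() {
  IncrementalSet cset;
  cset.add(1); cset.add(2);           // initial collection set
  cset.evacuate_current_increment();  // initial evacuation: regions 1 and 2

  cset.start_new_increment();
  cset.add(7); cset.add(8);           // first optional round
  cset.evacuate_current_increment();  // optional evacuation: regions 7 and 8 only
  return 0;
}
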
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -27,26 +27,12 @@
 #include "gc/g1/g1CollectionSetChooser.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 
-HeapRegion* G1CollectionSetCandidates::pop_front() {
-  assert(_front_idx < _num_regions, "pre-condition");
-  HeapRegion* hr = _regions[_front_idx];
-  assert(hr != NULL, "pre-condition");
-  _regions[_front_idx] = NULL;
-  assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
-         "Remaining reclaimable bytes inconsistent "
-         "from region: " SIZE_FORMAT " remaining: " SIZE_FORMAT,
-         hr->reclaimable_bytes(), _remaining_reclaimable_bytes);
-  _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
-  _front_idx++;
-  return hr;
-}
-
-void G1CollectionSetCandidates::push_front(HeapRegion* hr) {
-  assert(hr != NULL, "Can't put back a NULL region");
-  assert(_front_idx >= 1, "Too many regions have been put back.");
-  _front_idx--;
-  _regions[_front_idx] = hr;
-  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
+void G1CollectionSetCandidates::remove(uint num_regions) {
+  assert(num_regions <= num_remaining(), "Trying to remove more regions (%u) than available (%u)", num_regions, num_remaining());
+  for (uint i = 0; i < num_regions; i++) {
+    _remaining_reclaimable_bytes -= at(_front_idx)->reclaimable_bytes();
+    _front_idx++;
+  }
 }
 
 void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
@@ -62,13 +48,8 @@
 #ifndef PRODUCT
 void G1CollectionSetCandidates::verify() const {
   guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions);
-  uint idx = 0;
+  uint idx = _front_idx;
   size_t sum_of_reclaimable_bytes = 0;
-  while (idx < _front_idx) {
-    guarantee(_regions[idx] == NULL, "All entries before _front_idx %u should be NULL, but %u is not",
-              _front_idx, idx);
-    idx++;
-  }
   HeapRegion *prev = NULL;
   for (; idx < _num_regions; idx++) {
     HeapRegion *cur = _regions[idx];
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -63,22 +63,18 @@
   // Returns the total number of collection set candidate old regions added.
   uint num_regions() { return _num_regions; }
 
-  // Return the candidate region at the cursor position to be considered for collection without
-  // removing it.
-  HeapRegion* peek_front() {
+  uint cur_idx() const { return _front_idx; }
+
+  HeapRegion* at(uint idx) const {
     HeapRegion* res = NULL;
-    if (_front_idx < _num_regions) {
-      res = _regions[_front_idx];
-      assert(res != NULL, "Unexpected NULL HeapRegion at index %u", _front_idx);
+    if (idx < _num_regions) {
+      res = _regions[idx];
+      assert(res != NULL, "Unexpected NULL HeapRegion at index %u", idx);
     }
     return res;
   }
 
-  // Remove the given region from the candidates set and move the cursor to the next one.
-  HeapRegion* pop_front();
-
-  // Add the given HeapRegion to the front of the collection set candidate set again.
-  void push_front(HeapRegion* hr);
+  void remove(uint num_regions);
 
   // Iterate over all remaining collection set candidate regions.
   void iterate(HeapRegionClosure* cl);
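
For illustration, a minimal standalone C++ sketch of the cursor-based candidate
list that replaces pop_front()/push_front() above: regions are never physically
removed, only a front index advances, so optional regions that end up not being
evacuated simply stay in place for the next mixed gc instead of being pushed
back. This is not HotSpot code; Candidates and its members are made up for the
example.

#include <cstdio>
#include <vector>

struct Candidates {
  std::vector<unsigned> regions;  // sorted by decreasing gc efficiency
  unsigned front = 0;             // cur_idx(): first not-yet-collected candidate

  unsigned at(unsigned idx) const { return regions[idx]; }
  unsigned num_remaining() const  { return (unsigned)regions.size() - front; }
  void remove(unsigned n)         { front += n; } // consume the first n remaining
};

int main() {
  Candidates c{ { 5, 9, 3, 11 } };
  c.remove(2);  // regions 5 and 9 have been moved to the collection set
  printf("next candidate: %u, remaining: %u\n", c.at(c.front), c.num_remaining());
  return 0;
}
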
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1940,9 +1940,10 @@
     guarantee(oopDesc::is_oop(task_entry.obj()),
               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
               p2i(task_entry.obj()), _phase, _info);
-    guarantee(!_g1h->is_in_cset(task_entry.obj()),
-              "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
-              p2i(task_entry.obj()), _phase, _info);
+    HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
+    guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
+              "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
+              p2i(task_entry.obj()), _phase, _info, r->hrm_index());
   }
 };
 
@@ -1979,11 +1980,11 @@
     HeapWord* task_finger = task->finger();
     if (task_finger != NULL && task_finger < _heap.end()) {
       // See above note on the global finger verification.
-      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
-      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
-                !task_hr->in_collection_set(),
+      HeapRegion* r = _g1h->heap_region_containing(task_finger);
+      guarantee(r == NULL || task_finger == r->bottom() ||
+                !r->in_collection_set() || !r->has_index_in_opt_cset(),
                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
-                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
+                p2i(task_finger), HR_FORMAT_PARAMS(r));
     }
   }
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -127,7 +127,7 @@
 }
 
 void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
-  if (g1_policy->adaptive_young_list_length()) {
+  if (g1_policy->use_adaptive_young_list_length()) {
     jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
     if (!_cm->has_aborted() && sleep_time_ms > 0) {
       os::sleep(this, sleep_time_ms, false);
--- a/src/hotspot/share/gc/g1/g1EdenRegions.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1EdenRegions.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -31,19 +31,28 @@
 
 class G1EdenRegions {
 private:
-  int _length;
+  int    _length;
+  // Sum of used bytes from all retired eden regions.
+  // I.e. updated when mutator regions are retired.
+  volatile size_t _used_bytes;
 
 public:
-  G1EdenRegions() : _length(0) {}
+  G1EdenRegions() : _length(0), _used_bytes(0) { }
 
   void add(HeapRegion* hr) {
     assert(!hr->is_eden(), "should not already be set");
     _length++;
   }
 
-  void clear() { _length = 0; }
+  void clear() { _length = 0; _used_bytes = 0; }
 
   uint length() const { return _length; }
+
+  size_t used_bytes() const { return _used_bytes; }
+
+  void add_used_bytes(size_t used_bytes) {
+    _used_bytes += used_bytes;
+  }
 };
 
 #endif // SHARE_GC_G1_G1EDENREGIONS_HPP
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -228,6 +228,8 @@
 
     if (_hrclaimer->claim_region(hr->hrm_index())) {
       if (hr->evacuation_failed()) {
+        hr->clear_index_in_opt_cset();
+
         bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc();
         bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
 
@@ -257,5 +259,5 @@
 void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
   RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);
 
-  _g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
+  _g1h->collection_set_iterate_increment_from(&rsfp_cl, worker_id);
 }
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -76,10 +76,12 @@
   }
   _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
   _gc_par_phases[OptScanRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan RS (ms):");
-  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
+  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scan (ms):");
+  _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Optional Code Root Scan (ms):");
   _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
   _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
   _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
+  _gc_par_phases[OptTermination] = new WorkerDataArray<double>(max_gc_threads, "Optional Termination (ms):");
   _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
   _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
   _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");
@@ -91,14 +93,16 @@
   _scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
   _gc_par_phases[ScanRS]->link_thread_work_items(_scan_rs_skipped_cards, ScanRSSkippedCards);
 
-  _opt_cset_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
-  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_scanned_cards, OptCSetScannedCards);
-  _opt_cset_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
-  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_claimed_cards, OptCSetClaimedCards);
-  _opt_cset_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
-  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_skipped_cards, OptCSetSkippedCards);
-  _opt_cset_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
-  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_used_memory, OptCSetUsedMemory);
+  _opt_scan_rs_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
+  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_scanned_cards, ScanRSScannedCards);
+  _opt_scan_rs_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
+  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_claimed_cards, ScanRSClaimedCards);
+  _opt_scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
+  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_skipped_cards, ScanRSSkippedCards);
+  _opt_scan_rs_scanned_opt_refs = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Refs:");
+  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_scanned_opt_refs, ScanRSScannedOptRefs);
+  _opt_scan_rs_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
+  _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_used_memory, ScanRSUsedMemory);
 
   _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
   _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers, UpdateRSProcessedBuffers);
@@ -112,9 +116,17 @@
   _obj_copy_lab_undo_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
   _gc_par_phases[ObjCopy]->link_thread_work_items(_obj_copy_lab_undo_waste, ObjCopyLABUndoWaste);
 
+  _opt_obj_copy_lab_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Waste");
+  _gc_par_phases[OptObjCopy]->link_thread_work_items(_opt_obj_copy_lab_waste, ObjCopyLABWaste);
+  _opt_obj_copy_lab_undo_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
+  _gc_par_phases[OptObjCopy]->link_thread_work_items(_opt_obj_copy_lab_undo_waste, ObjCopyLABUndoWaste);
+
   _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
   _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
 
+  _opt_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Optional Termination Attempts:");
+  _gc_par_phases[OptTermination]->link_thread_work_items(_opt_termination_attempts);
+
   if (UseStringDeduplication) {
     _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
     _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
@@ -134,7 +146,7 @@
 }
 
 void G1GCPhaseTimes::reset() {
-  _cur_collection_par_time_ms = 0.0;
+  _cur_collection_initial_evac_time_ms = 0.0;
   _cur_optional_evac_ms = 0.0;
   _cur_collection_code_root_fixup_time_ms = 0.0;
   _cur_strong_code_root_purge_time_ms = 0.0;
@@ -251,6 +263,10 @@
   }
 }
 
+double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_i) {
+  return _gc_par_phases[phase]->get(worker_i);
+}
+
 void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
   _gc_par_phases[phase]->set_thread_work_item(worker_i, count, index);
 }
@@ -259,6 +275,10 @@
   _gc_par_phases[phase]->set_or_add_thread_work_item(worker_i, count, index);
 }
 
+size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i, uint index) {
+  return _gc_par_phases[phase]->get_thread_work_item(worker_i, index);
+}
+
 // return the average time for a phase in milliseconds
 double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
   return _gc_par_phases[phase]->average() * 1000.0;
@@ -374,12 +394,14 @@
     info_time("Evacuate Optional Collection Set", sum_ms);
     debug_phase(_gc_par_phases[OptScanRS]);
     debug_phase(_gc_par_phases[OptObjCopy]);
+    debug_phase(_gc_par_phases[OptCodeRoots]);
+    debug_phase(_gc_par_phases[OptTermination]);
   }
   return sum_ms;
 }
 
 double G1GCPhaseTimes::print_evacuate_collection_set() const {
-  const double sum_ms = _cur_collection_par_time_ms;
+  const double sum_ms = _cur_collection_initial_evac_time_ms;
 
   info_time("Evacuate Collection Set", sum_ms);
 
@@ -517,9 +539,11 @@
       "ScanRS",
       "OptScanRS",
       "CodeRoots",
+      "OptCodeRoots",
       "ObjCopy",
       "OptObjCopy",
       "Termination",
+      "OptTermination",
       "Other",
       "GCWorkerTotal",
       "GCWorkerEnd",
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -67,9 +67,11 @@
     ScanRS,
     OptScanRS,
     CodeRoots,
+    OptCodeRoots,
     ObjCopy,
     OptObjCopy,
     Termination,
+    OptTermination,
     Other,
     GCWorkerTotal,
     GCWorkerEnd,
@@ -87,7 +89,9 @@
   enum GCScanRSWorkItems {
     ScanRSScannedCards,
     ScanRSClaimedCards,
-    ScanRSSkippedCards
+    ScanRSSkippedCards,
+    ScanRSScannedOptRefs,
+    ScanRSUsedMemory
   };
 
   enum GCUpdateRSWorkItems {
@@ -101,13 +105,6 @@
     ObjCopyLABUndoWaste
   };
 
-  enum GCOptCSetWorkItems {
-      OptCSetScannedCards,
-      OptCSetClaimedCards,
-      OptCSetSkippedCards,
-      OptCSetUsedMemory
-  };
-
  private:
   // Markers for grouping the phases in the GCPhases enum above
   static const int GCMainParPhasesLast = GCWorkerEnd;
@@ -122,19 +119,25 @@
   WorkerDataArray<size_t>* _scan_rs_claimed_cards;
   WorkerDataArray<size_t>* _scan_rs_skipped_cards;
 
+  WorkerDataArray<size_t>* _opt_scan_rs_scanned_cards;
+  WorkerDataArray<size_t>* _opt_scan_rs_claimed_cards;
+  WorkerDataArray<size_t>* _opt_scan_rs_skipped_cards;
+  WorkerDataArray<size_t>* _opt_scan_rs_scanned_opt_refs;
+  WorkerDataArray<size_t>* _opt_scan_rs_used_memory;
+
   WorkerDataArray<size_t>* _obj_copy_lab_waste;
   WorkerDataArray<size_t>* _obj_copy_lab_undo_waste;
 
-  WorkerDataArray<size_t>* _opt_cset_scanned_cards;
-  WorkerDataArray<size_t>* _opt_cset_claimed_cards;
-  WorkerDataArray<size_t>* _opt_cset_skipped_cards;
-  WorkerDataArray<size_t>* _opt_cset_used_memory;
+  WorkerDataArray<size_t>* _opt_obj_copy_lab_waste;
+  WorkerDataArray<size_t>* _opt_obj_copy_lab_undo_waste;
 
   WorkerDataArray<size_t>* _termination_attempts;
 
+  WorkerDataArray<size_t>* _opt_termination_attempts;
+
   WorkerDataArray<size_t>* _redirtied_cards;
 
-  double _cur_collection_par_time_ms;
+  double _cur_collection_initial_evac_time_ms;
   double _cur_optional_evac_ms;
   double _cur_collection_code_root_fixup_time_ms;
   double _cur_strong_code_root_purge_time_ms;
@@ -225,10 +228,14 @@
 
   void record_or_add_time_secs(GCParPhases phase, uint worker_i, double secs);
 
+  double get_time_secs(GCParPhases phase, uint worker_i);
+
   void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
 
   void record_or_add_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
 
+  size_t get_thread_work_item(GCParPhases phase, uint worker_i, uint index = 0);
+
   // return the average time for a phase in milliseconds
   double average_time_ms(GCParPhases phase);
 
@@ -256,16 +263,16 @@
     _cur_expand_heap_time_ms = ms;
   }
 
-  void record_par_time(double ms) {
-    _cur_collection_par_time_ms = ms;
+  void record_initial_evac_time(double ms) {
+    _cur_collection_initial_evac_time_ms = ms;
   }
 
-  void record_optional_evacuation(double ms) {
-    _cur_optional_evac_ms = ms;
+  void record_or_add_optional_evac_time(double ms) {
+    _cur_optional_evac_ms += ms;
   }
 
-  void record_code_root_fixup_time(double ms) {
-    _cur_collection_code_root_fixup_time_ms = ms;
+  void record_or_add_code_root_fixup_time(double ms) {
+    _cur_collection_code_root_fixup_time_ms += ms;
   }
 
   void record_strong_code_root_purge_time(double ms) {
@@ -360,7 +367,7 @@
   }
 
   double cur_collection_par_time_ms() {
-    return _cur_collection_par_time_ms;
+    return _cur_collection_initial_evac_time_ms;
   }
 
   double cur_clear_ct_time_ms() {
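
For illustration, a minimal standalone C++ sketch of why several recorders above
change from plain "record" (assign) to "record_or_add" (accumulate): optional
evacuation now runs in multiple rounds within one pause, and each round must add
its time to the total instead of overwriting the previous rounds. This is not
HotSpot code; PhaseTimes is made up for the example.

#include <cstdio>

struct PhaseTimes {
  double optional_evac_ms = 0.0;
  void reset() { optional_evac_ms = 0.0; }  // done once per pause
  void record_or_add_optional_evac_time(double ms) { optional_evac_ms += ms; }
};

int main() {
  PhaseTimes pt;
  pt.reset();
  pt.record_or_add_optional_evac_time(2.5);  // first optional round
  pt.record_or_add_optional_evac_time(1.5);  // second optional round
  printf("%.1f ms\n", pt.optional_evac_ms);  // 4.0 ms for the whole pause
  return 0;
}
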
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -371,6 +371,7 @@
   }
 
   bool do_heap_region(HeapRegion* r) {
+    guarantee(!r->has_index_in_opt_cset(), "Region %u still has opt collection set index %u", r->hrm_index(), r->index_in_opt_cset());
     guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // Humongous and old regions might be of any state, so can't check here.
     guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -228,23 +228,25 @@
   MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
   // Recalculate all the sizes from scratch.
 
-  uint young_list_length = _g1h->young_regions_count();
+  // This never includes the used bytes of the current allocating heap region.
+  _overall_used = _g1h->used_unlocked();
+  _eden_space_used = _g1h->eden_regions_used_bytes();
+  _survivor_space_used = _g1h->survivor_regions_used_bytes();
+
+  // _overall_used and _eden_space_used are obtained concurrently, so they
+  // may be inconsistent with each other. To prevent _old_gen_used from going
+  // negative, subtract the smaller value.
+  _old_gen_used = _overall_used - MIN2(_overall_used, _eden_space_used + _survivor_space_used);
+
   uint survivor_list_length = _g1h->survivor_regions_count();
-  assert(young_list_length >= survivor_list_length, "invariant");
-  uint eden_list_length = young_list_length - survivor_list_length;
   // Max length includes any potential extensions to the young gen
   // we'll do when the GC locker is active.
   uint young_list_max_length = _g1h->policy()->young_list_max_length();
   assert(young_list_max_length >= survivor_list_length, "invariant");
   uint eden_list_max_length = young_list_max_length - survivor_list_length;
 
-  _overall_used = _g1h->used_unlocked();
-  _eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
-  _survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
-  _old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used);
-
   // First calculate the committed sizes that can be calculated independently.
-  _survivor_space_committed = _survivor_space_used;
+  _survivor_space_committed = survivor_list_length * HeapRegion::GrainBytes;
   _old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);
 
   // Next, start with the overall committed size.
@@ -274,11 +276,15 @@
   // Somewhat defensive: cap the eden used size to make sure it
   // never exceeds the committed size.
   _eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
-  // _survivor_committed and _old_committed are calculated in terms of
-  // the corresponding _*_used value, so the next two conditions
-  // should hold.
-  assert(_survivor_space_used <= _survivor_space_committed, "post-condition");
-  assert(_old_gen_used <= _old_gen_committed, "post-condition");
+  // _survivor_space_used is calculated during a safepoint and _survivor_space_committed
+  // is calculated from survivor region count * heap region size.
+  assert(_survivor_space_used <= _survivor_space_committed, "Survivor used bytes(" SIZE_FORMAT
+         ") should be less than or equal to survivor committed(" SIZE_FORMAT ")",
+         _survivor_space_used, _survivor_space_committed);
+  // _old_gen_committed is calculated in terms of _old_gen_used value.
+  assert(_old_gen_used <= _old_gen_committed, "Old gen used bytes(" SIZE_FORMAT
+         ") should be less than or equal to old gen committed(" SIZE_FORMAT ")",
+         _old_gen_used, _old_gen_committed);
 }
 
 void G1MonitoringSupport::update_sizes() {
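The MIN2-based clamp above replaces the removed subtract_up_to_zero helper; both guard an unsigned subtraction whose operands were sampled concurrently and may therefore be mutually inconsistent. A minimal standalone sketch of the pattern (illustrative C++, not HotSpot code):

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t old_gen_used(size_t overall_used, size_t young_used) {
  // Equivalent to the removed subtract_up_to_zero(overall_used, young_used):
  // if young_used exceeds overall_used due to a racy sample, the result is 0
  // instead of a huge wrapped-around size_t.
  return overall_used - std::min(overall_used, young_used);
}

int main() {
  std::printf("%zu\n", old_gen_used(100, 30));  // 70
  std::printf("%zu\n", old_gen_used(100, 130)); // 0, not ~SIZE_MAX
  return 0;
}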
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -174,21 +174,6 @@
 
   size_t _old_gen_used;
 
-  // It returns x - y if x > y, 0 otherwise.
-  // As described in the comment above, some of the inputs to the
-  // calculations we have to do are obtained concurrently and hence
-  // may be inconsistent with each other. So, this provides a
-  // defensive way of performing the subtraction and avoids the value
-  // going negative (which would mean a very large result, given that
-  // the parameter are size_t).
-  static size_t subtract_up_to_zero(size_t x, size_t y) {
-    if (x > y) {
-      return x - y;
-    } else {
-      return 0;
-    }
-  }
-
   // Recalculate all the sizes.
   void recalculate_sizes();
 
--- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@
   delete_list(_coops);
 }
 
-void G1OopStarChunkedList::oops_do(OopClosure* obj_cl, OopClosure* root_cl) {
-  chunks_do(_roots, root_cl);
-  chunks_do(_croots, root_cl);
-  chunks_do(_oops, obj_cl);
-  chunks_do(_coops, obj_cl);
+size_t G1OopStarChunkedList::oops_do(OopClosure* obj_cl, OopClosure* root_cl) {
+  size_t result = 0;
+  result += chunks_do(_roots, root_cl);
+  result += chunks_do(_croots, root_cl);
+  result += chunks_do(_oops, obj_cl);
+  result += chunks_do(_coops, obj_cl);
+  return result;
 }
--- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,8 +41,8 @@
   template <typename T> void delete_list(ChunkedList<T*, mtGC>* c);
 
   template <typename T>
-  void chunks_do(ChunkedList<T*, mtGC>* head,
-                 OopClosure* cl);
+  size_t chunks_do(ChunkedList<T*, mtGC>* head,
+                   OopClosure* cl);
 
   template <typename T>
   inline void push(ChunkedList<T*, mtGC>** field, T* p);
@@ -53,7 +53,7 @@
 
   size_t used_memory() { return _used_memory; }
 
-  void oops_do(OopClosure* obj_cl, OopClosure* root_cl);
+  size_t oops_do(OopClosure* obj_cl, OopClosure* root_cl);
 
   inline void push_oop(oop* p);
   inline void push_oop(narrowOop* p);
--- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -72,13 +72,16 @@
 }
 
 template <typename T>
-void G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
+size_t G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
+  size_t result = 0;
   for (ChunkedList<T*, mtGC>* c = head; c != NULL; c = c->next_used()) {
+    result += c->size();
     for (size_t i = 0; i < c->size(); i++) {
       T* p = c->at(i);
       cl->do_oop(p);
     }
   }
+  return result;
 }
 
 #endif // SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_INLINE_HPP
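The reworked traversal above returns the number of visited slots so callers can report how many optional references were scanned. A simplified standalone sketch of the same counting pattern (the types here are stand-ins, not the HotSpot ChunkedList):

#include <cstddef>
#include <vector>

struct Chunk {
  std::vector<int*> slots;
  Chunk* next = nullptr;
};

template <typename Visitor>
size_t chunks_do(Chunk* head, Visitor visit) {
  size_t visited = 0;
  for (Chunk* c = head; c != nullptr; c = c->next) {
    visited += c->slots.size();  // count every slot in this chunk
    for (int* p : c->slots) {
      visit(p);                  // apply the closure to each slot
    }
  }
  return visited;                // callers sum this across their lists
}

int main() {
  int a = 1, b = 2;
  Chunk c2{{&b}, nullptr};
  Chunk c1{{&a}, &c2};
  return chunks_do(&c1, [](int* p) { *p += 10; }) == 2 ? 0 : 1;
}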
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -372,7 +372,7 @@
     }
 
     size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
-    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
+    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::ScanRSUsedMemory);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -105,7 +105,7 @@
 
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 
-  if (!adaptive_young_list_length()) {
+  if (!use_adaptive_young_list_length()) {
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
   _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
@@ -195,7 +195,7 @@
 
 uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
   uint desired_min_length = 0;
-  if (adaptive_young_list_length()) {
+  if (use_adaptive_young_list_length()) {
     if (_analytics->num_alloc_rate_ms() > 3) {
       double now_sec = os::elapsedTime();
       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
@@ -252,7 +252,7 @@
   uint desired_max_length = calculate_young_list_desired_max_length();
 
   uint young_list_target_length = 0;
-  if (adaptive_young_list_length()) {
+  if (use_adaptive_young_list_length()) {
     if (collector_state()->in_young_only_phase()) {
       young_list_target_length =
                         calculate_young_list_target_length(rs_lengths,
@@ -304,7 +304,7 @@
                                                     uint base_min_length,
                                                     uint desired_min_length,
                                                     uint desired_max_length) const {
-  assert(adaptive_young_list_length(), "pre-condition");
+  assert(use_adaptive_young_list_length(), "pre-condition");
   assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
 
   // In case some edge-condition makes the desired max length too small...
@@ -414,7 +414,7 @@
 }
 
 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
-  guarantee( adaptive_young_list_length(), "should not call this otherwise" );
+  guarantee(use_adaptive_young_list_length(), "should not call this otherwise");
 
   if (rs_lengths > _rs_lengths_prediction) {
     // add 10% to avoid having to recalculate often
@@ -430,7 +430,7 @@
 }
 
 void G1Policy::update_rs_lengths_prediction(size_t prediction) {
-  if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
+  if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
     _rs_lengths_prediction = prediction;
   }
 }
@@ -659,7 +659,11 @@
 
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
-      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
+      double avg_time_scan_rs = average_time_ms(G1GCPhaseTimes::ScanRS);
+      if (this_pause_was_young_only) {
+        avg_time_scan_rs += average_time_ms(G1GCPhaseTimes::OptScanRS);
+      }
+      cost_per_entry_ms = avg_time_scan_rs / cards_scanned;
       _analytics->report_cost_per_entry_ms(cost_per_entry_ms, this_pause_was_young_only);
     }
 
@@ -694,7 +698,7 @@
     double cost_per_byte_ms = 0.0;
 
     if (copied_bytes > 0) {
-      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
+      cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / (double) copied_bytes;
       _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
     }
 
@@ -906,8 +910,8 @@
   return young_list_length < young_list_max_length;
 }
 
-bool G1Policy::adaptive_young_list_length() const {
-  return _young_gen_sizer->adaptive_young_list_length();
+bool G1Policy::use_adaptive_young_list_length() const {
+  return _young_gen_sizer->use_adaptive_young_list_length();
 }
 
 size_t G1Policy::desired_survivor_size(uint max_regions) const {
@@ -1188,11 +1192,135 @@
   return (uint) result;
 }
 
-uint G1Policy::finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
-  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms, survivor);
-  _collection_set->finalize_old_part(time_remaining_ms);
+void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
+                                                    double time_remaining_ms,
+                                                    uint& num_initial_regions,
+                                                    uint& num_optional_regions) {
+  assert(candidates != NULL, "Must be");
 
-  return _collection_set->region_length();
+  num_initial_regions = 0;
+  num_optional_regions = 0;
+  uint num_expensive_regions = 0;
+
+  double predicted_initial_time_ms = 0.0;
+  double predicted_optional_time_ms = 0.0;
+
+  double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();
+
+  const uint min_old_cset_length = calc_min_old_cset_length();
+  const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
+  const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
+  bool check_time_remaining = use_adaptive_young_list_length();
+
+  uint candidate_idx = candidates->cur_idx();
+
+  log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
+                            "time remaining %1.2fms, optional threshold %1.2fms",
+                            min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
+
+  HeapRegion* hr = candidates->at(candidate_idx);
+  while (hr != NULL) {
+    if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
+      // Added maximum number of old regions to the CSet.
+      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
+                                "Initial %u regions, optional %u regions",
+                                num_initial_regions, num_optional_regions);
+      break;
+    }
+
+    // Stop adding regions if the remaining reclaimable space is
+    // not above G1HeapWastePercent.
+    size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
+    double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
+    double threshold = (double) G1HeapWastePercent;
+    if (reclaimable_percent <= threshold) {
+      // We've added enough old regions that the amount of uncollected
+      // reclaimable space is at or below the waste threshold. Stop
+      // adding old regions to the CSet.
+      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
+                                "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
+                                byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
+                                reclaimable_percent, G1HeapWastePercent);
+      break;
+    }
+
+    double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
+    time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
+    // Add regions to old set until we reach the minimum amount
+    if (num_initial_regions < min_old_cset_length) {
+      predicted_initial_time_ms += predicted_time_ms;
+      num_initial_regions++;
+      // Record the number of regions added with no time remaining
+      if (time_remaining_ms == 0.0) {
+        num_expensive_regions++;
+      }
+    } else if (!check_time_remaining) {
+      // In the non-auto-tuning case, we'll finish adding regions
+      // to the CSet if we reach the minimum.
+      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Minimum number of regions reached).");
+      break;
+    } else {
+      // Keep adding regions to old set until we reach the optional threshold
+      if (time_remaining_ms > optional_threshold_ms) {
+        predicted_initial_time_ms += predicted_time_ms;
+        num_initial_regions++;
+      } else if (time_remaining_ms > 0) {
+        // Keep adding optional regions until time is up.
+        assert(num_optional_regions < max_optional_regions, "Should not be possible.");
+        predicted_optional_time_ms += predicted_time_ms;
+        num_optional_regions++;
+      } else {
+        log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
+        break;
+      }
+    }
+    hr = candidates->at(++candidate_idx);
+  }
+  if (hr == NULL) {
+    log_debug(gc, ergo, cset)("Old candidate collection set empty.");
+  }
+
+  if (num_expensive_regions > 0) {
+    log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
+                              num_expensive_regions);
+  }
+
+  log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
+                            "predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2fms",
+                            num_initial_regions, num_optional_regions,
+                            predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
+}
+
+void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
+                                                         uint const max_optional_regions,
+                                                         double time_remaining_ms,
+                                                         uint& num_optional_regions) {
+  assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");
+
+  num_optional_regions = 0;
+  double prediction_ms = 0;
+  uint candidate_idx = candidates->cur_idx();
+
+  HeapRegion* r = candidates->at(candidate_idx);
+  while (num_optional_regions < max_optional_regions) {
+    assert(r != NULL, "Region must exist");
+    prediction_ms += predict_region_elapsed_time_ms(r, false);
+
+    if (prediction_ms > time_remaining_ms) {
+      log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
+                                prediction_ms, r->hrm_index(), time_remaining_ms);
+      break;
+    }
+    // This region will be included in the next optional evacuation.
+    // prediction_ms already accumulates the cost of every selected region and
+    // is checked against the fixed time_remaining_ms above, so no further
+    // subtraction is needed here (subtracting the running total again would
+    // double-count earlier regions).
+    num_optional_regions++;
+    r = candidates->at(++candidate_idx);
+  }
+
+  log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
+                            num_optional_regions, max_optional_regions, prediction_ms);
 }
 
 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
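Stripped of the minimum/maximum region counts and the logging, the selection loop in calculate_old_collection_set_regions reduces to a time-budget walk: candidates are taken as initial regions while the remaining time stays above the optional threshold, as optional regions while any time remains, and selection stops once the budget is exhausted. A standalone sketch with made-up predictions (illustrative only, adaptive-sizing branch only):

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical per-region cost predictions, in milliseconds.
  std::vector<double> predicted_ms = {10, 10, 10, 10, 10};
  double time_remaining_ms = 35.0;
  const double optional_threshold_ms = time_remaining_ms * 0.2; // 7ms
  unsigned num_initial = 0, num_optional = 0;

  for (double p : predicted_ms) {
    time_remaining_ms = (time_remaining_ms > p) ? time_remaining_ms - p : 0.0;
    if (time_remaining_ms > optional_threshold_ms) {
      num_initial++;   // comfortably within the pause budget
    } else if (time_remaining_ms > 0.0) {
      num_optional++;  // only slack left: evacuate if time permits
    } else {
      break;           // budget exhausted
    }
  }
  std::printf("initial=%u optional=%u\n", num_initial, num_optional); // 2, 1
}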
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -44,6 +44,7 @@
 
 class HeapRegion;
 class G1CollectionSet;
+class G1CollectionSetCandidates;
 class G1CollectionSetChooser;
 class G1IHOPControl;
 class G1Analytics;
@@ -344,7 +345,21 @@
   bool next_gc_should_be_mixed(const char* true_action_str,
                                const char* false_action_str) const;
 
-  uint finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
+  // Calculate and return the number of initial and optional old gen regions from
+  // the given collection set candidates and the remaining time.
+  void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
+                                            double time_remaining_ms,
+                                            uint& num_initial_regions,
+                                            uint& num_optional_regions);
+
+  // Calculate the number of optional regions from the given collection set candidates,
+  // the remaining time and the maximum number of these regions and return the number
+  // of actually selected regions in num_optional_regions.
+  void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
+                                                 uint const max_optional_regions,
+                                                 double time_remaining_ms,
+                                                 uint& num_optional_regions);
+
 private:
   // Set the state to start a concurrent marking cycle and clear
   // _initiate_conc_mark_if_possible because it has now been
@@ -384,7 +399,7 @@
     return _young_list_max_length;
   }
 
-  bool adaptive_young_list_length() const;
+  bool use_adaptive_young_list_length() const;
 
   void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);
 
@@ -403,11 +418,13 @@
   AgeTable _survivors_age_table;
 
   size_t desired_survivor_size(uint max_regions) const;
-public:
+
   // Fraction used when predicting how many optional regions to include in
   // the CSet. This fraction of the available time is used for optional regions,
   // the rest is used to add old regions to the normal CSet.
   double optional_prediction_fraction() { return 0.2; }
+
+public:
   // Fraction used when evacuating the optional regions. This fraction of the
   // remaining time is used to choose what regions to include in the evacuation.
   double optional_evacuation_fraction() { return 0.75; }
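As a worked example of the two fractions (values here are illustrative, not from the source): with 200ms remaining after the young part of the CSet is chosen, optional_prediction_fraction() reserves 200 * 0.2 = 40ms as the threshold below which candidates become optional rather than initial, and each optional evacuation round may then use optional_evacuation_fraction() of whatever time is left at that point, e.g. 200 * 0.75 = 150ms.

#include <cstdio>
int main() {
  double time_remaining_ms = 200.0;                        // hypothetical budget
  double optional_threshold_ms = time_remaining_ms * 0.2;  // optional_prediction_fraction
  double optional_evac_ms      = time_remaining_ms * 0.75; // optional_evacuation_fraction
  std::printf("%.0fms threshold, %.0fms per evacuation round\n",
              optional_threshold_ms, optional_evac_ms);    // 40ms, 150ms
}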
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -316,6 +316,8 @@
   _scan_state(scan_state),
   _phase(phase),
   _worker_i(worker_i),
+  _opt_refs_scanned(0),
+  _opt_refs_memory_used(0),
   _cards_scanned(0),
   _cards_claimed(0),
   _cards_skipped(0),
@@ -338,6 +340,19 @@
   _cards_scanned++;
 }
 
+void G1ScanRSForRegionClosure::scan_opt_rem_set_roots(HeapRegion* r) {
+  EventGCPhaseParallel event;
+
+  G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);
+
+  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, _pss);
+  G1ScanRSForOptionalClosure cl(&scan_cl);
+  _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops());
+  _opt_refs_memory_used += opt_rem_set_list->used_memory();
+
+  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
+}
+
 void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
   EventGCPhaseParallel event;
   uint const region_idx = r->hrm_index();
@@ -414,11 +429,16 @@
 }
 
 bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
-  assert(r->in_collection_set(),
-         "Should only be called on elements of the collection set but region %u is not.",
-         r->hrm_index());
+  assert(r->in_collection_set(), "Region %u is not in the collection set.", r->hrm_index());
   uint const region_idx = r->hrm_index();
 
+  // The individual references for the optional remembered set are per-worker, so we
+  // always need to scan them.
+  if (r->has_index_in_opt_cset()) {
+    G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
+    scan_opt_rem_set_roots(r);
+  }
+
   // Do an early out if we know we are complete.
   if (_scan_state->iter_is_complete(region_idx)) {
     return false;
@@ -437,22 +457,33 @@
   return false;
 }
 
-void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
+void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
+                            uint worker_i,
+                            G1GCPhaseTimes::GCParPhases scan_phase,
+                            G1GCPhaseTimes::GCParPhases objcopy_phase,
+                            G1GCPhaseTimes::GCParPhases coderoots_phase) {
+  assert(pss->trim_ticks().value() == 0, "Queues must have been trimmed before entering.");
+
   G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
-  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, G1GCPhaseTimes::ScanRS, worker_i);
-  _g1h->collection_set_iterate_from(&cl, worker_i);
+  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, scan_phase, worker_i);
+  _g1h->collection_set_iterate_increment_from(&cl, worker_i);
 
   G1GCPhaseTimes* p = _g1p->phase_times();
 
-  p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, cl.rem_set_root_scan_time().seconds());
-  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.rem_set_trim_partially_time().seconds());
+  p->record_or_add_time_secs(objcopy_phase, worker_i, cl.rem_set_trim_partially_time().seconds());
 
-  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
-  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
-  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
+  p->record_or_add_time_secs(scan_phase, worker_i, cl.rem_set_root_scan_time().seconds());
+  p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
+  p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
+  p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
+  // At this time we only record some metrics for the optional remembered set.
+  if (scan_phase == G1GCPhaseTimes::OptScanRS) {
+    p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanRSScannedOptRefs);
+    p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanRSUsedMemory);
+  }
 
-  p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time().seconds());
-  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.strong_code_root_trim_partially_time().seconds());
+  p->record_or_add_time_secs(coderoots_phase, worker_i, cl.strong_code_root_scan_time().seconds());
+  p->add_time_secs(objcopy_phase, worker_i, cl.strong_code_root_trim_partially_time().seconds());
 }
 
 // Closure used for updating rem sets. Only called during an evacuation pause.
@@ -514,11 +545,6 @@
   }
 }
 
-void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
-  update_rem_set(pss, worker_i);
-  scan_rem_set(pss, worker_i);;
-}
-
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
   G1BarrierSet::dirty_card_queue_set().concatenate_logs();
   _scan_state->reset();
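scan_rem_set is now parameterized on the phase identifiers, and the record_or_add_* calls let the same closure serve both the ScanRS and OptScanRS passes: a worker's first contribution to a phase sets the value, later ones add to it. A sketch of that bookkeeping pattern (the map-based store is an assumed simplification, not the G1GCPhaseTimes layout):

#include <cstdio>
#include <map>
#include <utility>

struct PhaseTimes {
  std::map<std::pair<int, int>, double> secs; // (phase, worker) -> seconds
  void record_or_add_time_secs(int phase, int worker, double s) {
    secs[{phase, worker}] += s; // value-initialized to 0.0 on first access
  }
};

int main() {
  PhaseTimes p;
  enum { ScanRS = 0, OptScanRS = 1 };
  p.record_or_add_time_secs(OptScanRS, 0, 0.010); // first optional increment
  p.record_or_add_time_secs(OptScanRS, 0, 0.005); // second increment adds
  std::printf("%.3f\n", p.secs[{OptScanRS, 0}]);  // 0.015
}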
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -60,14 +60,6 @@
 
   G1RemSetSummary _prev_period_summary;
 
-  // Scan all remembered sets of the collection set for references into the collection
-  // set.
-  void scan_rem_set(G1ParScanThreadState* pss, uint worker_i);
-
-  // Flush remaining refinement buffers for cross-region references to either evacuate references
-  // into the collection set or update the remembered set.
-  void update_rem_set(G1ParScanThreadState* pss, uint worker_i);
-
   G1CollectedHeap* _g1h;
   size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.
 
@@ -93,12 +85,19 @@
            G1HotCardCache* hot_card_cache);
   ~G1RemSet();
 
-  // Process all oops in the collection set from the cards in the refinement buffers and
-  // remembered sets using pss.
-  //
+  // Scan all remembered sets of the collection set for references into the collection
+  // set.
   // Further applies heap_region_codeblobs on the oops of the unmarked nmethods on the strong code
   // roots list for each region in the collection set.
-  void oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i);
+  void scan_rem_set(G1ParScanThreadState* pss,
+                    uint worker_i,
+                    G1GCPhaseTimes::GCParPhases scan_phase,
+                    G1GCPhaseTimes::GCParPhases objcopy_phase,
+                    G1GCPhaseTimes::GCParPhases coderoots_phase);
+
+  // Flush remaining refinement buffers for cross-region references to either evacuate references
+  // into the collection set or update the remembered set.
+  void update_rem_set(G1ParScanThreadState* pss, uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
   // call.  Must call each of these once before and after (in sequential
@@ -144,6 +143,9 @@
 
   uint   _worker_i;
 
+  size_t _opt_refs_scanned;
+  size_t _opt_refs_memory_used;
+
   size_t _cards_scanned;
   size_t _cards_claimed;
   size_t _cards_skipped;
@@ -157,6 +159,7 @@
   void claim_card(size_t card_index, const uint region_idx_for_card);
   void scan_card(MemRegion mr, uint region_idx_for_card);
 
+  void scan_opt_rem_set_roots(HeapRegion* r);
   void scan_rem_set_roots(HeapRegion* r);
   void scan_strong_code_roots(HeapRegion* r);
 public:
@@ -177,6 +180,9 @@
   size_t cards_scanned() const { return _cards_scanned; }
   size_t cards_claimed() const { return _cards_claimed; }
   size_t cards_skipped() const { return _cards_skipped; }
+
+  size_t opt_refs_scanned() const { return _opt_refs_scanned; }
+  size_t opt_refs_memory_used() const { return _opt_refs_memory_used; }
 };
 
 #endif // SHARE_GC_G1_G1REMSET_HPP
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,9 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/debug.hpp"
 
-G1SurvivorRegions::G1SurvivorRegions() : _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)) {}
+G1SurvivorRegions::G1SurvivorRegions() :
+  _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)),
+  _used_bytes(0) {}
 
 void G1SurvivorRegions::add(HeapRegion* hr) {
   assert(hr->is_survivor(), "should be flagged as survivor region");
@@ -51,5 +53,9 @@
 
 void G1SurvivorRegions::clear() {
   _regions->clear();
+  _used_bytes = 0;
 }
 
+void G1SurvivorRegions::add_used_bytes(size_t used_bytes) {
+  _used_bytes += used_bytes;
+}
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -34,6 +34,7 @@
 class G1SurvivorRegions {
 private:
   GrowableArray<HeapRegion*>* _regions;
+  volatile size_t             _used_bytes;
 
 public:
   G1SurvivorRegions();
@@ -49,6 +50,11 @@
   const GrowableArray<HeapRegion*>* regions() const {
     return _regions;
   }
+
+  // Used bytes of all survivor regions.
+  size_t used_bytes() const { return _used_bytes; }
+
+  void add_used_bytes(size_t used_bytes);
 };
 
 #endif // SHARE_GC_G1_G1SURVIVORREGIONS_HPP
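A sketch only, under the assumption that multiple GC workers may publish survivor used bytes concurrently (the HotSpot field above is merely volatile; the real update discipline depends on its callers): a std::atomic accumulator with the same reset/add/read surface.

#include <atomic>
#include <cstddef>
#include <cstdio>

class SurvivorRegions {
  std::atomic<size_t> _used_bytes{0};
public:
  void add_used_bytes(size_t n) { _used_bytes.fetch_add(n, std::memory_order_relaxed); }
  void clear()                  { _used_bytes.store(0, std::memory_order_relaxed); }
  size_t used_bytes() const     { return _used_bytes.load(std::memory_order_relaxed); }
};

int main() {
  SurvivorRegions s;
  s.add_used_bytes(4096);
  std::printf("%zu\n", s.used_bytes()); // 4096
}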
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -30,14 +30,14 @@
 #include "logging/log.hpp"
 
 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
-  _adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {
+  _use_adaptive_sizing(true), _min_desired_young_length(0), _max_desired_young_length(0) {
 
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
       log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
     } else {
       _sizer_kind = SizerNewRatio;
-      _adaptive_size = false;
+      _use_adaptive_sizing = false;
       return;
     }
   }
@@ -59,7 +59,7 @@
                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                   1U);
       _sizer_kind = SizerMaxAndNewSize;
-      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
+      _use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
     } else {
       _sizer_kind = SizerNewSizeOnly;
     }
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -77,7 +77,7 @@
 
   // False when using a fixed young generation size due to command-line options,
   // true otherwise.
-  bool _adaptive_size;
+  bool _use_adaptive_sizing;
 
   uint calculate_default_min_length(uint new_number_of_heap_regions);
   uint calculate_default_max_length(uint new_number_of_heap_regions);
@@ -104,8 +104,8 @@
     return _max_desired_young_length;
   }
 
-  bool adaptive_young_list_length() const {
-    return _adaptive_size;
+  bool use_adaptive_young_list_length() const {
+    return _use_adaptive_sizing;
   }
 
   static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -165,7 +165,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1Policy* policy = g1h->policy();
 
-  if (policy->adaptive_young_list_length()) {
+  if (policy->use_adaptive_young_list_length()) {
     G1YoungRemSetSamplingClosure cl(&sts);
 
     G1CollectionSet* g1cs = g1h->collection_set();
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -117,6 +117,7 @@
          "Should not clear heap region %u in the collection set", hrm_index());
 
   set_young_index_in_cset(-1);
+  clear_index_in_opt_cset();
   uninstall_surv_rate_group();
   set_free();
   reset_pre_dummy_top();
@@ -241,7 +242,7 @@
     _containing_set(NULL),
 #endif
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
-    _index_in_opt_cset(G1OptionalCSet::InvalidCSetIndex), _young_index_in_cset(-1),
+    _index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1),
     _surv_rate_group(NULL), _age_index(-1),
     _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
     _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -250,6 +250,8 @@
   // The calculated GC efficiency of the region.
   double _gc_efficiency;
 
+  static const uint InvalidCSetIndex = UINT_MAX;
+
   // The index in the optional regions array, if this region
   // is considered optional during a mixed collection.
   uint _index_in_opt_cset;
@@ -549,8 +551,13 @@
   void calc_gc_efficiency(void);
   double gc_efficiency() const { return _gc_efficiency;}
 
-  uint index_in_opt_cset() const { return _index_in_opt_cset; }
+  uint index_in_opt_cset() const {
+    assert(has_index_in_opt_cset(), "Opt cset index not set.");
+    return _index_in_opt_cset;
+  }
+  bool has_index_in_opt_cset() const { return _index_in_opt_cset != InvalidCSetIndex; }
   void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
+  void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
 
   int  young_index_in_cset() const { return _young_index_in_cset; }
   void set_young_index_in_cset(int index) {
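The accessor trio added above is a common sentinel-index pattern: UINT_MAX means "not in the optional CSet", reads assert validity, and clearing resets to the sentinel. A standalone sketch (stand-in class, not HeapRegion):

#include <cassert>
#include <climits>

class Region {
  static const unsigned InvalidIndex = UINT_MAX;
  unsigned _index_in_opt_cset = InvalidIndex;
public:
  bool has_index() const     { return _index_in_opt_cset != InvalidIndex; }
  unsigned index() const     { assert(has_index()); return _index_in_opt_cset; }
  void set_index(unsigned i) { _index_in_opt_cset = i; }
  void clear_index()         { _index_in_opt_cset = InvalidIndex; }
};

int main() {
  Region r;
  r.set_index(7);
  unsigned i = r.index(); // would assert if clear_index() had run first
  r.clear_index();
  return r.has_index() ? 1 : (int)(i - 7); // 0
}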
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -52,8 +52,8 @@
   nonstatic_field(HeapRegionManager, _regions,          G1HeapRegionTable)    \
   nonstatic_field(HeapRegionManager, _num_committed,    uint)                 \
                                                                               \
-  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
-  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager*)    \
+  volatile_nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)      \
+  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager*)   \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
   nonstatic_field(G1CollectedHeap, _archive_set,        HeapRegionSetBase)    \
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -351,19 +351,14 @@
   unsigned int hash = 0;
 
   if (use_java_hash()) {
-    // Get hash code from cache
-    hash = java_lang_String::hash(java_string);
-  }
-
-  if (hash == 0) {
+    if (!java_lang_String::hash_is_set(java_string)) {
+      stat->inc_hashed();
+    }
+    hash = java_lang_String::hash_code(java_string);
+  } else {
     // Compute hash
     hash = hash_code(value, latin1);
     stat->inc_hashed();
-
-    if (use_java_hash() && hash != 0) {
-      // Store hash code in cache
-      java_lang_String::set_hash(java_string, hash);
-    }
   }
 
   typeArrayOop existing_value = lookup_or_add(value, latin1, hash);
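A standalone sketch (with stand-in hashing helpers, not the real String/StringDedup API) of the restructured decision above: under the Java hash, only strings whose hash was not yet cached count toward the hashed statistic, while the alternate-hash path always recomputes and counts.

#include <cstdio>
#include <functional>
#include <string>

struct Stats { unsigned hashed = 0; void inc_hashed() { hashed++; } };

static unsigned dedup_hash(const std::string& s, bool use_java_hash,
                           bool hash_cached, Stats* stat) {
  if (use_java_hash) {
    if (!hash_cached) {
      stat->inc_hashed(); // computed and cached on this first use
    }
    return (unsigned)std::hash<std::string>{}(s); // stand-in for hash_code()
  }
  stat->inc_hashed();     // alternate hash is always recomputed
  return (unsigned)std::hash<std::string>{}(s) ^ 0x9e3779b9u;
}

int main() {
  Stats st;
  std::string s = "dup";
  dedup_hash(s, true, false, &st); // counts: hash not yet cached
  dedup_hash(s, true, true, &st);  // cached: not counted again
  std::printf("hashed=%u\n", st.hashed); // 1
}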
--- a/src/hotspot/share/gc/shared/weakProcessor.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shared/weakProcessor.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -31,22 +31,33 @@
 #include "gc/shared/weakProcessorPhaseTimes.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/iterator.hpp"
+#include "prims/resolvedMethodTable.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/macros.hpp"
 
+template <typename Container>
+class OopsDoAndReportCounts {
+public:
+  void operator()(BoolObjectClosure* is_alive, OopClosure* keep_alive, WeakProcessorPhase phase) {
+    Container::reset_dead_counter();
+
+    CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
+    WeakProcessorPhases::oop_storage(phase)->oops_do(&cl);
+
+    Container::inc_dead_counter(cl.num_dead() + cl.num_skipped());
+    Container::finish_dead_counter();
+  }
+};
+
 void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
   FOR_EACH_WEAK_PROCESSOR_PHASE(phase) {
     if (WeakProcessorPhases::is_serial(phase)) {
       WeakProcessorPhases::processor(phase)(is_alive, keep_alive);
     } else {
       if (WeakProcessorPhases::is_stringtable(phase)) {
-        StringTable::reset_dead_counter();
-
-        CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
-        WeakProcessorPhases::oop_storage(phase)->oops_do(&cl);
-
-        StringTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
-        StringTable::finish_dead_counter();
+        OopsDoAndReportCounts<StringTable>()(is_alive, keep_alive, phase);
+      } else if (WeakProcessorPhases::is_resolved_method_table(phase)) {
+        OopsDoAndReportCounts<ResolvedMethodTable>()(is_alive, keep_alive, phase);
       } else {
         WeakProcessorPhases::oop_storage(phase)->weak_oops_do(is_alive, keep_alive);
       }
@@ -104,6 +115,7 @@
     new (states++) StorageState(storage, _nworkers);
   }
   StringTable::reset_dead_counter();
+  ResolvedMethodTable::reset_dead_counter();
 }
 
 WeakProcessor::Task::Task(uint nworkers) :
@@ -134,6 +146,7 @@
     FREE_C_HEAP_ARRAY(StorageState, _storage_states);
   }
   StringTable::finish_dead_counter();
+  ResolvedMethodTable::finish_dead_counter();
 }
 
 void WeakProcessor::GangTask::work(uint worker_id) {
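OopsDoAndReportCounts relies on a compile-time "static interface": any Container type exposing the same reset/inc/finish statics can share one code path, which is how StringTable and ResolvedMethodTable are handled uniformly above. A minimal sketch of the pattern with a stand-in table type:

#include <cstdio>

struct TableA {
  static int dead;
  static void reset_dead_counter()    { dead = 0; }
  static void inc_dead_counter(int n) { dead += n; }
  static void finish_dead_counter()   { std::printf("A dead=%d\n", dead); }
};
int TableA::dead = 0;

template <typename Container>
struct ReportDead {
  void operator()(int num_dead) {
    Container::reset_dead_counter();
    Container::inc_dead_counter(num_dead); // would come from the closure
    Container::finish_dead_counter();
  }
};

int main() { ReportDead<TableA>()(3); } // prints "A dead=3"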
--- a/src/hotspot/share/gc/shared/weakProcessor.inline.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shared/weakProcessor.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -32,6 +32,7 @@
 #include "gc/shared/weakProcessorPhases.hpp"
 #include "gc/shared/weakProcessorPhaseTimes.hpp"
 #include "gc/shared/workgroup.hpp"
+#include "prims/resolvedMethodTable.hpp"
 #include "utilities/debug.hpp"
 
 class BoolObjectClosure;
@@ -115,6 +116,9 @@
       if (WeakProcessorPhases::is_stringtable(phase)) {
         StringTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
       }
+      if (WeakProcessorPhases::is_resolved_method_table(phase)) {
+        ResolvedMethodTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
+      }
     }
   }
 
--- a/src/hotspot/share/gc/shared/weakProcessorPhases.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shared/weakProcessorPhases.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -26,6 +26,7 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/weakProcessorPhases.hpp"
+#include "prims/resolvedMethodTable.hpp"
 #include "runtime/jniHandles.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
@@ -80,6 +81,7 @@
   JFR_ONLY(case jfr: return "JFR weak processing";)
   case jni: return "JNI weak processing";
   case stringtable: return "StringTable weak processing";
+  case resolved_method_table: return "ResolvedMethodTable weak processing";
   case vm: return "VM weak processing";
   default:
     ShouldNotReachHere();
@@ -101,6 +103,7 @@
   switch (phase) {
   case jni: return JNIHandles::weak_global_handles();
   case stringtable: return StringTable::weak_storage();
+  case resolved_method_table: return ResolvedMethodTable::weak_storage();
   case vm: return SystemDictionary::vm_weak_oop_storage();
   default:
     ShouldNotReachHere();
@@ -111,3 +114,7 @@
 bool WeakProcessorPhases::is_stringtable(Phase phase) {
   return phase == stringtable;
 }
+
+bool WeakProcessorPhases::is_resolved_method_table(Phase phase) {
+  return phase == resolved_method_table;
+}
--- a/src/hotspot/share/gc/shared/weakProcessorPhases.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shared/weakProcessorPhases.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -45,6 +45,7 @@
     // OopStorage phases.
     jni,
     stringtable,
+    resolved_method_table,
     vm
   };
 
@@ -68,6 +69,7 @@
   static OopStorage* oop_storage(Phase phase); // Precondition: is_oop_storage(phase)
 
   static bool is_stringtable(Phase phase);
+  static bool is_resolved_method_table(Phase phase);
 };
 
 typedef WeakProcessorPhases::Phase WeakProcessorPhase;
--- a/src/hotspot/share/gc/shared/workerDataArray.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shared/workerDataArray.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -34,7 +34,7 @@
 class WorkerDataArray  : public CHeapObj<mtGC> {
   friend class WDAPrinter;
 public:
-  static const uint MaxThreadWorkItems = 4;
+  static const uint MaxThreadWorkItems = 5;
 private:
   T*          _data;
   uint        _length;
@@ -50,6 +50,7 @@
   void set_thread_work_item(uint worker_i, size_t value, uint index = 0);
   void add_thread_work_item(uint worker_i, size_t value, uint index = 0);
   void set_or_add_thread_work_item(uint worker_i, size_t value, uint index = 0);
+  size_t get_thread_work_item(uint worker_i, uint index = 0);
 
   WorkerDataArray<size_t>* thread_work_items(uint index = 0) const {
     assert(index < MaxThreadWorkItems, "Tried to access thread work item %u max %u", index, MaxThreadWorkItems);
--- a/src/hotspot/share/gc/shared/workerDataArray.inline.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shared/workerDataArray.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -92,6 +92,13 @@
 }
 
 template <typename T>
+size_t WorkerDataArray<T>::get_thread_work_item(uint worker_i, uint index) {
+  assert(index < MaxThreadWorkItems, "Tried to access thread work item %u (max %u)", index, MaxThreadWorkItems);
+  assert(_thread_work_items[index] != NULL, "No sub count");
+  return _thread_work_items[index]->get(worker_i);
+}
+
+template <typename T>
 void WorkerDataArray<T>::add(uint worker_i, T value) {
   assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
   assert(_data[worker_i] != uninitialized(), "No data to add to for worker %d", worker_i);
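A simplified usage sketch of the per-worker sub-item counters (the fixed sizes are assumptions, not the HotSpot layout): index selects the kind of work item, worker selects the thread, and the new getter reads a previously recorded value back.

#include <cassert>
#include <cstddef>
#include <cstdio>

static const unsigned MaxThreadWorkItems = 5;
static const unsigned NumWorkers = 4;

struct WorkItems {
  size_t data[MaxThreadWorkItems][NumWorkers] = {};
  void add(unsigned worker, size_t v, unsigned index) {
    assert(index < MaxThreadWorkItems && worker < NumWorkers);
    data[index][worker] += v;
  }
  size_t get(unsigned worker, unsigned index) const {
    assert(index < MaxThreadWorkItems && worker < NumWorkers);
    return data[index][worker];
  }
};

int main() {
  WorkItems cards;
  cards.add(0, 128, 1);                  // e.g. scanned cards for worker 0
  std::printf("%zu\n", cards.get(0, 1)); // 128
}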
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -3133,6 +3133,8 @@
       case Op_CompareAndSwapI:
       case Op_CompareAndSwapB:
       case Op_CompareAndSwapS:
+      case Op_CompareAndSwapN:
+      case Op_CompareAndSwapP:
       case Op_ShenandoahCompareAndSwapN:
       case Op_ShenandoahCompareAndSwapP:
       case Op_ShenandoahWeakCompareAndSwapN:
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -72,7 +72,7 @@
   // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
   // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit.
 
-  size_t capacity    = ShenandoahHeap::heap()->capacity();
+  size_t capacity    = ShenandoahHeap::heap()->max_capacity();
   size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
   size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
   size_t max_cset    = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
@@ -123,12 +123,12 @@
 
 bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  size_t capacity = heap->capacity();
+  size_t capacity = heap->max_capacity();
   size_t available = heap->free_set()->available();
 
   // Check if we are falling below the worst limit, time to trigger the GC, regardless of
   // anything else.
-  size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
   if (available < min_threshold) {
     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
                  available / M, min_threshold / M);
@@ -138,7 +138,7 @@
   // Check if we need to learn a bit about the application
   const size_t max_learn = ShenandoahLearningSteps;
   if (_gc_times_learned < max_learn) {
-    size_t init_threshold = ShenandoahInitFreeThreshold * heap->capacity() / 100;
+    size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
     if (available < init_threshold) {
       log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
                    _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
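A worked example (made-up sizes) of why the heuristics now use max_capacity(): the percentage thresholds stay fixed relative to the configured maximum heap rather than shrinking along with the currently committed capacity, so uncommitting memory cannot itself suppress a GC trigger.

#include <cstdio>
int main() {
  const unsigned long long M = 1024 * 1024;
  unsigned long long max_capacity = 4096 * M; // hypothetical 4G heap
  unsigned min_free_pct = 10;                 // stand-in for ShenandoahMinFreeThreshold
  unsigned long long min_threshold = min_free_pct * max_capacity / 100;
  std::printf("min_threshold=%lluM\n", min_threshold / M); // 409M, fixed
}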
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -53,8 +53,8 @@
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
   size_t available = heap->free_set()->available();
-  size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
-  size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+  size_t threshold_bytes_allocated = heap->max_capacity() * ShenandoahAllocationThreshold / 100;
+  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
 
   if (available < min_threshold) {
     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -81,7 +81,7 @@
 
   // Do not select too large CSet that would overflow the available free space.
   // Take at least the entire evacuation reserve, and be free to overflow to free space.
-  size_t capacity  = ShenandoahHeap::heap()->capacity();
+  size_t capacity  = ShenandoahHeap::heap()->max_capacity();
   size_t available = MAX2(ShenandoahEvacReserve * capacity / 100, actual_free);
   size_t max_cset  = (size_t)(available / ShenandoahEvacWaste);
 
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -52,7 +52,7 @@
 bool ShenandoahStaticHeuristics::should_start_normal_gc() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
-  size_t capacity = heap->capacity();
+  size_t capacity = heap->max_capacity();
   size_t available = heap->free_set()->available();
   size_t threshold_available = (capacity * ShenandoahFreeThreshold) / 100;
 
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -41,13 +41,6 @@
   FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier,       false);
   FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs,       false);
 
-  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1);
-
-  // Adjust class unloading settings only if globally enabled.
-  if (ClassUnloadingWithConcurrentMark) {
-    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1);
-  }
-
   SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
   SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
 
@@ -124,7 +117,7 @@
   // The significant complication is that liveness data was collected at the previous cycle, and only
   // for those regions that were allocated before previous cycle started.
 
-  size_t capacity    = heap->capacity();
+  size_t capacity    = heap->max_capacity();
   size_t actual_free = heap->free_set()->available();
   size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
   size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
@@ -213,12 +206,12 @@
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   assert(!heap->has_forwarded_objects(), "no forwarded objects here");
 
-  size_t capacity = heap->capacity();
+  size_t capacity = heap->max_capacity();
   size_t available = heap->free_set()->available();
 
   // Check if we are falling below the worst limit, time to trigger the GC, regardless of
   // anything else.
-  size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
   if (available < min_threshold) {
     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
                  available / M, min_threshold / M);
@@ -228,7 +221,7 @@
   // Check if we need to learn a bit about the application
   const size_t max_learn = ShenandoahLearningSteps;
   if (_gc_times_learned < max_learn) {
-    size_t init_threshold = ShenandoahInitFreeThreshold * heap->capacity() / 100;
+    size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
     if (available < init_threshold) {
       log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
                    _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -139,6 +139,11 @@
     FLAG_SET_DEFAULT(ShenandoahUncommit, false);
   }
 
+  if ((InitialHeapSize == MaxHeapSize) && ShenandoahUncommit) {
+    log_info(gc)("Min heap equals to max heap, disabling ShenandoahUncommit");
+    FLAG_SET_DEFAULT(ShenandoahUncommit, false);
+  }
+
   // If class unloading is disabled, no unloading for concurrent cycles as well.
   // If class unloading is enabled, users should opt-in for unloading during
   // concurrent cycles.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
+
+#include "memory/iterator.hpp"
+
+class ShenandoahHeap;
+class ShenandoahMarkingContext;
+class Thread;
+
+class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {
+private:
+  ShenandoahMarkingContext* const _mark_context;
+public:
+  inline ShenandoahForwardedIsAliveClosure();
+  inline bool do_object_b(oop obj);
+};
+
+class ShenandoahIsAliveClosure: public BoolObjectClosure {
+private:
+  ShenandoahMarkingContext* const _mark_context;
+public:
+  inline ShenandoahIsAliveClosure();
+  inline bool do_object_b(oop obj);
+};
+
+class ShenandoahIsAliveSelector : public StackObj {
+private:
+  ShenandoahIsAliveClosure _alive_cl;
+  ShenandoahForwardedIsAliveClosure _fwd_alive_cl;
+public:
+  inline BoolObjectClosure* is_alive_closure();
+};
+
+class ShenandoahUpdateRefsClosure: public OopClosure {
+private:
+  ShenandoahHeap* _heap;
+public:
+  inline ShenandoahUpdateRefsClosure();
+  inline void do_oop(oop* p);
+  inline void do_oop(narrowOop* p);
+private:
+  template <class T>
+  inline void do_oop_work(T* p);
+};
+
+class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
+private:
+  ShenandoahHeap* _heap;
+  Thread* _thread;
+public:
+  inline ShenandoahEvacuateUpdateRootsClosure();
+  inline void do_oop(oop* p);
+  inline void do_oop(narrowOop* p);
+
+private:
+  template <class T>
+  inline void do_oop_work(T* p);
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahClosures.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "runtime/thread.hpp"
+
+ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
+  _mark_context(ShenandoahHeap::heap()->marking_context()) {
+}
+
+bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
+  if (CompressedOops::is_null(obj)) {
+    return false;
+  }
+  obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+  shenandoah_assert_not_forwarded_if(NULL, obj,
+                                     (ShenandoahHeap::heap()->is_concurrent_mark_in_progress() ||
+                                     ShenandoahHeap::heap()->is_concurrent_traversal_in_progress()));
+  return _mark_context->is_marked(obj);
+}
+
+ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
+  _mark_context(ShenandoahHeap::heap()->marking_context()) {
+}
+
+bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
+  if (CompressedOops::is_null(obj)) {
+    return false;
+  }
+  shenandoah_assert_not_forwarded(NULL, obj);
+  return _mark_context->is_marked(obj);
+}
+
+BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
+  return ShenandoahHeap::heap()->has_forwarded_objects() ?
+         reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl) :
+         reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
+}
+
+ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() :
+  _heap(ShenandoahHeap::heap()) {
+}
+
+template <class T>
+void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    _heap->update_with_forwarded_not_null(p, obj);
+  }
+}
+
+void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
+void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
+
+ShenandoahEvacuateUpdateRootsClosure::ShenandoahEvacuateUpdateRootsClosure() :
+  _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
+}
+
+template <class T>
+void ShenandoahEvacuateUpdateRootsClosure::do_oop_work(T* p) {
+  assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
+
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    if (_heap->in_collection_set(obj)) {
+      shenandoah_assert_marked(p, obj);
+      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      if (oopDesc::equals_raw(resolved, obj)) {
+        resolved = _heap->evacuate_object(obj, _thread);
+      }
+      RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
+    }
+  }
+}
+void ShenandoahEvacuateUpdateRootsClosure::do_oop(oop* p) {
+  do_oop_work(p);
+}
+
+void ShenandoahEvacuateUpdateRootsClosure::do_oop(narrowOop* p) {
+  do_oop_work(p);
+}
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
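The selector above hands back one of two statically typed closures through their common base class, so callers pay virtual dispatch per object visited rather than re-testing has_forwarded_objects() each time. A standalone sketch of the shape (stand-in types and a trivial "resolve"):

#include <cstdio>

struct BoolObjectClosure {
  virtual bool do_object_b(int obj) = 0;
  virtual ~BoolObjectClosure() {}
};
struct PlainIsAlive : BoolObjectClosure {
  bool do_object_b(int obj) { return obj != 0; }
};
struct ForwardedIsAlive : BoolObjectClosure {
  bool do_object_b(int obj) { return (obj >> 1) != 0; } // resolve, then test
};

struct IsAliveSelector {
  PlainIsAlive _alive;
  ForwardedIsAlive _fwd_alive;
  BoolObjectClosure* is_alive_closure(bool has_forwarded) {
    return has_forwarded ? static_cast<BoolObjectClosure*>(&_fwd_alive)
                         : static_cast<BoolObjectClosure*>(&_alive);
  }
};

int main() {
  IsAliveSelector sel;
  std::printf("%d\n", sel.is_alive_closure(false)->do_object_b(1)); // 1
}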
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -33,6 +33,7 @@
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 
 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -462,9 +462,11 @@
 void ShenandoahControlThread::service_uncommit(double shrink_before) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
-  // Scan through the heap and determine if there is work to do. This avoids taking
-  // heap lock if there is no work available, avoids spamming logs with superfluous
-  // logging messages, and minimises the amount of work while locks are taken.
+  // Determine if there is work to do. This avoids taking heap lock if there is
+  // no work available, avoids spamming logs with superfluous logging messages,
+  // and minimises the amount of work while locks are taken.
+
+  if (heap->committed() <= heap->min_capacity()) return;
 
   bool has_work = false;
   for (size_t i = 0; i < heap->num_regions(); i++) {
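
The early return added to service_uncommit is a guard-before-work pattern: a single cheap comparison rules out the whole region scan, the lock acquisition, and the log traffic when the heap is already at its minimum footprint. A rough standalone sketch, with invented types standing in for the heap and its lock:

    #include <cstddef>
    #include <mutex>
    #include <vector>

    // Invented stand-in for the heap; only what the pattern needs.
    struct Heap {
      size_t committed;
      size_t min_capacity;
      std::vector<bool> empty_committed;  // one flag per region
      std::mutex heap_lock;
    };

    // One cheap comparison rules out the scan and the lock entirely.
    size_t service_uncommit(Heap& h) {
      if (h.committed <= h.min_capacity) return 0;  // cannot shrink further

      bool has_work = false;
      for (size_t i = 0; i < h.empty_committed.size(); i++) {
        has_work |= (bool) h.empty_committed[i];    // scan without the lock
      }
      if (!has_work) return 0;

      std::lock_guard<std::mutex> g(h.heap_lock);   // lock only to act
      size_t count = 0;
      for (size_t i = 0; i < h.empty_committed.size(); i++) {
        count += h.empty_committed[i] ? 1 : 0;
      }
      return count;
    }

    int main() {
      Heap h{64, 64, std::vector<bool>(8, true), {}};
      return (int) service_uncommit(h);             // 0: already at the minimum
    }
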
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -430,7 +430,7 @@
   }
 
   // Evac reserve: reserve trailing space for evacuations
-  size_t to_reserve = ShenandoahEvacReserve * _heap->capacity() / 100;
+  size_t to_reserve = ShenandoahEvacReserve * _heap->max_capacity() / 100;
   size_t reserved = 0;
 
   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
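
Since capacity() is redefined by this changeset to report committed bytes, sizing the evacuation reserve from it would make the reserve shrink whenever regions are uncommitted; max_capacity() keeps the reserve a fixed fraction of the full heap. A small arithmetic illustration with made-up sizes (ShenandoahEvacReserve is a percentage):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      size_t max_capacity = 1024 * M;  // reserved heap (made-up)
      size_t committed    = 256 * M;   // currently committed (made-up)
      size_t reserve_pct  = 5;         // stands in for ShenandoahEvacReserve

      size_t old_reserve = reserve_pct * committed / 100;    // moves with uncommit
      size_t new_reserve = reserve_pct * max_capacity / 100; // stable

      printf("old=%zuM new=%zuM\n", old_reserve / M, new_reserve / M);
      // old=12M new=51M with these numbers; only the latter is predictable.
    }
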
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -33,6 +33,7 @@
 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
@@ -70,8 +71,6 @@
 #include "runtime/vmThread.hpp"
 #include "services/mallocTracker.hpp"
 
-ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
-
 #ifdef ASSERT
 template <class T>
 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
@@ -141,6 +140,7 @@
   //
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
+  size_t min_byte_size  = collector_policy()->min_heap_byte_size();
   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
   size_t heap_alignment = collector_policy()->heap_alignment();
 
@@ -159,8 +159,13 @@
   size_t num_committed_regions = init_byte_size / reg_size_bytes;
   num_committed_regions = MIN2(num_committed_regions, _num_regions);
   assert(num_committed_regions <= _num_regions, "sanity");
-
   _initial_size = num_committed_regions * reg_size_bytes;
+
+  size_t num_min_regions = min_byte_size / reg_size_bytes;
+  num_min_regions = MIN2(num_min_regions, _num_regions);
+  assert(num_min_regions <= _num_regions, "sanity");
+  _minimum_size = num_min_regions * reg_size_bytes;
+
   _committed = _initial_size;
 
   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
@@ -351,8 +356,11 @@
 
   _control_thread = new ShenandoahControlThread();
 
-  log_info(gc, init)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
-                     byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
+  log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
+                     byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
+                     byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
+                     byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
+  );
 
   log_info(gc, init)("Safepointing mechanism: %s",
                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
@@ -450,8 +458,8 @@
 
   _max_workers = MAX2(_max_workers, 1U);
   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
-                            /* are_GC_task_threads */true,
-                            /* are_ConcurrentGC_threads */false);
+                            /* are_GC_task_threads */ true,
+                            /* are_ConcurrentGC_threads */ true);
   if (_workers == NULL) {
     vm_exit_during_initialization("Failed necessary allocation.");
   } else {
@@ -461,7 +469,8 @@
   if (ShenandoahParallelSafepointThreads > 1) {
     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                 ShenandoahParallelSafepointThreads,
-                                                false, false);
+                      /* are_GC_task_threads */ false,
+                 /* are_ConcurrentGC_threads */ false);
     _safepoint_workers->initialize_workers();
   }
 }
@@ -502,7 +511,7 @@
 void ShenandoahHeap::print_on(outputStream* st) const {
   st->print_cr("Shenandoah Heap");
   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
-               capacity() / K, committed() / K, used() / K);
+               max_capacity() / K, committed() / K, used() / K);
   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 
@@ -615,13 +624,17 @@
 }
 
 size_t ShenandoahHeap::capacity() const {
-  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
+  return committed();
 }
 
 size_t ShenandoahHeap::max_capacity() const {
   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 }
 
+size_t ShenandoahHeap::min_capacity() const {
+  return _minimum_size;
+}
+
 size_t ShenandoahHeap::initial_capacity() const {
   return _initial_size;
 }
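
After this hunk the three capacity accessors mean different things: max_capacity() is the fixed reserved size (regions times region size), min_capacity() is the new floor derived from the minimum heap size, and capacity() now tracks committed bytes and moves at runtime. A compact sketch of the intended invariant, using invented field names:

    #include <cassert>
    #include <cstddef>

    struct HeapSizes {
      size_t num_regions;
      size_t region_size_bytes;
      size_t minimum;     // from the collector policy's minimum heap size
      size_t committed;   // moves up and down at runtime

      size_t max_capacity() const { return num_regions * region_size_bytes; }
      size_t min_capacity() const { return minimum; }
      size_t capacity()     const { return committed; }  // new meaning: committed
    };

    int main() {
      HeapSizes h{256, 4u * 1024 * 1024, 64u * 1024 * 1024, 128u * 1024 * 1024};
      // Expected ordering at all times:
      assert(h.min_capacity() <= h.capacity());
      assert(h.capacity()     <= h.max_capacity());
    }
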
@@ -635,12 +648,22 @@
 void ShenandoahHeap::op_uncommit(double shrink_before) {
   assert (ShenandoahUncommit, "should be enabled");
 
+  // The application allocates from the beginning of the heap, while the GC
+  // allocates at its end. It is more efficient to uncommit from the end, so
+  // that the application keeps using the still-committed regions near the
+  // start. GC allocations are much less frequent, and can therefore absorb
+  // the cost of committing regions back when needed.
+
   size_t count = 0;
-  for (size_t i = 0; i < num_regions(); i++) {
-    ShenandoahHeapRegion* r = get_region(i);
+  for (size_t i = num_regions(); i > 0; i--) { // iterate down; i > 0 guards against size_t underflow
+    ShenandoahHeapRegion* r = get_region(i - 1);
     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
       ShenandoahHeapLocker locker(lock());
       if (r->is_empty_committed()) {
+        // Do not uncommit below minimum capacity
+        if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
+          break;
+        }
+
         r->make_uncommitted();
         count++;
       }
@@ -649,8 +672,6 @@
   }
 
   if (count > 0) {
-    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
-                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
     control_thread()->notify_heap_changed();
   }
 }
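
Two details of the rewritten loop are worth spelling out: it walks regions from the top of the heap downward (matching the comment about GC allocating at the end), and it is written as for (size_t i = n; i > 0; i--) with i - 1 inside because an unsigned loop variable would wrap around at zero. A runnable toy version that also exercises the minimum-capacity floor:

    #include <cstdio>
    #include <vector>

    int main() {
      const size_t region_mb    = 4;          // made-up region size, in MB
      const size_t min_capacity = 24;         // floor, in MB
      size_t committed          = 40;         // 10 regions committed, in MB
      std::vector<bool> empty(10, true);      // every region empty and committed

      size_t count = 0;
      for (size_t i = empty.size(); i > 0; i--) {  // i - 1 below avoids size_t wraparound
        size_t idx = i - 1;                        // walk from the top of the heap down
        if (!empty[idx]) continue;
        if (committed < min_capacity + region_mb) break;  // honor the minimum
        empty[idx] = false;                        // "uncommit" this region
        committed -= region_mb;
        count++;
      }
      printf("uncommitted %zu regions, committed now %zuMB\n", count, committed);
      // prints: uncommitted 4 regions, committed now 24MB
    }
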
@@ -918,43 +939,6 @@
   return CollectedHeap::min_dummy_object_size() + ShenandoahBrooksPointer::word_size();
 }
 
-class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
-private:
-  ShenandoahHeap* _heap;
-  Thread* _thread;
-public:
-  ShenandoahEvacuateUpdateRootsClosure() :
-    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
-  }
-
-private:
-  template <class T>
-  void do_oop_work(T* p) {
-    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
-
-    T o = RawAccess<>::oop_load(p);
-    if (! CompressedOops::is_null(o)) {
-      oop obj = CompressedOops::decode_not_null(o);
-      if (_heap->in_collection_set(obj)) {
-        shenandoah_assert_marked(p, obj);
-        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
-        if (oopDesc::equals_raw(resolved, obj)) {
-          resolved = _heap->evacuate_object(obj, _thread);
-        }
-        RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
-      }
-    }
-  }
-
-public:
-  void do_oop(oop* p) {
-    do_oop_work(p);
-  }
-  void do_oop(narrowOop* p) {
-    do_oop_work(p);
-  }
-};
-
 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 private:
   ShenandoahHeap* const _heap;
@@ -1204,7 +1188,9 @@
 
 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
   workers()->threads_do(tcl);
-  _safepoint_workers->threads_do(tcl);
+  if (_safepoint_workers != NULL) {
+    _safepoint_workers->threads_do(tcl);
+  }
   if (ShenandoahStringDedup::is_enabled()) {
     ShenandoahStringDedup::threads_do(tcl);
   }
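
The null check is needed because, per the initialization hunk above, _safepoint_workers is only constructed when ShenandoahParallelSafepointThreads > 1. A minimal sketch of iterating over an optional worker gang (invented types):

    #include <cstdio>

    struct WorkGang {
      const char* name;
      void threads_do() { printf("visiting %s\n", name); }
    };

    // _safepoint_workers may never be constructed, so every walk must guard it.
    struct Heap {
      WorkGang  _workers{"GC threads"};
      WorkGang* _safepoint_workers = nullptr;  // optional: only with > 1 safepoint thread

      void gc_threads_do() {
        _workers.threads_do();
        if (_safepoint_workers != nullptr) {
          _safepoint_workers->threads_do();
        }
      }
    };

    int main() {
      Heap h;
      h.gc_threads_do();  // visits only the always-present gang
    }
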
@@ -1545,6 +1531,10 @@
       if (ShenandoahPacing) {
         pacer()->setup_for_evac();
       }
+
+      if (ShenandoahVerify) {
+        verifier()->verify_during_evacuation();
+      }
     } else {
       if (ShenandoahVerify) {
         verifier()->verify_after_concmark();
@@ -1856,31 +1846,6 @@
   return result;
 }
 
-ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
-  _mark_context(ShenandoahHeap::heap()->marking_context()) {
-}
-
-ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
-  _mark_context(ShenandoahHeap::heap()->marking_context()) {
-}
-
-bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
-  if (CompressedOops::is_null(obj)) {
-    return false;
-  }
-  obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
-  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress() || ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
-  return _mark_context->is_marked(obj);
-}
-
-bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
-  if (CompressedOops::is_null(obj)) {
-    return false;
-  }
-  shenandoah_assert_not_forwarded(NULL, obj);
-  return _mark_context->is_marked(obj);
-}
-
 void ShenandoahHeap::ref_processing_init() {
   assert(_max_workers > 0, "Sanity");
 
@@ -2851,8 +2816,3 @@
 ptrdiff_t ShenandoahHeap::cell_header_size() const {
   return ShenandoahBrooksPointer::byte_size();
 }
-
-BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
-  return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
-                                                         : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
-}
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -91,19 +91,6 @@
   virtual bool is_thread_safe() { return false; }
 };
 
-class ShenandoahUpdateRefsClosure: public OopClosure {
-private:
-  ShenandoahHeap* _heap;
-
-  template <class T>
-  inline void do_oop_work(T* p);
-
-public:
-  ShenandoahUpdateRefsClosure();
-  inline void do_oop(oop* p);
-  inline void do_oop(narrowOop* p);
-};
-
 #ifdef ASSERT
 class ShenandoahAssertToSpaceClosure : public OopClosure {
 private:
@@ -115,34 +102,6 @@
 };
 #endif
 
-class ShenandoahAlwaysTrueClosure : public BoolObjectClosure {
-public:
-  bool do_object_b(oop p) { return true; }
-};
-
-class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {
-private:
-  ShenandoahMarkingContext* const _mark_context;
-public:
-  ShenandoahForwardedIsAliveClosure();
-  bool do_object_b(oop obj);
-};
-
-class ShenandoahIsAliveClosure: public BoolObjectClosure {
-private:
-  ShenandoahMarkingContext* const _mark_context;
-public:
-  ShenandoahIsAliveClosure();
-  bool do_object_b(oop obj);
-};
-
-class ShenandoahIsAliveSelector : public StackObj {
-private:
-  ShenandoahIsAliveClosure _alive_cl;
-  ShenandoahForwardedIsAliveClosure _fwd_alive_cl;
-public:
-  BoolObjectClosure* is_alive_closure();
-};
 
 // Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
 // to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
@@ -198,6 +157,7 @@
 //
 private:
            size_t _initial_size;
+           size_t _minimum_size;
   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
   volatile size_t _used;
   volatile size_t _committed;
@@ -216,6 +176,7 @@
   size_t bytes_allocated_since_gc_start();
   void reset_bytes_allocated_since_gc_start();
 
+  size_t min_capacity()     const;
   size_t max_capacity()     const;
   size_t initial_capacity() const;
   size_t capacity()         const;
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -46,17 +46,6 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-template <class T>
-void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
-  T o = RawAccess<>::oop_load(p);
-  if (!CompressedOops::is_null(o)) {
-    oop obj = CompressedOops::decode_not_null(o);
-    _heap->update_with_forwarded_not_null(p, obj);
-  }
-}
-
-void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
-void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
 
 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
   size_t new_index = Atomic::add((size_t) 1, &_index);
--- a/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -46,12 +46,12 @@
   ShenandoahHeap* _heap;
 public:
   ShenandoahGenerationCounters(ShenandoahHeap* heap) :
-          GenerationCounters("Heap", 1, 1, heap->initial_capacity(), heap->max_capacity(), heap->committed()),
+          GenerationCounters("Heap", 1, 1, heap->initial_capacity(), heap->max_capacity(), heap->capacity()),
           _heap(heap)
   {};
 
   virtual void update_all() {
-    _current_size->set_value(_heap->committed());
+    _current_size->set_value(_heap->capacity());
   }
 };
 
@@ -94,7 +94,7 @@
   if (UsePerfData) {
     ShenandoahHeap* heap = ShenandoahHeap::heap();
     size_t used = heap->used();
-    size_t capacity = heap->capacity();
+    size_t capacity = heap->max_capacity();
     _heap_counters->update_all();
     _space_counters->update_all(capacity, used);
     _heap_region_counters->update();
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -153,7 +153,7 @@
 void ShenandoahPacer::setup_for_idle() {
   assert(ShenandoahPacing, "Only be here when pacing is enabled");
 
-  size_t initial = _heap->capacity() * ShenandoahPacingIdleSlack / 100;
+  size_t initial = _heap->max_capacity() * ShenandoahPacingIdleSlack / 100;
   double tax = 1;
 
   restart_with(initial, tax);
@@ -166,7 +166,7 @@
   if (_progress == -1) {
     // First initialization, report some prior progress
     Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
-    return (size_t) (_heap->capacity() * 0.1);
+    return (size_t) (_heap->max_capacity() * 0.1);
   } else {
     // Record history, and reply with historical data
     _progress_history->add(_progress);
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -51,6 +51,7 @@
   f(scan_jfr_weak_roots,                            "    S: JFR Weak Roots")            \
   f(scan_jni_weak_roots,                            "    S: JNI Weak Roots")            \
   f(scan_stringtable_roots,                         "    S: String Table Roots")        \
+  f(scan_resolved_method_table_roots,               "    S: Resolved Table Roots")      \
   f(scan_vm_weak_roots,                             "    S: VM Weak Roots")             \
   f(scan_synchronizer_roots,                        "    S: Synchronizer Roots")        \
   f(scan_management_roots,                          "    S: Management Roots")          \
@@ -76,6 +77,7 @@
   f(update_jfr_weak_roots,                          "    U: JFR Weak Roots")            \
   f(update_jni_weak_roots,                          "    U: JNI Weak Roots")            \
   f(update_stringtable_roots,                       "    U: String Table Roots")        \
+  f(update_resolved_method_table_roots,             "    U: Resolved Table Roots")      \
   f(update_vm_weak_roots,                           "    U: VM Weak Roots")             \
   f(update_synchronizer_roots,                      "    U: Synchronizer Roots")        \
   f(update_management_roots,                        "    U: Management Roots")          \
@@ -109,6 +111,7 @@
   f(evac_jfr_weak_roots,                            "    E: JFR Weak Roots")            \
   f(evac_jni_weak_roots,                            "    E: JNI Weak Roots")            \
   f(evac_stringtable_roots,                         "    E: String Table Roots")        \
+  f(evac_resolved_method_table_roots,               "    E: Resolved Table Roots")      \
   f(evac_vm_weak_roots,                             "    E: VM Weak Roots")             \
   f(evac_synchronizer_roots,                        "    E: Synchronizer Roots")        \
   f(evac_management_roots,                          "    E: Management Roots")          \
@@ -139,6 +142,7 @@
   f(final_update_jfr_weak_roots,                     "    UR: JFR Weak Roots")          \
   f(final_update_jni_weak_roots,                     "    UR: JNI Weak Roots")          \
   f(final_update_stringtable_roots,                  "    UR: String Table Roots")      \
+  f(final_update_resolved_method_table_roots,        "    UR: Resolved Table Roots")    \
   f(final_update_vm_weak_roots,                      "    UR: VM Weak Roots")           \
   f(final_update_refs_synchronizer_roots,            "    UR: Synchronizer Roots")      \
   f(final_update_refs_management_roots,              "    UR: Management Roots")        \
@@ -164,6 +168,7 @@
   f(degen_gc_update_jfr_weak_roots,                  "    DU: JFR Weak Roots")          \
   f(degen_gc_update_jni_weak_roots,                  "    DU: JNI Weak Roots")          \
   f(degen_gc_update_stringtable_roots,               "    DU: String Table Roots")      \
+  f(degen_gc_update_resolved_method_table_roots,     "    DU: Resolved Table Roots")    \
   f(degen_gc_update_vm_weak_roots,                   "    DU: VM Weak Roots")           \
   f(degen_gc_update_synchronizer_roots,              "    DU: Synchronizer Roots")      \
   f(degen_gc_update_management_roots,                "    DU: Management Roots")        \
@@ -190,6 +195,7 @@
   f(init_traversal_gc_jfr_weak_roots,                "    TI: JFR Weak Roots")          \
   f(init_traversal_gc_jni_weak_roots,                "    TI: JNI Weak Roots")          \
   f(init_traversal_gc_stringtable_roots,             "    TI: String Table Roots")      \
+  f(init_traversal_gc_resolved_method_table_roots,   "    TI: Resolved Table Roots")    \
   f(init_traversal_gc_vm_weak_roots,                 "    TI: VM Weak Roots")           \
   f(init_traversal_gc_synchronizer_roots,            "    TI: Synchronizer Roots")      \
   f(init_traversal_gc_management_roots,              "    TI: Management Roots")        \
@@ -213,6 +219,7 @@
   f(final_traversal_gc_jfr_weak_roots,               "    TF: JFR Weak Roots")          \
   f(final_traversal_gc_jni_weak_roots,               "    TF: JNI Weak Roots")          \
   f(final_traversal_gc_stringtable_roots,            "    TF: String Table Roots")      \
+  f(final_traversal_gc_resolved_method_table_roots,  "    TF: Resolved Table Roots")    \
   f(final_traversal_gc_vm_weak_roots,                "    TF: VM Weak Roots")           \
   f(final_traversal_gc_synchronizer_roots,           "    TF: Synchronizer Roots")      \
   f(final_traversal_gc_management_roots,             "    TF: Management Roots")        \
@@ -225,24 +232,25 @@
   f(final_traversal_gc_termination,                  "    TF:   Termination")           \
                                                                                         \
   /* Per-thread timer block, should have "roots" counters in consistent order */        \
-  f(final_traversal_update_roots,                    "  Update Roots")                  \
-  f(final_traversal_update_thread_roots,             "    TU: Thread Roots")            \
-  f(final_traversal_update_code_roots,               "    TU: Code Cache Roots")        \
-  f(final_traversal_update_universe_roots,           "    TU: Universe Roots")          \
-  f(final_traversal_update_jni_roots,                "    TU: JNI Roots")               \
-  f(final_traversal_update_jvmti_weak_roots,         "    TU: JVMTI Weak Roots")        \
-  f(final_traversal_update_jfr_weak_roots,           "    TU: JFR Weak Roots")          \
-  f(final_traversal_update_jni_weak_roots,           "    TU: JNI Weak Roots")          \
-  f(final_traversal_update_stringtable_roots,        "    TU: String Table Roots")      \
-  f(final_traversal_update_vm_weak_roots,            "    TU: VM Weak Roots")           \
-  f(final_traversal_update_synchronizer_roots,       "    TU: Synchronizer Roots")      \
-  f(final_traversal_update_management_roots,         "    TU: Management Roots")        \
-  f(final_traversal_update_system_dict_roots,        "    TU: System Dict Roots")       \
-  f(final_traversal_update_cldg_roots,               "    TU: CLDG Roots")              \
-  f(final_traversal_update_jvmti_roots,              "    TU: JVMTI Roots")             \
-  f(final_traversal_update_string_dedup_table_roots, "    TU: Dedup Table Roots")       \
-  f(final_traversal_update_string_dedup_queue_roots, "    TU: Dedup Queue Roots")       \
-  f(final_traversal_update_finish_queues,            "    TU: Finish Queues")           \
+  f(final_traversal_update_roots,                       "  Update Roots")               \
+  f(final_traversal_update_thread_roots,                "    TU: Thread Roots")         \
+  f(final_traversal_update_code_roots,                  "    TU: Code Cache Roots")     \
+  f(final_traversal_update_universe_roots,              "    TU: Universe Roots")       \
+  f(final_traversal_update_jni_roots,                   "    TU: JNI Roots")            \
+  f(final_traversal_update_jvmti_weak_roots,            "    TU: JVMTI Weak Roots")     \
+  f(final_traversal_update_jfr_weak_roots,              "    TU: JFR Weak Roots")       \
+  f(final_traversal_update_jni_weak_roots,              "    TU: JNI Weak Roots")       \
+  f(final_traversal_update_stringtable_roots,           "    TU: String Table Roots")   \
+  f(final_traversal_update_resolved_method_table_roots, "    TU: Resolved Table Roots") \
+  f(final_traversal_update_vm_weak_roots,               "    TU: VM Weak Roots")        \
+  f(final_traversal_update_synchronizer_roots,          "    TU: Synchronizer Roots")   \
+  f(final_traversal_update_management_roots,            "    TU: Management Roots")     \
+  f(final_traversal_update_system_dict_roots,           "    TU: System Dict Roots")    \
+  f(final_traversal_update_cldg_roots,                  "    TU: CLDG Roots")           \
+  f(final_traversal_update_jvmti_roots,                 "    TU: JVMTI Roots")          \
+  f(final_traversal_update_string_dedup_table_roots,    "    TU: Dedup Table Roots")    \
+  f(final_traversal_update_string_dedup_queue_roots,    "    TU: Dedup Queue Roots")    \
+  f(final_traversal_update_finish_queues,               "    TU: Finish Queues")        \
                                                                                         \
   f(traversal_gc_cleanup,                            "  Cleanup")                       \
                                                                                         \
@@ -261,6 +269,7 @@
   f(full_gc_jfr_weak_roots,                          "    F: JFR Weak Roots")           \
   f(full_gc_jni_weak_roots,                          "    F: JNI Weak Roots")           \
   f(full_gc_stringtable_roots,                       "    F: String Table Roots")       \
+  f(full_gc_resolved_method_table_roots,             "    F: Resolved Table Roots")     \
   f(full_gc_vm_weak_roots,                           "    F: VM Weak Roots")            \
   f(full_gc_synchronizer_roots,                      "    F: Synchronizer Roots")       \
   f(full_gc_management_roots,                        "    F: Management Roots")         \
@@ -311,23 +320,24 @@
   // end
 
 #define SHENANDOAH_GC_PAR_PHASE_DO(f)                           \
-  f(ThreadRoots,             "Thread Roots (ms):")              \
-  f(CodeCacheRoots,          "CodeCache Roots (ms):")           \
-  f(UniverseRoots,           "Universe Roots (ms):")            \
-  f(JNIRoots,                "JNI Handles Roots (ms):")         \
-  f(JVMTIWeakRoots,          "JVMTI Weak Roots (ms):")          \
-  f(JFRWeakRoots,            "JFR Weak Roots (ms):")            \
-  f(JNIWeakRoots,            "JNI Weak Roots (ms):")            \
-  f(StringTableRoots,        "StringTable Roots(ms):")          \
-  f(VMWeakRoots,             "VM Weak Roots(ms)")               \
-  f(ObjectSynchronizerRoots, "ObjectSynchronizer Roots (ms):")  \
-  f(ManagementRoots,         "Management Roots (ms):")          \
-  f(SystemDictionaryRoots,   "SystemDictionary Roots (ms):")    \
-  f(CLDGRoots,               "CLDG Roots (ms):")                \
-  f(JVMTIRoots,              "JVMTI Roots (ms):")               \
-  f(StringDedupTableRoots,   "String Dedup Table Roots (ms):")  \
-  f(StringDedupQueueRoots,   "String Dedup Queue Roots (ms):")  \
-  f(FinishQueues,            "Finish Queues (ms):")             \
+  f(ThreadRoots,              "Thread Roots (ms):")              \
+  f(CodeCacheRoots,           "CodeCache Roots (ms):")           \
+  f(UniverseRoots,            "Universe Roots (ms):")            \
+  f(JNIRoots,                 "JNI Handles Roots (ms):")         \
+  f(JVMTIWeakRoots,           "JVMTI Weak Roots (ms):")          \
+  f(JFRWeakRoots,             "JFR Weak Roots (ms):")            \
+  f(JNIWeakRoots,             "JNI Weak Roots (ms):")            \
+  f(StringTableRoots,         "StringTable Roots (ms):")         \
+  f(ResolvedMethodTableRoots, "Resolved Table Roots (ms):")      \
+  f(VMWeakRoots,              "VM Weak Roots (ms):")             \
+  f(ObjectSynchronizerRoots,  "ObjectSynchronizer Roots (ms):")  \
+  f(ManagementRoots,          "Management Roots (ms):")          \
+  f(SystemDictionaryRoots,    "SystemDictionary Roots (ms):")    \
+  f(CLDGRoots,                "CLDG Roots (ms):")                \
+  f(JVMTIRoots,               "JVMTI Roots (ms):")               \
+  f(StringDedupTableRoots,    "String Dedup Table Roots (ms):")  \
+  f(StringDedupQueueRoots,    "String Dedup Queue Roots (ms):")  \
+  f(FinishQueues,             "Finish Queues (ms):")             \
   // end
 
 class ShenandoahPhaseTimings : public CHeapObj<mtGC> {
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -27,6 +27,7 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
@@ -48,14 +49,15 @@
 
 static const struct PhaseMap phase_mapping[] = {
 #if INCLUDE_JVMTI
-  {WeakProcessorPhases::jvmti,       ShenandoahPhaseTimings::JVMTIWeakRoots},
+  {WeakProcessorPhases::jvmti,                 ShenandoahPhaseTimings::JVMTIWeakRoots},
 #endif
 #if INCLUDE_JFR
-  {WeakProcessorPhases::jfr,         ShenandoahPhaseTimings::JFRWeakRoots},
+  {WeakProcessorPhases::jfr,                   ShenandoahPhaseTimings::JFRWeakRoots},
 #endif
-  {WeakProcessorPhases::jni,         ShenandoahPhaseTimings::JNIWeakRoots},
-  {WeakProcessorPhases::stringtable, ShenandoahPhaseTimings::StringTableRoots},
-  {WeakProcessorPhases::vm,          ShenandoahPhaseTimings::VMWeakRoots}
+  {WeakProcessorPhases::jni,                   ShenandoahPhaseTimings::JNIWeakRoots},
+  {WeakProcessorPhases::stringtable,           ShenandoahPhaseTimings::StringTableRoots},
+  {WeakProcessorPhases::resolved_method_table, ShenandoahPhaseTimings::ResolvedMethodTableRoots},
+  {WeakProcessorPhases::vm,                    ShenandoahPhaseTimings::VMWeakRoots}
 };
 
 STATIC_ASSERT(sizeof(phase_mapping) / sizeof(PhaseMap) == WeakProcessorPhases::phase_count);
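
The STATIC_ASSERT keeps phase_mapping honest: forgetting to add a row for a new WeakProcessorPhases entry (such as resolved_method_table here) becomes a compile error rather than a silently missing timing. The same guard in portable C++, with invented enum values:

    enum WeakPhase { jvmti, jfr, jni, stringtable, resolved_method_table, vm_weak, phase_count };

    struct PhaseMap { WeakPhase from; int timing; };

    static const PhaseMap phase_mapping[] = {
      {jvmti, 0}, {jfr, 1}, {jni, 2},
      {stringtable, 3}, {resolved_method_table, 4}, {vm_weak, 5},
    };

    // Compile error if a phase is added without a matching row above.
    static_assert(sizeof(phase_mapping) / sizeof(PhaseMap) == phase_count,
                  "phase_mapping must cover every weak-processing phase");

    int main() { return 0; }
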
@@ -64,7 +66,6 @@
                                                  ShenandoahPhaseTimings::Phase phase) :
   _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
   _srs(n_workers),
-  _par_state_string(StringTable::weak_storage()),
   _phase(phase),
   _coderoots_all_iterator(ShenandoahCodeRoots::iterator()),
   _weak_processor_timings(n_workers),
@@ -242,10 +243,7 @@
   _evacuation_tasks(new SubTasksDone(SHENANDOAH_EVAC_NumElements)),
   _srs(n_workers),
   _phase(phase),
-  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator()),
-  _par_state_string(StringTable::weak_storage())
-
-{
+  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator()) {
   heap->phase_timings()->record_workers_start(_phase);
   if (ShenandoahStringDedup::is_enabled()) {
     StringDedup::gc_prologue(false);
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -56,7 +56,6 @@
 class ShenandoahRootProcessor : public StackObj {
   SubTasksDone* _process_strong_tasks;
   StrongRootsScope _srs;
-  OopStorage::ParState<false, false> _par_state_string;
   ShenandoahPhaseTimings::Phase _phase;
   ParallelCLDRootIterator _cld_iterator;
   ShenandoahAllCodeRootsIterator _coderoots_all_iterator;
@@ -120,7 +119,6 @@
   StrongRootsScope _srs;
   ShenandoahPhaseTimings::Phase _phase;
   ShenandoahCsetCodeRootsIterator _coderoots_cset_iterator;
-  OopStorage::ParState<false, false> _par_state_string;
 
   enum Shenandoah_evacuate_roots_tasks {
     SHENANDOAH_EVAC_Universe_oops_do,
--- a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -92,7 +92,7 @@
 void ShenandoahStringDedup::oops_do_slow(OopClosure* cl) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
   assert(is_enabled(), "String deduplication not enabled");
-  ShenandoahAlwaysTrueClosure always_true;
+  AlwaysTrueClosure always_true;
   StringDedupUnlinkOrOopsDoClosure sd_cl(&always_true, cl);
   StringDedupQueue::unlink_or_oops_do(&sd_cl);
   StringDedupTable::unlink_or_oops_do(&sd_cl, 0);
--- a/src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -76,8 +76,11 @@
          phase == ShenandoahPhaseTimings::full_gc_weakrefs_termination,
          "Only these phases");
 
-  assert(Thread::current()->is_VM_thread() || Thread::current()->is_ConcurrentGC_thread(),
-    "Called from wrong thread");
+  assert(!Thread::current()->is_Worker_thread() &&
+         (Thread::current()->is_VM_thread() ||
+          Thread::current()->is_ConcurrentGC_thread()),
+         "Called from wrong thread");
+
   _current_termination_phase = phase;
   ShenandoahHeap::heap()->phase_timings()->termination_times()->reset();
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -30,6 +30,7 @@
 #include "gc/shared/workgroup.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"
 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
@@ -196,6 +197,10 @@
       } else {
         _rp->process_all_roots(&roots_cl, &cld_cl, &code_cl, NULL, worker_id);
       }
+      if (ShenandoahStringDedup::is_enabled()) {
+        AlwaysTrueClosure is_alive;
+        ShenandoahStringDedup::parallel_oops_do(&is_alive, &roots_cl, worker_id);
+      }
     }
   }
 };
@@ -595,11 +600,10 @@
   }
 
   if (!_heap->cancelled_gc()) {
+    fixup_roots();
     if (_heap->unload_classes()) {
       _heap->unload_classes_and_cleanup_tables(false);
     }
-
-    fixup_roots();
   }
 
   if (!_heap->cancelled_gc()) {
@@ -769,29 +773,6 @@
   void do_oop(oop* p)       { do_oop_work(p); }
 };
 
-class ShenandoahTraversalWeakUpdateClosure : public OopClosure {
-private:
-  template <class T>
-  inline void do_oop_work(T* p) {
-    // Cannot call maybe_update_with_forwarded, because on traversal-degen
-    // path the collection set is already dropped. Instead, do the unguarded store.
-    // TODO: This can be fixed after degen-traversal stops dropping cset.
-    T o = RawAccess<>::oop_load(p);
-    if (!CompressedOops::is_null(o)) {
-      oop obj = CompressedOops::decode_not_null(o);
-      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
-      shenandoah_assert_marked(p, obj);
-      RawAccess<IS_NOT_NULL>::oop_store(p, obj);
-    }
-  }
-
-public:
-  ShenandoahTraversalWeakUpdateClosure() {}
-
-  void do_oop(narrowOop* p) { do_oop_work(p); }
-  void do_oop(oop* p)       { do_oop_work(p); }
-};
-
 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 private:
   ShenandoahObjToScanQueue* _queue;
@@ -1104,16 +1085,6 @@
                                       &pt);
   }
 
-  {
-    ShenandoahGCPhase phase(phase_process);
-    ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);
-
-    // Process leftover weak oops (using parallel version)
-    ShenandoahTraversalWeakUpdateClosure cl;
-    WeakProcessor::weak_oops_do(workers, &is_alive, &cl, 1);
-
-    pt.print_all_references();
-
-    assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
-  }
+  pt.print_all_references();
+  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -100,9 +100,10 @@
 
 ShenandoahGCPhase::ShenandoahGCPhase(const ShenandoahPhaseTimings::Phase phase) :
   _heap(ShenandoahHeap::heap()), _phase(phase) {
-  assert(Thread::current()->is_VM_thread() ||
-         Thread::current()->is_ConcurrentGC_thread(),
-        "Must be set by these threads");
+  assert(!Thread::current()->is_Worker_thread() &&
+         (Thread::current()->is_VM_thread() ||
+          Thread::current()->is_ConcurrentGC_thread()),
+         "Must be set by these threads");
   _parent_phase = _current_phase;
   _current_phase = phase;
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -628,6 +628,10 @@
         enabled = true;
         expected = ShenandoahHeap::HAS_FORWARDED;
         break;
+      case _verify_gcstate_evacuation:
+        enabled = true;
+        expected = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION;
+        break;
       case _verify_gcstate_stable:
         enabled = true;
         expected = ShenandoahHeap::STABLE;
@@ -808,6 +812,18 @@
   );
 }
 
+void ShenandoahVerifier::verify_during_evacuation() {
+  verify_at_safepoint(
+          "During Evacuation",
+          _verify_forwarded_allow,   // some forwarded references are allowed
+          _verify_marked_disable,    // walk only roots
+          _verify_cset_disable,      // some cset references are not forwarded yet
+          _verify_liveness_disable,  // liveness data might already be stale after pre-evacs
+          _verify_regions_disable,   // trash regions not yet recycled
+          _verify_gcstate_evacuation // evacuation is in progress
+  );
+}
+
 void ShenandoahVerifier::verify_after_evacuation() {
   verify_at_safepoint(
           "After Evacuation",
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -135,6 +135,9 @@
 
     // Nothing is in progress, some objects are forwarded
     _verify_gcstate_forwarded,
+
+    // Evacuation is in progress, some objects are forwarded
+    _verify_gcstate_evacuation,
   } VerifyGCState;
 
   struct VerifyOptions {
@@ -173,6 +176,7 @@
   void verify_before_concmark();
   void verify_after_concmark();
   void verify_before_evacuation();
+  void verify_during_evacuation();
   void verify_after_evacuation();
   void verify_before_updaterefs();
   void verify_after_updaterefs();
--- a/src/hotspot/share/gc/z/zBarrier.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/z/zBarrier.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -123,7 +123,7 @@
 }
 
 void ZBarrier::load_barrier_on_oop_fields(oop o) {
-  assert(ZOop::is_good(o), "Should be good");
+  assert(ZAddress::is_good(ZOop::to_address(o)), "Should be good");
   ZLoadBarrierOopClosure cl;
   o->oop_iterate(&cl);
 }
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -37,7 +37,7 @@
 retry:
   // Fast path
   if (fast_path(addr)) {
-    return ZOop::to_oop(addr);
+    return ZOop::from_address(addr);
   }
 
   // Slow path
@@ -56,7 +56,7 @@
     }
   }
 
-  return ZOop::to_oop(good_addr);
+  return ZOop::from_address(good_addr);
 }
 
 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
@@ -67,7 +67,7 @@
   if (fast_path(addr)) {
     // Return the good address instead of the weak good address
     // to ensure that the currently active heap view is used.
-    return ZOop::to_oop(ZAddress::good_or_null(addr));
+    return ZOop::from_address(ZAddress::good_or_null(addr));
   }
 
   // Slow path
@@ -95,7 +95,7 @@
     }
   }
 
-  return ZOop::to_oop(good_addr);
+  return ZOop::from_address(good_addr);
 }
 
 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
@@ -117,7 +117,7 @@
   // to heal the same root if it is aligned, since they would always heal
   // the root in the same way and it does not matter in which order it
   // happens. For misaligned oops, there needs to be mutual exclusion.
-  *p = ZOop::to_oop(good_addr);
+  *p = ZOop::from_address(good_addr);
 }
 
 inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
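
The rename from ZOop::to_oop to ZOop::from_address reads from the perspective of the produced type: from_address builds an oop out of an address, mirroring the existing to_address direction. A sketch of that naming convention for a conversion pair (illustrative types, not ZGC's implementation):

    #include <cassert>
    #include <cstdint>

    struct Obj { int value; };
    typedef Obj* oop;  // toy oop, just a pointer here

    // Named for the produced type: from_address builds an oop out of an address,
    // to_address goes the other way (mirrors the renamed ZOop helpers).
    inline oop       oop_from_address(uintptr_t addr) { return reinterpret_cast<oop>(addr); }
    inline uintptr_t oop_to_address(oop o)            { return reinterpret_cast<uintptr_t>(o); }

    int main() {
      Obj obj{42};
      oop o = oop_from_address(oop_to_address(&obj));
      assert(o->value == 42);
      return 0;
    }
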
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -55,11 +55,7 @@
 
   // Heal oops and disarm
   ZNMethodOopClosure cl;
-  nm->oops_do(&cl);
-  nm->fix_oop_relocations();
-
-  OrderAccess::release();
-
+  ZNMethod::nmethod_oops_do(nm, &cl);
   disarm(nm);
 
   return true;
--- a/src/hotspot/share/gc/z/zForwarding.cpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/z/zForwarding.cpp	Tue Apr 30 11:07:58 2019 +0200
@@ -69,6 +69,11 @@
     // Check for duplicates
     for (ZForwardingCursor j = i + 1; j < _entries.length(); j++) {
       const ZForwardingEntry other = at(&j);
+      if (!other.populated()) {
+        // Skip empty entries
+        continue;
+      }
+
       guarantee(entry.from_index() != other.from_index(), "Duplicate from");
       guarantee(entry.to_offset() != other.to_offset(), "Duplicate to");
     }
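
The added check matters because the duplicate scan compares every later slot: an entry that was never populated carries default from/to fields, which could spuriously collide with a real entry and trip the guarantee. A standalone model of the fixed check, with invented Entry fields:

    #include <cassert>
    #include <vector>

    struct Entry {           // invented fields standing in for ZForwardingEntry
      bool populated;
      int  from;
      int  to;
    };

    // Compare only populated entries; an empty slot's default fields must not
    // be allowed to collide with a real entry.
    static void verify_no_duplicates(const std::vector<Entry>& entries) {
      for (size_t i = 0; i < entries.size(); i++) {
        if (!entries[i].populated) continue;
        for (size_t j = i + 1; j < entries.size(); j++) {
          if (!entries[j].populated) continue;  // the fix: skip empty entries
          assert(entries[i].from != entries[j].from && "Duplicate from");
          assert(entries[i].to   != entries[j].to   && "Duplicate to");
        }
      }
    }

    int main() {
      std::vector<Entry> e(3, Entry{false, 0, 0});
      e[0] = Entry{true, 0, 0};   // a real entry that happens to use 0/0
      e[2] = Entry{true, 2, 20};
      verify_no_duplicates(e);    // passes; without the skip, e[1]'s zeros
                                  // would spuriously "duplicate" e[0]
      return 0;
    }
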
--- a/src/hotspot/share/gc/z/zHeap.inline.hpp	Mon Apr 08 13:41:48 2019 +0200
+++ b/src/hotspot/share/gc/z/zHeap.inline.hpp	Tue Apr 30 11:07:58 2019 +0200
@@ -135,7 +135,7 @@
 }
 
 inline bool ZHeap::is_oop(oop object) const {
-  return ZOop::is_good(object);