changeset 54184:1a12516c63e4

Automatic merge with default
author mcimadamore
date Thu, 03 Jan 2019 21:25:54 +0100
parents 182ed0d0c692 5f942c387778
children 2e113032e1f2 d446deea67ba
files make/autoconf/spec.gmk.in src/hotspot/share/classfile/vmSymbols.hpp src/hotspot/share/include/jvm.h src/hotspot/share/prims/jvm.cpp src/hotspot/share/runtime/arguments_ext.hpp src/hotspot/share/services/diagnosticCommand_ext.hpp src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Check.java src/utils/LogCompilation/src/test/resources/hotspot_pid23756.log src/utils/LogCompilation/src/test/resources/hotspot_pid25109.log src/utils/LogCompilation/src/test/resources/no_tiered_short.log src/utils/LogCompilation/src/test/resources/tiered_short.log test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorEventsForTwoThreadsTest.java test/jdk/java/net/MulticastSocket/PromiscuousIPv6.java test/jdk/java/nio/channels/DatagramChannel/PromiscuousIPv6.java test/langtools/jdk/javadoc/doclet/lib/JavadocTester.java
diffstat 630 files changed, 14590 insertions(+), 122076 deletions(-)
line diff
--- a/.hgtags	Wed Dec 19 12:33:25 2018 -0500
+++ b/.hgtags	Thu Jan 03 21:25:54 2019 +0100
@@ -527,3 +527,7 @@
 732bec44c89e8b93a38296bf690f97b7230c5b6d jdk-12+22
 eef755718cb24813031a842bbfc716a6cea18e9a jdk-12+23
 cc4098b3bc10d1c390384289025fea7b0d4b9e93 jdk-13+0
+7d4397b43fa305806160785a4c7210600d59581a jdk-12+24
+11033c4ada542f9c9a873314b6ecf60af19e8256 jdk-13+1
+7496df94b3b79f3da53925d2d137317715f11d97 jdk-12+25
+50677f43ac3df9a8684222b8893543c60f3aa0bd jdk-13+2
--- a/doc/building.html	Wed Dec 19 12:33:25 2018 -0500
+++ b/doc/building.html	Thu Jan 03 21:25:54 2019 +0100
@@ -145,7 +145,7 @@
 <li><p>Do not check out the source code in a path which contains spaces. Chances are the build will not work. This is most likely to be an issue on Windows systems.</p></li>
 <li><p>Do not check out the source code in a path which has a very long name or is nested many levels deep. Chances are you will hit an OS limitation during the build.</p></li>
 <li><p>Put the source code on a local disk, not a network share. If possible, use an SSD. The build process is very disk intensive, and having slow disk access will significantly increase build times. If you need to use a network share for the source code, see below for suggestions on how to keep the build artifacts on a local disk.</p></li>
-<li><p>On Windows, extra care must be taken to make sure the <a href="#cygwin">Cygwin</a> environment is consistent. It is recommended that you follow this procedure:</p>
+<li><p>On Windows, if using <a href="#cygwin">Cygwin</a>, extra care must be taken to make sure the environment is consistent. It is recommended that you follow this procedure:</p>
 <ul>
 <li><p>Create the directory that is going to contain the top directory of the JDK clone by using the <code>mkdir</code> command in the Cygwin bash shell. That is, do <em>not</em> create it using Windows Explorer. This will ensure that it will have proper Cygwin attributes, and that its children will inherit those attributes.</p></li>
 <li><p>Do not put the JDK clone in a path under your Cygwin home directory. This is especially important if your user name contains spaces and/or mixed upper and lower case letters.</p></li>
@@ -201,12 +201,12 @@
 <h3 id="windows">Windows</h3>
 <p>Windows XP is not a supported platform, but all newer Windows should be able to build the JDK.</p>
 <p>On Windows, it is important that you pay attention to the instructions in the <a href="#special-considerations">Special Considerations</a>.</p>
-<p>Windows is the only non-POSIX OS supported by the JDK, and as such, requires some extra care. A POSIX support layer is required to build on Windows. Currently, the only supported such layer is Cygwin. (Msys is no longer supported due to a too old bash; msys2 and the new Windows Subsystem for Linux (WSL) would likely be possible to support in a future version but that would require effort to implement.)</p>
+<p>Windows is the only non-POSIX OS supported by the JDK, and as such, requires some extra care. A POSIX support layer is required to build on Windows. Currently, the only supported such layers are Cygwin and Windows Subsystem for Linux (WSL). (Msys is no longer supported due to a too old bash; msys2 would likely be possible to support in a future version but that would require effort to implement.)</p>
 <p>Internally in the build system, all paths are represented as Unix-style paths, e.g. <code>/cygdrive/c/hg/jdk9/Makefile</code> rather than <code>C:\hg\jdk9\Makefile</code>. This rule also applies to input to the build system, e.g. in arguments to <code>configure</code>. So, use <code>--with-msvcr-dll=/cygdrive/c/msvcr100.dll</code> rather than <code>--with-msvcr-dll=c:\msvcr100.dll</code>. For details on this conversion, see the section on <a href="#fixpath">Fixpath</a>.</p>
 <h4 id="cygwin">Cygwin</h4>
-<p>A functioning <a href="http://www.cygwin.com/">Cygwin</a> environment is thus required for building the JDK on Windows. If you have a 64-bit OS, we strongly recommend using the 64-bit version of Cygwin.</p>
+<p>A functioning <a href="http://www.cygwin.com/">Cygwin</a> environment is required for building the JDK on Windows. If you have a 64-bit OS, we strongly recommend using the 64-bit version of Cygwin.</p>
 <p><strong>Note:</strong> Cygwin has a model of continuously updating all packages without any easy way to install or revert to a specific version of a package. This means that whenever you add or update a package in Cygwin, you might (inadvertently) update tools that are used by the JDK build process, and that can cause unexpected build problems.</p>
-<p>The JDK requires GNU Make 4.0 or greater on Windows. This is usually not a problem, since Cygwin currently only distributes GNU Make at a version above 4.0.</p>
+<p>The JDK requires GNU Make 4.0 or greater in Cygwin. This is usually not a problem, since Cygwin currently only distributes GNU Make at a version above 4.0.</p>
 <p>Apart from the basic Cygwin installation, the following packages must also be installed:</p>
 <ul>
 <li><code>autoconf</code></li>
@@ -217,6 +217,11 @@
 <p>Often, you can install these packages using the following command line:</p>
 <pre><code>&lt;path to Cygwin setup&gt;/setup-x86_64 -q -P autoconf -P make -P unzip -P zip</code></pre>
 <p>Unfortunately, Cygwin can be unreliable in certain circumstances. If you experience build tool crashes or strange issues when building on Windows, please check the Cygwin FAQ on the <a href="https://cygwin.com/faq/faq.html#faq.using.bloda">&quot;BLODA&quot; list</a> and the section on <a href="https://cygwin.com/faq/faq.html#faq.using.fixing-fork-failures">fork() failures</a>.</p>
+<h4 id="windows-subsystem-for-linux-wsl">Windows Subsystem for Linux (WSL)</h4>
+<p>Windows 10 1809 or newer is supported due to a dependency on the wslpath utility and support for environment variable sharing through WSLENV. Version 1803 can work but intermittent build failures have been observed.</p>
+<p>It's possible to build both Windows and Linux binaries from WSL. To build Windows binaries, you must use a Windows boot JDK (located in a Windows-accessible directory). To build Linux binaries, you must use a Linux boot JDK. The default behavior is to build for Windows. To build for Linux, pass <code>--build=x86_64-unknown-linux-gnu --host=x86_64-unknown-linux-gnu</code> to <code>configure</code>.</p>
+<p>If building Windows binaries, the source code must be located in a Windows-accessible directory. This is because Windows executables (such as Visual Studio and the boot JDK) must be able to access the source code. Also, the drive where the source is stored must be mounted as case-insensitive by changing either /etc/fstab or /etc/wsl.conf in WSL. Individual directories may be corrected using the fsutil tool in case the source was cloned before changing the mount options.</p>
+<p>Note that while it's possible to build on WSL, testing is still not fully supported.</p>
 <h3 id="solaris">Solaris</h3>
 <p>See <code>make/devkit/solaris11.1-package-list.txt</code> for a list of recommended packages to install when building on Solaris. The versions specified in this list are the versions used by the daily builds at Oracle, and are likely to work properly.</p>
 <p>Older versions of Solaris shipped a broken version of <code>objcopy</code>. At least version 2.21.1 is needed, which is provided by Solaris 11 Update 1. Objcopy is needed if you want to have external debug symbols. Please make sure you are using at least version 2.21.1 of objcopy, or that you disable external debug symbols.</p>
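As a companion to the WSL section above, the two build flavors it describes come down to these `configure` invocations (boot-JDK paths are hypothetical examples, not taken from this changeset):

```bash
# From a WSL shell, in a source tree on a Windows-accessible,
# case-insensitive mount such as /mnt/c/jdk:

# Default: produce Windows binaries, with a Windows boot JDK.
bash configure --with-boot-jdk=/mnt/c/devkit/jdk-11

# Alternative: produce Linux binaries from the same tree, with a
# Linux boot JDK.
bash configure --build=x86_64-unknown-linux-gnu \
    --host=x86_64-unknown-linux-gnu \
    --with-boot-jdk=$HOME/devkit/jdk-11-linux
```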
--- a/doc/building.md	Wed Dec 19 12:33:25 2018 -0500
+++ b/doc/building.md	Thu Jan 03 21:25:54 2019 +0100
@@ -75,8 +75,8 @@
     network share for the source code, see below for suggestions on how to keep
     the build artifacts on a local disk.
 
-  * On Windows, extra care must be taken to make sure the [Cygwin](#cygwin)
-    environment is consistent. It is recommended that you follow this
+  * On Windows, if using [Cygwin](#cygwin), extra care must be taken to make sure
+    the environment is consistent. It is recommended that you follow this
     procedure:
 
       * Create the directory that is going to contain the top directory of the
@@ -174,10 +174,10 @@
 
 Windows is the only non-POSIX OS supported by the JDK, and as such, requires
 some extra care. A POSIX support layer is required to build on Windows.
-Currently, the only supported such layer is Cygwin. (Msys is no longer
-supported due to a too old bash; msys2 and the new Windows Subsystem for Linux
-(WSL) would likely be possible to support in a future version but that would
-require effort to implement.)
+Currently, the only supported such layers are Cygwin and Windows Subsystem for
+Linux (WSL). (Msys is no longer supported due to a too old bash; msys2 would
+likely be possible to support in a future version but that would require effort
+to implement.)
 
 Internally in the build system, all paths are represented as Unix-style paths,
 e.g. `/cygdrive/c/hg/jdk9/Makefile` rather than `C:\hg\jdk9\Makefile`. This
@@ -188,7 +188,7 @@
 
 #### Cygwin
 
-A functioning [Cygwin](http://www.cygwin.com/) environment is thus required for
+A functioning [Cygwin](http://www.cygwin.com/) environment is required for
 building the JDK on Windows. If you have a 64-bit OS, we strongly recommend
 using the 64-bit version of Cygwin.
 
@@ -198,7 +198,7 @@
 update tools that are used by the JDK build process, and that can cause
 unexpected build problems.
 
-The JDK requires GNU Make 4.0 or greater on Windows. This is usually not a
+The JDK requires GNU Make 4.0 or greater in Cygwin. This is usually not a
 problem, since Cygwin currently only distributes GNU Make at a version above
 4.0.
 
@@ -221,6 +221,30 @@
 https://cygwin.com/faq/faq.html#faq.using.bloda) and the section on [fork()
 failures](https://cygwin.com/faq/faq.html#faq.using.fixing-fork-failures).
 
+#### Windows Subsystem for Linux (WSL)
+
+Windows 10 1809 or newer is supported due to a dependency on the wslpath utility
+and support for environment variable sharing through WSLENV. Version 1803 can
+work but intermittent build failures have been observed.
+
+It's possible to build both Windows and Linux binaries from WSL. To build
+Windows binaries, you must use a Windows boot JDK (located in a
+Windows-accessible directory). To build Linux binaries, you must use a Linux
+boot JDK. The default behavior is to build for Windows. To build for Linux, pass
+`--build=x86_64-unknown-linux-gnu --host=x86_64-unknown-linux-gnu` to
+`configure`.
+
+If building Windows binaries, the source code must be located in a Windows-
+accessible directory. This is because Windows executables (such as Visual Studio
+and the boot JDK) must be able to access the source code. Also, the drive where
+the source is stored must be mounted as case-insensitive by changing either
+/etc/fstab or /etc/wsl.conf in WSL. Individual directories may be corrected
+using the fsutil tool in case the source was cloned before changing the mount
+options.
+
+Note that while it's possible to build on WSL, testing is still not fully
+supported.
+
 ### Solaris
 
 See `make/devkit/solaris11.1-package-list.txt` for a list of recommended
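For the case-sensitivity requirement in the WSL section above, a minimal sketch of the two remedies it mentions (paths illustrative):

```bash
# 1. Mount Windows drives case-insensitively for the whole WSL instance
#    via /etc/wsl.conf (restart the WSL instance afterwards):
#      [automount]
#      options = "case=off"

# 2. Or repair an already-cloned tree, one directory at a time, from an
#    elevated Windows prompt:
fsutil.exe file setCaseSensitiveInfo C:\jdk\src disable
```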
--- a/make/Docs.gmk	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/Docs.gmk	Thu Jan 03 21:25:54 2019 +0100
@@ -517,7 +517,7 @@
   ) \
 )
 
-ifneq ($(PANDOC), )
+ifeq ($(ENABLE_PANDOC), true)
   # For all markdown files in $module/share/specs directories, convert them to
   # html, if we have pandoc (otherwise we'll just skip this).
 
--- a/make/Images.gmk	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/Images.gmk	Thu Jan 03 21:25:54 2019 +0100
@@ -99,7 +99,8 @@
 	)
         ifeq ($(BUILD_CDS_ARCHIVE), true)
 	  $(call LogWarn, Creating CDS archive for jdk image)
-	  $(JDK_IMAGE_DIR)/bin/java -Xshare:dump -Xmx128M -Xms128M $(LOG_INFO)
+	  $(FIXPATH) $(JDK_IMAGE_DIR)/bin/java \
+	      -Xshare:dump -Xmx128M -Xms128M $(LOG_INFO)
         endif
 	$(TOUCH) $@
 
@@ -114,7 +115,8 @@
 	)
         ifeq ($(BUILD_CDS_ARCHIVE), true)
 	  $(call LogWarn, Creating CDS archive for jre image)
-	  $(JRE_IMAGE_DIR)/bin/java -Xshare:dump -Xmx128M -Xms128M $(LOG_INFO)
+	  $(FIXPATH) $(JRE_IMAGE_DIR)/bin/java \
+	      -Xshare:dump -Xmx128M -Xms128M $(LOG_INFO)
         endif
 	$(TOUCH) $@
 
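The `$(FIXPATH)` prefix added in both recipes matters once `bin/java` is a Windows executable driven from Cygwin or WSL: fixpath rewrites Unix-style path arguments into Windows form before launching the real binary. Conceptually (paths hypothetical):

```bash
# In WSL mode fixpath is configured as "$FIXPATH_BIN -w" (see
# basics_windows.m4 below); it wraps the command like an exec shim:
fixpath.exe -w /mnt/c/jdk/build/images/jdk/bin/java -Xshare:dump -Xmx128M -Xms128M
# ... which effectively runs:
#   C:\jdk\build\images\jdk\bin\java.exe -Xshare:dump -Xmx128M -Xms128M
```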
--- a/make/autoconf/basics.m4	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/basics.m4	Thu Jan 03 21:25:54 2019 +0100
@@ -223,6 +223,8 @@
       BASIC_FIXUP_PATH_CYGWIN($1)
     elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
       BASIC_FIXUP_PATH_MSYS($1)
+    elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+      BASIC_FIXUP_PATH_WSL($1)
     else
       # We're on a unix platform. Hooray! :)
       path="[$]$1"
@@ -270,6 +272,8 @@
       BASIC_FIXUP_EXECUTABLE_CYGWIN($1)
     elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
       BASIC_FIXUP_EXECUTABLE_MSYS($1)
+    elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+      BASIC_FIXUP_EXECUTABLE_WSL($1)
     else
       # We're on a unix platform. Hooray! :)
       # First separate the path from the arguments. This will split at the first
@@ -607,10 +611,21 @@
 
   # These are not required on all platforms
   BASIC_PATH_PROGS(CYGPATH, cygpath)
+  BASIC_PATH_PROGS(WSLPATH, wslpath)
   BASIC_PATH_PROGS(DF, df)
   BASIC_PATH_PROGS(CPIO, [cpio bsdcpio])
   BASIC_PATH_PROGS(NICE, nice)
+
   BASIC_PATH_PROGS(PANDOC, pandoc)
+  if test -n "$PANDOC"; then
+    ENABLE_PANDOC="true"
+  else
+    ENABLE_PANDOC="false"
+  fi
+  AC_SUBST(ENABLE_PANDOC)
+
+  BASIC_PATH_PROGS(LSB_RELEASE, lsb_release)
+  BASIC_PATH_PROGS(CMD, [cmd.exe /mnt/c/Windows/System32/cmd.exe])
 ])
 
 ###############################################################################
@@ -631,11 +646,14 @@
 
   if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
     PATH_SEP=";"
+    EXE_SUFFIX=".exe"
     BASIC_CHECK_PATHS_WINDOWS
   else
     PATH_SEP=":"
+    EXE_SUFFIX=""
   fi
   AC_SUBST(PATH_SEP)
+  AC_SUBST(EXE_SUFFIX)
 
   # We get the top-level directory from the supporting wrappers.
   AC_MSG_CHECKING([for top-level directory])
@@ -980,6 +998,8 @@
             MAKE_EXPECTED_ENV='cygwin'
           elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
             MAKE_EXPECTED_ENV='msys'
+          elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+            MAKE_EXPECTED_ENV='x86_64-pc-linux-gnu'
           else
             AC_MSG_ERROR([Unknown Windows environment])
           fi
@@ -1267,7 +1287,18 @@
     if $DF $DF_LOCAL_ONLY_OPTION $1 > /dev/null 2>&1; then
       $2
     else
-      $3
+      # In WSL, local Windows drives are considered remote by df, but we are
+      # required to build into a directory accessible from Windows, so consider
+      # them local here.
+      if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+        if $DF $1 | $GREP -q "^[[A-Z]]:"; then
+          $2
+        else
+          $3
+        fi
+      else
+        $3
+      fi
     fi
   fi
 ])
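The WSL branch above compensates for a df quirk: under WSL 1, Windows drives (DrvFs) are classified as remote filesystems, yet a WSL build is required to live on one. Illustrative df output matching the `^[A-Z]:` grep (exact columns vary):

```bash
$ df /mnt/c/jdk
Filesystem     1K-blocks      Used Available Use% Mounted on
C:\            498876412 301234567 197641845  61% /mnt/c

# The added check: a filesystem name starting with a drive letter and
# a colon is treated as local for build purposes.
df /mnt/c/jdk | grep -q '^[A-Z]:' && echo "treated as local"
```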
--- a/make/autoconf/basics_windows.m4	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/basics_windows.m4	Thu Jan 03 21:25:54 2019 +0100
@@ -32,6 +32,13 @@
   elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
     unix_path=`$ECHO "$windows_path" | $SED -e 's,^\\(.\\):,/\\1,g' -e 's,\\\\,/,g'`
     $1="$unix_path"
+  elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+    # wslpath does not check its input, so only call it if an actual Windows
+    # path was given.
+    if $ECHO "$windows_path" | $GREP -q ["^[a-zA-Z]:[\\\\/]"]; then
+      unix_path=`$WSLPATH -u "$windows_path"`
+      $1="$unix_path"
+    fi
   fi
 ])
 
@@ -44,6 +51,9 @@
   elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
     windows_path=`cmd //c echo $unix_path`
     $1="$windows_path"
+  elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+    windows_path=`$WSLPATH -m "$unix_path"`
+    $1="$windows_path"
   fi
 ])
 
@@ -100,6 +110,31 @@
   fi
 ])
 
+# Helper function which possibly converts a path using DOS-style short mode.
+# If so, the updated path is stored in $new_path.
+# $1: The path to check
+AC_DEFUN([BASIC_MAKE_WINDOWS_SPACE_SAFE_WSL],
+[
+  input_path="$1"
+  # Check if we need to convert this using DOS-style short mode. If the path
+  # contains just simple characters, use it. Otherwise (spaces, weird characters),
+  # take no chances and rewrite it.
+  # Note: m4 eats our [], so we need to use @<:@ and @:>@ instead.
+  has_forbidden_chars=`$ECHO "$input_path" | $GREP [[^-_/:a-zA-Z0-9\\.]]`
+  if test "x$has_forbidden_chars" != x; then
+    # Now convert it to mixed DOS-style, short mode (no spaces, and / instead of \)
+    TOPDIR_windows="$TOPDIR"
+    BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([TOPDIR_windows])
+    # First convert to Windows path to make input valid for cmd
+    BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([input_path])
+    new_path=`$CMD /c $TOPDIR_windows/make/scripts/windowsShortName.bat "$input_path" \
+        | $SED -e 's|\r||g' \
+        | $TR \\\\\\\\ / | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+    # Rewrite back to unix style
+    BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([new_path])
+  fi
+])
+
 # FIXME: The BASIC_FIXUP_*_CYGWIN/MSYS is most likely too convoluted
 # and could probably be heavily simplified. However, all changes in this
 # area tend to need lot of testing in different scenarios, and in lack of
@@ -157,6 +192,23 @@
   all_fixpath_prefixes=("${all_fixpath_prefixes@<:@@@:>@}" "${new_path:0:10}")
 ])
 
+AC_DEFUN([BASIC_FIXUP_PATH_WSL],
+[
+  # Input might be given as Windows format, start by converting to
+  # unix format.
+  new_path="[$]$1"
+  BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([new_path])
+
+  # Call helper function which possibly converts this using DOS-style short mode.
+  # If so, the updated path is stored in $new_path.
+  BASIC_MAKE_WINDOWS_SPACE_SAFE_WSL([$new_path])
+
+  if test "x$path" != "x$new_path"; then
+    $1="$new_path"
+    AC_MSG_NOTICE([Rewriting $1 to "$new_path"])
+  fi
+])
+
 AC_DEFUN([BASIC_FIXUP_EXECUTABLE_CYGWIN],
 [
   # First separate the path from the arguments. This will split at the first
@@ -305,6 +357,79 @@
   fi
 ])
 
+AC_DEFUN([BASIC_FIXUP_EXECUTABLE_WSL],
+[
+  # First separate the path from the arguments. This will split at the first
+  # space.
+  complete="[$]$1"
+  path="${complete%% *}"
+  tmp="$complete EOL"
+  arguments="${tmp#* }"
+
+  # Input might be given as Windows format, start by converting to
+  # unix format.
+  new_path="$path"
+  BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([new_path])
+
+  # Now try to locate executable using which
+  new_path_bak="$new_path"
+  new_path=`$WHICH "$new_path" 2> /dev/null`
+  # bat and cmd files are not considered executable in WSL
+  if test "x$new_path" = x \
+      && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
+      && test "x`$LS \"$path\" 2>/dev/null`" != x; then
+    new_path="$new_path_back"
+  fi
+  if test "x$new_path" = x; then
+    # Oops. Which didn't find the executable.
+    # The splitting of arguments from the executable at a space might have been incorrect,
+    # since paths with space are more likely in Windows. Give it another try with the whole
+    # argument.
+    path="$complete"
+    arguments="EOL"
+    new_path="$path"
+    BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([new_path])
+    new_path_bak="$new_path"
+    new_path=`$WHICH "$new_path" 2> /dev/null`
+    # bat and cmd files are not considered executable in WSL
+    if test "x$new_path" = x \
+        && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
+        && test "x`$LS \"$path\" 2>/dev/null`" != x; then
+      new_path="$new_path_bak"
+    fi
+    if test "x$new_path" = x; then
+      # It's still not found. Now this is an unrecoverable error.
+      AC_MSG_NOTICE([The path of $1, which resolves as "$complete", is not found.])
+      has_space=`$ECHO "$complete" | $GREP " "`
+      if test "x$has_space" != x; then
+        AC_MSG_NOTICE([You might be mixing spaces in the path and extra arguments, which is not allowed.])
+      fi
+      AC_MSG_ERROR([Cannot locate the path of $1])
+    fi
+  fi
+
+  # In WSL, suffixes must be present for Windows executables
+  if test ! -f "$new_path"; then
+    # Try adding .exe or .cmd
+    if test -f "${new_path}.exe"; then
+      input_to_shortpath="${new_path}.exe"
+    elif test -f "${new_path}.cmd"; then
+      input_to_shortpath="${new_path}.cmd"
+    else
+      AC_MSG_NOTICE([The path of $1, which resolves as "$new_path", is invalid.])
+      AC_MSG_NOTICE([Neither "$new_path" nor "$new_path.exe/cmd" can be found])
+      AC_MSG_ERROR([Cannot locate the path of $1])
+    fi
+  else
+    input_to_shortpath="$new_path"
+  fi
+
+  # Call helper function which possibly converts this using DOS-style short mode.
+  # If so, the updated path is stored in $new_path.
+  new_path="$input_to_shortpath"
+  BASIC_MAKE_WINDOWS_SPACE_SAFE_WSL([$input_to_shortpath])
+])
+
 # Setup basic configuration paths, and platform-specific stuff related to PATHs.
 AC_DEFUN([BASIC_CHECK_PATHS_WINDOWS],
 [
@@ -353,8 +478,28 @@
     BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(MSYS_ROOT_PATH)
     AC_MSG_RESULT([$MSYS_ROOT_PATH])
     WINDOWS_ENV_ROOT_PATH="$MSYS_ROOT_PATH"
+  elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+    AC_MSG_CHECKING([Windows version])
+    # m4 replaces [ and ] so we use @<:@ and @:>@ instead
+    WINDOWS_VERSION=`$CMD /c ver.exe | $EGREP -o '(@<:@0-9@:>@+\.)+@<:@0-9@:>@+'`
+    AC_MSG_RESULT([$WINDOWS_VERSION])
+
+    AC_MSG_CHECKING([WSL kernel version])
+    WSL_KERNEL_VERSION=`$UNAME -v`
+    AC_MSG_RESULT([$WSL_KERNEL_VERSION])
+
+    AC_MSG_CHECKING([WSL kernel release])
+    WSL_KERNEL_RELEASE=`$UNAME -r`
+    AC_MSG_RESULT([$WSL_KERNEL_RELEASE])
+
+    AC_MSG_CHECKING([WSL distribution])
+    WSL_DISTRIBUTION=`$LSB_RELEASE -d | sed 's/Description:\t//'`
+    AC_MSG_RESULT([$WSL_DISTRIBUTION])
+
+    WINDOWS_ENV_VENDOR='WSL'
+    WINDOWS_ENV_VERSION="$WSL_DISTRIBUTION $WSL_KERNEL_VERSION $WSL_KERNEL_RELEASE (on Windows build $WINDOWS_VERSION)"
   else
-    AC_MSG_ERROR([Unknown Windows environment. Neither cygwin nor msys was detected.])
+    AC_MSG_ERROR([Unknown Windows environment. Neither cygwin, msys, nor wsl was detected.])
   fi
 
   # Test if windows or unix (cygwin/msys) find is first in path.
@@ -395,6 +540,8 @@
           | tr ' ' '\n' | $GREP '^/./' | $SORT | $UNIQ`
       fixpath_argument_list=`echo $all_unique_prefixes  | tr ' ' '@'`
       FIXPATH="$FIXPATH_BIN -m$fixpath_argument_list"
+    elif test "x$OPENJDK_BUILD_OS_ENV" = xwindows.wsl; then
+      FIXPATH="$FIXPATH_BIN -w"
     fi
     FIXPATH_SRC_W="$FIXPATH_SRC"
     FIXPATH_BIN_W="$FIXPATH_BIN"
@@ -412,6 +559,17 @@
       AC_MSG_ERROR([Could not create $FIXPATH_BIN])
     fi
     AC_MSG_RESULT([yes])
+
+    if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+      OLD_WSLENV="$WSLENV"
+      WSLENV=`$ECHO $WSLENV | $SED 's/PATH\/l://'`
+      BASIC_APPEND_TO_PATH(WSLENV, "FIXPATH_PATH")
+      export WSLENV
+      export FIXPATH_PATH=$VS_PATH_WINDOWS
+      AC_MSG_NOTICE([FIXPATH_PATH is $FIXPATH_PATH])
+      AC_MSG_NOTICE([Rewriting WSLENV from $OLD_WSLENV to $WSLENV])
+    fi
+
     AC_MSG_CHECKING([if fixpath.exe works])
     cd $FIXPATH_DIR
     $FIXPATH $CC $FIXPATH_SRC -Fe$FIXPATH_DIR/fixpath2.exe \
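Most of the new WSL plumbing in this file rests on two facilities: the `wslpath` utility for path translation and the `WSLENV` variable for sharing environment variables with Windows processes. A short sketch (outputs are typical, not captured from a build):

```bash
# wslpath -u: Windows -> Unix, as used by BASIC_WINDOWS_REWRITE_AS_UNIX_PATH
$ wslpath -u 'C:\Program Files\Java'
/mnt/c/Program Files/Java

# wslpath -m: Unix -> "mixed" Windows form with forward slashes
$ wslpath -m /mnt/c/jdk
C:/jdk

# WSLENV: names listed here are forwarded to Windows children. The /l
# suffix (stripped from PATH above) translates path lists; FIXPATH_PATH
# is appended without a suffix so fixpath.exe receives it verbatim.
$ export FIXPATH_PATH='C:\VS\bin;C:\Windows\System32'
$ WSLENV=$WSLENV:FIXPATH_PATH cmd.exe /c set FIXPATH_PATH
FIXPATH_PATH=C:\VS\bin;C:\Windows\System32
```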
--- a/make/autoconf/boot-jdk.m4	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/boot-jdk.m4	Thu Jan 03 21:25:54 2019 +0100
@@ -63,18 +63,18 @@
     # If previous step claimed to have found a JDK, check it to see if it seems to be valid.
     if test "x$BOOT_JDK_FOUND" = xmaybe; then
       # Do we have a bin/java?
-      if test ! -x "$BOOT_JDK/bin/java"; then
+      if test ! -x "$BOOT_JDK/bin/java$EXE_SUFFIX"; then
         AC_MSG_NOTICE([Potential Boot JDK found at $BOOT_JDK did not contain bin/java; ignoring])
         BOOT_JDK_FOUND=no
       else
         # Do we have a bin/javac?
-        if test ! -x "$BOOT_JDK/bin/javac"; then
+        if test ! -x "$BOOT_JDK/bin/javac$EXE_SUFFIX"; then
           AC_MSG_NOTICE([Potential Boot JDK found at $BOOT_JDK did not contain bin/javac; ignoring])
           AC_MSG_NOTICE([(This might be an JRE instead of an JDK)])
           BOOT_JDK_FOUND=no
         else
           # Oh, this is looking good! We probably have found a proper JDK. Is it the correct version?
-          BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" $USER_BOOT_JDK_OPTIONS -version 2>&1 | $HEAD -n 1`
+          BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java$EXE_SUFFIX" $USER_BOOT_JDK_OPTIONS -version 2>&1 | $HEAD -n 1`
           if [ [[ "$BOOT_JDK_VERSION" =~ "Picked up" ]] ]; then
             AC_MSG_NOTICE([You have _JAVA_OPTIONS or JAVA_TOOL_OPTIONS set. This can mess up the build. Please use --with-boot-jdk-jvmargs instead.])
             AC_MSG_NOTICE([Java reports: "$BOOT_JDK_VERSION".])
@@ -101,7 +101,7 @@
             AC_MSG_CHECKING([for Boot JDK])
             AC_MSG_RESULT([$BOOT_JDK])
             AC_MSG_CHECKING([Boot JDK version])
-            BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" $USER_BOOT_JDK_OPTIONS -version 2>&1 | $TR '\n\r' '  '`
+            BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java$EXE_SUFFIX" $USER_BOOT_JDK_OPTIONS -version 2>&1 | $TR '\n\r' '  '`
             AC_MSG_RESULT([$BOOT_JDK_VERSION])
           fi # end check jdk version
         fi # end check javac
@@ -335,11 +335,11 @@
   AC_SUBST(BOOT_JDK)
 
   # Setup tools from the Boot JDK.
-  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVA, java)
-  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAC, javac)
-  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVADOC, javadoc)
-  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAR, jar)
-  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JARSIGNER, jarsigner)
+  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVA, java$EXE_SUFFIX)
+  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAC, javac$EXE_SUFFIX)
+  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVADOC, javadoc$EXE_SUFFIX)
+  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAR, jar$EXE_SUFFIX)
+  BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JARSIGNER, jarsigner$EXE_SUFFIX)
 
   # Finally, set some other options...
 
--- a/make/autoconf/build-aux/config.guess	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/build-aux/config.guess	Thu Jan 03 21:25:54 2019 +0100
@@ -60,6 +60,15 @@
   esac
 fi
 
+# Test and fix wsl
+echo $OUT | grep x86_64-unknown-linux-gnu > /dev/null 2> /dev/null
+if test $? = 0; then
+  uname -r | grep Microsoft > /dev/null 2> /dev/null
+  if test $? = 0; then
+    OUT="x86_64-pc-wsl"
+  fi
+fi
+
 # Test and fix architecture string on AIX
 # On AIX 'config.guess' returns 'powerpc' as architecture but 'powerpc' is
 # implicitely handled as 32-bit architecture in 'platform.m4' so we check
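The rewrite above keys off the WSL 1 kernel identifying itself through `uname`. Typical values (illustrative; release numbers vary by Windows build):

```bash
$ uname -r                 # WSL 1 kernel releases carry a Microsoft tag
4.4.0-17763-Microsoft

$ bash make/autoconf/build-aux/config.guess
x86_64-pc-wsl              # rewritten from x86_64-unknown-linux-gnu
```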
--- a/make/autoconf/build-aux/config.sub	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/build-aux/config.sub	Thu Jan 03 21:25:54 2019 +0100
@@ -29,7 +29,13 @@
 
 DIR=`dirname $0`
 
-# First, filter out everything that doesn't begin with "aarch64-"
+# Allow wsl
+if echo $* | grep x86_64-pc-wsl >/dev/null ; then
+    echo $*
+    exit
+fi
+
+# Filter out everything that doesn't begin with "aarch64-"
 if ! echo $* | grep '^aarch64-' >/dev/null ; then
     . $DIR/autoconf-config.sub "$@"
     # autoconf-config.sub exits, so we never reach here, but just in
--- a/make/autoconf/compare.sh.in	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/compare.sh.in	Thu Jan 03 21:25:54 2019 +0100
@@ -31,6 +31,7 @@
 
 export LEGACY_BUILD_DIR=@OPENJDK_TARGET_OS@-@OPENJDK_TARGET_CPU_LEGACY@
 
+export OPENJDK_BUILD_OS_ENV="@OPENJDK_BUILD_OS_ENV@"
 export OPENJDK_TARGET_OS="@OPENJDK_TARGET_OS@"
 export OPENJDK_TARGET_CPU="@OPENJDK_TARGET_CPU@"
 export DEBUG_LEVEL="@DEBUG_LEVEL@"
@@ -73,18 +74,23 @@
 export OUTPUTDIR="@OUTPUTDIR@"
 
 if [ "@COMPILE_TYPE@" != "cross" ]; then
-    export JAVAP="@FIXPATH@ $OUTPUTDIR/jdk/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
-    export JIMAGE="@FIXPATH@ $OUTPUTDIR/jdk/bin/jimage"
+  export JAVAP="@FIXPATH@ $OUTPUTDIR/jdk/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
+  export JIMAGE="@FIXPATH@ $OUTPUTDIR/jdk/bin/jimage"
 elif [ "@CREATE_BUILDJDK@" = "true" ]; then
-    export JAVAP="@FIXPATH@ $OUTPUTDIR/buildjdk/jdk/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
-    export JIMAGE="@FIXPATH@ $OUTPUTDIR/buildjdk/jdk/bin/jimage"
+  export JAVAP="@FIXPATH@ $OUTPUTDIR/buildjdk/jdk/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
+  export JIMAGE="@FIXPATH@ $OUTPUTDIR/buildjdk/jdk/bin/jimage"
 else
-    export JAVAP="@FIXPATH@ @BUILD_JDK@/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
-    export JIMAGE="@FIXPATH@ @BUILD_JDK@/bin/jimage"
+  export JAVAP="@FIXPATH@ @BUILD_JDK@/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
+  export JIMAGE="@FIXPATH@ @BUILD_JDK@/bin/jimage"
 fi
 
 if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
-  export PATH="@VS_PATH@"
+  if [ "$OPENJDK_BUILD_OS_ENV" = "windows.wsl" ]; then
+    export FIXPATH_PATH="@VS_PATH_WINDOWS@"
+    export WSLENV="$WSLENV:FIXPATH_PATH:DEBUG_FIXPATH"
+  else
+    export PATH="@VS_PATH@"
+  fi
 fi
 
 # Now locate the main script and run it.
--- a/make/autoconf/flags-cflags.m4	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/flags-cflags.m4	Thu Jan 03 21:25:54 2019 +0100
@@ -559,7 +559,7 @@
     TOOLCHAIN_CFLAGS="-errshort=tags"
 
     TOOLCHAIN_CFLAGS_JDK="-mt $TOOLCHAIN_FLAGS"
-    TOOLCHAIN_CFLAGS_JDK_CONLY="-xc99=%none -xCC -Xa -W0,-noglobal $TOOLCHAIN_CFLAGS" # C only
+    TOOLCHAIN_CFLAGS_JDK_CONLY="-xCC -Xa -W0,-noglobal $TOOLCHAIN_CFLAGS" # C only
     TOOLCHAIN_CFLAGS_JDK_CXXONLY="-features=no%except -norunpath -xnolib" # CXX only
     TOOLCHAIN_CFLAGS_JVM="-template=no%extdef -features=no%split_init \
         -library=stlport4 -mt -features=no%except $TOOLCHAIN_FLAGS"
--- a/make/autoconf/platform.m4	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/platform.m4	Thu Jan 03 21:25:54 2019 +0100
@@ -188,6 +188,10 @@
       VAR_OS=windows
       VAR_OS_ENV=windows.cygwin
       ;;
+    *wsl*)
+      VAR_OS=windows
+      VAR_OS_ENV=windows.wsl
+      ;;
     *mingw*)
       VAR_OS=windows
       VAR_OS_ENV=windows.msys
--- a/make/autoconf/spec.gmk.in	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/spec.gmk.in	Thu Jan 03 21:25:54 2019 +0100
@@ -122,7 +122,12 @@
 ifeq ($(OPENJDK_TARGET_OS), windows)
   # On Windows, the Visual Studio toolchain needs the PATH to be adjusted
   # to include Visual Studio tools (this needs to be in cygwin/msys style).
-  export PATH:=@VS_PATH@
+  ifeq ($(OPENJDK_TARGET_OS_ENV), windows.wsl)
+    export FIXPATH_PATH:=@VS_PATH_WINDOWS@
+    export WSLENV:=$(WSLENV):FIXPATH_PATH:DEBUG_FIXPATH
+  else
+    export PATH:=@VS_PATH@
+  endif
 endif
 
 SYSROOT_CFLAGS := @SYSROOT_CFLAGS@
@@ -762,6 +767,7 @@
 MSVCP_DLL:=@MSVCP_DLL@
 UCRT_DLL_DIR:=@UCRT_DLL_DIR@
 STLPORT_LIB:=@STLPORT_LIB@
+ENABLE_PANDOC:=@ENABLE_PANDOC@
 
 ####################################################
 #
--- a/make/autoconf/toolchain.m4	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/toolchain.m4	Thu Jan 03 21:25:54 2019 +0100
@@ -180,7 +180,6 @@
     SHARED_LIBRARY='[$]1.dll'
     STATIC_LIBRARY='[$]1.lib'
     OBJ_SUFFIX='.obj'
-    EXE_SUFFIX='.exe'
   else
     LIBRARY_PREFIX=lib
     SHARED_LIBRARY_SUFFIX='.so'
@@ -188,7 +187,6 @@
     SHARED_LIBRARY='lib[$]1.so'
     STATIC_LIBRARY='lib[$]1.a'
     OBJ_SUFFIX='.o'
-    EXE_SUFFIX=''
     if test "x$OPENJDK_TARGET_OS" = xmacosx; then
       # For full static builds, we're overloading the SHARED_LIBRARY
       # variables in order to limit the amount of changes required.
@@ -212,7 +210,6 @@
   AC_SUBST(SHARED_LIBRARY)
   AC_SUBST(STATIC_LIBRARY)
   AC_SUBST(OBJ_SUFFIX)
-  AC_SUBST(EXE_SUFFIX)
 ])
 
 # Determine which toolchain type to use, and make sure it is valid for this
@@ -281,13 +278,13 @@
 
   TOOLCHAIN_CC_BINARY_clang="clang"
   TOOLCHAIN_CC_BINARY_gcc="gcc"
-  TOOLCHAIN_CC_BINARY_microsoft="cl"
+  TOOLCHAIN_CC_BINARY_microsoft="cl$EXE_SUFFIX"
   TOOLCHAIN_CC_BINARY_solstudio="cc"
   TOOLCHAIN_CC_BINARY_xlc="xlc_r"
 
   TOOLCHAIN_CXX_BINARY_clang="clang++"
   TOOLCHAIN_CXX_BINARY_gcc="g++"
-  TOOLCHAIN_CXX_BINARY_microsoft="cl"
+  TOOLCHAIN_CXX_BINARY_microsoft="cl$EXE_SUFFIX"
   TOOLCHAIN_CXX_BINARY_solstudio="CC"
   TOOLCHAIN_CXX_BINARY_xlc="xlC_r"
 
@@ -333,9 +330,17 @@
   if test "x$OPENJDK_BUILD_OS" = "xwindows" \
       && test "x$TOOLCHAIN_TYPE" = "xmicrosoft"; then
     TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV
-    # Reset path to VS_PATH. It will include everything that was on PATH at the time we
-    # ran TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV.
-    PATH="$VS_PATH"
+    if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+      # Append VS_PATH. In WSL, VS_PATH will not contain the WSL env path needed
+      # for using basic Unix tools, so need to keep the original PATH.
+      BASIC_APPEND_TO_PATH(PATH, $VS_PATH)
+      BASIC_APPEND_TO_PATH(WSLENV, "PATH/l:LIB:INCLUDE")
+      export WSLENV
+    else
+      # Reset path to VS_PATH. It will include everything that was on PATH at the time we
+      # ran TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV.
+      PATH="$VS_PATH"
+    fi
     # The microsoft toolchain also requires INCLUDE and LIB to be set.
     export INCLUDE="$VS_INCLUDE"
     export LIB="$VS_LIB"
@@ -430,7 +435,7 @@
     # There is no specific version flag, but all output starts with a version string.
     # First line typically looks something like:
     # Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86
-    COMPILER_VERSION_OUTPUT=`$COMPILER 2>&1 | $HEAD -n 1 | $TR -d '\r'`
+    COMPILER_VERSION_OUTPUT=`"$COMPILER" 2>&1 | $GREP -v 'ERROR.*UtilTranslatePathList' | $HEAD -n 1 | $TR -d '\r'`
     # Check that this is likely to be Microsoft CL.EXE.
     $ECHO "$COMPILER_VERSION_OUTPUT" | $GREP "Microsoft.*Compiler" > /dev/null
     if test $? -ne 0; then
@@ -698,7 +703,7 @@
     # In the Microsoft toolchain we have a separate LD command "link".
     # Make sure we reject /usr/bin/link (as determined in CYGWIN_LINK), which is
     # a cygwin program for something completely different.
-    AC_CHECK_PROG([LD], [link],[link],,, [$CYGWIN_LINK])
+    AC_CHECK_PROG([LD], [link$EXE_SUFFIX],[link$EXE_SUFFIX],,, [$CYGWIN_LINK])
     BASIC_FIXUP_EXECUTABLE(LD)
     # Verify that we indeed succeeded with this trick.
     AC_MSG_CHECKING([if the found link.exe is actually the Visual Studio linker])
@@ -750,7 +755,7 @@
   #
   if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     # The corresponding ar tool is lib.exe (used to create static libraries)
-    AC_CHECK_PROG([AR], [lib],[lib],,,)
+    AC_CHECK_PROG([AR], [lib$EXE_SUFFIX],[lib$EXE_SUFFIX],,,)
   elif test "x$TOOLCHAIN_TYPE" = xgcc; then
     BASIC_CHECK_TOOLS(AR, ar gcc-ar)
   else
@@ -774,12 +779,12 @@
   fi
 
   if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
-    AC_CHECK_PROG([MT], [mt], [mt],,, [/usr/bin/mt])
+    AC_CHECK_PROG([MT], [mt$EXE_SUFFIX], [mt$EXE_SUFFIX],,, [/usr/bin/mt])
     BASIC_FIXUP_EXECUTABLE(MT)
     # Setup the resource compiler (RC)
-    AC_CHECK_PROG([RC], [rc], [rc],,, [/usr/bin/rc])
+    AC_CHECK_PROG([RC], [rc$EXE_SUFFIX], [rc$EXE_SUFFIX],,, [/usr/bin/rc])
     BASIC_FIXUP_EXECUTABLE(RC)
-    AC_CHECK_PROG([DUMPBIN], [dumpbin], [dumpbin],,,)
+    AC_CHECK_PROG([DUMPBIN], [dumpbin$EXE_SUFFIX], [dumpbin$EXE_SUFFIX],,,)
     BASIC_FIXUP_EXECUTABLE(DUMPBIN)
     # We need to check for 'msbuild.exe' because at the place where we expect to
     # find 'msbuild.exe' there's also a directory called 'msbuild' and configure
@@ -788,7 +793,7 @@
     # Notice that we intentionally don't fix up the path to MSBUILD because we
     # will call it in a DOS shell during freetype detection on Windows (see
     # 'LIB_SETUP_FREETYPE' in "libraries.m4"
-    AC_CHECK_PROG([MSBUILD], [msbuild.exe], [msbuild.exe],,,)
+    AC_CHECK_PROG([MSBUILD], [msbuild$EXE_SUFFIX], [msbuild$EXE_SUFFIX],,,)
   fi
 
   if test "x$OPENJDK_TARGET_OS" = xsolaris; then
@@ -999,7 +1004,7 @@
   # Check for extra potential brokenness.
   if test  "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     # On Windows, double-check that we got the right compiler.
-    CC_VERSION_OUTPUT=`$CC 2>&1 | $HEAD -n 1 | $TR -d '\r'`
+    CC_VERSION_OUTPUT=`$CC 2>&1 | $GREP -v 'ERROR.*UtilTranslatePathList' | $HEAD -n 1 | $TR -d '\r'`
     COMPILER_CPU_TEST=`$ECHO $CC_VERSION_OUTPUT | $SED -n "s/^.* \(.*\)$/\1/p"`
     if test "x$OPENJDK_TARGET_CPU" = "xx86"; then
       if test "x$COMPILER_CPU_TEST" != "x80x86" -a "x$COMPILER_CPU_TEST" != "xx86"; then
--- a/make/autoconf/toolchain_windows.m4	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/autoconf/toolchain_windows.m4	Thu Jan 03 21:25:54 2019 +0100
@@ -115,7 +115,7 @@
         VCVARSFILES="vc/bin/vcvars32.bat vc/auxiliary/build/vcvars32.bat"
       else
         VCVARSFILES="vc/bin/amd64/vcvars64.bat vc/bin/x86_amd64/vcvarsx86_amd64.bat \
-            vc/auxiliary/build/vcvarsx86_amd64.bat vc/auxiliary/build/vcvars64.bat"
+            VC/Auxiliary/Build/vcvarsx86_amd64.bat VC/Auxiliary/Build/vcvars64.bat"
       fi
 
       for VCVARSFILE in $VCVARSFILES; do
@@ -222,7 +222,6 @@
       [C:/Program Files/$VS_INSTALL_DIR], [well-known name])
   TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([${VS_VERSION}],
       [C:/Program Files (x86)/$VS_INSTALL_DIR], [well-known name])
-
   if test "x$SDK_INSTALL_DIR" != x; then
     if test "x$ProgramW6432" != x; then
       TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([${VS_VERSION}],
@@ -339,7 +338,7 @@
 [
   # Store path to cygwin link.exe to help excluding it when searching for
   # VS linker. This must be done before changing the PATH when looking for VS.
-  AC_PATH_PROG(CYGWIN_LINK, link)
+  AC_PATH_PROG(CYGWIN_LINK, link.exe)
   if test "x$CYGWIN_LINK" != x; then
     AC_MSG_CHECKING([if the first found link.exe is actually the Cygwin link tool])
     "$CYGWIN_LINK" --version > /dev/null
@@ -372,8 +371,13 @@
       # Instead create a shell script which will set the relevant variables when run.
       WINPATH_VS_ENV_CMD="$VS_ENV_CMD"
       BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([WINPATH_VS_ENV_CMD])
-      WINPATH_BASH="$BASH"
-      BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([WINPATH_BASH])
+
+      if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+        WINPATH_BASH="bash"
+      else
+        WINPATH_BASH="$BASH"
+        BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([WINPATH_BASH])
+      fi
 
       # Generate a DOS batch file which runs $VS_ENV_CMD, and then creates a shell
       # script (executable by bash) that will setup the important variables.
@@ -381,41 +385,65 @@
       $ECHO "@echo off" >  $EXTRACT_VC_ENV_BAT_FILE
       # This will end up something like:
       # call C:/progra~2/micros~2.0/vc/bin/amd64/vcvars64.bat
-      $ECHO "call $WINPATH_VS_ENV_CMD $VS_ENV_ARGS" >> $EXTRACT_VC_ENV_BAT_FILE
+      $ECHO "call \"$WINPATH_VS_ENV_CMD\" $VS_ENV_ARGS" >> $EXTRACT_VC_ENV_BAT_FILE
       # In some cases, the VS_ENV_CMD will change directory, change back so
       # the set-vs-env.sh ends up in the right place.
       $ECHO 'cd %~dp0' >> $EXTRACT_VC_ENV_BAT_FILE
-      # These will end up something like:
-      # C:/CygWin/bin/bash -c 'echo VS_PATH=\"$PATH\" > localdevenv.sh
-      # The trailing space for everyone except PATH is no typo, but is needed due
-      # to trailing \ in the Windows paths. These will be stripped later.
-      $ECHO "$WINPATH_BASH -c 'echo VS_PATH="'\"$PATH\" > set-vs-env.sh' \
-          >> $EXTRACT_VC_ENV_BAT_FILE
-      $ECHO "$WINPATH_BASH -c 'echo VS_INCLUDE="'\"$INCLUDE\;$include \" >> set-vs-env.sh' \
-          >> $EXTRACT_VC_ENV_BAT_FILE
-      $ECHO "$WINPATH_BASH -c 'echo VS_LIB="'\"$LIB\;$lib \" >> set-vs-env.sh' \
-          >> $EXTRACT_VC_ENV_BAT_FILE
-      $ECHO "$WINPATH_BASH -c 'echo VCINSTALLDIR="'\"$VCINSTALLDIR \" >> set-vs-env.sh' \
-          >> $EXTRACT_VC_ENV_BAT_FILE
-      $ECHO "$WINPATH_BASH -c 'echo WindowsSdkDir="'\"$WindowsSdkDir \" >> set-vs-env.sh' \
-          >> $EXTRACT_VC_ENV_BAT_FILE
-      $ECHO "$WINPATH_BASH -c 'echo WINDOWSSDKDIR="'\"$WINDOWSSDKDIR \" >> set-vs-env.sh' \
-          >> $EXTRACT_VC_ENV_BAT_FILE
+      if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+        # These will end up something like:
+        # echo VS_PATH=\"$PATH\" > set-vs-env.sh
+        # The trailing space for everyone except PATH is no typo, but is needed due
+        # to trailing \ in the Windows paths. These will be stripped later.
+        # Trying pure CMD extract. This results in windows paths that need to
+        # be converted post extraction, but a simpler script.
+        $ECHO 'echo VS_PATH="%PATH%" > set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO 'echo VS_INCLUDE="%INCLUDE% " >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO 'echo VS_LIB="%LIB% " >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO 'echo VCINSTALLDIR="%VCINSTALLDIR% " >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO 'echo WindowsSdkDir="%WindowsSdkDir% " >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO 'echo WINDOWSSDKDIR="%WINDOWSSDKDIR% " >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+      else
+        # These will end up something like:
+        # C:/CygWin/bin/bash -c 'echo VS_PATH=\"$PATH\" > set-vs-env.sh
+        # The trailing space for everyone except PATH is no typo, but is needed due
+        # to trailing \ in the Windows paths. These will be stripped later.
+        $ECHO "$WINPATH_BASH -c 'echo VS_PATH="'\"$PATH\" > set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO "$WINPATH_BASH -c 'echo VS_INCLUDE="'\"$INCLUDE\;$include \" >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO "$WINPATH_BASH -c 'echo VS_LIB="'\"$LIB\;$lib \" >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO "$WINPATH_BASH -c 'echo VCINSTALLDIR="'\"$VCINSTALLDIR \" >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO "$WINPATH_BASH -c 'echo WindowsSdkDir="'\"$WindowsSdkDir \" >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+        $ECHO "$WINPATH_BASH -c 'echo WINDOWSSDKDIR="'\"$WINDOWSSDKDIR \" >> set-vs-env.sh' \
+            >> $EXTRACT_VC_ENV_BAT_FILE
+      fi
 
       # Now execute the newly created bat file.
       # The | cat is to stop SetEnv.Cmd to mess with system colors on msys.
       # Change directory so we don't need to mess with Windows paths in redirects.
       cd $VS_ENV_TMP_DIR
-      cmd /c extract-vs-env.bat | $CAT
+      $CMD /c extract-vs-env.bat | $CAT
       cd $CURDIR
 
       if test ! -s $VS_ENV_TMP_DIR/set-vs-env.sh; then
-        AC_MSG_NOTICE([Could not succesfully extract the envionment variables needed for the VS setup.])
+        AC_MSG_NOTICE([Could not successfully extract the environment variables needed for the VS setup.])
         AC_MSG_NOTICE([Try setting --with-tools-dir to the VC/bin directory within the VS installation])
         AC_MSG_NOTICE([or run "bash.exe -l" from a VS command prompt and then run configure from there.])
         AC_MSG_ERROR([Cannot continue])
       fi
 
+      # Remove windows line endings
+      $SED -i -e 's|\r||g' $VS_ENV_TMP_DIR/set-vs-env.sh
+
       # Now set all paths and other env variables. This will allow the rest of
       # the configure script to find and run the compiler in the proper way.
       AC_MSG_NOTICE([Setting extracted environment variables])
@@ -455,9 +483,29 @@
       AC_SUBST(VS_INCLUDE)
       AC_SUBST(VS_LIB)
 
-      # Convert VS_INCLUDE into SYSROOT_CFLAGS
       OLDIFS="$IFS"
       IFS=";"
+      if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.wsl"; then
+        # Convert VS_PATH to unix style
+        VS_PATH_WINDOWS="$VS_PATH"
+        VS_PATH=""
+        for i in $VS_PATH_WINDOWS; do
+          path=$i
+          # Only process non-empty elements
+          if test "x$path" != x; then
+            IFS="$OLDIFS"
+            # Check that directory exists before calling fixup_path
+            testpath=$path
+            BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([testpath])
+            if test -d "$testpath"; then
+              BASIC_FIXUP_PATH([path])
+              BASIC_APPEND_TO_PATH(VS_PATH, $path)
+            fi
+            IFS=";"
+          fi
+        done
+      fi
+      # Convert VS_INCLUDE into SYSROOT_CFLAGS
       for i in $VS_INCLUDE; do
         ipath=$i
         # Only process non-empty elements
@@ -490,6 +538,8 @@
         fi
       done
       IFS="$OLDIFS"
+
+      AC_SUBST(VS_PATH_WINDOWS)
     fi
   else
     AC_MSG_RESULT([not found])
@@ -600,10 +650,10 @@
       BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(CYGWIN_VS_TOOLS_DIR)
       if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
         POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VS_TOOLS_DIR" -name $DLL_NAME \
-	    | $GREP -i /x64/ | $HEAD --lines 1`
+        | $GREP -i /x64/ | $HEAD --lines 1`
       else
         POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VS_TOOLS_DIR" -name $DLL_NAME \
-	    | $GREP -i /x86/ | $HEAD --lines 1`
+        | $GREP -i /x86/ | $HEAD --lines 1`
       fi
       TOOLCHAIN_CHECK_POSSIBLE_MSVC_DLL([$DLL_NAME], [$POSSIBLE_MSVC_DLL],
           [search of VS100COMNTOOLS])
@@ -616,14 +666,14 @@
     if test "x$CYGWIN_VC_INSTALL_DIR" != x; then
       if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
         POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VC_INSTALL_DIR" -name $DLL_NAME \
-	    | $GREP x64 | $HEAD --lines 1`
+        | $GREP x64 | $HEAD --lines 1`
       else
         POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VC_INSTALL_DIR" -name $DLL_NAME \
-	    | $GREP x86 | $GREP -v ia64 | $GREP -v x64 | $HEAD --lines 1`
+        | $GREP x86 | $GREP -v ia64 | $GREP -v x64 | $HEAD --lines 1`
         if test "x$POSSIBLE_MSVC_DLL" = x; then
           # We're grasping at straws now...
           POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VC_INSTALL_DIR" -name $DLL_NAME \
-	      | $HEAD --lines 1`
+          | $HEAD --lines 1`
         fi
       fi
 
@@ -693,7 +743,7 @@
   if test "x$USE_UCRT" = "xtrue"; then
     AC_MSG_CHECKING([for UCRT DLL dir])
     if test "x$with_ucrt_dll_dir" != x; then
-      if test -z "$(ls -d "$with_ucrt_dll_dir/*.dll" 2> /dev/null)"; then
+      if test -z "$(ls -d $with_ucrt_dll_dir/*.dll 2> /dev/null)"; then
         AC_MSG_RESULT([no])
         AC_MSG_ERROR([Could not find any dlls in $with_ucrt_dll_dir])
       else
@@ -713,8 +763,16 @@
       fi
       UCRT_DLL_DIR="$CYGWIN_WINDOWSSDKDIR/Redist/ucrt/DLLs/$dll_subdir"
       if test -z "$(ls -d "$UCRT_DLL_DIR/"*.dll 2> /dev/null)"; then
-        AC_MSG_RESULT([no])
-        AC_MSG_ERROR([Could not find any dlls in $UCRT_DLL_DIR])
+        # Try with version subdir
+        UCRT_DLL_DIR="`ls -d $CYGWIN_WINDOWSSDKDIR/Redist/*/ucrt/DLLs/$dll_subdir \
+            2> /dev/null | $SORT -d | $HEAD -n1`"
+        if test -z "$UCRT_DLL_DIR" \
+            || test -z "$(ls -d "$UCRT_DLL_DIR/"*.dll 2> /dev/null)"; then
+          AC_MSG_RESULT([no])
+          AC_MSG_ERROR([Could not find any dlls in $UCRT_DLL_DIR])
+        else
+          AC_MSG_RESULT($UCRT_DLL_DIR)
+        fi
       else
         AC_MSG_RESULT($UCRT_DLL_DIR)
       fi
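To summarize the extraction mechanics above: a generated `extract-vs-env.bat` calls the `vcvars*.bat` setup and then dumps the resulting environment into `set-vs-env.sh`. On WSL this is done with plain cmd `echo` (there is no Windows bash to call), which produces CRLF line endings, hence the added `sed`. The resulting script looks roughly like this sketch (paths illustrative; note the deliberate trailing spaces guarding trailing backslashes):

```bash
# set-vs-env.sh, after the CRLF-stripping sed; sourced by configure:
VS_PATH="C:\VS\VC\Tools\MSVC\14.16\bin\HostX64\x64;C:\Windows\System32"
VS_INCLUDE="C:\VS\VC\Tools\MSVC\14.16\include "
VS_LIB="C:\VS\VC\Tools\MSVC\14.16\lib\x64 "
VCINSTALLDIR="C:\VS\VC\ "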
--- a/make/gendata/Gendata-java.base.gmk	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/gendata/Gendata-java.base.gmk	Thu Jan 03 21:25:54 2019 +0100
@@ -55,7 +55,7 @@
 $(GENDATA_CURDATA): $(TOPDIR)/make/data/currency/CurrencyData.properties $(BUILD_TOOLS_JDK)
 	$(call MakeDir, $(@D))
 	$(RM) $@
-	$(TOOL_GENERATECURRENCYDATA) -o $@.tmp < $<
+	$(TOOL_GENERATECURRENCYDATA) -o $@.tmp -i $<
 	$(MV) $@.tmp $@
 	$(CHMOD) 444 $@
 
--- a/make/gensrc/GensrcBuffer.gmk	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/gensrc/GensrcBuffer.gmk	Thu Jan 03 21:25:54 2019 +0100
@@ -230,7 +230,8 @@
   endif
 
   $$($1_DST): $$($1_DEP) $(GENSRC_BUFFER_DST)/_the.buffer.dir
-	$(TOOL_SPP) < $$($1_SRC) > $$($1_OUT).tmp \
+	$(RM) $$($1_OUT).tmp
+	$(TOOL_SPP) -i$$($1_SRC) -o$$($1_OUT).tmp \
 	    -K$$($1_type) \
 	    -K$$($1_category) \
 	    -K$$($1_streams) \
@@ -260,12 +261,12 @@
         ifeq ($$($1_BIN), 1)
 	  $(SED) -e '/#BIN/,$$$$d' < $$($1_OUT) > $$($1_DST).tmp
 	  $(RM) $$($1_OUT)
-	  $$($1_char_CMD) < $$($1_SRC_BIN) >> $$($1_DST).tmp
-	  $$($1_short_CMD) < $$($1_SRC_BIN) >> $$($1_DST).tmp
-	  $$($1_int_CMD) < $$($1_SRC_BIN) >> $$($1_DST).tmp
-	  $$($1_long_CMD) < $$($1_SRC_BIN) >> $$($1_DST).tmp
-	  $$($1_float_CMD) < $$($1_SRC_BIN) >> $$($1_DST).tmp
-	  $$($1_double_CMD) < $$($1_SRC_BIN) >> $$($1_DST).tmp
+	  $$($1_char_CMD) -i$$($1_SRC_BIN) -o$$($1_DST).tmp
+	  $$($1_short_CMD) -i$$($1_SRC_BIN) -o$$($1_DST).tmp
+	  $$($1_int_CMD) -i$$($1_SRC_BIN) -o$$($1_DST).tmp
+	  $$($1_long_CMD) -i$$($1_SRC_BIN) -o$$($1_DST).tmp
+	  $$($1_float_CMD) -i$$($1_SRC_BIN) -o$$($1_DST).tmp
+	  $$($1_double_CMD) -i$$($1_SRC_BIN) -o$$($1_DST).tmp
 	  $(PRINTF) "}\n" >> $$($1_DST).tmp
 	  mv $$($1_DST).tmp $$($1_DST)
         endif
--- a/make/gensrc/GensrcCharsetCoder.gmk	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/gensrc/GensrcCharsetCoder.gmk	Thu Jan 03 21:25:54 2019 +0100
@@ -36,7 +36,7 @@
 $(GENSRC_CHARSETCODER_DST)/CharsetDecoder.java: $(GENSRC_CHARSETCODER_TEMPLATE)
 	$(call MakeTargetDir)
 	$(RM) $@.tmp
-	$(TOOL_SPP) < $< >$@.tmp \
+	$(TOOL_SPP) -i$< -o$@.tmp \
 	    -Kdecoder \
 	    -DA='A' \
 	    -Da='a' \
@@ -71,7 +71,7 @@
 $(GENSRC_CHARSETCODER_DST)/CharsetEncoder.java: $(GENSRC_CHARSETCODER_TEMPLATE)
 	$(call MakeTargetDir)
 	$(RM) $@.tmp
-	$(TOOL_SPP) < $< >$@.tmp \
+	$(TOOL_SPP) -i$< -o$@.tmp \
 	    -Kencoder \
 	    -DA='An' \
 	    -Da='an' \
--- a/make/gensrc/GensrcVarHandles.gmk	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/gensrc/GensrcVarHandles.gmk	Thu Jan 03 21:25:54 2019 +0100
@@ -59,8 +59,9 @@
 	  $$(eval $1_type := $$$$(shell $(TR) '[:upper:]' '[:lower:]' <<< $$$$($1_Type)))
         endif
 	$$(call MakeDir, $$(@D))
+	$(RM) $$@
 	$(TOOL_SPP) -nel -K$$($1_type) -Dtype=$$($1_type) -DType=$$($1_Type) \
-	    $$($1_ARGS) < $$< > $$@
+	    $$($1_ARGS) -i$$< -o$$@
 
   GENSRC_VARHANDLES += $$($1_FILENAME)
 endef
@@ -147,10 +148,11 @@
 
   $$($1_FILENAME): $(VARHANDLES_SRC_DIR)/X-VarHandleByteArrayView.java.template $(BUILD_TOOLS_JDK)
 	$$(call MakeDir, $$(@D))
+	$(RM) $$@
 	$(TOOL_SPP) -nel -K$$($1_type) \
 	    -Dtype=$$($1_type) -DType=$$($1_Type) -DBoxType=$$($1_BoxType) \
 	    -DrawType=$$($1_rawType) -DRawType=$$($1_RawType) -DRawBoxType=$$($1_RawBoxType) \
-	    $$($1_ARGS) < $$< > $$@
+	    $$($1_ARGS) -i$$< -o$$@
 
   GENSRC_VARHANDLES += $$($1_FILENAME)
 endef
--- a/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java	Thu Jan 03 21:25:54 2019 +0100
@@ -28,7 +28,9 @@
 import java.io.IOException;
 import java.io.FileNotFoundException;
 import java.io.DataOutputStream;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.InputStream;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashMap;
@@ -134,18 +136,43 @@
     private static String currenciesWithMinorUnitsUndefined;
 
     public static void main(String[] args) {
-
+        InputStream in = System.in;
         // Look for "-o outputfilename" option
-        if ( args.length == 2 && args[0].equals("-o") ) {
-            try {
-                out = new DataOutputStream(new FileOutputStream(args[1]));
-            } catch ( FileNotFoundException e ) {
-                System.err.println("Error: " + e.getMessage());
-                e.printStackTrace(System.err);
+        for (int n = 0; n < args.length; ++n) {
+            if (args[n].equals("-o")) {
+                ++n;
+                if (n >= args.length) {
+                    System.err.println("Error: Invalid argument format");
+                    System.exit(1);
+                }
+                try {
+                    out = new DataOutputStream(new FileOutputStream(args[n]));
+                } catch ( FileNotFoundException e ) {
+                    System.err.println("Error: " + e.getMessage());
+                    e.printStackTrace(System.err);
+                    System.exit(1);
+                }
+            } else if (args[n].equals("-i")) {
+                ++n;
+                if (n >= args.length) {
+                    System.err.println("Error: Invalid argument format");
+                    System.exit(1);
+                }
+                try {
+                    in = new FileInputStream(args[n]);
+                } catch ( FileNotFoundException e ) {
+                    System.err.println("Error: " + e.getMessage());
+                    e.printStackTrace(System.err);
+                    System.exit(1);
+                }
+            } else {
+                System.err.println("Error: Invalid argument " + args[n]);
                 System.exit(1);
             }
-        } else {
-            System.err.println("Error: Illegal arg count");
+        }
+
+        if (out == null) {
+            System.err.println("Error: Invalid argument format");
             System.exit(1);
         }
 
@@ -154,7 +181,7 @@
         format.setLenient(false);
 
         try {
-            readInput();
+            readInput(in);
             buildMainAndSpecialCaseTables();
             buildOtherTables();
             writeOutput();
@@ -167,9 +194,9 @@
         }
     }
 
-    private static void readInput() throws IOException {
+    private static void readInput(InputStream in) throws IOException {
         currencyData = new Properties();
-        currencyData.load(System.in);
+        currencyData.load(in);
 
         // initialize other lookup strings
         formatVersion = (String) currencyData.get("formatVersion");
--- a/make/jdk/src/classes/build/tools/spp/Spp.java	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/jdk/src/classes/build/tools/spp/Spp.java	Thu Jan 03 21:25:54 2019 +0100
@@ -25,6 +25,8 @@
 
 package build.tools.spp;
 
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.util.*;
 import java.util.regex.*;
 
@@ -69,6 +71,8 @@
         Set<String> keys = new HashSet<>();
         boolean be = false;
         boolean el = true;
+        String inputFile = null;
+        String outputFile = null;
 
         for (String arg:args) {
             if (arg.startsWith("-D")) {
@@ -76,6 +80,10 @@
                 vars.put(arg.substring(2, i),arg.substring(i+1));
             } else if (arg.startsWith("-K")) {
                 keys.add(arg.substring(2));
+            } else if (arg.startsWith("-i")) {
+                inputFile = arg.substring(2);
+            } else if (arg.startsWith("-o")) {
+                outputFile = arg.substring(2);
             } else if ("-be".equals(arg)) {
                 be = true;
             } else if ("-nel".equals(arg)) {
@@ -87,11 +95,11 @@
         }
 
         StringBuffer out = new StringBuffer();
-        new Spp().spp(new Scanner(System.in),
+        new Spp().spp(new Scanner(new FileInputStream(inputFile)),
                       out, "",
                       keys, vars, be, el,
                       false);
-        System.out.print(out.toString());
+        new FileOutputStream(outputFile, true).write(out.toString().getBytes());
     }
 
     static final String LNSEP = System.getProperty("line.separator");
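Both build tools converted above (GenerateCurrencyData and Spp) move from stdin/stdout redirection to explicit `-i`/`-o` flags. The plausible reason: when the boot JDK is a Windows binary driven from WSL, fixpath can rewrite path-like arguments but has no visibility into shell redirections. Before/after sketch of the make recipes:

```bash
# Before: the template path hides inside a redirection, invisible to fixpath
$TOOL_SPP -Kchar < X-Buffer.java.template > Buffer.java.tmp

# After: paths are ordinary arguments that fixpath can translate. Spp opens
# its output in append mode (FileOutputStream(outputFile, true)), which is
# why the recipes now $(RM) the target file first:
$TOOL_SPP -Kchar -iX-Buffer.java.template -oBuffer.java.tmp

# Same pattern for the currency generator:
$TOOL_GENERATECURRENCYDATA -o currency.data.tmp -i CurrencyData.properties
```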
--- a/make/launcher/LauncherCommon.gmk	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/launcher/LauncherCommon.gmk	Thu Jan 03 21:25:54 2019 +0100
@@ -203,7 +203,7 @@
 
   ifneq ($(MAN_FILES_MD), )
     # If we got markdown files, ignore the troff files
-    ifeq ($(PANDOC), )
+    ifeq ($(ENABLE_PANDOC), false)
       $(info Warning: pandoc not found. Not generating man pages)
     else
       # Create dynamic man pages from markdown using pandoc. We need
--- a/make/nb_native/nbproject/configurations.xml	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/nb_native/nbproject/configurations.xml	Thu Jan 03 21:25:54 2019 +0100
@@ -2538,7 +2538,6 @@
               <in>arguments.cpp</in>
               <in>arguments.hpp</in>
               <in>arguments_ext.cpp</in>
-              <in>arguments_ext.hpp</in>
               <in>atomic.hpp</in>
               <in>basicLock.cpp</in>
               <in>basicLock.hpp</in>
@@ -2702,7 +2701,6 @@
               <in>diagnosticArgument.hpp</in>
               <in>diagnosticCommand.cpp</in>
               <in>diagnosticCommand.hpp</in>
-              <in>diagnosticCommand_ext.hpp</in>
               <in>diagnosticFramework.cpp</in>
               <in>diagnosticFramework.hpp</in>
               <in>dtraceAttacher.cpp</in>
@@ -13687,11 +13685,6 @@
             tool="1"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/runtime/arguments_ext.hpp"
-            ex="false"
-            tool="3"
-            flavor2="0">
-      </item>
       <item path="../../src/hotspot/share/runtime/atomic.hpp"
             ex="false"
             tool="3"
@@ -14497,11 +14490,6 @@
             tool="3"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/services/diagnosticCommand_ext.hpp"
-            ex="false"
-            tool="3"
-            flavor2="0">
-      </item>
       <item path="../../src/hotspot/share/services/diagnosticFramework.cpp"
             ex="false"
             tool="1"
@@ -27454,11 +27442,6 @@
             tool="1"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/runtime/arguments_ext.hpp"
-            ex="false"
-            tool="3"
-            flavor2="0">
-      </item>
       <item path="../../src/hotspot/share/runtime/atomic.hpp"
             ex="false"
             tool="3"
@@ -28264,11 +28247,6 @@
             tool="3"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/services/diagnosticCommand_ext.hpp"
-            ex="false"
-            tool="3"
-            flavor2="0">
-      </item>
       <item path="../../src/hotspot/share/services/diagnosticFramework.cpp"
             ex="false"
             tool="1"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/scripts/windowsShortName.bat	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,24 @@
+@echo off
+REM
+REM Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+REM
+REM This code is free software; you can redistribute it and/or modify it
+REM under the terms of the GNU General Public License version 2 only, as
+REM published by the Free Software Foundation.
+REM
+REM This code is distributed in the hope that it will be useful, but WITHOUT
+REM ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+REM FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+REM version 2 for more details (a copy is included in the LICENSE file that
+REM accompanied this code).
+REM
+REM You should have received a copy of the GNU General Public License version
+REM 2 along with this work; if not, write to the Free Software Foundation,
+REM Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+REM
+REM Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+REM or visit www.oracle.com if you need additional information or have any
+REM questions.
+REM
+if '%1' NEQ '' echo %~s1
--- a/make/src/native/fixpath.c	Wed Dec 19 12:33:25 2018 -0500
+++ b/make/src/native/fixpath.c	Thu Jan 03 21:25:54 2019 +0100
@@ -24,6 +24,7 @@
  */
 
 #include <Windows.h>
+#include <stdbool.h>
 #include <io.h>
 #include <stdio.h>
 #include <string.h>
@@ -53,25 +54,16 @@
 }
 
 /*
- * Test if pos points to /cygdrive/_/ where _ can
+ * Test if pos points to prefix followed by "_/" where _ can
  * be any character.
  */
-int is_cygdrive_here(int pos, char const *in, int len)
+int is_prefix_here(int pos, char const *in, int len, const char* prefix)
 {
-  // Length of /cygdrive/c/ is 12
-  if (pos+12 > len) return 0;
-  if (in[pos+11]=='/' &&
-      in[pos+9]=='/' &&
-      in[pos+8]=='e' &&
-      in[pos+7]=='v' &&
-      in[pos+6]=='i' &&
-      in[pos+5]=='r' &&
-      in[pos+4]=='d' &&
-      in[pos+3]=='g' &&
-      in[pos+2]=='y' &&
-      in[pos+1]=='c' &&
-      in[pos+0]=='/') {
-    return 1;
+  // Length of the drive letter and its trailing '/' (e.g. "c/") is 2
+  int prefix_size = strlen(prefix);
+  if (pos+prefix_size+2 > len) return 0;
+  if (in[pos+prefix_size+1]=='/') {
+    return strncmp(in + pos, prefix, prefix_size) == 0;
   }
   return 0;
 }
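
The rewritten predicate generalizes the old hard-coded /cygdrive/ scan: it matches when prefix occurs at pos, followed by exactly one character (the drive letter) and a '/'. A small self-contained copy of the logic with a few probes, for illustration only (not part of the build):

    #include <cstdio>
    #include <cstring>

    // Standalone copy of the predicate: matches prefix followed by a single
    // drive-letter character and a '/'.
    static int is_prefix_here(int pos, const char* in, int len, const char* prefix) {
        int prefix_size = (int)strlen(prefix);
        if (pos + prefix_size + 2 > len) return 0;
        if (in[pos + prefix_size + 1] == '/') {
            return strncmp(in + pos, prefix, prefix_size) == 0;
        }
        return 0;
    }

    int main() {
        const char* s = "/cygdrive/c/Windows";
        printf("%d\n", is_prefix_here(0, s, (int)strlen(s), "/cygdrive/")); // 1
        const char* t = "/mnt/c/Users";
        printf("%d\n", is_prefix_here(0, t, (int)strlen(t), "/mnt/"));      // 1
        printf("%d\n", is_prefix_here(0, t, (int)strlen(t), "/cygdrive/")); // 0
        return 0;
    }
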
@@ -93,7 +85,7 @@
   }
 
   for (i = 0, j = 0; i<len;) {
-    if (is_cygdrive_here(i, in, len)) {
+    if (is_prefix_here(i, in, len, "/cygdrive/")) {
       out[j++] = in[i+10];
       out[j++] = ':';
       i+=11;
@@ -196,7 +188,39 @@
   return str;
 }
 
+/*
+ * Replace /mnt/_/ with _:/
+ * The result never grows, since "_:/" is shorter
+ * than "/mnt/_/", so a buffer of the input's size suffices.
+ */
+char *replace_cygdrive_wsl(char const *in)
+{
+  size_t len = strlen(in);
+  char *out = (char*) malloc(len+1);
+  int i,j;
+
+  if (len < 7) {
+    memmove(out, in, len + 1);
+    return out;
+  }
+
+  for (i = 0, j = 0; i<len;) {
+    if (is_prefix_here(i, in, len, "/mnt/")) {
+      out[j++] = in[i+5];
+      out[j++] = ':';
+      i+=6;
+    } else {
+      out[j] = in[i];
+      i++;
+      j++;
+    }
+  }
+  out[j] = '\0';
+  return out;
+}
+
 char*(*replace_cygdrive)(char const *in) = NULL;
+bool debug_fixpath = false;
 
 char *files_to_delete[1024];
 int num_files_to_delete = 0;
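
For reference, the new WSL mode rewrites each /mnt/X/ occurrence to X:/. A simplified standalone version of replace_cygdrive_wsl showing the observable behavior (the input path is made up):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Rewrites each "/mnt/X/" occurrence to "X:/" in a freshly allocated copy,
    // mirroring the logic of replace_cygdrive_wsl above.
    static char* wsl_to_windows(const char* in) {
        size_t len = strlen(in);
        char* out = (char*)malloc(len + 1);
        size_t i = 0, j = 0;
        while (i < len) {
            if (i + 6 <= len && strncmp(in + i, "/mnt/", 5) == 0 && in[i + 6] == '/') {
                out[j++] = in[i + 5];  // drive letter
                out[j++] = ':';
                i += 6;                // skip "/mnt/X", keep the following '/'
            } else {
                out[j++] = in[i++];
            }
        }
        out[j] = '\0';
        return out;
    }

    int main() {
        char* p = wsl_to_windows("/mnt/c/Users/duke/src");
        printf("%s\n", p);  // c:/Users/duke/src
        free(p);
        return 0;
    }
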
@@ -250,11 +274,11 @@
     append(&buffer, &buflen, &used, block, blocklen);
   }
   buffer[used] = 0;
-  if (getenv("DEBUG_FIXPATH") != NULL) {
+  if (debug_fixpath) {
     fprintf(stderr, "fixpath input from @-file %s: %s\n", &in[1], buffer);
   }
   fixed = replace_cygdrive(buffer);
-  if (getenv("DEBUG_FIXPATH") != NULL) {
+  if (debug_fixpath) {
     fprintf(stderr, "fixpath converted to @-file %s is: %s\n", name, fixed);
   }
   fwrite(fixed, strlen(fixed), 1, atout);
@@ -362,28 +386,36 @@
     DWORD processFlags = 0;
     BOOL processInheritHandles = TRUE;
     BOOL waitForChild = TRUE;
+    char* fixpathPath;
 
-    if (argc<2 || argv[1][0] != '-' || (argv[1][1] != 'c' && argv[1][1] != 'm')) {
-        fprintf(stderr, "Usage: fixpath -c|m<path@path@...> [--detach] /cygdrive/c/WINDOWS/notepad.exe [/cygdrive/c/x/test.txt|@/cygdrive/c/x/atfile]\n");
+    debug_fixpath = (getenv("DEBUG_FIXPATH") != NULL);
+
+    if (argc<2 || argv[1][0] != '-' || (argv[1][1] != 'c' && argv[1][1] != 'm' && argv[1][1] != 'w')) {
+        fprintf(stderr, "Usage: fixpath -c|m|w<path@path@...> [--detach] /cygdrive/c/WINDOWS/notepad.exe [/cygdrive/c/x/test.txt|@/cygdrive/c/x/atfile]\n");
         exit(0);
     }
 
-    if (getenv("DEBUG_FIXPATH") != NULL) {
+    if (debug_fixpath) {
       char const * cmdline = GetCommandLine();
       fprintf(stderr, "fixpath input line >%s<\n", strstr(cmdline, argv[1]));
     }
 
     if (argv[1][1] == 'c' && argv[1][2] == '\0') {
-      if (getenv("DEBUG_FIXPATH") != NULL) {
+      if (debug_fixpath) {
         fprintf(stderr, "fixpath using cygwin mode\n");
       }
       replace_cygdrive = replace_cygdrive_cygwin;
     } else if (argv[1][1] == 'm') {
-      if (getenv("DEBUG_FIXPATH") != NULL) {
+      if (debug_fixpath) {
         fprintf(stderr, "fixpath using msys mode, with path list: %s\n", &argv[1][2]);
       }
       setup_msys_path_list(argv[1]);
       replace_cygdrive = replace_cygdrive_msys;
+    } else if (argv[1][1] == 'w') {
+      if (debug_fixpath) {
+        fprintf(stderr, "fixpath using wsl mode, with path list: %s\n", &argv[1][2]);
+      }
+      replace_cygdrive = replace_cygdrive_wsl;
     } else {
       fprintf(stderr, "fixpath Unknown mode: %s\n", argv[1]);
       exit(-1);
@@ -391,7 +423,7 @@
 
     if (argv[2][0] == '-') {
       if (strcmp(argv[2], "--detach") == 0) {
-        if (getenv("DEBUG_FIXPATH") != NULL) {
+        if (debug_fixpath) {
           fprintf(stderr, "fixpath in detached mode\n");
         }
         processFlags |= DETACHED_PROCESS;
@@ -417,7 +449,7 @@
         var[var_len - 1] = '\0';
         strupr(var);
 
-        if (getenv("DEBUG_FIXPATH") != NULL) {
+        if (debug_fixpath) {
           fprintf(stderr, "fixpath setting var >%s< to >%s<\n", var, val);
         }
 
@@ -480,15 +512,15 @@
     }
     *current = '\0';
 
-    if (getenv("DEBUG_FIXPATH") != NULL) {
+    if (debug_fixpath) {
       fprintf(stderr, "fixpath converted line >%s<\n", line);
     }
 
     if (cmd == argc) {
-       if (getenv("DEBUG_FIXPATH") != NULL) {
-         fprintf(stderr, "fixpath no command provided!\n");
-       }
-       exit(0);
+      if (debug_fixpath) {
+        fprintf(stderr, "fixpath no command provided!\n");
+      }
+      exit(0);
     }
 
     ZeroMemory(&si, sizeof(si));
@@ -498,6 +530,23 @@
     fflush(stderr);
     fflush(stdout);
 
+    fixpathPath = calloc(32767, sizeof(char));
+    rc = GetEnvironmentVariable("FIXPATH_PATH", fixpathPath, 32767);
+    if (rc) {
+      if (debug_fixpath) {
+        fprintf(stderr, "Setting Path to FIXPATH_PATH: %s\n", fixpathPath);
+      }
+      rc = SetEnvironmentVariable("Path", fixpathPath);
+      if (!rc) {
+        // Could not set Path for some reason.  Try to report why.
+        const int msg_len = 80 + strlen(fixpathPath);
+        char * msg = (char *)alloca(msg_len);
+        _snprintf_s(msg, msg_len, _TRUNCATE, "Could not set environment variable [Path=%s]", fixpathPath);
+        report_error(msg);
+        exit(1);
+      }
+    }
+
     rc = CreateProcess(NULL,
                        line,
                        0,
@@ -518,7 +567,7 @@
       WaitForSingleObject(pi.hProcess, INFINITE);
       GetExitCodeProcess(pi.hProcess, &exitCode);
 
-      if (getenv("DEBUG_FIXPATH") != NULL) {
+      if (debug_fixpath) {
         for (i=0; i<num_files_to_delete; ++i) {
           fprintf(stderr, "fixpath Not deleting temporary file %s\n",
                   files_to_delete[i]);
@@ -530,13 +579,13 @@
       }
 
       if (exitCode != 0) {
-        if (getenv("DEBUG_FIXPATH") != NULL) {
+        if (debug_fixpath) {
           fprintf(stderr, "fixpath exit code %d\n",
                   exitCode);
         }
       }
     } else {
-      if (getenv("DEBUG_FIXPATH") != NULL) {
+      if (debug_fixpath) {
         fprintf(stderr, "fixpath Not waiting for child process");
       }
     }
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu Jan 03 21:25:54 2019 +0100
@@ -2133,7 +2133,12 @@
 }
 
 const uint Matcher::vector_shift_count_ideal_reg(int size) {
-  return Op_VecX;
+  switch(size) {
+    case  8: return Op_VecD;
+    case 16: return Op_VecX;
+  }
+  ShouldNotReachHere();
+  return 0;
 }
 
 // AES support not yet implemented
@@ -12601,6 +12606,63 @@
 %}
 
 
+// Math.max(FF)F
+instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
+  match(Set dst (MaxF src1 src2));
+
+  format %{ "fmaxs   $dst, $src1, $src2" %}
+  ins_encode %{
+    __ fmaxs(as_FloatRegister($dst$$reg),
+             as_FloatRegister($src1$$reg),
+             as_FloatRegister($src2$$reg));
+  %}
+
+  ins_pipe(fp_dop_reg_reg_s);
+%}
+
+// Math.min(FF)F
+instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
+  match(Set dst (MinF src1 src2));
+
+  format %{ "fmins   $dst, $src1, $src2" %}
+  ins_encode %{
+    __ fmins(as_FloatRegister($dst$$reg),
+             as_FloatRegister($src1$$reg),
+             as_FloatRegister($src2$$reg));
+  %}
+
+  ins_pipe(fp_dop_reg_reg_s);
+%}
+
+// Math.max(DD)D
+instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
+  match(Set dst (MaxD src1 src2));
+
+  format %{ "fmaxd   $dst, $src1, $src2" %}
+  ins_encode %{
+    __ fmaxd(as_FloatRegister($dst$$reg),
+             as_FloatRegister($src1$$reg),
+             as_FloatRegister($src2$$reg));
+  %}
+
+  ins_pipe(fp_dop_reg_reg_d);
+%}
+
+// Math.min(DD)D
+instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
+  match(Set dst (MinD src1 src2));
+
+  format %{ "fmind   $dst, $src1, $src2" %}
+  ins_encode %{
+    __ fmind(as_FloatRegister($dst$$reg),
+             as_FloatRegister($src1$$reg),
+             as_FloatRegister($src2$$reg));
+  %}
+
+  ins_pipe(fp_dop_reg_reg_d);
+%}
+
+
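
These patterns are only legal because AArch64 fmax/fmin agree with Java's Math.max/min on the tricky inputs: NaN propagates, and +0.0 compares greater than -0.0. A scalar C++ model of those semantics, for illustration (note that C's std::fmax would not match, since fmax(NaN, x) returns x):

    #include <cmath>
    #include <cstdio>

    // Scalar model of Java Math.max(float, float): NaN wins over any value,
    // and +0.0f is treated as strictly greater than -0.0f.
    static float java_max(float a, float b) {
        if (std::isnan(a) || std::isnan(b)) return NAN;
        if (a == 0.0f && b == 0.0f) return std::signbit(a) ? b : a;
        return a > b ? a : b;
    }

    int main() {
        printf("%f\n", java_max(1.0f, NAN));    // nan
        printf("%f\n", java_max(-0.0f, 0.0f));  // 0.000000 (+0.0)
        printf("%f\n", java_max(2.0f, 3.0f));   // 3.000000
        return 0;
    }
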
 instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
   match(Set dst (DivF src1  src2));
 
@@ -16524,32 +16586,32 @@
 %}
 
 // ------------------------------ Shift ---------------------------------------
-
-instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
+instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
+  predicate(n->as_Vector()->length_in_bytes() == 8);
   match(Set dst (LShiftCntV cnt));
-  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
+  match(Set dst (RShiftCntV cnt));
+  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
+  ins_encode %{
+    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
+  %}
+  ins_pipe(vdup_reg_reg64);
+%}
+
+instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
+  predicate(n->as_Vector()->length_in_bytes() == 16);
+  match(Set dst (LShiftCntV cnt));
+  match(Set dst (RShiftCntV cnt));
+  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
   ins_encode %{
     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
   %}
   ins_pipe(vdup_reg_reg128);
 %}
 
-// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
-instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
-  match(Set dst (RShiftCntV cnt));
-  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
-  ins_encode %{
-    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
-    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
-  %}
-  ins_pipe(vdup_reg_reg128);
-%}
-
-instruct vsll8B(vecD dst, vecD src, vecX shift) %{
+instruct vsll8B(vecD dst, vecD src, vecD shift) %{
   predicate(n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8);
   match(Set dst (LShiftVB src shift));
-  match(Set dst (RShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
   ins_encode %{
@@ -16563,7 +16625,6 @@
 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 16);
   match(Set dst (LShiftVB src shift));
-  match(Set dst (RShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
   ins_encode %{
@@ -16574,29 +16635,93 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
+// Right shifts with a vector shift count on aarch64 SIMD are implemented
+// as left shifts by the negated shift count.
+// There are two cases for vector shift count.
+//
+// Case 1: The vector shift count is from replication.
+//        |            |
+//    LoadVector  RShiftCntV
+//        |       /
+//     RShiftVI
+// Note: In the inner loop, multiple neg instructions are used; they can be
+// moved to the outer loop and merged into a single neg instruction.
+//
+// Case 2: The vector shift count is from loading.
+// This case isn't supported by the middle end yet, but it is supported by
+// panama/vectorIntrinsics (JEP 338: Vector API).
+//        |            |
+//    LoadVector  LoadVector
+//        |       /
+//     RShiftVI
+//
+
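
The equivalence the comment relies on is that SSHL with a negative element count performs an arithmetic right shift. A scalar model of that behavior, checked against plain >> (illustrative; signed right shift is arithmetic on all compilers HotSpot targets):

    #include <cassert>
    #include <cstdint>

    // Scalar model of AArch64 SSHL element behavior: a positive count shifts
    // left, a negative count shifts right (arithmetically, for the signed
    // variant). The vsra*/vsrl* patterns above negate the count once and
    // then reuse the plain sshl/ushl instructions.
    static int32_t sshl_model(int32_t x, int8_t count) {
        return count >= 0 ? x << count : x >> -count;
    }

    int main() {
        int32_t x = -123456;
        for (int8_t s = 1; s < 31; s++) {
            assert(sshl_model(x, (int8_t)-s) == (x >> s));  // right shift == sshl by -s
        }
        return 0;
    }
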
+instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (RShiftVB src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift64);
+%}
+
+instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 16);
+  match(Set dst (RShiftVB src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
   predicate(n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8);
   match(Set dst (URShiftVB src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift64);
 %}
 
-instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
+instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 16);
   match(Set dst (URShiftVB src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
@@ -16708,11 +16833,10 @@
   ins_pipe(vshift128_imm);
 %}
 
-instruct vsll4S(vecD dst, vecD src, vecX shift) %{
+instruct vsll4S(vecD dst, vecD src, vecD shift) %{
   predicate(n->as_Vector()->length() == 2 ||
             n->as_Vector()->length() == 4);
   match(Set dst (LShiftVS src shift));
-  match(Set dst (RShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
   ins_encode %{
@@ -16726,7 +16850,6 @@
 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (LShiftVS src shift));
-  match(Set dst (RShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
   ins_encode %{
@@ -16737,29 +16860,72 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
+instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (RShiftVS src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift64);
+%}
+
+instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 8);
+  match(Set dst (RShiftVS src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
   predicate(n->as_Vector()->length() == 2 ||
             n->as_Vector()->length() == 4);
   match(Set dst (URShiftVS src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift64);
 %}
 
-instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
+instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (URShiftVS src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
@@ -16871,10 +17037,9 @@
   ins_pipe(vshift128_imm);
 %}
 
-instruct vsll2I(vecD dst, vecD src, vecX shift) %{
+instruct vsll2I(vecD dst, vecD src, vecD shift) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (LShiftVI src shift));
-  match(Set dst (RShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
   ins_encode %{
@@ -16888,7 +17053,6 @@
 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (LShiftVI src shift));
-  match(Set dst (RShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
   ins_encode %{
@@ -16899,28 +17063,70 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
+instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (RShiftVI src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift64);
+%}
+
+instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 4);
+  match(Set dst (RShiftVI src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (URShiftVI src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift64);
 %}
 
-instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
+instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (URShiftVI src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
@@ -17006,7 +17212,6 @@
 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (LShiftVL src shift));
-  match(Set dst (RShiftVL src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
   ins_encode %{
@@ -17017,15 +17222,36 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
+instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (RShiftVL src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (URShiftVL src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1826,12 +1826,16 @@
   INSN(fdivs, 0b000, 0b00, 0b0001);
   INSN(fadds, 0b000, 0b00, 0b0010);
   INSN(fsubs, 0b000, 0b00, 0b0011);
+  INSN(fmaxs, 0b000, 0b00, 0b0100);
+  INSN(fmins, 0b000, 0b00, 0b0101);
   INSN(fnmuls, 0b000, 0b00, 0b1000);
 
   INSN(fmuld, 0b000, 0b01, 0b0000);
   INSN(fdivd, 0b000, 0b01, 0b0001);
   INSN(faddd, 0b000, 0b01, 0b0010);
   INSN(fsubd, 0b000, 0b01, 0b0011);
+  INSN(fmaxd, 0b000, 0b01, 0b0100);
+  INSN(fmind, 0b000, 0b01, 0b0101);
   INSN(fnmuld, 0b000, 0b01, 0b1000);
 
 #undef INSN
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -4896,7 +4896,7 @@
 
   // A very short string
   cmpw(cnt2, minCharsInWord);
-  br(Assembler::LT, SHORT_STRING);
+  br(Assembler::LE, SHORT_STRING);
 
   // Compare longwords
   // load first parts of strings and finish initialization while loading
@@ -4920,8 +4920,7 @@
       ldr(tmp2, Address(str2));
       cmp(cnt2, STUB_THRESHOLD);
       br(GE, STUB);
-      subsw(cnt2, cnt2, 4);
-      br(EQ, TAIL_CHECK);
+      subw(cnt2, cnt2, 4);
       eor(vtmpZ, T16B, vtmpZ, vtmpZ);
       lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
       lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
@@ -4937,8 +4936,7 @@
       ldrs(vtmp, Address(str2));
       cmp(cnt2, STUB_THRESHOLD);
       br(GE, STUB);
-      subsw(cnt2, cnt2, 4);
-      br(EQ, TAIL_CHECK);
+      subw(cnt2, cnt2, 4);
       lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
       eor(vtmpZ, T16B, vtmpZ, vtmpZ);
       lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
@@ -5650,12 +5648,12 @@
           orr(v5, T16B, Vtmp3, Vtmp4);
           uzp1(Vtmp1, T16B, Vtmp1, Vtmp2);
           uzp1(Vtmp3, T16B, Vtmp3, Vtmp4);
-          stpq(Vtmp1, Vtmp3, dst);
           uzp2(v5, T16B, v4, v5); // high bytes
           umov(tmp2, v5, D, 1);
           fmovd(tmp1, v5);
           orr(tmp1, tmp1, tmp2);
           cbnz(tmp1, LOOP_8);
+          stpq(Vtmp1, Vtmp3, dst);
           sub(len, len, 32);
           add(dst, dst, 32);
           add(src, src, 64);
@@ -5673,7 +5671,6 @@
       prfm(Address(src, SoftwarePrefetchHintDistance));
       uzp1(v4, T16B, Vtmp1, Vtmp2);
       uzp1(v5, T16B, Vtmp3, Vtmp4);
-      stpq(v4, v5, dst);
       orr(Vtmp1, T16B, Vtmp1, Vtmp2);
       orr(Vtmp3, T16B, Vtmp3, Vtmp4);
       uzp2(Vtmp1, T16B, Vtmp1, Vtmp3); // high bytes
@@ -5681,6 +5678,7 @@
       fmovd(tmp1, Vtmp1);
       orr(tmp1, tmp1, tmp2);
       cbnz(tmp1, LOOP_8);
+      stpq(v4, v5, dst);
       sub(len, len, 32);
       add(dst, dst, 32);
       add(src, src, 64);
@@ -5695,9 +5693,9 @@
       ld1(Vtmp1, T8H, src);
       uzp1(Vtmp2, T16B, Vtmp1, Vtmp1); // low bytes
       uzp2(Vtmp3, T16B, Vtmp1, Vtmp1); // high bytes
-      strd(Vtmp2, dst);
       fmovd(tmp1, Vtmp3);
       cbnz(tmp1, NEXT_1);
+      strd(Vtmp2, dst);
 
       sub(len, len, 8);
       add(dst, dst, 8);
@@ -5710,9 +5708,9 @@
     cbz(len, DONE);
     BIND(NEXT_1);
       ldrh(tmp1, Address(post(src, 2)));
-      strb(tmp1, Address(post(dst, 1)));
       tst(tmp1, 0xff00);
       br(NE, SET_RESULT);
+      strb(tmp1, Address(post(dst, 1)));
       subs(len, len, 1);
       br(GT, NEXT_1);
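
The store reorderings above share one goal: in the char-to-byte compression loops, test the high bytes before committing the store, so the destination is left untouched from the first non-latin1 character onward. A standalone sketch of that check-before-store shape (not the intrinsic itself):

    #include <cstdint>
    #include <cstdio>

    // Check-before-store compression: returns the number of chars written,
    // stopping at the first char that does not fit in one byte and leaving
    // the destination untouched from that point on.
    static size_t compress(const uint16_t* src, uint8_t* dst, size_t n) {
        for (size_t i = 0; i < n; i++) {
            if (src[i] & 0xff00) return i;  // test first ...
            dst[i] = (uint8_t)src[i];       // ... store only on success
        }
        return n;
    }

    int main() {
        uint16_t src[] = {'J', 'D', 'K', 0x263A, 'x'};  // 0x263A is non-latin1
        uint8_t dst[5] = {0};
        size_t n = compress(src, dst, 5);
        printf("compressed %zu chars\n", n);  // compressed 3 chars
        return 0;
    }
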
 
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -105,8 +105,8 @@
     // compiled code in threads for which the event is enabled.  Check here for
     // interp_only_mode if these events CAN be enabled.
 
-    __ ldrb(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
-    __ cbnz(rscratch1, run_compiled_code);
+    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
+    __ cbzw(rscratch1, run_compiled_code);
     __ ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
     __ br(rscratch1);
     __ BIND(run_compiled_code);
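
Two things are corrected here: interp_only_mode is a 32-bit int, so it needs a 32-bit load (ldrw rather than ldrb), and compiled code should be entered only when the flag is zero, which the cbzw restores after the old cbnz branched the wrong way. A simplified model of the intended dispatch (names simplified from the diff):

    #include <cstdio>

    // Take the compiled entry only when interp_only_mode == 0; otherwise
    // fall into the interpreter entry so events keep being posted.
    static const char* dispatch(int interp_only_mode) {
        if (interp_only_mode == 0) {   // cbzw rscratch1, run_compiled_code
            return "compiled entry";
        }
        return "interpreter entry";    // fall through
    }

    int main() {
        printf("%s\n", dispatch(0));   // compiled entry
        printf("%s\n", dispatch(1));   // interpreter entry
        return 0;
    }
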
--- a/src/hotspot/cpu/arm/arm.ad	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/arm/arm.ad	Thu Jan 03 21:25:54 2019 +0100
@@ -8945,9 +8945,10 @@
 instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch )
 %{
   match(Set pcc (FastLock object box));
+  predicate(!(UseBiasedLocking && !UseOptoBiasInlining));
 
   effect(TEMP scratch, TEMP scratch2);
-  ins_cost(100);
+  ins_cost(DEFAULT_COST*3);
 
   format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2" %}
   ins_encode %{
@@ -8956,6 +8957,21 @@
   ins_pipe(long_memory_op);
 %}
 
+instruct cmpFastLock_noBiasInline(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2,
+                                  iRegP scratch, iRegP scratch3) %{
+  match(Set pcc (FastLock object box));
+  predicate(UseBiasedLocking && !UseOptoBiasInlining);
+
+  effect(TEMP scratch, TEMP scratch2, TEMP scratch3);
+  ins_cost(DEFAULT_COST*5);
+
+  format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2, $scratch3" %}
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register, $scratch3$$Register);
+  %}
+  ins_pipe(long_memory_op);
+%}
+
 
 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch ) %{
   match(Set pcc (FastUnlock object box));
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1971,7 +1971,7 @@
 
 
 #ifdef COMPILER2
-void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2)
+void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2, Register scratch3)
 {
   assert(VM_Version::supports_ldrex(), "unsupported, yet?");
 
@@ -1985,11 +1985,13 @@
   Label fast_lock, done;
 
   if (UseBiasedLocking && !UseOptoBiasInlining) {
-    Label failed;
-    biased_locking_enter(Roop, Rmark, Rscratch, false, noreg, done, failed);
-    bind(failed);
+    assert(scratch3 != noreg, "need extra temporary for -XX:-UseOptoBiasInlining");
+    biased_locking_enter(Roop, Rmark, Rscratch, false, scratch3, done, done);
+    // Fall through if the lock is not biased, otherwise branch to done
   }
 
+  // Invariant: Rmark loaded below does not contain biased lock pattern
+
   ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
   tst(Rmark, markOopDesc::unlocked_value);
   b(fast_lock, ne);
@@ -2016,6 +2018,9 @@
 
   bind(done);
 
+  // At this point flags are set as follows:
+  //  EQ -> Success
+  //  NE -> Failure, branch to slow path
 }
 
 void MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2)
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -371,10 +371,10 @@
   // lock_reg and obj_reg must be loaded up with the appropriate values.
   // swap_reg must be supplied.
   // tmp_reg must be supplied.
-  // Optional slow case is for implementations (interpreter and C1) which branch to
-  // slow case directly. If slow_case is NULL, then leaves condition
-  // codes set (for C2's Fast_Lock node) and jumps to done label.
-  // Falls through for the fast locking attempt.
+  // The done label is branched to with condition code EQ set if the lock
+  // is biased and we acquired it. The slow case label is branched to with
+  // condition code NE set if the lock is biased but we failed to acquire
+  // it. Otherwise fall through.
   // Returns offset of first potentially-faulting instruction for null
   // check info (currently consumed only by C1). If
   // swap_reg_contains_mark is true then returns -1 as it is assumed
@@ -1073,7 +1073,7 @@
   void restore_default_fp_mode();
 
 #ifdef COMPILER2
-  void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
+  void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3 = noreg);
   void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
 #endif
 
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -582,8 +582,9 @@
       __ cmpl(flags, ltos);
       __ jcc(Assembler::notEqual, notLong);
       // ltos
+      // Loading high word first because movptr clobbers rax
+      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
       __ movptr(rax, field);
-      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
       __ push(ltos);
       __ jmp(Done);
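
On 32-bit x86 the 64-bit ltos field is read as two 32-bit halves, and the field address is formed from rax itself, so the half destined for rax has to be loaded last. A tiny simulation of why the order matters (register names follow the diff; the memory contents are made up):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t field[2] = {0xdeadbeefu, 0x0badf00du};  // low half, high half
        uint32_t rax = 0;  // serves as both the address base and the low-half target
        uint32_t rdx;
        // Correct order: high half into rdx first, while rax still holds the base.
        rdx = field[rax + 1];
        rax = field[rax + 0];  // clobbering the base is harmless now
        printf("%08x:%08x\n", rdx, rax);  // 0badf00d:deadbeef
        return 0;
    }
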
 
--- a/src/hotspot/cpu/x86/x86_32.ad	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/x86/x86_32.ad	Thu Jan 03 21:25:54 2019 +0100
@@ -7760,9 +7760,9 @@
   match(Set dst (MulAddS2I (Binary dst src1) (Binary src2 src3)));
   effect(KILL cr, KILL src2);
 
-  expand %{ mulI_rReg(dst, src1, cr);
-           mulI_rReg(src2, src3, cr);
-           addI_rReg(dst, src2, cr); %}
+  expand %{ mulI_eReg(dst, src1, cr);
+           mulI_eReg(src2, src3, cr);
+           addI_eReg(dst, src2, cr); %}
 %}
 
 // Multiply Register Int to Long
--- a/src/hotspot/cpu/x86/x86_64.ad	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu Jan 03 21:25:54 2019 +0100
@@ -4265,132 +4265,196 @@
 
 // Operands for bound floating pointer register arguments
 operand rxmm0() %{
-  constraint(ALLOC_IN_RC(xmm0_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX<= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm0_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm1() %{
-  constraint(ALLOC_IN_RC(xmm1_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm1_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm2() %{
-  constraint(ALLOC_IN_RC(xmm2_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm2_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm3() %{
-  constraint(ALLOC_IN_RC(xmm3_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm3_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm4() %{
-  constraint(ALLOC_IN_RC(xmm4_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm4_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm5() %{
-  constraint(ALLOC_IN_RC(xmm5_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm5_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm6() %{
-  constraint(ALLOC_IN_RC(xmm6_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm6_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm7() %{
-  constraint(ALLOC_IN_RC(xmm7_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm7_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm8() %{
-  constraint(ALLOC_IN_RC(xmm8_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm8_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm9() %{
-  constraint(ALLOC_IN_RC(xmm9_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm9_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm10() %{
-  constraint(ALLOC_IN_RC(xmm10_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm10_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm11() %{
-  constraint(ALLOC_IN_RC(xmm11_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm11_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm12() %{
-  constraint(ALLOC_IN_RC(xmm12_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm12_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm13() %{
-  constraint(ALLOC_IN_RC(xmm13_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm13_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm14() %{
-  constraint(ALLOC_IN_RC(xmm14_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm14_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm15() %{
-  constraint(ALLOC_IN_RC(xmm15_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm15_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm16() %{
-  constraint(ALLOC_IN_RC(xmm16_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm16_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm17() %{
-  constraint(ALLOC_IN_RC(xmm17_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm17_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm18() %{
-  constraint(ALLOC_IN_RC(xmm18_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm18_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm19() %{
-  constraint(ALLOC_IN_RC(xmm19_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm19_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm20() %{
-  constraint(ALLOC_IN_RC(xmm20_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm20_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm21() %{
-  constraint(ALLOC_IN_RC(xmm21_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm21_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm22() %{
-  constraint(ALLOC_IN_RC(xmm22_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm22_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm23() %{
-  constraint(ALLOC_IN_RC(xmm23_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm23_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm24() %{
-  constraint(ALLOC_IN_RC(xmm24_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm24_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm25() %{
-  constraint(ALLOC_IN_RC(xmm25_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm25_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm26() %{
-  constraint(ALLOC_IN_RC(xmm26_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm26_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm27() %{
-  constraint(ALLOC_IN_RC(xmm27_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm27_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm28() %{
-  constraint(ALLOC_IN_RC(xmm28_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm28_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm29() %{
-  constraint(ALLOC_IN_RC(xmm29_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm29_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm30() %{
-  constraint(ALLOC_IN_RC(xmm30_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm30_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm31() %{
-  constraint(ALLOC_IN_RC(xmm31_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm31_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 
 //----------OPERAND CLASSES----------------------------------------------------
@@ -12651,33 +12715,6 @@
 // Execute ZGC load barrier (strong) slow path
 //
 
-// When running without XMM regs
-instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
-
-  match(Set dst (LoadBarrierSlowReg mem));
-  predicate(MaxVectorSize < 16);
-
-  effect(DEF dst, KILL cr);
-
-  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d, $mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
 // For XMM and YMM enabled processors
 instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
                                      rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
@@ -12686,7 +12723,7 @@
                                      rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 
   match(Set dst (LoadBarrierSlowReg mem));
-  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+  predicate(UseAVX <= 2);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
@@ -12694,7 +12731,7 @@
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+  format %{"LoadBarrierSlowRegXmmAndYmm $dst, $mem" %}
   ins_encode %{
 #if INCLUDE_ZGC
     Register d = $dst$$Register;
@@ -12725,7 +12762,7 @@
                                rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
   match(Set dst (LoadBarrierSlowReg mem));
-  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+  predicate(UseAVX == 3);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
@@ -12760,33 +12797,6 @@
 // Execute ZGC load barrier (weak) slow path
 //
 
-// When running without XMM regs
-instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
-
-  match(Set dst (LoadBarrierSlowReg mem));
-  predicate(MaxVectorSize < 16);
-
-  effect(DEF dst, KILL cr);
-
-  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d, $mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
 // For XMM and YMM enabled processors
 instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
                                          rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
@@ -12795,7 +12805,7 @@
                                          rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 
   match(Set dst (LoadBarrierWeakSlowReg mem));
-  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+  predicate(UseAVX <= 2);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
@@ -12803,7 +12813,7 @@
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+  format %{"LoadBarrierWeakSlowRegXmmAndYmm $dst, $mem" %}
   ins_encode %{
 #if INCLUDE_ZGC
     Register d = $dst$$Register;
@@ -12834,7 +12844,7 @@
                                    rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
   match(Set dst (LoadBarrierWeakSlowReg mem));
-  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+  predicate(UseAVX == 3);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
--- a/src/hotspot/os/aix/os_aix.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/os/aix/os_aix.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -2602,23 +2602,6 @@
   return ::pread(fd, buf, nBytes, offset);
 }
 
-void os::naked_short_sleep(jlong ms) {
-  struct timespec req;
-
-  assert(ms < 1000, "Un-interruptable sleep, short time use only");
-  req.tv_sec = 0;
-  if (ms > 0) {
-    req.tv_nsec = (ms % 1000) * 1000000;
-  }
-  else {
-    req.tv_nsec = 1;
-  }
-
-  nanosleep(&req, NULL);
-
-  return;
-}
-
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
--- a/src/hotspot/os/bsd/os_bsd.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -2225,22 +2225,6 @@
   RESTARTABLE_RETURN_INT(::pread(fd, buf, nBytes, offset));
 }
 
-void os::naked_short_sleep(jlong ms) {
-  struct timespec req;
-
-  assert(ms < 1000, "Un-interruptable sleep, short time use only");
-  req.tv_sec = 0;
-  if (ms > 0) {
-    req.tv_nsec = (ms % 1000) * 1000000;
-  } else {
-    req.tv_nsec = 1;
-  }
-
-  nanosleep(&req, NULL);
-
-  return;
-}
-
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
@@ -2346,14 +2330,13 @@
 #elif defined(__APPLE__) || defined(__NetBSD__)
   struct sched_param sp;
   int policy;
-  pthread_t self = pthread_self();
-
-  if (pthread_getschedparam(self, &policy, &sp) != 0) {
+
+  if (pthread_getschedparam(thread->osthread()->pthread_id(), &policy, &sp) != 0) {
     return OS_ERR;
   }
 
   sp.sched_priority = newpri;
-  if (pthread_setschedparam(self, policy, &sp) != 0) {
+  if (pthread_setschedparam(thread->osthread()->pthread_id(), policy, &sp) != 0) {
     return OS_ERR;
   }
 
@@ -2377,8 +2360,14 @@
   int policy;
   struct sched_param sp;
 
-  pthread_getschedparam(pthread_self(), &policy, &sp);
-  *priority_ptr = sp.sched_priority;
+  int res = pthread_getschedparam(thread->osthread()->pthread_id(), &policy, &sp);
+  if (res != 0) {
+    *priority_ptr = -1;
+    return OS_ERR;
+  } else {
+    *priority_ptr = sp.sched_priority;
+    return OS_OK;
+  }
 #else
   *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
 #endif
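
The fix directs the get/set priority calls at the thread being adjusted instead of the calling thread. A minimal POSIX sketch of adjusting another thread's priority through an explicit pthread_t (error handling trimmed; the helper and thread body are made up):

    #include <pthread.h>
    #include <cstdio>

    // Adjust the scheduling priority of an arbitrary target thread. Using an
    // explicit pthread_t is the point of the fix above: pthread_self() would
    // silently modify the calling thread instead.
    static int set_thread_priority(pthread_t target, int newpri) {
        struct sched_param sp;
        int policy;
        if (pthread_getschedparam(target, &policy, &sp) != 0) return -1;
        sp.sched_priority = newpri;
        if (pthread_setschedparam(target, policy, &sp) != 0) return -1;
        return 0;
    }

    static void* idle(void*) { for (;;) pthread_testcancel(); }

    int main() {
        pthread_t t;
        pthread_create(&t, nullptr, idle, nullptr);
        printf("set priority: %d\n", set_thread_priority(t, 0));
        pthread_cancel(t);
        pthread_join(t, nullptr);
        return 0;
    }
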
--- a/src/hotspot/os/linux/os_linux.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/os/linux/os_linux.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -705,6 +705,8 @@
     }
   }
 
+  assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");
+
   // call one more level start routine
   thread->call_run();
 
@@ -4033,33 +4035,6 @@
   return ::pread(fd, buf, nBytes, offset);
 }
 
-// Short sleep, direct OS call.
-//
-// Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee
-// sched_yield(2) will actually give up the CPU:
-//
-//   * Alone on this pariticular CPU, keeps running.
-//   * Before the introduction of "skip_buddy" with "compat_yield" disabled
-//     (pre 2.6.39).
-//
-// So calling this with 0 is an alternative.
-//
-void os::naked_short_sleep(jlong ms) {
-  struct timespec req;
-
-  assert(ms < 1000, "Un-interruptable sleep, short time use only");
-  req.tv_sec = 0;
-  if (ms > 0) {
-    req.tv_nsec = (ms % 1000) * 1000000;
-  } else {
-    req.tv_nsec = 1;
-  }
-
-  nanosleep(&req, NULL);
-
-  return;
-}
-
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
@@ -4072,6 +4047,16 @@
   return DontYieldALot;
 }
 
+// The Linux CFS scheduler (since 2.6.23) does not guarantee that sched_yield(2)
+// will actually give up the CPU. Since the skip-buddy mechanism (v2.6.28), it:
+//
+// * Sets the yielding task as the skip buddy for the current CPU's run queue.
+// * Picks the next task from the run queue; if that is empty, picks a skip buddy (which can be the yielding task).
+// * Clears skip buddies for this run queue (the yielding task is no longer a skip buddy).
+//
+// An alternative is calling os::naked_short_nanosleep with a small number to avoid
+// getting re-scheduled immediately.
+//
 void os::naked_yield() {
   sched_yield();
 }
@@ -5073,7 +5058,7 @@
   // initialize thread priority policy
   prio_init();
 
-  if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
+  if (!FLAG_IS_DEFAULT(AllocateHeapAt) || !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
     set_coredump_filter(DAX_SHARED_BIT);
   }
 
--- a/src/hotspot/os/posix/os_posix.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/os/posix/os_posix.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -695,6 +695,21 @@
   }
 }
 
+void os::naked_short_nanosleep(jlong ns) {
+  struct timespec req;
+  assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
+  req.tv_sec = 0;
+  req.tv_nsec = ns;
+  ::nanosleep(&req, NULL);
+  return;
+}
+
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
+  os::naked_short_nanosleep(ms * (NANOUNITS / MILLIUNITS));
+  return;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // interrupt support
 
--- a/src/hotspot/os/solaris/os_solaris.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -2871,16 +2871,6 @@
   return res;
 }
 
-void os::naked_short_sleep(jlong ms) {
-  assert(ms < 1000, "Un-interruptable sleep, short time use only");
-
-  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
-  // Solaris requires -lrt for this.
-  usleep((ms * 1000));
-
-  return;
-}
-
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
--- a/src/hotspot/os/windows/os_windows.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/os/windows/os_windows.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -3512,6 +3512,43 @@
   Sleep(ms);
 }
 
+void os::naked_short_nanosleep(jlong ns) {
+  assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
+  LARGE_INTEGER hundreds_nanos = { 0 };
+  HANDLE wait_timer = ::CreateWaitableTimer(NULL /* attributes*/,
+                                            true /* manual reset */,
+                                            NULL /* name */ );
+  if (wait_timer == NULL) {
+    log_warning(os)("Failed to CreateWaitableTimer: %u", GetLastError());
+    return;
+  }
+
+  // We need a minimum of one hundred nanoseconds.
+  ns = ns > 100 ? ns : 100;
+
+  // Round ns to the nearest multiple of one hundred nanoseconds.
+  // Negative values indicate relative time.
+  hundreds_nanos.QuadPart = -((ns + 50) / 100);
+
+  if (::SetWaitableTimer(wait_timer /* handle */,
+                         &hundreds_nanos /* due time */,
+                         0 /* period */,
+                         NULL /* comp func */,
+                         NULL /* comp func args */,
+                         FALSE /* resume */)) {
+    DWORD res = ::WaitForSingleObject(wait_timer /* handle */, INFINITE /* timeout */);
+    if (res != WAIT_OBJECT_0) {
+      if (res == WAIT_FAILED) {
+        log_warning(os)("Failed to WaitForSingleObject: %u", GetLastError());
+      } else {
+        log_warning(os)("Unexpected return from WaitForSingleObject: %s",
+                        res == WAIT_ABANDONED ? "WAIT_ABANDONED" : "WAIT_TIMEOUT");
+      }
+    }
+  }
+  ::CloseHandle(wait_timer /* handle */);
+}
+
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
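
The Windows path converts nanoseconds into the waitable timer's unit of 100 ns: clamp to at least one unit, round to the nearest unit, and negate, since a negative due time means a relative delay for SetWaitableTimer. The arithmetic in isolation, with a few worked values:

    #include <cstdio>

    // Due-time computation from the diff: clamp to >= 100 ns, round to the
    // nearest multiple of 100 ns, then negate (negative = relative time).
    static long long due_time_in_hundreds(long long ns) {
        if (ns < 100) ns = 100;        // minimum resolution is one 100 ns unit
        return -((ns + 50) / 100);     // round to nearest, then negate
    }

    int main() {
        printf("%lld\n", due_time_in_hundreds(1));       // -1
        printf("%lld\n", due_time_in_hundreds(149));     // -1
        printf("%lld\n", due_time_in_hundreds(150));     // -2
        printf("%lld\n", due_time_in_hundreds(500000));  // -5000
        return 0;
    }
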
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -370,7 +370,6 @@
       if (thread->on_local_stack(addr)) {
         // stack overflow
         if (thread->in_stack_yellow_reserved_zone(addr)) {
-          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
             if (thread->in_stack_reserved_zone(addr)) {
               frame fr;
@@ -392,9 +391,11 @@
             }
             // Throw a stack overflow exception.  Guard pages will be reenabled
             // while unwinding the stack.
+            thread->disable_stack_yellow_reserved_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code.  Return and try to finish.
+            thread->disable_stack_yellow_reserved_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zArguments_linux_x86.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArguments.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "utilities/debug.hpp"
+
+void ZArguments::initialize_platform() {
+  // The C2 barrier slow path expects vector registers to be at least
+  // 16 bytes wide, which is the minimum width available on all
+  // x86-64 systems. However, the user could have specified a lower
+  // number on the command-line, in which case we print a warning
+  // and raise it to 16.
+  if (MaxVectorSize < 16) {
+    warning("ZGC requires MaxVectorSize to be at least 16");
+    FLAG_SET_DEFAULT(MaxVectorSize, 16);
+  }
+}
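
For illustration, the effect of this clamp on a hypothetical command line
(the flag combination is an example only; ZGC was still experimental at this
point):

    // java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:MaxVectorSize=8 ...
    // => "ZGC requires MaxVectorSize to be at least 16" is printed as a warning
    // => the VM continues with MaxVectorSize raised to 16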
--- a/src/hotspot/share/adlc/formssel.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/adlc/formssel.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -3801,7 +3801,7 @@
     "AddVB","AddVS","AddVI","AddVL","AddVF","AddVD",
     "AndI","AndL",
     "AndV",
-    "MaxI","MinI",
+    "MaxI","MinI","MaxF","MinF","MaxD","MinD",
     "MulI","MulL","MulF","MulD",
     "MulVS","MulVI","MulVL","MulVF","MulVD",
     "OrI","OrL",
--- a/src/hotspot/share/c1/c1_Instruction.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/c1/c1_Instruction.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -827,9 +827,16 @@
       for_each_local_value(existing_state, index, existing_value) {
         Value new_value = new_state->local_at(index);
         if (new_value == NULL || new_value->type()->tag() != existing_value->type()->tag()) {
-          // The old code invalidated the phi function here
-          // Because dead locals are replaced with NULL, this is a very rare case now, so simply bail out
-          return false; // BAILOUT in caller
+          Phi* existing_phi = existing_value->as_Phi();
+          if (existing_phi == NULL) {
+            return false; // BAILOUT in caller
+          }
+          // Invalidate the phi function here. This case is rare unless the
+          // JVMTI capability "can_access_local_variables" is enabled.
+          // In very rare cases we will still bail out in LIRGenerator::move_to_phi.
+          existing_phi->make_illegal();
+          existing_state->invalidate_local(index);
+          TRACE_PHI(tty->print_cr("invalidating local %d because of type mismatch", index));
         }
       }
 
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1113,7 +1113,7 @@
   // no moves are created for phi functions at the begin of exception
   // handlers, so assign operands manually here
   for_each_phi_fun(block(), phi,
-                   operand_for_instruction(phi));
+                   if (!phi->is_illegal()) { operand_for_instruction(phi); });
 
   LIR_Opr thread_reg = getThreadPointer();
   __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
--- a/src/hotspot/share/c1/c1_LinearScan.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/c1/c1_LinearScan.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -574,7 +574,7 @@
       // Phi functions at the begin of an exception handler are
       // implicitly defined (= killed) at the beginning of the block.
       for_each_phi_fun(block, phi,
-        live_kill.set_bit(phi->operand()->vreg_number())
+        if (!phi->is_illegal()) { live_kill.set_bit(phi->operand()->vreg_number()); }
       );
     }
 
@@ -1904,7 +1904,7 @@
 
   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
   for_each_phi_fun(block, phi,
-    resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver)
+    if (!phi->is_illegal()) { resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver); }
   );
 
   if (move_resolver.has_mappings()) {
@@ -1978,7 +1978,7 @@
 
   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
   for_each_phi_fun(block, phi,
-    resolve_exception_edge(handler, throwing_op_id, phi->operand()->vreg_number(), phi, move_resolver)
+    if (!phi->is_illegal()) { resolve_exception_edge(handler, throwing_op_id, phi->operand()->vreg_number(), phi, move_resolver); }
   );
 
   if (move_resolver.has_mappings()) {
--- a/src/hotspot/share/c1/c1_ValueStack.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/c1/c1_ValueStack.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -299,7 +299,7 @@
 }
 
 
-// Macro definition for simple iteration of all phif functions of a block, i.e all
+// Macro definition for simple iteration of all phi functions of a block, i.e. all
 // phi functions of the ValueStack where the block matches.
 // Use the following code pattern to iterate all phi functions of a block:
 //
@@ -315,7 +315,7 @@
   Value value;                                                                                 \
   {                                                                                            \
     for_each_stack_value(cur_state, cur_index, value) {                                        \
-      Phi* v_phi = value->as_Phi();                                                      \
+      Phi* v_phi = value->as_Phi();                                                            \
       if (v_phi != NULL && v_phi->block() == v_block) {                                        \
         v_code;                                                                                \
       }                                                                                        \
@@ -323,7 +323,7 @@
   }                                                                                            \
   {                                                                                            \
     for_each_local_value(cur_state, cur_index, value) {                                        \
-      Phi* v_phi = value->as_Phi();                                                      \
+      Phi* v_phi = value->as_Phi();                                                            \
       if (v_phi != NULL && v_phi->block() == v_block) {                                        \
         v_code;                                                                                \
       }                                                                                        \
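
Taken together, the C1 changes above mean that a phi reached through
for_each_phi_fun may now be illegal (invalidated in c1_Instruction.cpp), so
every iteration site has to guard on is_illegal() before touching the
operand. A minimal sketch of the guarded pattern (process() is a placeholder
for the per-site action):

    for_each_phi_fun(block, phi,
      if (!phi->is_illegal()) {
        // only legal phis still carry a usable virtual-register operand
        process(phi->operand());
      }
    );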
--- a/src/hotspot/share/classfile/dictionary.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/classfile/dictionary.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -173,7 +173,7 @@
     for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
                                 current != NULL;
                                 current = current->_next) {
-      guarantee(oopDesc::is_oop(current->_pd_cache->object_no_keepalive()), "Invalid oop");
+      guarantee(oopDesc::is_oop_or_null(current->_pd_cache->object_no_keepalive()), "Invalid oop");
     }
   }
 
--- a/src/hotspot/share/classfile/stackMapFrame.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/classfile/stackMapFrame.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,6 +63,7 @@
   ClassVerifier* _verifier;  // the verifier verifying this method
 
   StackMapFrame(const StackMapFrame& cp) :
+      ResourceObj(cp),
       _offset(cp._offset), _locals_size(cp._locals_size),
       _stack_size(cp._stack_size), _stack_mark(cp._stack_mark),
       _max_locals(cp._max_locals), _max_stack(cp._max_stack),
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -71,7 +71,6 @@
 #include "prims/resolvedMethodTable.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/arguments_ext.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -580,6 +580,10 @@
   case vmIntrinsics::_max:
   case vmIntrinsics::_floatToIntBits:
   case vmIntrinsics::_doubleToLongBits:
+  case vmIntrinsics::_maxF:
+  case vmIntrinsics::_minF:
+  case vmIntrinsics::_maxD:
+  case vmIntrinsics::_minD:
     if (!InlineMathNatives) return true;
     break;
   case vmIntrinsics::_fmaD:
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -749,6 +749,7 @@
   do_class(java_lang_StrictMath,          "java/lang/StrictMath")                                                       \
   do_signature(double2_double_signature,  "(DD)D")                                                                      \
   do_signature(double3_double_signature,  "(DDD)D")                                                                     \
+  do_signature(float2_float_signature,    "(FF)F")                                                                      \
   do_signature(float3_float_signature,    "(FFF)F")                                                                     \
   do_signature(int2_int_signature,        "(II)I")                                                                      \
   do_signature(long2_long_signature,      "(JJ)J")                                                                      \
@@ -795,6 +796,10 @@
   do_intrinsic(_subtractExactL,           java_lang_Math,         subtractExact_name, long2_long_signature,      F_S)   \
   do_intrinsic(_fmaD,                     java_lang_Math,         fma_name,           double3_double_signature,  F_S)   \
   do_intrinsic(_fmaF,                     java_lang_Math,         fma_name,           float3_float_signature,    F_S)   \
+  do_intrinsic(_maxF,                     java_lang_Math,         max_name,           float2_float_signature,    F_S)   \
+  do_intrinsic(_minF,                     java_lang_Math,         min_name,           float2_float_signature,    F_S)   \
+  do_intrinsic(_maxD,                     java_lang_Math,         max_name,           double2_double_signature,  F_S)   \
+  do_intrinsic(_minD,                     java_lang_Math,         min_name,           double2_double_signature,  F_S)   \
                                                                                                                         \
   do_intrinsic(_floatToRawIntBits,        java_lang_Float,        floatToRawIntBits_name,   float_int_signature, F_S)   \
    do_name(     floatToRawIntBits_name,                          "floatToRawIntBits")                                   \
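
With these table entries, Math.min/Math.max on float and double resolve to
dedicated intrinsic ids. A hedged sketch of how a compiler component might
dispatch on them (the switch is illustrative, not code from this changeset):

    switch (id) {
      case vmIntrinsics::_maxF:  // Math.max(FF)F
      case vmIntrinsics::_minF:  // Math.min(FF)F
      case vmIntrinsics::_maxD:  // Math.max(DD)D
      case vmIntrinsics::_minD:  // Math.min(DD)D
        // expand to the MaxF/MinF/MaxD/MinD ideal nodes (see the adlc
        // change earlier in this changeset)
        break;
      default:
        break;
    }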
--- a/src/hotspot/share/code/icBuffer.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/code/icBuffer.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -53,29 +53,29 @@
     _refill_remembered(false)
 {
   Thread* thread = Thread::current();
-  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
-  thread->set_missed_ic_stub_refill_mark(this);
+  assert(thread->missed_ic_stub_refill_verifier() == NULL, "nesting not supported");
+  thread->set_missed_ic_stub_refill_verifier(this);
 }
 
 ICRefillVerifier::~ICRefillVerifier() {
   assert(!_refill_requested || _refill_remembered,
          "Forgot to refill IC stubs after failed IC transition");
-  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
+  Thread::current()->set_missed_ic_stub_refill_verifier(NULL);
 }
 
 ICRefillVerifierMark::ICRefillVerifierMark(ICRefillVerifier* verifier) {
   Thread* thread = Thread::current();
-  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
-  thread->set_missed_ic_stub_refill_mark(this);
+  assert(thread->missed_ic_stub_refill_verifier() == NULL, "nesting not supported");
+  thread->set_missed_ic_stub_refill_verifier(verifier);
 }
 
 ICRefillVerifierMark::~ICRefillVerifierMark() {
-  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
+  Thread::current()->set_missed_ic_stub_refill_verifier(NULL);
 }
 
 static ICRefillVerifier* current_ic_refill_verifier() {
   Thread* current = Thread::current();
-  ICRefillVerifier* verifier = reinterpret_cast<ICRefillVerifier*>(current->missed_ic_stub_refill_mark());
+  ICRefillVerifier* verifier = current->missed_ic_stub_refill_verifier();
   assert(verifier != NULL, "need a verifier for safety");
   return verifier;
 }
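
Besides the rename, note the actual fix in ICRefillVerifierMark: it now
registers the passed-in verifier instead of itself, so the thread-local value
can be read back as an ICRefillVerifier without a cast. A sketch of the
intended use, inferred from the constructors above (the two scopes typically
run on different threads):

    ICRefillVerifier verifier;  // registers itself with the creating thread
    ...
    {
      // on a worker thread that received &verifier:
      ICRefillVerifierMark mark(&verifier);  // binds the verifier, not 'mark'
      // failed IC-stub transitions recorded here are checked when the
      // ICRefillVerifier itself goes out of scope
    }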
--- a/src/hotspot/share/code/nmethod.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1283,6 +1283,13 @@
       flush_dependencies(/*delete_immediately*/true);
     }
 
+    // Clear ICStubs to prevent back patching stubs of zombie or flushed
+    // nmethods during the next safepoint (see ICStub::finalize).
+    {
+      CompiledICLocker ml(this);
+      clear_ic_stubs();
+    }
+
     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
     // event and it hasn't already been reported for this nmethod then
     // report it now. The event may have been reported earlier if the GC
@@ -2533,6 +2540,7 @@
         case relocInfo::section_word_type:     return "section_word";
         case relocInfo::poll_type:             return "poll";
         case relocInfo::poll_return_type:      return "poll_return";
+        case relocInfo::trampoline_stub_type:  return "trampoline_stub";
         case relocInfo::type_mask:             return "type_bit_mask";
 
         default:
--- a/src/hotspot/share/code/scopeDesc.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/code/scopeDesc.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -51,9 +51,9 @@
 }
 
 
-ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
+void ScopeDesc::initialize(const ScopeDesc* parent, int decode_offset) {
   _code          = parent->_code;
-  _decode_offset = parent->_sender_decode_offset;
+  _decode_offset = decode_offset;
   _objects       = parent->_objects;
   _reexecute     = false; //reexecute only applies to the first scope
   _rethrow_exception = false;
@@ -61,6 +61,14 @@
   decode_body();
 }
 
+ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
+  initialize(parent, parent->_sender_decode_offset);
+}
+
+ScopeDesc::ScopeDesc(const ScopeDesc* parent, int decode_offset) {
+  initialize(parent, decode_offset);
+}
+
 
 void ScopeDesc::decode_body() {
   if (decode_offset() == DebugInformationRecorder::serialized_null) {
--- a/src/hotspot/share/code/scopeDesc.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/code/scopeDesc.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -67,6 +67,9 @@
   // avoid a .hpp-.hpp dependency.)
   ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
 
+  // Direct access to scope
+  ScopeDesc* at_offset(int decode_offset) { return new ScopeDesc(this, decode_offset); }
+
   // JVM state
   Method* method()      const { return _method; }
   int          bci()      const { return _bci;    }
@@ -85,12 +88,16 @@
   // Returns where the scope was decoded
   int decode_offset() const { return _decode_offset; }
 
+  int sender_decode_offset() const { return _sender_decode_offset; }
+
   // Tells whether sender() returns NULL
   bool is_top() const;
 
  private:
-  // Alternative constructor
+  void initialize(const ScopeDesc* parent, int decode_offset);
+  // Alternative constructors
   ScopeDesc(const ScopeDesc* parent);
+  ScopeDesc(const ScopeDesc* parent, int decode_offset);
 
   // JVM state
   Method*       _method;
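
The new at_offset()/sender_decode_offset() pair allows decoding a caller
scope directly from its offset instead of going through sender(). A hedged
sketch (sd stands for an already-decoded ScopeDesc*):

    ScopeDesc* sd = /* some existing scope */;
    int offset = sd->sender_decode_offset();   // where the caller scope lives
    ScopeDesc* caller = sd->at_offset(offset); // decode it directly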
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -137,7 +137,9 @@
   PtrQueueSet(notify_when_complete),
   _shared_dirty_card_queue(this, true /* permanent */),
   _free_ids(NULL),
-  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
+  _processed_buffers_mut(0),
+  _processed_buffers_rs_thread(0),
+  _cur_par_buffer_node(NULL)
 {
   _all_active = true;
 }
@@ -154,7 +156,7 @@
   PtrQueueSet::initialize(cbl_mon, allocator);
   _shared_dirty_card_queue.set_lock(lock);
   if (init_free_ids) {
-    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
+    _free_ids = new FreeIdSet(num_par_ids(), cbl_mon);
   }
 }
 
@@ -215,29 +217,6 @@
   return result;
 }
 
-
-BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
-  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-
-  if (_n_completed_buffers <= stop_at) {
-    return NULL;
-  }
-
-  assert(_n_completed_buffers > 0, "invariant");
-  assert(_completed_buffers_head != NULL, "invariant");
-  assert(_completed_buffers_tail != NULL, "invariant");
-
-  BufferNode* nd = _completed_buffers_head;
-  _completed_buffers_head = nd->next();
-  _n_completed_buffers--;
-  if (_completed_buffers_head == NULL) {
-    assert(_n_completed_buffers == 0, "Invariant");
-    _completed_buffers_tail = NULL;
-  }
-  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
-  return nd;
-}
-
 bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
   G1RefineCardConcurrentlyClosure cl;
   return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
@@ -265,7 +244,7 @@
     } else {
       // Return partially processed buffer to the queue.
       guarantee(!during_pause, "Should never stop early");
-      enqueue_complete_buffer(nd);
+      enqueue_completed_buffer(nd);
     }
     return true;
   }
@@ -286,32 +265,9 @@
   }
 }
 
-// Deallocates any completed log buffers
-void DirtyCardQueueSet::clear() {
-  BufferNode* buffers_to_delete = NULL;
-  {
-    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-    while (_completed_buffers_head != NULL) {
-      BufferNode* nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next();
-      nd->set_next(buffers_to_delete);
-      buffers_to_delete = nd;
-    }
-    _n_completed_buffers = 0;
-    _completed_buffers_tail = NULL;
-    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
-  }
-  while (buffers_to_delete != NULL) {
-    BufferNode* nd = buffers_to_delete;
-    buffers_to_delete = nd->next();
-    deallocate_buffer(nd);
-  }
-
-}
-
 void DirtyCardQueueSet::abandon_logs() {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-  clear();
+  abandon_completed_buffers();
   // Since abandon is done only at safepoints, we can safely manipulate
   // these queues.
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
@@ -331,10 +287,11 @@
   // the global list of logs.  Temporarily turn off the limit on the number
   // of outstanding buffers.
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-  SizeTFlagSetting local_max(_max_completed_buffers,
-                             MaxCompletedBuffersUnlimited);
+  size_t old_limit = max_completed_buffers();
+  set_max_completed_buffers(MaxCompletedBuffersUnlimited);
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
     concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
   }
   concatenate_log(_shared_dirty_card_queue);
+  set_max_completed_buffers(old_limit);
 }
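
The removed SizeTFlagSetting was an RAII helper that restored
_max_completed_buffers automatically; since the limit now sits behind
PtrQueueSet accessors, it is saved and restored explicitly around the loop.
The pattern in isolation:

    size_t old_limit = max_completed_buffers();
    set_max_completed_buffers(MaxCompletedBuffersUnlimited);
    // ... concatenate the per-thread and shared logs ...
    set_max_completed_buffers(old_limit);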
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -136,9 +136,7 @@
   // must never return false. Must only be called during GC.
   bool apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i);
 
-  BufferNode* get_completed_buffer(size_t stop_at);
-
-  void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
+  void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
   // Applies the current closure to all completed buffers, non-consumptively.
   // Can be used in parallel, all callers using the iteration state initialized
   // by reset_for_par_iteration.
@@ -148,16 +146,12 @@
     return &_shared_dirty_card_queue;
   }
 
-  // Deallocate any completed log buffers
-  void clear();
-
   // If a full collection is happening, reset partial logs, and ignore
   // completed ones: the full collection will make them all irrelevant.
   void abandon_logs();
 
   // If any threads have partial logs, add them to the global list of logs.
   void concatenate_logs();
-  void clear_n_completed_buffers() { _n_completed_buffers = 0;}
 
   jint processed_buffers_mut() {
     return _processed_buffers_mut;
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -97,7 +97,7 @@
   }
 
   _archive_check_enabled = true;
-  size_t length = Universe::heap()->max_capacity();
+  size_t length = G1CollectedHeap::heap()->max_reserved_capacity();
   _closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                         (HeapWord*)Universe::heap()->base() + length,
                                         HeapRegion::GrainBytes);
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -28,6 +28,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/gcArguments.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -156,5 +157,9 @@
 }
 
 CollectedHeap* G1Arguments::create_heap() {
-  return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
+  if (AllocateOldGenAt != NULL) {
+    return create_heap_with_policy<G1CollectedHeap, G1HeterogeneousCollectorPolicy>();
+  } else {
+    return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
+  }
 }
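
create_heap() now keys off AllocateOldGenAt: if the flag names a backing
path, the heterogeneous policy added later in this changeset is selected.
Illustrative invocation (the path and flag combination are examples only):

    // java -XX:+UseG1GC -XX:+UnlockExperimentalVMOptions \
    //      -XX:AllocateOldGenAt=/mnt/pmem ...
    // => G1HeterogeneousCollectorPolicy is used instead of G1CollectorPolicy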
--- a/src/hotspot/share/gc/g1/g1CardCounts.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -58,12 +58,12 @@
 }
 
 G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
-  _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
+  _listener(), _g1h(g1h), _ct(NULL), _card_counts(NULL), _reserved_max_card_num(0), _ct_bot(NULL) {
   _listener.set_cardcounts(this);
 }
 
 void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
-  assert(_g1h->max_capacity() > 0, "initialization order");
+  assert(_g1h->max_reserved_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
   if (G1ConcRSHotCardLimit > 0) {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -161,12 +161,12 @@
 
 // Private methods.
 
-HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
+HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res = _hrm.allocate_free_region(is_old);
+  HeapRegion* res = _hrm->allocate_free_region(type);
 
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
@@ -183,7 +183,7 @@
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
       // In either case allocate_free_region() will check for NULL.
-      res = _hrm.allocate_free_region(is_old);
+      res = _hrm->allocate_free_region(type);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -330,16 +330,16 @@
     // Only one region to allocate, try to use a fast path by directly allocating
     // from the free lists. Do not try to expand here, we will potentially do that
     // later.
-    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
+    HeapRegion* hr = new_region(word_size, HeapRegionType::Humongous, false /* do_expand */);
     if (hr != NULL) {
       first = hr->hrm_index();
     }
   } else {
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
-    first = _hrm.find_contiguous_only_empty(obj_regions);
+    first = _hrm->find_contiguous_only_empty(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     }
   }
 
@@ -347,14 +347,14 @@
     // Policy: We could not find enough regions for the humongous object in the
     // free list. Look through the heap to find a mix of free and uncommitted regions.
     // If so, try expansion.
-    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                     word_size * HeapWordSize);
 
-      _hrm.expand_at(first, obj_regions, workers());
+      _hrm->expand_at(first, obj_regions, workers());
       g1_policy()->record_new_heap_size(num_regions());
 
 #ifdef ASSERT
@@ -365,7 +365,7 @@
         assert(is_on_master_free_list(hr), "sanity");
       }
 #endif
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     } else {
       // Policy: Potentially trigger a defragmentation GC.
     }
@@ -554,7 +554,7 @@
 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   for (size_t i = 0; i < count; i++) {
     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
       return false;
@@ -571,7 +571,7 @@
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
 
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -605,7 +605,7 @@
     // range ended, and adjust the start address so we don't try to allocate
     // the same region again. If the current range is entirely within that
     // region, skip it, just adjusting the recorded top.
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
       start_address = start_region->end();
       if (start_address > last_address) {
@@ -615,12 +615,12 @@
       }
       start_region->set_top(start_address);
       curr_range = MemRegion(start_address, last_address + 1);
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
 
     // Perform the actual region allocation, exiting if it fails.
     // Then note how much new space we have allocated.
-    if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
+    if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
       return false;
     }
     increase_used(word_size * HeapWordSize);
@@ -632,8 +632,8 @@
 
     // Mark each G1 region touched by the range as archive, add it to
     // the old set, and set top.
-    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* curr_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     prev_last_region = last_region;
 
     while (curr_region != NULL) {
@@ -650,7 +650,7 @@
       HeapRegion* next_region;
       if (curr_region != last_region) {
         top = curr_region->end();
-        next_region = _hrm.next_region_in_heap(curr_region);
+        next_region = _hrm->next_region_in_heap(curr_region);
       } else {
         top = last_address + 1;
         next_region = NULL;
@@ -671,7 +671,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord *prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -691,8 +691,8 @@
            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
            p2i(start_address), p2i(prev_last_addr));
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     HeapWord* bottom_address = start_region->bottom();
 
     // Check for a range beginning in the same region in which the
@@ -708,7 +708,7 @@
       guarantee(curr_region->is_archive(),
                 "Expected archive region at index %u", curr_region->hrm_index());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
@@ -757,7 +757,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
   size_t size_used = 0;
@@ -779,8 +779,8 @@
     size_used += ranges[i].byte_size();
     prev_last_addr = last_address;
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
 
     // Check for ranges that start in the same G1 region in which the previous
     // range ended, and adjust the start address so we don't try to free
@@ -791,7 +791,7 @@
       if (start_address > last_address) {
         continue;
       }
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
     prev_last_region = last_region;
 
@@ -806,11 +806,11 @@
       curr_region->set_free();
       curr_region->set_top(curr_region->bottom());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
-      _hrm.shrink_at(curr_index, 1);
+      _hrm->shrink_at(curr_index, 1);
       uncommitted_regions++;
     }
 
@@ -1024,6 +1024,8 @@
   abandon_collection_set(collection_set());
 
   tear_down_region_sets(false /* free_list_only */);
+
+  hrm()->prepare_for_full_collection_start();
 }
 
 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
@@ -1035,6 +1037,8 @@
 }
 
 void G1CollectedHeap::prepare_heap_for_mutators() {
+  hrm()->prepare_for_full_collection_end();
+
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
   MetaspaceUtils::verify_metrics();
@@ -1071,7 +1075,7 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
   // Clear the previous marking bitmap, if needed for bitmap verification.
@@ -1325,7 +1329,7 @@
 
 
   if (expand(expand_bytes, _workers)) {
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                            false /* expect_null_mutator_alloc_region */);
@@ -1350,7 +1354,7 @@
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
   assert(regions_to_expand > 0, "Must expand by at least one region");
 
-  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
+  uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
   if (expand_time_ms != NULL) {
     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
   }
@@ -1365,7 +1369,7 @@
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _hrm.available() >= regions_to_expand) {
+        _hrm->available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
@@ -1380,7 +1384,7 @@
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
-  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
+  uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
 
@@ -1408,7 +1412,7 @@
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
 }
 
@@ -1486,7 +1490,7 @@
   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
   _bot(NULL),
   _listener(),
-  _hrm(),
+  _hrm(NULL),
   _allocator(NULL),
   _verifier(NULL),
   _summary_bytes_used(0),
@@ -1505,7 +1509,7 @@
   _survivor(),
   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
-  _g1_policy(new G1Policy(_gc_timer_stw)),
+  _g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
   _heap_sizing_policy(NULL),
   _collection_set(this, _g1_policy),
   _hot_card_cache(NULL),
@@ -1632,7 +1636,7 @@
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
-  size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
   size_t heap_alignment = collector_policy()->heap_alignment();
 
   // Ensure that the sizes are properly aligned.
@@ -1692,12 +1696,17 @@
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
   size_t page_size = actual_reserved_page_size(heap_rs);
   G1RegionToSpaceMapper* heap_storage =
-    G1RegionToSpaceMapper::create_mapper(g1_rs,
-                                         g1_rs.size(),
-                                         page_size,
-                                         HeapRegion::GrainBytes,
-                                         1,
-                                         mtJavaHeap);
+    G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
+                                              g1_rs.size(),
+                                              page_size,
+                                              HeapRegion::GrainBytes,
+                                              1,
+                                              mtJavaHeap);
+  if (heap_storage == NULL) {
+    vm_shutdown_during_initialization("Could not initialize G1 heap");
+    return JNI_ERR;
+  }
+
   os::trace_page_sizes("Heap",
                        collector_policy()->min_heap_byte_size(),
                        max_byte_size,
@@ -1728,7 +1737,9 @@
   G1RegionToSpaceMapper* next_bitmap_storage =
     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
 
-  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  _hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
+
+  _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   _card_table->initialize(cardtable_storage);
   // Do later initialization work for concurrent refinement.
   _hot_card_cache->initialize(card_counts_storage);
@@ -1743,20 +1754,20 @@
   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
   // Also create a G1 rem set.
   _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
-  _g1_rem_set->initialize(max_capacity(), max_regions());
+  _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
             "too many cards per region");
 
-  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
+  FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
 
   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
 
   {
-    HeapWord* start = _hrm.reserved().start();
-    HeapWord* end = _hrm.reserved().end();
+    HeapWord* start = _hrm->reserved().start();
+    HeapWord* end = _hrm->reserved().end();
     size_t granularity = HeapRegion::GrainBytes;
 
     _in_cset_fast_test.initialize(start, end, granularity);
@@ -1807,7 +1818,7 @@
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
-  HeapRegion* dummy_region = _hrm.get_dummy_region();
+  HeapRegion* dummy_region = _hrm->get_dummy_region();
 
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
@@ -1927,16 +1938,20 @@
   return _collector_policy;
 }
 
+G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
+  return _collector_policy;
+}
+
 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
   return &_soft_ref_policy;
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _hrm.length() * HeapRegion::GrainBytes;
+  return _hrm->length() * HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
-  return _hrm.total_free_bytes();
+  return _hrm->total_free_bytes();
 }
 
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
@@ -1949,9 +1964,8 @@
   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
     n_completed_buffers++;
   }
+  assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
-  dcqs.clear_n_completed_buffers();
-  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
 }
 
 // Computes the sum of the storage used by the various regions.
@@ -2002,6 +2016,18 @@
   }
 }
 
+bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
+  if (g1_policy()->force_upgrade_to_full()) {
+    return true;
+  } else if (should_do_concurrent_full_gc(_gc_cause)) {
+    return false;
+  } else if (has_regions_left_for_allocation()) {
+    return false;
+  } else {
+    return true;
+  }
+}
+
 #ifndef PRODUCT
 void G1CollectedHeap::allocate_dummy_regions() {
   // Let's fill up most of the region
@@ -2152,7 +2178,7 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrm.reserved().contains(p)) {
+  if (_hrm->reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
     // heap_region_containing() should successfully
     // return the containing region.
@@ -2166,7 +2192,7 @@
 #ifdef ASSERT
 bool G1CollectedHeap::is_in_exact(const void* p) const {
   bool contains = reserved_region().contains(p);
-  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
+  bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
   if (contains && available) {
     return true;
   } else {
@@ -2197,18 +2223,18 @@
 }
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
-  _hrm.iterate(cl);
+  _hrm->iterate(cl);
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                                  HeapRegionClaimer *hrclaimer,
                                                                  uint worker_id) const {
-  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
+  _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                                          HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, hrclaimer, 0);
+  _hrm->par_iterate(cl, hrclaimer, 0);
 }
 
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
@@ -2257,7 +2283,11 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _hrm.reserved().byte_size();
+  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
+}
+
+size_t G1CollectedHeap::max_reserved_capacity() const {
+  return _hrm->max_length() * HeapRegion::GrainBytes;
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -2347,8 +2377,8 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(_hrm.reserved().start()),
-            p2i(_hrm.reserved().end()));
+            p2i(_hrm->reserved().start()),
+            p2i(_hrm->reserved().end()));
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = young_regions_count();
@@ -3131,7 +3161,7 @@
     // output from the concurrent mark thread interfering with this
     // logging output either.
 
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
 
     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
@@ -3947,7 +3977,7 @@
                                   bool locked) {
   assert(!hr->is_free(), "the region should not be free");
   assert(!hr->is_empty(), "the region should not be empty");
-  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
+  assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
@@ -3988,7 +4018,7 @@
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _hrm.insert_list_into_free_list(list);
+    _hrm->insert_list_into_free_list(list);
   }
 }
 
@@ -4287,7 +4317,7 @@
  public:
 
   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
-    _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
+    _free_region_list(free_region_list), _proxy_set(NULL), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
   }
 
   virtual bool do_heap_region(HeapRegion* r) {
@@ -4521,7 +4551,7 @@
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _hrm.remove_all_free_regions();
+  _hrm->remove_all_free_regions();
 }
 
 void G1CollectedHeap::increase_used(size_t bytes) {
@@ -4596,7 +4626,7 @@
     _survivor.clear();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
@@ -4623,7 +4653,7 @@
   bool should_allocate = g1_policy()->should_allocate_mutator_region();
   if (force || should_allocate) {
     HeapRegion* new_alloc_region = new_region(word_size,
-                                              false /* is_old */,
+                                              HeapRegionType::Eden,
                                               false /* do_expand */);
     if (new_alloc_region != NULL) {
       set_region_short_lived_locked(new_alloc_region);
@@ -4667,13 +4697,19 @@
     return NULL;
   }
 
-  const bool is_survivor = dest.is_young();
+  HeapRegionType type;
+  if (dest.is_young()) {
+    type = HeapRegionType::Survivor;
+  } else {
+    type = HeapRegionType::Old;
+  }
 
   HeapRegion* new_alloc_region = new_region(word_size,
-                                            !is_survivor,
+                                            type,
                                             true /* do_expand */);
+
   if (new_alloc_region != NULL) {
-    if (is_survivor) {
+    if (type.is_survivor()) {
       new_alloc_region->set_survivor();
       _survivor.add(new_alloc_region);
       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
@@ -4705,14 +4741,14 @@
 
 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
   bool expanded = false;
-  uint index = _hrm.find_highest_free(&expanded);
+  uint index = _hrm->find_highest_free(&expanded);
 
   if (index != G1_NO_HRM_INDEX) {
     if (expanded) {
       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
                                 HeapRegion::GrainWords * HeapWordSize);
     }
-    _hrm.allocate_free_regions_starting_at(index, 1);
+    _hrm->allocate_free_regions_starting_at(index, 1);
     return region_at(index);
   }
   return NULL;
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -45,6 +45,7 @@
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/heapRegionManager.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
@@ -194,7 +195,7 @@
   G1RegionMappingChangedListener _listener;
 
   // The sequence of all heap regions in the heap.
-  HeapRegionManager _hrm;
+  HeapRegionManager* _hrm;
 
   // Manages all allocations with regions except humongous object allocations.
   G1Allocator* _allocator;
@@ -267,6 +268,9 @@
   // (e) cause == _wb_conc_mark
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
+  // Returns true if we should upgrade to a full gc after an incremental one.
+  bool should_upgrade_to_full_gc(GCCause::Cause cause);
+
   // indicates whether we are in young or mixed GC mode
   G1CollectorState _collector_state;
 
@@ -369,9 +373,9 @@
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
-  // request. If the region is to be used as an old region or for a
-  // humongous object, set is_old to true. If not, to false.
-  HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
+  // request. 'type' specifies the type of region to allocate (use the
+  // Old, Eden, Humongous or Survivor constants defined in HeapRegionType).
+  HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
@@ -957,10 +961,13 @@
   // The current policy object for the collector.
   G1Policy* g1_policy() const { return _g1_policy; }
 
+  HeapRegionManager* hrm() const { return _hrm; }
+
   const G1CollectionSet* collection_set() const { return &_collection_set; }
   G1CollectionSet* collection_set() { return &_collection_set; }
 
   virtual CollectorPolicy* collector_policy() const;
+  virtual G1CollectorPolicy* g1_collector_policy() const;
 
   virtual SoftRefPolicy* soft_ref_policy();
 
@@ -1009,7 +1016,7 @@
   // But G1CollectedHeap doesn't yet support this.
 
   virtual bool is_maximal_no_gc() const {
-    return _hrm.available() == 0;
+    return _hrm->available() == 0;
   }
 
   // Returns whether there are any regions left in the heap for allocation.
@@ -1018,19 +1025,22 @@
   }
 
   // The current number of regions in the heap.
-  uint num_regions() const { return _hrm.length(); }
+  uint num_regions() const { return _hrm->length(); }
 
   // The max number of regions in the heap.
-  uint max_regions() const { return _hrm.max_length(); }
+  uint max_regions() const { return _hrm->max_length(); }
+
+  // Max number of regions that can be committed.
+  uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
 
   // The number of regions that are completely free.
-  uint num_free_regions() const { return _hrm.num_free_regions(); }
+  uint num_free_regions() const { return _hrm->num_free_regions(); }
 
   // The number of regions that can be allocated into.
-  uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }
+  uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
 
   MemoryUsage get_auxiliary_data_memory_usage() const {
-    return _hrm.get_auxiliary_data_memory_usage();
+    return _hrm->get_auxiliary_data_memory_usage();
   }
 
   // The number of regions that are not completely free.
@@ -1038,7 +1048,7 @@
 
 #ifdef ASSERT
   bool is_on_master_free_list(HeapRegion* hr) {
-    return _hrm.is_free(hr);
+    return _hrm->is_free(hr);
   }
 #endif // ASSERT
 
@@ -1095,13 +1105,13 @@
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
-    return _hrm.reserved().contains(p);
+    return _hrm->reserved().contains(p);
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // reserved for the heap
   MemRegion g1_reserved() const {
-    return _hrm.reserved();
+    return _hrm->reserved();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
@@ -1227,6 +1237,9 @@
   // Print the maximum heap capacity.
   virtual size_t max_capacity() const;
 
+  // Returns the size of reserved memory. This differs from max_capacity() when AllocateOldGenAt is used.
+  virtual size_t max_reserved_capacity() const;
+
   virtual jlong millis_since_last_gc();
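
After this split there are two maximum sizes: max_capacity() is what the heap
can reach by committing regions, while max_reserved_capacity() covers the
whole reserved range; they differ only when AllocateOldGenAt is in use. As a
sketch of the resulting invariant (names from the declarations above):

    // capacity() <= max_capacity() <= max_reserved_capacity()
    // without AllocateOldGenAt: max_capacity() == max_reserved_capacity()
    // with AllocateOldGenAt:    the reserved range is larger, since room is
    //                           reserved for more than one memory type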
 
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -57,13 +57,13 @@
 // Inline functions for G1CollectedHeap
 
 // Return the region with the given index. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }
 
 // Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
+inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }
 
 inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
-  return _hrm.next_region_in_humongous(hr);
+  return _hrm->next_region_in_humongous(hr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
@@ -74,7 +74,7 @@
 }
 
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
-  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
+  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
 }
 
 template <class T>
@@ -83,7 +83,7 @@
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
-  return _hrm.addr_to_region((HeapWord*) addr);
+  return _hrm->addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
@@ -266,12 +266,12 @@
 }
 
 inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   _humongous_reclaim_candidates.set_candidate(region, value);
 }
 
 inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   return _humongous_reclaim_candidates.is_candidate(region);
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectorPolicy.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,3 +55,11 @@
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
+
+size_t G1CollectorPolicy::heap_reserved_size_bytes() const {
+  return _max_heap_byte_size;
+}
+
+bool G1CollectorPolicy::is_hetero_heap() const {
+  return false;
+}
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectorPolicy.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
 
 public:
   G1CollectorPolicy();
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
 };
-
 #endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
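
These two virtuals are the hook points for the heterogeneous policy
introduced later in this changeset. A minimal sketch of an override (the
class name and the doubling factor are assumptions for illustration, not
taken from this diff):

    class MyHeteroPolicy : public G1CollectorPolicy {
    public:
      virtual size_t heap_reserved_size_bytes() const {
        return 2 * _max_heap_byte_size;  // reserve room for both memory types
      }
      virtual bool is_hetero_heap() const { return true; }
    };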
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -75,7 +75,7 @@
     set_active(true);
   } else {
     DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    dcqs.set_process_completed(true);
+    dcqs.set_process_completed_buffers(true);
   }
   _monitor->notify();
 }
@@ -86,7 +86,7 @@
     set_active(false);
   } else {
     DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    dcqs.set_process_completed(false);
+    dcqs.set_process_completed_buffers(false);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -337,10 +337,6 @@
 }
 
 class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
-private:
-  G1CollectedHeap* _g1h;
-public:
-  VerifyArchivePointerRegionClosure(G1CollectedHeap* g1h) { }
   virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_archive()) {
       VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
@@ -352,7 +348,7 @@
 
 void G1HeapVerifier::verify_archive_regions() {
   G1CollectedHeap*  g1h = G1CollectedHeap::heap();
-  VerifyArchivePointerRegionClosure cl(NULL);
+  VerifyArchivePointerRegionClosure cl;
   g1h->heap_region_iterate(&cl);
 }
 
@@ -603,14 +599,14 @@
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _g1h->_hrm.verify();
+  _g1h->_hrm->verify();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
   _g1h->heap_region_iterate(&cl);
-  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
 }
 
 void G1HeapVerifier::prepare_for_verify() {
@@ -851,7 +847,7 @@
 
 bool G1HeapVerifier::check_cset_fast_test() {
   G1CheckCSetFastTableClosure cl;
-  _g1h->_hrm.iterate(&cl);
+  _g1h->_hrm->iterate(&cl);
   return !cl.failures();
 }
 #endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/os.hpp"
+#include "utilities/formatBuffer.hpp"
+
+const double G1HeterogeneousCollectorPolicy::MaxRamFractionForYoung = 0.8;
+size_t G1HeterogeneousCollectorPolicy::MaxMemoryForYoung;
+
+static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
+  julong phys_mem;
+  // If MaxRAM is specified, we use that as the maximum physical memory available.
+  if (FLAG_IS_DEFAULT(MaxRAM)) {
+    phys_mem = os::physical_memory();
+    calc_str.append("Physical_Memory");
+  } else {
+    phys_mem = (julong)MaxRAM;
+    calc_str.append("MaxRAM");
+  }
+
+  julong reasonable_max = phys_mem;
+
+  // If either MaxRAMFraction or MaxRAMPercentage is specified, we use it to
+  // calculate a reasonable max size for the young generation.
+  if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
+    reasonable_max = (julong)(phys_mem / MaxRAMFraction);
+    calc_str.append(" / MaxRAMFraction");
+  }  else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
+    reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+    calc_str.append(" * MaxRAMPercentage / 100");
+  }  else {
+    // We use our own fraction to calculate the max size of the young generation.
+    reasonable_max = phys_mem * max_ram_fraction_for_young;
+    calc_str.append(" * %0.2f", max_ram_fraction_for_young);
+  }
+
+  return (size_t)reasonable_max;
+}
+
+void G1HeterogeneousCollectorPolicy::initialize_flags() {
+
+  FormatBuffer<100> calc_str("");
+
+  MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
+
+  if (MaxNewSize > MaxMemoryForYoung) {
+    if (FLAG_IS_CMDLINE(MaxNewSize)) {
+      log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            MaxMemoryForYoung, calc_str.buffer());
+    } else {
+      log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
+                         "Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
+    }
+    MaxNewSize = MaxMemoryForYoung;
+  }
+  if (NewSize > MaxMemoryForYoung) {
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            MaxMemoryForYoung, calc_str.buffer());
+    }
+    NewSize = MaxMemoryForYoung;
+  }
+
+  // After setting new size flags, call base class initialize_flags()
+  G1CollectorPolicy::initialize_flags();
+}
+
+size_t G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() {
+  return MaxMemoryForYoung;
+}
+
+size_t G1HeterogeneousCollectorPolicy::heap_reserved_size_bytes() const {
+  return 2 * _max_heap_byte_size;
+}
+
+bool G1HeterogeneousCollectorPolicy::is_hetero_heap() const {
+  return true;
+}
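
A minimal sketch of the young-gen cap computed above, assuming plain integer/double inputs in place of the VM flags (MaxRAM, MaxRAMFraction, MaxRAMPercentage) and of os::physical_memory(); zero stands in for "flag not set":

#include <cstdint>

uint64_t reasonable_max_memory_for_young(uint64_t phys_mem,
                                         uint64_t max_ram_fraction,     // 0 == not set
                                         double   max_ram_percentage,   // 0 == not set
                                         double   fraction_for_young) { // default 0.8
  if (max_ram_fraction != 0) {
    return phys_mem / max_ram_fraction;                       // phys_mem / MaxRAMFraction
  }
  if (max_ram_percentage != 0.0) {
    return (uint64_t)((phys_mem * max_ram_percentage) / 100.0);
  }
  // Neither fraction nor percentage set: use our own fraction (80% of dram).
  return (uint64_t)(phys_mem * fraction_for_young);
}
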
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
+
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
+
+class G1HeterogeneousCollectorPolicy : public G1CollectorPolicy {
+private:
+  // Max fraction of dram to use for the young generation when MaxRAMFraction
+  // and MaxRAMPercentage are not specified on the command line.
+  static const double MaxRamFractionForYoung;
+  static size_t MaxMemoryForYoung;
+
+protected:
+  virtual void initialize_flags();
+
+public:
+  G1HeterogeneousCollectorPolicy() {}
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
+  static size_t reasonable_max_memory_for_young();
+};
+
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+
+G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
+  G1Policy(policy, gc_timer), _manager(NULL) {}
+
+// We call the super class init(), after which we provision young_list_target_length() regions in dram.
+void G1HeterogeneousHeapPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
+  G1Policy::init(g1h, collection_set);
+  _manager = HeterogeneousHeapRegionManager::manager();
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+// After a collection pause, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
+void G1HeterogeneousHeapPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
+  G1Policy::record_collection_pause_end(pause_time_ms, cards_scanned, heap_used_bytes_before_gc);
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+// After a full collection, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
+void G1HeterogeneousHeapPolicy::record_full_collection_end() {
+  G1Policy::record_full_collection_end();
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+bool G1HeterogeneousHeapPolicy::force_upgrade_to_full() {
+  if (_manager->has_borrowed_regions()) {
+    return true;
+  }
+  return false;
+}
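
The three overrides above share one shape: delegate to the base G1Policy, then re-provision dram regions for the new young target. A stand-in sketch of that pattern (BasePolicy and HeteroPolicy are illustrative, not the JDK classes):

#include <cstdio>

struct BasePolicy {
  virtual ~BasePolicy() {}
  virtual void record_full_collection_end() { puts("base bookkeeping"); }
  unsigned young_list_target_length() const { return 16; }  // illustrative value
};

struct HeteroPolicy : public BasePolicy {
  virtual void record_full_collection_end() {
    BasePolicy::record_full_collection_end();         // super class first
    adjust_dram_regions(young_list_target_length());  // then resize dram
  }
  void adjust_dram_regions(unsigned target) {
    printf("ensure %u dram regions are committed\n", target);
  }
};
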
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
+
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+
+class G1HeterogeneousHeapPolicy : public G1Policy {
+  // Stash a pointer to the hrm.
+  HeterogeneousHeapRegionManager* _manager;
+
+public:
+  G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
+
+  // initialize policy
+  virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
+  // Record end of an evacuation pause.
+  virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
+  // Record the end of a full collection.
+  virtual void record_full_collection_end();
+
+  virtual bool force_upgrade_to_full();
+};
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
+#include "gc/g1/heapRegion.hpp"
+
+G1HeterogeneousHeapYoungGenSizer::G1HeterogeneousHeapYoungGenSizer() : G1YoungGenSizer() {
+  // Will be used later when the min and max young sizes are calculated.
+  _max_young_length = (uint)(G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
+}
+
+// Since the heap is potentially sized to a larger value to account for dram + nvdimm, we need to
+// limit the max young gen size to the available dram.
+// Call the parent class method first and then adjust sizes based on the available dram.
+void G1HeterogeneousHeapYoungGenSizer::adjust_max_new_size(uint number_of_heap_regions) {
+  G1YoungGenSizer::adjust_max_new_size(number_of_heap_regions);
+  adjust_lengths_based_on_dram_memory();
+}
+
+void G1HeterogeneousHeapYoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
+  G1YoungGenSizer::heap_size_changed(new_number_of_heap_regions);
+  adjust_lengths_based_on_dram_memory();
+}
+
+void G1HeterogeneousHeapYoungGenSizer::adjust_lengths_based_on_dram_memory() {
+  _min_desired_young_length = MIN2(_min_desired_young_length, _max_young_length);
+  _max_desired_young_length = MIN2(_max_desired_young_length, _max_young_length);
+}
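
adjust_lengths_based_on_dram_memory() above is just a clamp; a self-contained sketch with illustrative names, where the region math mirrors the constructor (dram budget divided by the region size):

#include <algorithm>

unsigned max_young_regions(unsigned long long dram_budget_bytes,
                           unsigned long long region_bytes) {
  return (unsigned)(dram_budget_bytes / region_bytes);  // as in the constructor
}

void clamp_young_lengths(unsigned& min_desired, unsigned& max_desired,
                         unsigned max_young_length) {
  min_desired = std::min(min_desired, max_young_length);
  max_desired = std::min(max_desired, max_young_length);
}
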
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
+
+#include "gc/g1/g1YoungGenSizer.hpp"
+
+// This class prevents the size of the young generation of the G1 heap from
+// exceeding the dram memory available. If set on the command line, MaxRAM and
+// MaxRAMFraction/MaxRAMPercentage determine the maximum size the young
+// generation can grow to. Otherwise we cap it at 80% of the dram in the system.
+
+class G1HeterogeneousHeapYoungGenSizer : public G1YoungGenSizer {
+private:
+  // Maximum number of regions that the young generation can grow to. Calculated in the constructor.
+  uint _max_young_length;
+  void adjust_lengths_based_on_dram_memory();
+
+public:
+  G1HeterogeneousHeapYoungGenSizer();
+
+  // Calculate the maximum length of the young gen given the number of regions
+  // depending on the sizing algorithm.
+  virtual void adjust_max_new_size(uint number_of_heap_regions);
+
+  virtual void heap_size_changed(uint new_number_of_heap_regions);
+};
+
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -100,6 +100,12 @@
   return reserved_size() - committed_size();
 }
 
+void G1PageBasedVirtualSpace::commit_and_set_special() {
+  commit_internal(addr_to_page_index(_low_boundary), addr_to_page_index(_high_boundary));
+  _special = true;
+  _dirty.initialize(reserved_size()/_page_size);
+}
+
 size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
   return (addr - _low_boundary) / _page_size;
 }
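
commit_and_set_special() above commits the whole reserved range eagerly and flags the space "special"; a simplified stand-in (not the JDK types) of what that amounts to:

#include <cstddef>

struct VirtualSpaceSketch {
  size_t page_count;
  bool   special;
  void commit_pages(size_t from, size_t to) { (void)from; (void)to; /* mmap/VirtualAlloc etc. */ }
  void commit_and_set_special() {
    commit_pages(0, page_count);  // commit everything up front
    special = true;               // later commit/uncommit calls become no-ops
  }
};
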
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -136,6 +136,8 @@
   // Memory left to use/expand in this virtual space.
   size_t uncommitted_size() const;
 
+  void commit_and_set_special();
+
   bool contains(const void* p) const;
 
   MemRegion reserved() {
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -29,6 +29,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -46,7 +47,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/pair.hpp"
 
-G1Policy::G1Policy(STWGCTimer* gc_timer) :
+G1Policy::G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
   _predictor(G1ConfidencePercent / 100.0),
   _analytics(new G1Analytics(&_predictor)),
   _remset_tracker(),
@@ -62,7 +63,7 @@
   _survivor_surv_rate_group(new SurvRateGroup()),
   _reserve_factor((double) G1ReservePercent / 100.0),
   _reserve_regions(0),
-  _young_gen_sizer(),
+  _young_gen_sizer(G1YoungGenSizer::create_gen_sizer(policy)),
   _free_regions_at_end_of_collection(0),
   _max_rs_lengths(0),
   _rs_lengths_prediction(0),
@@ -83,6 +84,15 @@
 
 G1Policy::~G1Policy() {
   delete _ihop_control;
+  delete _young_gen_sizer;
+}
+
+G1Policy* G1Policy::create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw) {
+  if (policy->is_hetero_heap()) {
+    return new G1HeterogeneousHeapPolicy(policy, gc_timer_stw);
+  } else {
+    return new G1Policy(policy, gc_timer_stw);
+  }
 }
 
 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
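
create_policy() above is the same is_hetero_heap() dispatch that G1YoungGenSizer::create_gen_sizer() and HeapRegionManager::create_manager() follow further down. A stand-in sketch of the pattern (names are illustrative, not the JDK classes):

#include <memory>

struct Policy       { virtual ~Policy() {} virtual bool is_hetero_heap() const { return false; } };
struct Worker       { virtual ~Worker() {} };
struct HeteroWorker : public Worker {};

std::unique_ptr<Worker> create_worker(const Policy& p) {
  if (p.is_hetero_heap()) {
    return std::unique_ptr<Worker>(new HeteroWorker());  // dram + nvdimm variant
  }
  return std::unique_ptr<Worker>(new Worker());          // homogeneous default
}
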
@@ -94,9 +104,9 @@
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 
   if (!adaptive_young_list_length()) {
-    _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
+    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
+  _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
 
   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 
@@ -176,7 +186,7 @@
   // smaller than 1.0) we'll get 1.
   _reserve_regions = (uint) ceil(reserve_regions_d);
 
-  _young_gen_sizer.heap_size_changed(new_number_of_regions);
+  _young_gen_sizer->heap_size_changed(new_number_of_regions);
 
   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 }
@@ -195,14 +205,14 @@
   }
   desired_min_length += base_min_length;
   // make sure we don't go below any user-defined minimum bound
-  return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
+  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 }
 
 uint G1Policy::calculate_young_list_desired_max_length() const {
   // Here, we might want to also take into account any additional
   // constraints (i.e., user-defined minimum bound). Currently, we
   // effectively don't set this bound.
-  return _young_gen_sizer.max_desired_young_length();
+  return _young_gen_sizer->max_desired_young_length();
 }
 
 uint G1Policy::update_young_list_max_and_target_length() {
@@ -218,6 +228,7 @@
 uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
   _young_list_target_length = young_lengths.first;
+
   return young_lengths.second;
 }
 
@@ -900,7 +911,7 @@
 }
 
 bool G1Policy::adaptive_young_list_length() const {
-  return _young_gen_sizer.adaptive_young_list_length();
+  return _young_gen_sizer->adaptive_young_list_length();
 }
 
 size_t G1Policy::desired_survivor_size(uint max_regions) const {
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1POLICY_HPP
 #define SHARE_VM_GC_G1_G1POLICY_HPP
 
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1InCSetState.hpp"
@@ -91,7 +92,7 @@
   // for the first time during initialization.
   uint   _reserve_regions;
 
-  G1YoungGenSizer _young_gen_sizer;
+  G1YoungGenSizer* _young_gen_sizer;
 
   uint _free_regions_at_end_of_collection;
 
@@ -282,10 +283,12 @@
   void abort_time_to_mixed_tracking();
 public:
 
-  G1Policy(STWGCTimer* gc_timer);
+  G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
 
   virtual ~G1Policy();
 
+  static G1Policy* create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw);
+
   G1CollectorState* collector_state() const;
 
   G1GCPhaseTimes* phase_times() const { return _phase_times; }
@@ -298,7 +301,7 @@
   // This should be called after the heap is resized.
   void record_new_heap_size(uint new_number_of_regions);
 
-  void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
+  virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
 
   void note_gc_start();
 
@@ -308,11 +311,11 @@
 
   // Record the start and end of an evacuation pause.
   void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
+  virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
 
   // Record the start and end of a full collection.
   void record_full_collection_start();
-  void record_full_collection_end();
+  virtual void record_full_collection_end();
 
   // Must currently be called while the world is stopped.
   void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
@@ -432,6 +435,10 @@
   void update_max_gc_locker_expansion();
 
   void update_survivors_policy();
+
+  virtual bool force_upgrade_to_full() {
+    return false;
+  }
 };
 
 #endif // SHARE_VM_GC_G1_G1POLICY_HPP
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,15 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/virtualspace.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/formatBuffer.hpp"
 
 G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                              size_t used_size,
@@ -170,16 +174,156 @@
   }
 }
 
+static bool map_nvdimm_space(ReservedSpace rs) {
+  assert(AllocateOldGenAt != NULL, "");
+  int backing_fd = os::create_file_for_heap(AllocateOldGenAt);
+  if (backing_fd == -1) {
+    log_error(gc, init)("Could not create file for Old generation at location %s", AllocateOldGenAt);
+    return false;
+  }
+  // Commit this memory in nv-dimm.
+  char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), backing_fd);
+
+  if (ret != rs.base()) {
+    if (ret != NULL) {
+      os::unmap_memory(rs.base(), rs.size());
+    }
+    log_error(gc, init)("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt);
+    os::close(backing_fd);
+    return false;
+  }
+
+  os::close(backing_fd);
+  return true;
+}
+
+G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
+                                                         size_t actual_size,
+                                                         size_t page_size,
+                                                         size_t alloc_granularity,
+                                                         size_t commit_factor,
+                                                         MemoryType type) :
+  G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
+  _rs(rs),
+  _num_committed_dram(0),
+  _num_committed_nvdimm(0),
+  _page_size(page_size),
+  _commit_factor(commit_factor),
+  _type(type) {
+  assert(actual_size == 2 * MaxHeapSize, "For 2-way heterogeneous heap, reserved space is two times MaxHeapSize");
+}
+
+bool G1RegionToHeteroSpaceMapper::initialize() {
+  // Since we need to re-map the reserved space ('Xmx' to nv-dimm and 'Xmx' to dram), we need to release the reserved memory first,
+  // because on some OSes (e.g. Windows) you cannot do a file mapping on memory reserved with a regular mapping.
+  os::release_memory(_rs.base(), _rs.size());
+  // First half of size Xmx is for nv-dimm.
+  ReservedSpace rs_nvdimm = _rs.first_part(MaxHeapSize);
+  assert(rs_nvdimm.base() == _rs.base(), "We should get the same base address");
+
+  // Second half of reserved memory is mapped to dram.
+  ReservedSpace rs_dram = _rs.last_part(MaxHeapSize);
+
+  assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "They all should be same");
+
+  // Reserve dram memory
+  char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
+  if (base != rs_dram.base()) {
+    if (base != NULL) {
+      os::release_memory(base, rs_dram.size());
+    }
+    log_error(gc, init)("Error in re-mapping memory on dram during G1 heterogenous memory initialization");
+    return false;
+  }
+
+  // We reserve and commit this entire space to NV-DIMM.
+  if (!map_nvdimm_space(rs_nvdimm)) {
+    log_error(gc, init)("Error in re-mapping memory to nv-dimm during G1 heterogenous memory initialization");
+    return false;
+  }
+
+  if (_region_granularity >= (_page_size * _commit_factor)) {
+    _dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
+  } else {
+    _dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
+  }
+
+  _start_index_of_nvdimm = 0;
+  _start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
+  return true;
+}
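
A worked example of the split performed by initialize() above, with hypothetical numbers (8 GB -Xmx, 32 MB regions): the 16 GB reservation is released, its first half re-mapped to the nvdimm file and its second half to ordinary dram.

#include <cstdio>

int main() {
  const unsigned long long G = 1024ULL * 1024 * 1024;
  const unsigned long long M = 1024ULL * 1024;
  unsigned long long max_heap = 8 * G;   // hypothetical MaxHeapSize
  unsigned long long region   = 32 * M;  // hypothetical region granularity
  // nvdimm occupies region indices [0, start_index_of_dram).
  unsigned start_index_of_dram = (unsigned)(max_heap / region);
  printf("dram regions start at index %u\n", start_index_of_dram);  // prints 256
  return 0;
}
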
+
+void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
+
+  if (num_nvdimm > 0) {
+    // We do not need to commit nv-dimm regions, since they are committed at the beginning.
+    _num_committed_nvdimm += num_nvdimm;
+  }
+  if (num_dram > 0) {
+    _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
+    _num_committed_dram += num_dram;
+  }
+}
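
The index arithmetic in commit_regions() above splits a request that may straddle the nvdimm/dram boundary; only the dram part needs a real commit. A self-contained sketch with illustrative values:

#include <algorithm>
#include <cstdio>

void split_range(unsigned start_idx, unsigned num_regions, unsigned start_index_of_dram,
                 unsigned* num_nvdimm, unsigned* num_dram) {
  unsigned end_idx = start_idx + num_regions - 1;
  *num_dram = end_idx >= start_index_of_dram
            ? std::min(end_idx - start_index_of_dram + 1, num_regions) : 0;
  *num_nvdimm = num_regions - *num_dram;
}

int main() {
  unsigned nv, dram;
  split_range(254, 5, 256, &nv, &dram);     // regions 254..258, boundary at 256
  printf("nvdimm=%u dram=%u\n", nv, dram);  // prints: nvdimm=2 dram=3
  return 0;
}
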
+
+void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
+
+  if (num_nvdimm > 0) {
+    // We do not uncommit memory for nv-dimm regions.
+    _num_committed_nvdimm -= num_nvdimm;
+  }
+
+  if (num_dram > 0) {
+    _dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
+    _num_committed_dram -= num_dram;
+  }
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_dram() const {
+  return _num_committed_dram;
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() const {
+  return _num_committed_nvdimm;
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
+                                                                 size_t actual_size,
+                                                                 size_t page_size,
+                                                                 size_t region_granularity,
+                                                                 size_t commit_factor,
+                                                                 MemoryType type) {
+  if (AllocateOldGenAt != NULL) {
+    G1RegionToHeteroSpaceMapper* mapper = new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+    if (!mapper->initialize()) {
+      delete mapper;
+      return NULL;
+    }
+    return (G1RegionToSpaceMapper*)mapper;
+  } else {
+    return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+  }
+}
+
 G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                             size_t actual_size,
                                                             size_t page_size,
                                                             size_t region_granularity,
                                                             size_t commit_factor,
                                                             MemoryType type) {
-
   if (region_granularity >= (page_size * commit_factor)) {
     return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   } else {
     return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   }
 }
+
+void G1RegionToSpaceMapper::commit_and_set_special() {
+  _storage.commit_and_set_special();
+}
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -70,6 +70,7 @@
     return _commit_map.at(idx);
   }
 
+  void commit_and_set_special();
   virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;
   virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
 
@@ -87,6 +88,37 @@
                                               size_t region_granularity,
                                               size_t byte_translation_factor,
                                               MemoryType type);
+
+  static G1RegionToSpaceMapper* create_heap_mapper(ReservedSpace rs,
+                                                   size_t actual_size,
+                                                   size_t page_size,
+                                                   size_t region_granularity,
+                                                   size_t byte_translation_factor,
+                                                   MemoryType type);
 };
 
+// G1RegionToSpaceMapper implementation where
+// part of space is mapped to dram and part to nv-dimm
+class G1RegionToHeteroSpaceMapper : public G1RegionToSpaceMapper {
+private:
+  size_t _pages_per_region;
+  ReservedSpace _rs;
+  G1RegionToSpaceMapper* _dram_mapper;
+  uint _num_committed_dram;
+  uint _num_committed_nvdimm;
+  uint _start_index_of_nvdimm;
+  uint _start_index_of_dram;
+  size_t _page_size;
+  size_t _commit_factor;
+  MemoryType _type;
+
+public:
+  G1RegionToHeteroSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
+  bool initialize();
+  uint num_committed_dram() const;
+  uint num_committed_nvdimm() const;
+
+  virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1);
+};
 #endif // SHARE_VM_GC_G1_G1REGIONTOSPACEMAPPER_HPP
--- a/src/hotspot/share/gc/g1/g1VMOperations.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -138,8 +138,8 @@
       // kind of GC.
       _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
     } else {
-      bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
-                                    !g1h->has_regions_left_for_allocation();
+      bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause);
+
       if (should_upgrade_to_full) {
         // There has been a request to perform a GC to free some space. We have no
         // information on how much memory has been asked for. In case there are
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "logging/log.hpp"
 
 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
-  _min_desired_young_length(0), _max_desired_young_length(0), _adaptive_size(true) {
+  _adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {
 
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
@@ -127,3 +129,11 @@
   recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
           &_max_desired_young_length);
 }
+
+G1YoungGenSizer* G1YoungGenSizer::create_gen_sizer(G1CollectorPolicy* policy) {
+  if (policy->is_hetero_heap()) {
+    return new G1HeterogeneousHeapYoungGenSizer();
+  } else {
+    return new G1YoungGenSizer();
+  }
+}
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
 #define SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
 
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // There are three command line options related to the young gen size:
@@ -63,7 +64,7 @@
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
-class G1YoungGenSizer {
+class G1YoungGenSizer : public CHeapObj<mtGC> {
 private:
   enum SizerKind {
     SizerDefaults,
@@ -73,8 +74,6 @@
     SizerNewRatio
   };
   SizerKind _sizer_kind;
-  uint _min_desired_young_length;
-  uint _max_desired_young_length;
 
   // False when using a fixed young generation size due to command-line options,
   // true otherwise.
@@ -87,13 +86,17 @@
   // given the number of heap regions depending on the kind of sizing algorithm.
   void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
 
+protected:
+  uint _min_desired_young_length;
+  uint _max_desired_young_length;
+
 public:
   G1YoungGenSizer();
   // Calculate the maximum length of the young gen given the number of regions
   // depending on the sizing algorithm.
-  void adjust_max_new_size(uint number_of_heap_regions);
+  virtual void adjust_max_new_size(uint number_of_heap_regions);
 
-  void heap_size_changed(uint new_number_of_heap_regions);
+  virtual void heap_size_changed(uint new_number_of_heap_regions);
   uint min_desired_young_length() const {
     return _min_desired_young_length;
   }
@@ -104,6 +107,8 @@
   bool adaptive_young_list_length() const {
     return _adaptive_size;
   }
+
+  static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
 };
 
 #endif // SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,8 @@
              "G1YoungRemSetSamplingThread monitor",
              true,
              Monitor::_safepoint_check_never),
-    _last_periodic_gc_attempt_s(os::elapsedTime()) {
+    _last_periodic_gc_attempt_s(os::elapsedTime()),
+    _vtime_accum(0) {
   set_name("G1 Young RemSet Sampling");
   create_and_start();
 }
@@ -71,9 +72,9 @@
 
   // Check if load is lower than max.
   double recent_load;
-  if ((G1PeriodicGCSystemLoadThreshold > 0) &&
+  if ((G1PeriodicGCSystemLoadThreshold > 0.0f) &&
       (os::loadavg(&recent_load, 1) == -1 || recent_load > G1PeriodicGCSystemLoadThreshold)) {
-    log_debug(gc, periodic)("Load %1.2f is higher than threshold " UINTX_FORMAT ". Skipping.",
+    log_debug(gc, periodic)("Load %1.2f is higher than threshold %1.2f. Skipping.",
                             recent_load, G1PeriodicGCSystemLoadThreshold);
     return false;
   }
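
The hunk above turns the load threshold into a double; the check itself reduces to the following sketch, with POSIX getloadavg() standing in for os::loadavg() (a failed or above-threshold reading cancels the periodic GC):

#include <cstdlib>  // getloadavg() on glibc/BSD

bool load_permits_periodic_gc(double threshold) {
  if (threshold <= 0.0) {
    return true;   // zero disables the check
  }
  double load_1m;
  if (getloadavg(&load_1m, 1) == -1) {
    return false;  // unknown load: skip the periodic GC
  }
  return load_1m <= threshold;
}
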
--- a/src/hotspot/share/gc/g1/g1_globals.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -311,10 +311,21 @@
           "perform a concurrent GC as periodic GC, otherwise use a STW "    \
           "Full GC.")                                                       \
                                                                             \
-  manageable(uintx, G1PeriodicGCSystemLoadThreshold, 0,                     \
-          "Maximum recent system wide system load as returned by the 1m "   \
-          "value of getloadavg() at which G1 triggers a periodic GC. A "    \
-          "load above this value cancels a given periodic GC. A value of "  \
-          "zero disables this check.")                                      \
+  manageable(double, G1PeriodicGCSystemLoadThreshold, 0.0,                  \
+          "Maximum recent system wide load as returned by the 1m value "    \
+          "of getloadavg() at which G1 triggers a periodic GC. A load "     \
+          "above this value cancels a given periodic GC. A value of zero "  \
+          "disables this check.")                                           \
+          range(0.0, (double)max_uintx)                                     \
+                                                                            \
+  experimental(uintx, G1YoungExpansionBufferPercent, 10,                    \
+               "When heterogenous heap is enabled by AllocateOldGenAt "     \
+               "option, after every GC, young gen is re-sized which "       \
+               "involves system calls to commit/uncommit memory. To "       \
+               "reduce these calls, we keep a buffer of extra regions to "  \
+               "absorb small changes in young gen length. This flag takes " \
+               "the buffer size as an percentage of young gen length")      \
+               range(0, 100)                                                \
+
 
 #endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -877,8 +877,10 @@
 }
 
 G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
+  _top(NULL),
   _bot_part(bot, this),
-  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
+  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
+  _pre_dummy_top(NULL)
 {
 }
 
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -28,6 +28,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+#include "gc/shared/collectorPolicy.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -54,18 +56,25 @@
 };
 
 HeapRegionManager::HeapRegionManager() :
+  _bot_mapper(NULL),
+  _cardtable_mapper(NULL),
+  _card_counts_mapper(NULL),
+  _available_map(mtGC),
+  _num_committed(0),
+  _allocated_heapregions_length(0),
   _regions(), _heap_mapper(NULL),
   _prev_bitmap_mapper(NULL),
   _next_bitmap_mapper(NULL),
-  _bot_mapper(NULL),
-  _cardtable_mapper(NULL),
-  _card_counts_mapper(NULL),
-  _free_list("Free list", new MasterFreeRegionListChecker()),
-  _available_map(mtGC),
-  _num_committed(0),
-  _allocated_heapregions_length(0)
+  _free_list("Free list", new MasterFreeRegionListChecker())
 { }
 
+HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy) {
+  if (policy->is_hetero_heap()) {
+    return new HeterogeneousHeapRegionManager((uint)(policy->max_heap_byte_size() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
+  }
+  return new HeapRegionManager();
+}
+
 void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                G1RegionToSpaceMapper* prev_bitmap,
                                G1RegionToSpaceMapper* next_bitmap,
@@ -514,7 +523,7 @@
 #endif // PRODUCT
 
 HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
-    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
+    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
   assert(n_workers > 0, "Need at least one worker.");
   uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
   memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -26,8 +26,10 @@
 #define SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP
 
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/shared/collectorPolicy.hpp"
 #include "services/memoryUsage.hpp"
 
 class HeapRegion;
@@ -71,17 +73,10 @@
   friend class VMStructs;
   friend class HeapRegionClaimer;
 
-  G1HeapRegionTable _regions;
-
-  G1RegionToSpaceMapper* _heap_mapper;
-  G1RegionToSpaceMapper* _prev_bitmap_mapper;
-  G1RegionToSpaceMapper* _next_bitmap_mapper;
   G1RegionToSpaceMapper* _bot_mapper;
   G1RegionToSpaceMapper* _cardtable_mapper;
   G1RegionToSpaceMapper* _card_counts_mapper;
 
-  FreeRegionList _free_list;
-
   // Each bit in this bitmap indicates that the corresponding region is available
   // for allocation.
   CHeapBitMap _available_map;
@@ -95,11 +90,8 @@
   HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
-  void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
-
   // Pass down commit calls to the VirtualSpace.
   void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
-  void uncommit_regions(uint index, size_t num_regions = 1);
 
   // Notify other data structures about change in the heap layout.
   void update_committed_space(HeapWord* old_end, HeapWord* new_end);
@@ -117,6 +109,16 @@
   // the heap. Returns the length of the sequence found. If this value is zero, no
   // sequence could be found, otherwise res_idx contains the start index of this range.
   uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+
+protected:
+  G1HeapRegionTable _regions;
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  FreeRegionList _free_list;
+
+  void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
+  void uncommit_regions(uint index, size_t num_regions = 1);
   // Allocate a new HeapRegion for the given index.
   HeapRegion* new_heap_region(uint hrm_index);
 #ifdef ASSERT
@@ -127,18 +129,25 @@
   // Empty constructor, we'll initialize it with the initialize() method.
   HeapRegionManager();
 
-  void initialize(G1RegionToSpaceMapper* heap_storage,
-                  G1RegionToSpaceMapper* prev_bitmap,
-                  G1RegionToSpaceMapper* next_bitmap,
-                  G1RegionToSpaceMapper* bot,
-                  G1RegionToSpaceMapper* cardtable,
-                  G1RegionToSpaceMapper* card_counts);
+  static HeapRegionManager* create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy);
+
+  virtual void initialize(G1RegionToSpaceMapper* heap_storage,
+                          G1RegionToSpaceMapper* prev_bitmap,
+                          G1RegionToSpaceMapper* next_bitmap,
+                          G1RegionToSpaceMapper* bot,
+                          G1RegionToSpaceMapper* cardtable,
+                          G1RegionToSpaceMapper* card_counts);
+
+  // Prepare heap regions before and after full collection.
+  // Nothing to be done in this class.
+  virtual void prepare_for_full_collection_start() {}
+  virtual void prepare_for_full_collection_end() {}
 
   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
   // the heap from the lowest address, this region (and its associated data
   // structures) are available and we do not need to check further.
-  HeapRegion* get_dummy_region() { return new_heap_region(0); }
+  virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -167,8 +176,8 @@
     _free_list.add_ordered(list);
   }
 
-  HeapRegion* allocate_free_region(bool is_old) {
-    HeapRegion* hr = _free_list.remove_region(is_old);
+  virtual HeapRegion* allocate_free_region(HeapRegionType type) {
+    HeapRegion* hr = _free_list.remove_region(!type.is_young());
 
     if (hr != NULL) {
       assert(hr->next() == NULL, "Single region should not have next");
@@ -202,6 +211,9 @@
   // Return the maximum number of regions in the heap.
   uint max_length() const { return (uint)_regions.length(); }
 
+  // Return the maximum number of regions that the heap can expand to.
+  virtual uint max_expandable_length() const { return (uint)_regions.length(); }
+
   MemoryUsage get_auxiliary_data_memory_usage() const;
 
   MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
@@ -210,26 +222,26 @@
   // HeapRegions, or re-use existing ones. Returns the number of regions the
   // sequence was expanded by. If a HeapRegion allocation fails, the resulting
   // number of regions might be smaller than what's desired.
-  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
+  virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers);
 
   // Makes sure that the regions from start to start+num_regions-1 are available
   // for allocation. Returns the number of regions that were committed to achieve
   // this.
-  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
+  virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
 
   // Find a contiguous set of empty regions of length num. Returns the start index of
   // that set, or G1_NO_HRM_INDEX.
-  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+  virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
   // Find a contiguous set of empty or unavailable regions of length num. Returns the
   // start index of that set, or G1_NO_HRM_INDEX.
-  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
+  virtual uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
 
   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 
   // Find the highest free or uncommitted region in the reserved heap,
   // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
   // Set the 'expanded' boolean true if a new region was committed.
-  uint find_highest_free(bool* expanded);
+  virtual uint find_highest_free(bool* expanded);
 
   // Allocate the regions that contain the address range specified, committing the
   // regions if necessary. Return false if any of the regions is already committed
@@ -244,13 +256,13 @@
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.
-  uint shrink_by(uint num_regions_to_remove);
+  virtual uint shrink_by(uint num_regions_to_remove);
 
   // Uncommit a number of regions starting at the specified index, which must be available,
   // empty, and free.
   void shrink_at(uint index, size_t num_regions);
 
-  void verify();
+  virtual void verify();
 
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -234,6 +234,21 @@
   verify_optional();
 }
 
+uint FreeRegionList::num_of_regions_in_range(uint start, uint end) const {
+  HeapRegion* cur = _head;
+  uint num = 0;
+  while (cur != NULL) {
+    uint index = cur->hrm_index();
+    if (index > end) {
+      break;
+    } else if (index >= start) {
+      num++;
+    }
+    cur = cur->next();
+  }
+  return num;
+}
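
num_of_regions_in_range() above relies on the free list being ordered by hrm_index, which is what justifies the early break. The same loop over a plain singly linked list (illustrative types):

struct Node { unsigned index; Node* next; };

unsigned count_in_range(const Node* head, unsigned start, unsigned end) {
  unsigned num = 0;
  for (const Node* cur = head; cur != 0; cur = cur->next) {
    if (cur->index > end) {
      break;                  // list is ordered: nothing further can match
    } else if (cur->index >= start) {
      num++;
    }
  }
  return num;
}
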
+
 void FreeRegionList::verify() {
   // See comment in HeapRegionSetBase::verify() about MT safety and
   // verification.
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -194,6 +194,8 @@
   void remove_starting_at(HeapRegion* first, uint num_regions);
 
   virtual void verify();
+
+  uint num_of_regions_in_range(uint start, uint end) const;
 };
 
 // Iterator class that provides a convenient way to iterate over the
--- a/src/hotspot/share/gc/g1/heapRegionType.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/heapRegionType.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,11 @@
 #include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "gc/g1/heapRegionType.hpp"
 
+const HeapRegionType HeapRegionType::Eden      = HeapRegionType(EdenTag);
+const HeapRegionType HeapRegionType::Survivor  = HeapRegionType(SurvTag);
+const HeapRegionType HeapRegionType::Old       = HeapRegionType(OldTag);
+const HeapRegionType HeapRegionType::Humongous = HeapRegionType(StartsHumongousTag);
+
 bool HeapRegionType::is_valid(Tag tag) {
   switch (tag) {
     case FreeTag:
--- a/src/hotspot/share/gc/g1/heapRegionType.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/heapRegionType.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -117,6 +117,9 @@
     _tag = tag;
   }
 
+  // Private constructor used by the static constants below.
+  HeapRegionType(Tag t) : _tag(t) { hrt_assert_is_valid(_tag); }
+
 public:
   // Queries
 
@@ -186,6 +189,11 @@
   G1HeapRegionTraceType::Type get_trace_type();
 
   HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
+
+  static const HeapRegionType Eden;
+  static const HeapRegionType Survivor;
+  static const HeapRegionType Old;
+  static const HeapRegionType Humongous;
 };
 
 #endif // SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP
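
The static constants added above give callers a typed handle, used, for example, by the new allocate_free_region(HeapRegionType) signature seen earlier. A stand-in sketch of the pattern, a private tag constructor plus named static instances:

#include <cstdio>

class RegionType {
  enum Tag { FreeTag, EdenTag, SurvTag, OldTag };
  Tag _tag;
  RegionType(Tag t) : _tag(t) {}
public:
  bool is_young() const { return _tag == EdenTag || _tag == SurvTag; }
  static const RegionType Eden;
  static const RegionType Old;
};

// Definitions may use the private constructor: they are members of the class.
const RegionType RegionType::Eden = RegionType(EdenTag);
const RegionType RegionType::Old  = RegionType(OldTag);

int main() {
  printf("Eden is_young: %d\n", RegionType::Eden.is_young());  // prints 1
  return 0;
}
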
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegionManager.inline.hpp"
+#include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+#include "memory/allocation.hpp"
+
+
+HeterogeneousHeapRegionManager* HeterogeneousHeapRegionManager::manager() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(g1h != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
+
+  HeapRegionManager* hrm = g1h->hrm();
+  assert(hrm != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
+  return (HeterogeneousHeapRegionManager*)hrm;
+}
+
+void HeterogeneousHeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
+                                                G1RegionToSpaceMapper* prev_bitmap,
+                                                G1RegionToSpaceMapper* next_bitmap,
+                                                G1RegionToSpaceMapper* bot,
+                                                G1RegionToSpaceMapper* cardtable,
+                                                G1RegionToSpaceMapper* card_counts) {
+  HeapRegionManager::initialize(heap_storage, prev_bitmap, next_bitmap, bot, cardtable, card_counts);
+
+  // We commit bitmap for all regions during initialization and mark the bitmap space as special.
+  // This allows regions to be un-committed while concurrent-marking threads are accessing the bitmap concurrently.
+  _prev_bitmap_mapper->commit_and_set_special();
+  _next_bitmap_mapper->commit_and_set_special();
+}
+
+// expand_by() is called to grow the heap. We expand into nv-dimm first.
+// Dram regions are committed later, as needed, during mutator region allocation or
+// when the young list target length is determined after a gc cycle.
+uint HeterogeneousHeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
+  uint num_regions_possible = total_regions_committed() >= max_expandable_length() ? 0 : max_expandable_length() - total_regions_committed();
+  uint num_expanded = expand_nvdimm(MIN2(num_regions, num_regions_possible), pretouch_workers);
+  return num_expanded;
+}
+
+// Expands the heap starting from the 'start' index. The question is whether we should expand from one memory
+// (e.g. nvdimm) into another (e.g. dram). Since expand_at() is called for humongous allocation, where 'start' is
+// in nv-dimm, we only allocate regions in the same kind of memory as 'start'.
+uint HeterogeneousHeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
+  if (num_regions == 0) {
+    return 0;
+  }
+  uint target_num_regions = MIN2(num_regions, max_expandable_length() - total_regions_committed());
+  uint end = is_in_nvdimm(start) ? end_index_of_nvdimm() : end_index_of_dram();
+
+  uint num_expanded = expand_in_range(start, end, target_num_regions, pretouch_workers);
+  assert(total_regions_committed() <= max_expandable_length(), "must be");
+  return num_expanded;
+}
+
+// This function ensures that there are 'expected_num_regions' committed regions in dram.
+// If new regions are committed, it un-commits that many regions from nv-dimm.
+// If there are already more regions committed in dram, extra regions are un-committed.
+void HeterogeneousHeapRegionManager::adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers) {
+
+  // Release back the extra regions allocated in evacuation failure scenario.
+  if (_no_borrowed_regions > 0) {
+    _no_borrowed_regions -= shrink_dram(_no_borrowed_regions);
+    _no_borrowed_regions -= shrink_nvdimm(_no_borrowed_regions);
+  }
+
+  if (expected_num_regions > free_list_dram_length()) {
+    // If we are going to expand DRAM, we expand a little more so that we can absorb small variations in Young gen sizing.
+    uint targeted_dram_regions = expected_num_regions * (1 + (double)G1YoungExpansionBufferPercent / 100);
+    uint to_be_made_available = targeted_dram_regions - free_list_dram_length();
+
+#ifdef ASSERT
+    uint total_committed_before = total_regions_committed();
+#endif
+    uint can_be_made_available = shrink_nvdimm(to_be_made_available);
+    uint ret = expand_dram(can_be_made_available, pretouch_workers);
+#ifdef ASSERT
+    assert(ret == can_be_made_available, "should be equal");
+    assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+  } else {
+    uint to_be_released = free_list_dram_length() - expected_num_regions;
+    // If the number of extra DRAM regions is small, do not shrink.
+    if (to_be_released < expected_num_regions * G1YoungExpansionBufferPercent / 100) {
+      return;
+    }
+
+#ifdef ASSERT
+    uint total_committed_before = total_regions_committed();
+#endif
+    uint ret = shrink_dram(to_be_released);
+    assert(ret == to_be_released, "Should be able to shrink by given amount");
+    ret = expand_nvdimm(to_be_released, pretouch_workers);
+#ifdef ASSERT
+    assert(ret == to_be_released, "Should be able to expand by given amount");
+    assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+  }
+}
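To make the G1YoungExpansionBufferPercent arithmetic above concrete: assuming the flag is 10 and expected_num_regions is 100, expansion targets 110 dram regions (a 10% cushion against young-gen sizing jitter), while the shrink path bails out unless more than 10 regions are surplus. A small self-contained check of that arithmetic, with both values assumed:

#include <cstdio>

int main() {
  unsigned expected_num_regions = 100;
  unsigned buffer_percent = 10;  // assumed value of G1YoungExpansionBufferPercent
  // Same expression shapes as adjust_dram_regions() above.
  unsigned targeted_dram_regions = expected_num_regions * (1 + (double)buffer_percent / 100);
  unsigned shrink_threshold = expected_num_regions * buffer_percent / 100;
  printf("expand to %u dram regions; shrink only if surplus exceeds %u\n",
         targeted_dram_regions, shrink_threshold);
  return 0;
}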
+
+uint HeterogeneousHeapRegionManager::total_regions_committed() const {
+  return num_committed_dram() + num_committed_nvdimm();
+}
+
+uint HeterogeneousHeapRegionManager::num_committed_dram() const {
+  // This class does not keep count of committed regions in dram and nv-dimm.
+  // G1RegionToHeteroSpaceMapper keeps this information.
+  return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_dram();
+}
+
+uint HeterogeneousHeapRegionManager::num_committed_nvdimm() const {
+  // See comment for num_committed_dram()
+  return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_nvdimm();
+}
+
+// Return maximum number of regions that heap can expand to.
+uint HeterogeneousHeapRegionManager::max_expandable_length() const {
+  return _max_regions;
+}
+
+uint HeterogeneousHeapRegionManager::find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx <= (max_length() + 1), "checking");
+
+  uint num_regions = 0;
+
+  uint cur = start_idx;
+  while (cur <= end_idx && is_available(cur)) {
+    cur++;
+  }
+  if (cur == end_idx + 1) {
+    return num_regions;
+  }
+  *res_idx = cur;
+  while (cur <= end_idx && !is_available(cur)) {
+    cur++;
+  }
+  num_regions = cur - *res_idx;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+    assert(!is_available(i), "just checking");
+  }
+  assert(cur == end_idx + 1 || num_regions == 0 || is_available(cur),
+    "The region at the current position %u must be available or at the end", cur);
+#endif
+  return num_regions;
+}
+
+uint HeterogeneousHeapRegionManager::expand_dram(uint num_regions, WorkGang* pretouch_workers) {
+  return expand_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, pretouch_workers);
+}
+
+uint HeterogeneousHeapRegionManager::expand_nvdimm(uint num_regions, WorkGang* pretouch_workers) {
+  return expand_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, pretouch_workers);
+}
+
+// Follows the same logic as expand_at() from HeapRegionManager.
+uint HeterogeneousHeapRegionManager::expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_gang) {
+
+  uint so_far = 0;
+  uint chunk_start = 0;
+  uint num_last_found = 0;
+  while (so_far < num_regions &&
+         (num_last_found = find_unavailable_in_range(start, end, &chunk_start)) > 0) {
+    uint to_commit = MIN2(num_regions - so_far, num_last_found);
+    make_regions_available(chunk_start, to_commit, pretouch_gang);
+    so_far += to_commit;
+    start = chunk_start + to_commit + 1;
+  }
+
+  return so_far;
+}
+
+// Shrink in the range of indexes which are reserved for dram.
+uint HeterogeneousHeapRegionManager::shrink_dram(uint num_regions, bool update_free_list) {
+  return shrink_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, update_free_list);
+}
+
+// Shrink in the range of indexes which are reserved for nv-dimm.
+uint HeterogeneousHeapRegionManager::shrink_nvdimm(uint num_regions, bool update_free_list) {
+  return shrink_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, update_free_list);
+}
+
+// Find empty regions in given range, un-commit them and return the count.
+uint HeterogeneousHeapRegionManager::shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list) {
+
+  if (num_regions == 0) {
+    return 0;
+  }
+  uint so_far = 0;
+  uint idx_last_found = 0;
+  uint num_last_found;
+  while (so_far < num_regions &&
+         (num_last_found = find_empty_in_range_reverse(start, end, &idx_last_found)) > 0) {
+    uint to_uncommit = MIN2(num_regions - so_far, num_last_found);
+    if (update_free_list) {
+      _free_list.remove_starting_at(at(idx_last_found + num_last_found - to_uncommit), to_uncommit);
+    }
+    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
+    so_far += to_uncommit;
+    end = idx_last_found;
+  }
+  return so_far;
+}
+
+uint HeterogeneousHeapRegionManager::find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx) {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx < max_length(), "checking");
+  guarantee(end_idx < max_length(), "checking");
+  if (start_idx > end_idx) {
+    return 0;
+  }
+
+  uint num_regions_found = 0;
+
+  jlong cur = end_idx;
+  while (cur >= start_idx && !(is_available(cur) && at(cur)->is_empty())) {
+    cur--;
+  }
+  if (cur == start_idx - 1) {
+    return num_regions_found;
+  }
+  jlong old_cur = cur;
+  // cur indexes the first empty region
+  while (cur >= start_idx && is_available(cur) && at(cur)->is_empty()) {
+    cur--;
+  }
+  *res_idx = cur + 1;
+  num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+    assert(at(i)->is_empty(), "just checking");
+  }
+#endif
+  return num_regions_found;
+}
+
+HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(HeapRegionType type) {
+
+  // We want to prevent mutators from proceeding when we have borrowed regions from the last collection. This
+  // will force a full collection to remedy the situation.
+  // Free region requests from GC threads can proceed.
+  if (type.is_eden() || type.is_humongous()) {
+    if (has_borrowed_regions()) {
+      return NULL;
+    }
+  }
+
+  // Old and humongous regions are allocated from nv-dimm; eden and survivor regions are allocated from dram.
+  // Assumption: dram regions take the higher indexes.
+  bool from_nvdimm = type.is_old() || type.is_humongous();
+  bool from_head = from_nvdimm;
+  HeapRegion* hr = _free_list.remove_region(from_head);
+
+  if (hr != NULL && ( (from_nvdimm && !is_in_nvdimm(hr->hrm_index())) || (!from_nvdimm && !is_in_dram(hr->hrm_index())) ) ) {
+    _free_list.add_ordered(hr);
+    hr = NULL;
+  }
+
+#ifdef ASSERT
+  uint total_committed_before = total_regions_committed();
+#endif
+
+  if (hr == NULL) {
+    if (!from_nvdimm) {
+      uint ret = shrink_nvdimm(1);
+      if (ret == 1) {
+        ret = expand_dram(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    }
+    else { /* old or humongous */
+      uint ret = shrink_dram(1);
+      if (ret == 1) {
+        ret = expand_nvdimm(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    }
+  }
+#ifdef ASSERT
+  assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+
+  // When an old region is requested (which happens during a collection pause) and we can't find any empty region
+  // in the set of available regions (an evacuation failure scenario), we borrow (or pre-allocate) an unavailable region
+  // from nv-dimm. This region is used to evacuate surviving objects from eden, survivor or old regions.
+  if (hr == NULL && type.is_old()) {
+    hr = borrow_old_region_for_gc();
+  }
+
+  if (hr != NULL) {
+    assert(hr->next() == NULL, "Single region should not have next");
+    assert(is_available(hr->hrm_index()), "Must be committed");
+  }
+  return hr;
+}
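The shrink-one/expand-one sequence above is what preserves guarantee 1. from the class comment: when a dram region is needed but none is free, exactly one nv-dimm region is un-committed before one dram region is committed (and symmetrically for old requests), so total_regions_committed() never changes. A counters-only sketch of that discipline, with no real region management behind it:

#include <cassert>

struct Committed {
  unsigned dram;
  unsigned nvdimm;
  // Move one region's worth of commitment from nv-dimm to dram,
  // preserving the total (the shrink_nvdimm(1)/expand_dram(1) pair above).
  bool swap_nvdimm_to_dram() {
    if (nvdimm == 0) return false;  // nothing to un-commit
    nvdimm--;
    dram++;
    return true;
  }
};

int main() {
  Committed c = {4, 6};
  unsigned total_before = c.dram + c.nvdimm;
  c.swap_nvdimm_to_dram();
  assert(c.dram + c.nvdimm == total_before);  // invariant: total committed unchanged
  return 0;
}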
+
+uint HeterogeneousHeapRegionManager::find_contiguous_only_empty(size_t num) {
+  if (has_borrowed_regions()) {
+    return G1_NO_HRM_INDEX;
+  }
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, true);
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous_empty_or_unavailable(size_t num) {
+  if (has_borrowed_regions()) {
+    return G1_NO_HRM_INDEX;
+  }
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, false);
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous(size_t start, size_t end, size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = (uint)start;
+  uint length_unavailable = 0;
+
+  while (length_found < num && cur <= end) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      if (!is_available(cur)) {
+        if (shrink_dram(1) == 1) {
+          uint ret = expand_in_range(cur, cur, 1, NULL);
+          assert(ret == 1, "We should be able to expand at this index");
+        } else {
+          length_unavailable++;
+        }
+      }
+      length_found++;
+    }
+    else {
+      // This region is not a candidate. The next region is the next possible one.
+      found = cur + 1;
+      length_found = 0;
+    }
+    cur++;
+  }
+
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
+    }
+    if (!empty_only && length_unavailable > (max_expandable_length() - total_regions_committed())) {
+      // If 'length_unavailable' regions were to be made available, we would exceed the maximum number of regions.
+      return G1_NO_HRM_INDEX;
+    }
+    return found;
+  }
+  else {
+    return G1_NO_HRM_INDEX;
+  }
+}
+
+uint HeterogeneousHeapRegionManager::find_highest_free(bool* expanded) {
+  // Loop downwards from the highest dram region index, looking for an
+  // entry which is either free or not yet committed. If not yet
+  // committed, call expand_at() for that index.
+  uint curr = end_index_of_dram();
+  while (true) {
+    HeapRegion *hr = _regions.get_by_index(curr);
+    if (hr == NULL && !(total_regions_committed() < _max_regions)) {
+      uint res = shrink_nvdimm(1);
+      if (res == 1) {
+        res = expand_in_range(curr, curr, 1, NULL);
+        assert(res == 1, "We should be able to expand since shrink was successful");
+        *expanded = true;
+        return curr;
+      }
+    }
+    else {
+      if (hr->is_free()) {
+        *expanded = false;
+        return curr;
+      }
+    }
+    if (curr == start_index_of_dram()) {
+      return G1_NO_HRM_INDEX;
+    }
+    curr--;
+  }
+}
+
+// We need to override this since region 0, which serves as the dummy region in the base class, may not be available here.
+// This is a corner case when the number of regions is small. When adaptive sizing is used, the initial heap size
+// could be just one region. This region is committed in dram to be used for the young generation, leaving region 0 (which is in nvdimm)
+// unavailable.
+HeapRegion* HeterogeneousHeapRegionManager::get_dummy_region() {
+  uint curr = 0;
+
+  while (curr < _regions.length()) {
+    if (is_available(curr)) {
+      return new_heap_region(curr);
+    }
+    curr++;
+  }
+  assert(false, "We should always find a region available for the dummy region");
+  return NULL;
+}
+
+// First shrink in dram, then in nv-dimm.
+uint HeterogeneousHeapRegionManager::shrink_by(uint num_regions) {
+  // This call is made at the end of a full collection. Before making this call the region sets are torn down (tear_down_region_sets()).
+  // So the shrink() calls below do not need to remove uncommitted regions from the free list.
+  uint ret = shrink_dram(num_regions, false /* update_free_list */);
+  ret += shrink_nvdimm(num_regions - ret, false /* update_free_list */);
+  return ret;
+}
+
+void HeterogeneousHeapRegionManager::verify() {
+  HeapRegionManager::verify();
+}
+
+uint HeterogeneousHeapRegionManager::free_list_dram_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_dram(), end_index_of_dram());
+}
+
+uint HeterogeneousHeapRegionManager::free_list_nvdimm_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_nvdimm(), end_index_of_nvdimm());
+}
+
+bool HeterogeneousHeapRegionManager::is_in_nvdimm(uint index) const {
+  return index >= start_index_of_nvdimm() && index <= end_index_of_nvdimm();
+}
+
+bool HeterogeneousHeapRegionManager::is_in_dram(uint index) const {
+  return index >= start_index_of_dram() && index <= end_index_of_dram();
+}
+
+// We have to make sure the full collection copies all surviving objects to NV-DIMM.
+// We might not have enough regions in nvdimm_set, so we need to make more regions on NV-DIMM available for the full collection.
+// Note: by doing this we are breaking the invariant that the total number of committed regions is equal to the current heap size.
+// After the full collection ends, we re-establish this invariant by freeing DRAM regions.
+void HeterogeneousHeapRegionManager::prepare_for_full_collection_start() {
+  _total_commited_before_full_gc = total_regions_committed() - _no_borrowed_regions;
+  _no_borrowed_regions = 0;
+  expand_nvdimm(num_committed_dram(), NULL);
+  remove_all_free_regions();
+}
+
+// We need to bring the total committed region count back to what it was before the full collection started.
+// Unless we are close to OOM, all regular (not pinned) regions in DRAM should be free.
+// We shrink all free regions in DRAM and, if needed, from NV-DIMM (when there are pinned DRAM regions).
+// If we can't bring the committed count back to _total_commited_before_full_gc, we keep the extra count in _no_borrowed_regions.
+// When this GC finishes, new regions won't be allocated since has_borrowed_regions() is true. The VM will be forced to re-try GC
+// with soft references cleared, followed by an OOM error in the worst case.
+void HeterogeneousHeapRegionManager::prepare_for_full_collection_end() {
+  uint shrink_size = total_regions_committed() - _total_commited_before_full_gc;
+  uint so_far = 0;
+  uint idx_last_found = 0;
+  uint num_last_found;
+  uint end = (uint)_regions.length() - 1;
+  while (so_far < shrink_size &&
+         (num_last_found = find_empty_in_range_reverse(0, end, &idx_last_found)) > 0) {
+    uint to_uncommit = MIN2(shrink_size - so_far, num_last_found);
+    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
+    so_far += to_uncommit;
+    end = idx_last_found;
+  }
+  // See comment above the function.
+  _no_borrowed_regions = shrink_size - so_far;
+}
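A worked example of the accounting above, with the numbers assumed: say 100 regions were committed before the full collection and evacuation-time expansion raised that to 120. The loop then tries to un-commit 20 empty regions; if the reverse scan only finds 15 (for instance because some DRAM regions are pinned), _no_borrowed_regions ends at 5, which blocks mutator region allocation until the debt is repaid.

#include <algorithm>
#include <cassert>

int main() {
  unsigned committed_before = 100;   // _total_commited_before_full_gc
  unsigned committed_now    = 120;   // total_regions_committed() after expansion
  unsigned shrink_size = committed_now - committed_before;   // 20
  unsigned empty_found = 15;         // what find_empty_in_range_reverse() yields
  unsigned so_far = std::min(shrink_size, empty_found);
  unsigned no_borrowed_regions = shrink_size - so_far;       // 5, carried as debt
  assert(no_borrowed_regions == 5);
  return 0;
}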
+
+uint HeterogeneousHeapRegionManager::start_index_of_dram() const { return _max_regions;}
+
+uint HeterogeneousHeapRegionManager::end_index_of_dram() const { return 2*_max_regions - 1; }
+
+uint HeterogeneousHeapRegionManager::start_index_of_nvdimm() const { return 0; }
+
+uint HeterogeneousHeapRegionManager::end_index_of_nvdimm() const { return _max_regions - 1; }
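The four accessors above pin down the index layout this manager assumes: nv-dimm owns indexes [0, _max_regions - 1] and dram owns [_max_regions, 2 * _max_regions - 1], so the region table spans twice the maximum heap size in regions. A quick illustration with _max_regions assumed to be 8:

#include <cassert>

int main() {
  const unsigned max_regions = 8;  // stands in for _max_regions
  // nv-dimm: [0, 7], dram: [8, 15], mirroring the accessors above.
  auto is_in_nvdimm = [&](unsigned i) { return i <= max_regions - 1; };
  auto is_in_dram   = [&](unsigned i) { return i >= max_regions && i <= 2 * max_regions - 1; };
  assert(is_in_nvdimm(3) && !is_in_dram(3));
  assert(is_in_dram(10) && !is_in_nvdimm(10));
  return 0;
}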
+
+// This function is called when there are no free nv-dimm regions.
+// It borrows a region from the set of unavailable regions in nv-dimm for GC purposes.
+HeapRegion* HeterogeneousHeapRegionManager::borrow_old_region_for_gc() {
+  assert(free_list_nvdimm_length() == 0, "this function should be called only when there are no nv-dimm regions in the free list");
+
+  uint ret = expand_nvdimm(1, NULL);
+  if (ret != 1) {
+    return NULL;
+  }
+  HeapRegion* hr = _free_list.remove_region(true /*from_head*/);
+  assert(is_in_nvdimm(hr->hrm_index()), "allocated region should be in nv-dimm");
+  _no_borrowed_regions++;
+  return hr;
+}
+
+bool HeterogeneousHeapRegionManager::has_borrowed_regions() const {
+  return _no_borrowed_regions > 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
+#define SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
+
+#include "gc/g1/heapRegionManager.hpp"
+
+// This class manages heap regions on heterogeneous memory comprising dram and nv-dimm.
+// Regions in dram (dram_set) are used for young objects and archive regions (CDS).
+// Regions in nv-dimm (nvdimm_set) are used for old objects and humongous objects.
+// At any point there are some regions committed on dram and some on nv-dimm with the following guarantees:
+//   1. The total number of regions committed in dram and nv-dimm equals the current size of heap.
+//   2. Consequently, total number of regions committed is less than or equal to Xmx.
+//   3. To maintain the guarantee stated by 1., whenever one set grows (new regions committed), the other set shrinks (regions un-committed).
+//      3a. If more dram regions are needed (young generation expansion), corresponding number of regions in nv-dimm are un-committed.
+//      3b. When old generation or humongous set grows, and new regions need to be committed to nv-dimm, corresponding number of regions
+//            are un-committed in dram.
+class HeterogeneousHeapRegionManager : public HeapRegionManager {
+  const uint _max_regions;
+  uint _max_dram_regions;
+  uint _max_nvdimm_regions;
+  uint _start_index_of_nvdimm;
+  uint _total_commited_before_full_gc;
+  uint _no_borrowed_regions;
+
+  uint total_regions_committed() const;
+  uint num_committed_dram() const;
+  uint num_committed_nvdimm() const;
+
+  // Similar to the find_unavailable_from_idx() function in the base class; the difference is that this function searches in the range [start, end].
+  uint find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const;
+
+  // Expand into dram. Maintains the invariant that the total number of committed regions does not exceed the current heap size.
+  uint expand_dram(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand into nv-dimm.
+  uint expand_nvdimm(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand by finding unavailable regions in [start, end] range.
+  uint expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_workers);
+
+  // Shrink dram set of regions.
+  uint shrink_dram(uint num_regions, bool update_free_list = true);
+
+  // Shrink nv-dimm set of regions.
+  uint shrink_nvdimm(uint num_regions, bool update_free_list = true);
+
+  // Shrink regions from [start, end] range.
+  uint shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list = true);
+
+  // Similar to find_empty_from_idx_reverse() in the base class, except that it searches within a range.
+  uint find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx);
+
+  // Similar to find_contiguous() in the base class, restricted to the [start, end] range.
+  uint find_contiguous(size_t start, size_t end, size_t num, bool empty_only);
+
+  // This function is called when there are no free nv-dimm regions.
+  // It borrows a region from the set of unavailable regions in nv-dimm for GC purposes.
+  HeapRegion* borrow_old_region_for_gc();
+
+  uint free_list_dram_length() const;
+  uint free_list_nvdimm_length() const;
+
+  // Is the region with the given index in nv-dimm?
+  bool is_in_nvdimm(uint index) const;
+  bool is_in_dram(uint index) const;
+
+public:
+
+  // Empty constructor, we'll initialize it with the initialize() method.
+  HeterogeneousHeapRegionManager(uint num_regions) : _max_regions(num_regions), _max_dram_regions(0),
+                                                     _max_nvdimm_regions(0), _start_index_of_nvdimm(0),
+                                                     _total_commited_before_full_gc(0), _no_borrowed_regions(0)
+  {}
+
+  static HeterogeneousHeapRegionManager* manager();
+
+  virtual void initialize(G1RegionToSpaceMapper* heap_storage,
+                          G1RegionToSpaceMapper* prev_bitmap,
+                          G1RegionToSpaceMapper* next_bitmap,
+                          G1RegionToSpaceMapper* bot,
+                          G1RegionToSpaceMapper* cardtable,
+                          G1RegionToSpaceMapper* card_counts);
+
+  uint start_index_of_nvdimm() const;
+  uint start_index_of_dram() const;
+  uint end_index_of_nvdimm() const;
+  uint end_index_of_dram() const;
+
+  // Override.
+  HeapRegion* get_dummy_region();
+
+  // Adjust dram_set to provision 'expected_num_regions' regions.
+  void adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers);
+
+  // Prepare heap regions before and after full collection.
+  void prepare_for_full_collection_start();
+  void prepare_for_full_collection_end();
+
+  virtual HeapRegion* allocate_free_region(HeapRegionType type);
+
+  // Return maximum number of regions that heap can expand to.
+  uint max_expandable_length() const;
+
+  // Override. Expand in nv-dimm.
+  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
+
+  // Override.
+  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
+
+  // Override. This function is called for humongous allocation, so we need to find empty regions in nv-dimm.
+  uint find_contiguous_only_empty(size_t num);
+
+  // Override. This function is called for humongous allocation, so we need to find empty or unavailable regions in nv-dimm.
+  uint find_contiguous_empty_or_unavailable(size_t num);
+
+  // Overrides base class implementation to find highest free region in dram.
+  uint find_highest_free(bool* expanded);
+
+  // Override. This function is called to shrink the heap; we shrink in dram first, then in nv-dimm.
+  uint shrink_by(uint num_regions_to_remove);
+
+  bool has_borrowed_regions() const;
+
+  void verify();
+};
+
+#endif // SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
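For orientation: G1 code reaches this manager through the static manager() accessor defined in the .cpp above, which downcasts the heap's HeapRegionManager; that is sound only because this subclass is installed whenever the heterogeneous heap mode is selected. A hypothetical call-site sketch (the helper name and parameters are illustrative, not part of this changeset; HotSpot types are assumed in scope):

// Resize the dram (young) region pool to a new target after a GC.
static void resize_young_after_gc(uint young_target_regions, WorkGang* workers) {
  HeterogeneousHeapRegionManager* hrm = HeterogeneousHeapRegionManager::manager();
  hrm->adjust_dram_regions(young_target_regions, workers);
}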
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -53,7 +53,7 @@
   nonstatic_field(HeapRegionManager, _num_committed,    uint)                 \
                                                                               \
   nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
-  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager)    \
+  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager*)   \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
   nonstatic_field(G1CollectedHeap, _archive_set,        HeapRegionSetBase)    \
--- a/src/hotspot/share/gc/parallel/adjoiningGenerations.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerations.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
+#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
 #include "gc/parallel/generationSizer.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
@@ -40,8 +41,8 @@
 AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
                                            GenerationSizer* policy,
                                            size_t alignment) :
-  _virtual_spaces(old_young_rs, policy->min_old_size(),
-                  policy->min_young_size(), alignment) {
+  _virtual_spaces(new AdjoiningVirtualSpaces(old_young_rs, policy->min_old_size(),
+                                             policy->min_young_size(), alignment)) {
   size_t init_low_byte_size = policy->initial_old_size();
   size_t min_low_byte_size = policy->min_old_size();
   size_t max_low_byte_size = policy->max_old_size();
@@ -61,21 +62,21 @@
     // generation.
 
     // Does the actual creation of the virtual spaces
-    _virtual_spaces.initialize(max_low_byte_size,
-                               init_low_byte_size,
-                               init_high_byte_size);
+    _virtual_spaces->initialize(max_low_byte_size,
+                                init_low_byte_size,
+                                init_high_byte_size);
 
     // Place the young gen at the high end.  Passes in the virtual space.
-    _young_gen = new ASPSYoungGen(_virtual_spaces.high(),
-                                  _virtual_spaces.high()->committed_size(),
+    _young_gen = new ASPSYoungGen(_virtual_spaces->high(),
+                                  _virtual_spaces->high()->committed_size(),
                                   min_high_byte_size,
-                                  _virtual_spaces.high_byte_size_limit());
+                                  _virtual_spaces->high_byte_size_limit());
 
     // Place the old gen at the low end. Passes in the virtual space.
-    _old_gen = new ASPSOldGen(_virtual_spaces.low(),
-                              _virtual_spaces.low()->committed_size(),
+    _old_gen = new ASPSOldGen(_virtual_spaces->low(),
+                              _virtual_spaces->low()->committed_size(),
                               min_low_byte_size,
-                              _virtual_spaces.low_byte_size_limit(),
+                              _virtual_spaces->low_byte_size_limit(),
                               "old", 1);
 
     young_gen()->initialize_work();
@@ -92,8 +93,9 @@
   } else {
 
     // Layout the reserved space for the generations.
+    // If OldGen is allocated on nv-dimm, we need to split the reservation (this is required on Windows).
     ReservedSpace old_rs   =
-      virtual_spaces()->reserved_space().first_part(max_low_byte_size);
+      virtual_spaces()->reserved_space().first_part(max_low_byte_size, policy->is_hetero_heap() /* split */);
     ReservedSpace heap_rs  =
       virtual_spaces()->reserved_space().last_part(max_low_byte_size);
     ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
@@ -117,6 +119,8 @@
   }
 }
 
+AdjoiningGenerations::AdjoiningGenerations() { }
+
 size_t AdjoiningGenerations::reserved_byte_size() {
   return virtual_spaces()->reserved_space().size();
 }
@@ -279,3 +283,13 @@
     }
   }
 }
+
+AdjoiningGenerations* AdjoiningGenerations::create_adjoining_generations(ReservedSpace old_young_rs,
+                                                                         GenerationSizer* policy,
+                                                                         size_t alignment) {
+  if (policy->is_hetero_heap() && UseAdaptiveGCBoundary) {
+    return new AdjoiningGenerationsForHeteroHeap(old_young_rs, policy, alignment);
+  } else {
+    return new AdjoiningGenerations(old_young_rs, policy, alignment);
+  }
+}
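create_adjoining_generations() above is a static factory: callers ask for an AdjoiningGenerations and receive either the base class or the heterogeneous subclass depending on policy, without ever naming the subclass. A minimal standalone sketch of the same shape (all names here are illustrative):

#include <memory>

struct Policy {
  bool hetero;
  bool adaptive_boundary;
};

struct Generations {
  virtual ~Generations() {}
};
struct HeteroGenerations : Generations {};

// Mirrors the factory above: the policy picks the concrete type,
// and the decision stays hidden from callers.
std::unique_ptr<Generations> create(const Policy& p) {
  if (p.hetero && p.adaptive_boundary) {
    return std::unique_ptr<Generations>(new HeteroGenerations());
  }
  return std::unique_ptr<Generations>(new Generations());
}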
--- a/src/hotspot/share/gc/parallel/adjoiningGenerations.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerations.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,27 +43,29 @@
 class AdjoiningGenerations : public CHeapObj<mtGC> {
   friend class VMStructs;
  private:
-  // The young generation and old generation, respectively
-  PSYoungGen* _young_gen;
-  PSOldGen* _old_gen;
-
-  // The spaces used by the two generations.
-  AdjoiningVirtualSpaces _virtual_spaces;
-
   // Move boundary up to expand old gen.  Checks are made to
   // determine if the move can be done with specified limits.
   void request_old_gen_expansion(size_t desired_change_in_bytes);
   // Move boundary down to expand young gen.
   bool request_young_gen_expansion(size_t desired_change_in_bytes);
 
+ protected:
+   // The young generation and old generation, respectively
+   PSYoungGen* _young_gen;
+   PSOldGen* _old_gen;
+
+   // The spaces used by the two generations.
+   AdjoiningVirtualSpaces* _virtual_spaces;
+
  public:
   AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
+  AdjoiningGenerations();
 
   // Accessors
   PSYoungGen* young_gen() { return _young_gen; }
   PSOldGen* old_gen() { return _old_gen; }
 
-  AdjoiningVirtualSpaces* virtual_spaces() { return &_virtual_spaces; }
+  AdjoiningVirtualSpaces* virtual_spaces() { return _virtual_spaces; }
 
   // Additional space is needed in the old generation.  Check
   // the available space and attempt to move the boundary if more space
@@ -74,7 +76,9 @@
 
   // Return the total byte size of the reserved space
   // for the adjoining generations.
-  size_t reserved_byte_size();
+  virtual size_t reserved_byte_size();
+
+  // Return a new AdjoiningGenerations instance based on the collector policy (specifically, whether the heap is heterogeneous).
+  static AdjoiningGenerations* create_adjoining_generations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
 };
-
 #endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
+#include "gc/parallel/adjoiningVirtualSpaces.hpp"
+#include "gc/parallel/generationSizer.hpp"
+#include "gc/parallel/parallelScavengeHeap.hpp"
+#include "gc/parallel/psFileBackedVirtualspace.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "memory/resourceArea.hpp"
+#include "utilities/align.hpp"
+#include "utilities/ostream.hpp"
+
+// Create two virtual spaces (HeteroVirtualSpaces): low() on nv-dimm memory, high() on dram.
+// ASPSOldGen and ASPSYoungGen are then created the same way as in the base class.
+
+AdjoiningGenerationsForHeteroHeap::AdjoiningGenerationsForHeteroHeap(ReservedSpace old_young_rs, GenerationSizer* policy, size_t alignment) :
+  _total_size_limit(policy->max_heap_byte_size()) {
+  size_t init_old_byte_size = policy->initial_old_size();
+  size_t min_old_byte_size = policy->min_old_size();
+  size_t max_old_byte_size = policy->max_old_size();
+  size_t init_young_byte_size = policy->initial_young_size();
+  size_t min_young_byte_size = policy->min_young_size();
+  size_t max_young_byte_size = policy->max_young_size();
+  // Create HeteroVirtualSpaces, which is composed of non-overlapping virtual spaces.
+  HeteroVirtualSpaces* hetero_virtual_spaces = new HeteroVirtualSpaces(old_young_rs, min_old_byte_size,
+                                                                       min_young_byte_size, _total_size_limit, alignment);
+
+  assert(min_old_byte_size <= init_old_byte_size &&
+         init_old_byte_size <= max_old_byte_size, "Parameter check");
+  assert(min_young_byte_size <= init_young_byte_size &&
+         init_young_byte_size <= max_young_byte_size, "Parameter check");
+
+  assert(UseAdaptiveGCBoundary, "Should be used only when UseAdaptiveGCBoundary is true");
+
+  // Initialize the virtual spaces. Then pass a virtual space to each generation
+  // for initialization of the generation.
+
+  // Does the actual creation of the virtual spaces
+  hetero_virtual_spaces->initialize(max_old_byte_size, init_old_byte_size, init_young_byte_size);
+
+  _young_gen = new ASPSYoungGen(hetero_virtual_spaces->high(),
+                                hetero_virtual_spaces->high()->committed_size() /* initial_size */,
+                                min_young_byte_size,
+                                hetero_virtual_spaces->max_young_size());
+
+  _old_gen = new ASPSOldGen(hetero_virtual_spaces->low(),
+                            hetero_virtual_spaces->low()->committed_size() /* initial_size */,
+                            min_old_byte_size,
+                            hetero_virtual_spaces->max_old_size(), "old", 1);
+
+  young_gen()->initialize_work();
+  assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(), "Consistency check");
+  assert(old_young_rs.size() >= young_gen()->gen_size_limit(), "Consistency check");
+
+  old_gen()->initialize_work("old", 1);
+  assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(), "Consistency check");
+  assert(old_young_rs.size() >= old_gen()->gen_size_limit(), "Consistency check");
+
+  _virtual_spaces = hetero_virtual_spaces;
+}
+
+size_t AdjoiningGenerationsForHeteroHeap::required_reserved_memory(GenerationSizer* policy) {
+  // This is the size that young gen can grow to when UseAdaptiveGCBoundary is true.
+  size_t max_yg_size = policy->max_heap_byte_size() - policy->min_old_size();
+  // This is the size that old gen can grow to when UseAdaptiveGCBoundary is true.
+  size_t max_old_size = policy->max_heap_byte_size() - policy->min_young_size();
+
+  return max_yg_size + max_old_size;
+}
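The reservation above is deliberately larger than Xmx: since young and old live in disjoint virtual spaces, each must be reserved at the largest size it could ever reach. Numerically, assuming Xmx = 8 GB with min_young = min_old = 1 GB, the function asks for (8 - 1) + (8 - 1) = 14 GB of address space, even though no more than 8 GB is ever committed at once. A hedged check of that arithmetic:

#include <cassert>
#include <cstddef>

int main() {
  const size_t GB = 1024ull * 1024 * 1024;
  size_t max_heap = 8 * GB, min_young = 1 * GB, min_old = 1 * GB;  // assumed sizes
  size_t max_yg_size  = max_heap - min_old;     // 7 GB
  size_t max_old_size = max_heap - min_young;   // 7 GB
  assert(max_yg_size + max_old_size == 14 * GB);  // reserved 14 GB > 8 GB Xmx
  return 0;
}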
+
+// We override this function since the size of the reserved space here is larger than the heap size, and
+// callers expect this function to return the heap size.
+size_t AdjoiningGenerationsForHeteroHeap::reserved_byte_size() {
+  return total_size_limit();
+}
+
+AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::HeteroVirtualSpaces(ReservedSpace rs, size_t min_old_byte_size, size_t min_yg_byte_size, size_t max_total_size, size_t alignment) :
+                                                                            AdjoiningVirtualSpaces(rs, min_old_byte_size, min_yg_byte_size, alignment),
+                                                                            _max_total_size(max_total_size),
+                                                                            _min_old_byte_size(min_old_byte_size), _min_young_byte_size(min_yg_byte_size),
+                                                                            _max_old_byte_size(_max_total_size - _min_young_byte_size),
+                                                                            _max_young_byte_size(_max_total_size - _min_old_byte_size) {
+}
+
+void AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::initialize(size_t initial_old_reserved_size, size_t init_old_byte_size,
+                                                                        size_t init_young_byte_size) {
+
+  // This is the reserved space exclusively for old generation.
+  ReservedSpace low_rs = _reserved_space.first_part(_max_old_byte_size, true);
+  // Initially we only assign 'initial_old_reserved_size' of the reserved space to the old virtual space.
+  low_rs = low_rs.first_part(initial_old_reserved_size);
+
+  // This is the reserved space exclusively for young generation.
+  ReservedSpace high_rs = _reserved_space.last_part(_max_old_byte_size).first_part(_max_young_byte_size);
+
+  // Carve out 'initial_young_reserved_size' of reserved space.
+  size_t initial_young_reserved_size = _max_total_size - initial_old_reserved_size;
+  high_rs = high_rs.last_part(_max_young_byte_size - initial_young_reserved_size);
+
+  _low = new PSFileBackedVirtualSpace(low_rs, alignment(), AllocateOldGenAt);
+  if (!static_cast<PSFileBackedVirtualSpace*>(_low)->initialize()) {
+    vm_exit_during_initialization("Could not map space for old generation at given AllocateOldGenAt path");
+  }
+
+  if (!_low->expand_by(init_old_byte_size)) {
+    vm_exit_during_initialization("Could not reserve enough space for object heap");
+  }
+
+  _high = new PSVirtualSpaceHighToLow(high_rs, alignment());
+  if (!_high->expand_by(init_young_byte_size)) {
+    vm_exit_during_initialization("Could not reserve enough space for object heap");
+  }
+}
+
+// Since the virtual spaces are non-overlapping, there is no boundary as such.
+// We replicate the behavior and maintain the invariants of the base class 'AdjoiningVirtualSpaces' by
+// increasing the old generation size and decreasing the young generation size by the same amount.
+bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) {
+  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
+  DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
+
+  size_t bytes_needed = change_in_bytes;
+  size_t uncommitted_in_old = MIN2(old_vs()->uncommitted_size(), bytes_needed);
+  bool old_expanded = false;
+
+  // 1. Try to expand old within its reserved space.
+  if (uncommitted_in_old != 0) {
+    if (!old_vs()->expand_by(uncommitted_in_old)) {
+      return false;
+    }
+    old_expanded = true;
+    bytes_needed -= uncommitted_in_old;
+    if (bytes_needed == 0) {
+      return true;
+    }
+  }
+
+  size_t bytes_to_add_in_old = 0;
+
+  // 2. Get uncommitted memory from Young virtualspace.
+  size_t young_uncommitted = MIN2(young_vs()->uncommitted_size(), bytes_needed);
+  if (young_uncommitted > 0) {
+    young_vs()->set_reserved(young_vs()->reserved_low_addr() + young_uncommitted,
+                             young_vs()->reserved_high_addr(),
+                             young_vs()->special());
+    bytes_needed -= young_uncommitted;
+    bytes_to_add_in_old = young_uncommitted;
+  }
+
+  // 3. Get committed memory from Young virtualspace
+  if (bytes_needed > 0) {
+    size_t shrink_size = align_down(bytes_needed, young_vs()->alignment());
+    bool ret = young_vs()->shrink_by(shrink_size);
+    assert(ret, "We should be able to shrink young space");
+    young_vs()->set_reserved(young_vs()->reserved_low_addr() + shrink_size,
+                             young_vs()->reserved_high_addr(),
+                             young_vs()->special());
+
+    bytes_to_add_in_old += shrink_size;
+  }
+
+  // 4. Increase size of old space
+  old_vs()->set_reserved(old_vs()->reserved_low_addr(),
+                         old_vs()->reserved_high_addr() + bytes_to_add_in_old,
+                         old_vs()->special());
+  if (!old_vs()->expand_by(bytes_to_add_in_old) && !old_expanded) {
+    return false;
+  }
+
+  DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
+  assert(total_size_after == total_size_before, "should be equal");
+
+  return true;
+}
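adjust_boundary_up() above moves capacity from young to old in three escalating steps: first old's own uncommitted tail, then young's uncommitted reservation, then committed young memory. A simplified counter model of that ordering (sizes assumed; the real code also moves reserved address boundaries and respects alignment):

#include <algorithm>
#include <cassert>
#include <cstddef>

struct Space { size_t committed; size_t reserved; };

// Grow 'old_g' by 'need' bytes at 'young_g's expense, in the same order
// as adjust_boundary_up(); returns the bytes actually obtained.
size_t grow_old(Space& old_g, Space& young_g, size_t need) {
  size_t done = std::min(old_g.reserved - old_g.committed, need);   // 1. old's own slack
  old_g.committed += done;
  need -= done;
  size_t from_young = std::min(young_g.reserved - young_g.committed, need);  // 2. young's uncommitted
  size_t shrink = std::min(young_g.committed, need - from_young);            // 3. committed young
  young_g.reserved  -= from_young + shrink;
  young_g.committed -= shrink;
  old_g.reserved    += from_young + shrink;
  old_g.committed   += from_young + shrink;
  return done + from_young + shrink;
}

int main() {
  Space old_g = {10, 10}, young_g = {20, 30};   // old has no slack of its own
  grow_old(old_g, young_g, 15);
  assert(old_g.reserved + young_g.reserved == 40);  // total reservation preserved
  return 0;
}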
+
+// See the comment for adjust_boundary_up().
+// Increases the young generation size and decreases the old generation size by the same amount.
+bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_down(size_t change_in_bytes) {
+  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
+  DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
+
+  size_t bytes_needed = change_in_bytes;
+  size_t uncommitted_in_young = MIN2(young_vs()->uncommitted_size(), bytes_needed);
+  bool young_expanded = false;
+
+  // 1. Try to expand young within its reserved space.
+  if (uncommitted_in_young > 0) {
+    if (!young_vs()->expand_by(uncommitted_in_young)) {
+      return false;
+    }
+    young_expanded = true;
+    bytes_needed -= uncommitted_in_young;
+    if (bytes_needed == 0) {
+      return true;
+    }
+  }
+
+  size_t bytes_to_add_in_young = 0;
+
+  // 2. Get uncommitted memory from Old virtualspace.
+  size_t old_uncommitted = MIN2(old_vs()->uncommitted_size(), bytes_needed);
+  if (old_uncommitted > 0) {
+    old_vs()->set_reserved(old_vs()->reserved_low_addr(),
+                           old_vs()->reserved_high_addr() - old_uncommitted,
+                           old_vs()->special());
+    bytes_needed -= old_uncommitted;
+    bytes_to_add_in_young = old_uncommitted;
+  }
+
+  // 3. Get committed memory from Old virtualspace
+  if (bytes_needed > 0) {
+    size_t shrink_size = align_down(bytes_needed, old_vs()->alignment());
+    bool ret = old_vs()->shrink_by(shrink_size);
+    assert(ret, "We should be able to shrink old space");
+    old_vs()->set_reserved(old_vs()->reserved_low_addr(),
+                           old_vs()->reserved_high_addr() - shrink_size,
+                           old_vs()->special());
+
+    bytes_to_add_in_young += shrink_size;
+  }
+
+  assert(bytes_to_add_in_young <= change_in_bytes, "should not be more than requested size");
+  // 4. Increase size of young space
+  young_vs()->set_reserved(young_vs()->reserved_low_addr() - bytes_to_add_in_young,
+                           young_vs()->reserved_high_addr(),
+                           young_vs()->special());
+  if (!young_vs()->expand_by(bytes_to_add_in_young) && !young_expanded) {
+    return false;
+  }
+
+  DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
+  assert(total_size_after == total_size_before, "should be equal");
+
+  return true;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
+#define SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
+
+#include "gc/parallel/adjoiningGenerations.hpp"
+
+class AdjoiningGenerationsForHeteroHeap : public AdjoiningGenerations {
+  friend class VMStructs;
+private:
+  // Maximum total size of the generations. This is equal to the heap size specified by the user.
+  // When adjusting young and old generation sizes, we need to ensure that the sum of the generation sizes does not exceed this.
+  size_t _total_size_limit;
+
+  size_t total_size_limit() const {
+    return _total_size_limit;
+  }
+
+  // HeteroVirtualSpaces creates non-overlapping virtual spaces. Here _low and _high do not share a reserved space, i.e. there is no boundary
+  // separating the two virtual spaces.
+  class HeteroVirtualSpaces : public AdjoiningVirtualSpaces {
+    size_t _max_total_size;
+    size_t _min_old_byte_size;
+    size_t _min_young_byte_size;
+    size_t _max_old_byte_size;
+    size_t _max_young_byte_size;
+
+    // Internally we access the virtual spaces using these methods. This improves readability, since we are not really
+    // dealing with adjoining virtual spaces separated by a boundary, as is the case in the base class.
+    // Externally they are accessed using the low() and high() methods of the base class.
+    PSVirtualSpace* young_vs() { return high(); }
+    PSVirtualSpace* old_vs() { return low(); }
+
+  public:
+    HeteroVirtualSpaces(ReservedSpace rs,
+                        size_t min_old_byte_size,
+                        size_t min_young_byte_size, size_t max_total_size,
+                        size_t alignment);
+
+    // Increase the old generation size and decrease the young generation size by the same amount.
+    bool adjust_boundary_up(size_t size_in_bytes);
+    // Increase the young generation size and decrease the old generation size by the same amount.
+    bool adjust_boundary_down(size_t size_in_bytes);
+
+    size_t max_young_size() const { return _max_young_byte_size; }
+    size_t max_old_size() const { return _max_old_byte_size; }
+
+    void initialize(size_t initial_old_reserved_size, size_t init_low_byte_size,
+                    size_t init_high_byte_size);
+  };
+
+public:
+  AdjoiningGenerationsForHeteroHeap(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
+
+  // Given the size policy, calculate the total amount of memory that needs to be reserved.
+  // We need to reserve more memory than Xmx, since we use non-overlapping virtual spaces for the young and old generations.
+  static size_t required_reserved_memory(GenerationSizer* policy);
+
+  // Return the total byte size of the reserved space
+  size_t reserved_byte_size();
+};
+#endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
+
--- a/src/hotspot/share/gc/parallel/adjoiningVirtualSpaces.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/adjoiningVirtualSpaces.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,8 @@
 // moved up consistently.  AdjoiningVirtualSpaces provide the
 // interfaces for moving the this boundary.
 
-class AdjoiningVirtualSpaces {
+class AdjoiningVirtualSpaces : public CHeapObj<mtGC> {
+protected:
   // space at the high end and the low end, respectively
   PSVirtualSpace*    _high;
   PSVirtualSpace*    _low;
@@ -84,17 +85,17 @@
                          size_t alignment);
 
   // accessors
-  PSVirtualSpace* high() { return _high; }
-  PSVirtualSpace* low()  { return _low; }
+  virtual PSVirtualSpace* high() { return _high; }
+  virtual PSVirtualSpace* low()  { return _low; }
   ReservedSpace reserved_space() { return _reserved_space; }
   size_t min_low_byte_size() { return _min_low_byte_size; }
   size_t min_high_byte_size() { return _min_high_byte_size; }
   size_t alignment() const { return _alignment; }
 
   // move boundary between the two spaces up
-  bool adjust_boundary_up(size_t size_in_bytes);
+  virtual bool adjust_boundary_up(size_t size_in_bytes);
   // and down
-  bool adjust_boundary_down(size_t size_in_bytes);
+  virtual bool adjust_boundary_down(size_t size_in_bytes);
 
   // Maximum byte size for the high space.
   size_t high_byte_size_limit() {
@@ -107,9 +108,8 @@
 
   // Sets the boundaries for the virtual spaces and commits and
   // initial size;
-  void initialize(size_t max_low_byte_size,
+  virtual void initialize(size_t max_low_byte_size,
                   size_t init_low_byte_size,
                   size_t init_high_byte_size);
 };
-
 #endif // SHARE_VM_GC_PARALLEL_ADJOININGVIRTUALSPACES_HPP
--- a/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,7 +114,6 @@
 // for tasks to be enqueued for execution.
 
 void GCTaskThread::run() {
-  this->initialize_named_thread();
   // Bind yourself to your processor.
   if (processor_id() != GCTaskManager::sentinel_worker()) {
     log_trace(gc, task, thread)("GCTaskThread::run: binding to processor %u", processor_id());
--- a/src/hotspot/share/gc/parallel/generationSizer.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/generationSizer.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,3 +67,11 @@
   }
   GenCollectorPolicy::initialize_size_info();
 }
+
+bool GenerationSizer::is_hetero_heap() const {
+  return false;
+}
+
+size_t GenerationSizer::heap_reserved_size_bytes() const {
+  return _max_heap_byte_size;
+}
--- a/src/hotspot/share/gc/parallel/generationSizer.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/generationSizer.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,6 @@
 
 class GenerationSizer : public GenCollectorPolicy {
  private:
-
   // The alignment used for boundary between young gen and old gen
   static size_t default_gen_alignment() { return 64 * K * HeapWordSize; }
 
@@ -41,5 +40,9 @@
   void initialize_alignments();
   void initialize_flags();
   void initialize_size_info();
+
+ public:
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
 };
 #endif // SHARE_VM_GC_PARALLEL_GENERATIONSIZER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/heterogeneousGenerationSizer.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/heterogeneousGenerationSizer.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+const double HeterogeneousGenerationSizer::MaxRamFractionForYoung = 0.8;
+
+// Check the available DRAM memory to limit NewSize and MaxNewSize before
+// calling the base class initialize_flags().
+void HeterogeneousGenerationSizer::initialize_flags() {
+  FormatBuffer<100> calc_str("");
+
+  julong phys_mem;
+  // If MaxRAM is specified, we use it as the maximum physical memory available.
+  if (FLAG_IS_DEFAULT(MaxRAM)) {
+    phys_mem = os::physical_memory();
+    calc_str.append("Physical_Memory");
+  } else {
+    phys_mem = (julong)MaxRAM;
+    calc_str.append("MaxRAM");
+  }
+
+  julong reasonable_max = phys_mem;
+
+  // If either MaxRAMFraction or MaxRAMPercentage is specified, we use it to
+  // calculate a reasonable maximum size for the young generation.
+  if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
+    reasonable_max = (julong)(phys_mem / MaxRAMFraction);
+    calc_str.append(" / MaxRAMFraction");
+  } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
+    reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+    calc_str.append(" * MaxRAMPercentage / 100");
+  } else {
+    // Neither flag is set, so use our own fraction to calculate the maximum
+    // size of the young generation.
+    reasonable_max = (julong)(phys_mem * MaxRamFractionForYoung);
+    calc_str.append(" * %0.2f", MaxRamFractionForYoung);
+  }
+  reasonable_max = align_up(reasonable_max, _gen_alignment);
+
+  if (MaxNewSize > reasonable_max) {
+    if (FLAG_IS_CMDLINE(MaxNewSize)) {
+      log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            (size_t)reasonable_max, calc_str.buffer());
+    } else {
+      log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
+                         "Dram usage can be lowered by setting MaxNewSize to a lower value", (size_t)reasonable_max, calc_str.buffer());
+    }
+    MaxNewSize = reasonable_max;
+  }
+  if (NewSize > reasonable_max) {
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            (size_t)reasonable_max, calc_str.buffer());
+    }
+    NewSize = reasonable_max;
+  }
+
+  // After setting new size flags, call base class initialize_flags()
+  GenerationSizer::initialize_flags();
+}
+
+bool HeterogeneousGenerationSizer::is_hetero_heap() const {
+  return true;
+}
+
+size_t HeterogeneousGenerationSizer::heap_reserved_size_bytes() const {
+  if (UseAdaptiveGCBoundary) {
+    // This is the size the young gen can grow to when UseAdaptiveGCBoundary is true.
+    size_t max_yg_size = _max_heap_byte_size - _min_old_size;
+    // This is the size the old gen can grow to when UseAdaptiveGCBoundary is true.
+    size_t max_old_size = _max_heap_byte_size - _min_young_size;
+
+    return max_yg_size + max_old_size;
+  } else {
+    return _max_heap_byte_size;
+  }
+}
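
The sizing logic above caps the young generation by available DRAM, with this precedence: MaxRAMFraction, then MaxRAMPercentage, then the internal 0.8 default (MaxRamFractionForYoung). With UseAdaptiveGCBoundary, heap_reserved_size_bytes() reserves (max_heap - min_old) + (max_heap - min_young), so either generation can grow to its own maximum. A minimal standalone sketch of the cap calculation (illustrative names only, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // Reproduces the precedence used by initialize_flags():
    // MaxRAMFraction, else MaxRAMPercentage, else a fixed 0.8 fraction.
    uint64_t young_gen_cap(uint64_t phys_mem,
                           bool has_fraction, uint64_t max_ram_fraction,
                           bool has_percentage, double max_ram_percentage) {
      const double default_fraction_for_young = 0.8;  // MaxRamFractionForYoung
      if (has_fraction) {
        return phys_mem / max_ram_fraction;
      } else if (has_percentage) {
        return (uint64_t)((phys_mem * max_ram_percentage) / 100.0);
      }
      return (uint64_t)(phys_mem * default_fraction_for_young);
    }

    int main() {
      // 32 GB of DRAM and no flags set: the young gen is capped at ~25.6 GB.
      printf("%llu\n", (unsigned long long)young_gen_cap(32ull << 30, false, 0, false, 0.0));
      return 0;
    }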
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/heterogeneousGenerationSizer.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
+#define SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
+
+#include "gc/parallel/generationSizer.hpp"
+
+// There is a nice batch of tested generation sizing code in
+// GenCollectorPolicy. Let's reuse it!
+
+class HeterogeneousGenerationSizer : public GenerationSizer {
+private:
+  // Max fraction of DRAM to use for the young generation when neither
+  // MaxRAMFraction nor MaxRAMPercentage is specified on the command line.
+  static const double MaxRamFractionForYoung;
+
+protected:
+  virtual void initialize_flags();
+
+public:
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
+};
+#endif // SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/parallel/heterogeneousGenerationSizer.hpp"
 #include "gc/parallel/parallelArguments.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
@@ -93,5 +94,9 @@
 }
 
 CollectedHeap* ParallelArguments::create_heap() {
-  return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
+  if (AllocateOldGenAt != NULL) {
+    return create_heap_with_policy<ParallelScavengeHeap, HeterogeneousGenerationSizer>();
+  } else {
+    return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
+  }
 }
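
The policy type is chosen at heap-creation time: a non-NULL AllocateOldGenAt selects HeterogeneousGenerationSizer, otherwise the plain GenerationSizer. For reference, the create_heap_with_policy helper (declared in gcArguments.hpp, outside this hunk) boils down to something like this sketch: the policy is built and fully initialized first, then handed to the heap constructor:

    template <class Heap, class Policy>
    CollectedHeap* GCArguments::create_heap_with_policy() {
      Policy* policy = new Policy();
      policy->initialize_all();
      return new Heap(policy);
    }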
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
+#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/generationSizer.hpp"
@@ -58,7 +59,7 @@
 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
 
 jint ParallelScavengeHeap::initialize() {
-  const size_t heap_size = _collector_policy->max_heap_byte_size();
+  size_t heap_size = _collector_policy->heap_reserved_size_bytes();
 
   ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
 
@@ -86,7 +87,7 @@
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
 
-  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
+  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs, _collector_policy, generation_alignment());
 
   _old_gen = _gens->old_gen();
   _young_gen = _gens->young_gen();
@@ -104,7 +105,7 @@
                              GCTimeRatio
                              );
 
-  assert(!UseAdaptiveGCBoundary ||
+  assert(_collector_policy->is_hetero_heap() || !UseAdaptiveGCBoundary ||
     (old_gen()->virtual_space()->high_boundary() ==
      young_gen()->virtual_space()->low_boundary()),
     "Boundaries must meet");
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -111,6 +111,8 @@
 
   virtual CollectorPolicy* collector_policy() const { return _collector_policy; }
 
+  virtual GenerationSizer* ps_collector_policy() const { return _collector_policy; }
+
   virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }
 
   virtual GrowableArray<GCMemoryManager*> memory_managers();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psFileBackedVirtualspace.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/psFileBackedVirtualspace.hpp"
+#include "memory/virtualspace.hpp"
+#include "runtime/os.inline.hpp"
+
+PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* path) : PSVirtualSpace(rs, alignment),
+                                                   _file_path(path), _fd(-1), _mapping_succeeded(false) {
+  assert(!rs.special(), "ReservedSpace passed to PSFileBackedVirtualSpace cannot be special");
+}
+
+bool PSFileBackedVirtualSpace::initialize() {
+  _fd = os::create_file_for_heap(_file_path);
+  if (_fd == -1) {
+    return false;
+  }
+  // We map the reserved space to a file at initialization.
+  char* ret = os::replace_existing_mapping_with_file_mapping(reserved_low_addr(), reserved_size(), _fd);
+  if (ret != reserved_low_addr()) {
+    os::close(_fd);
+    return false;
+  }
+  // _mapping_succeeded is false if we returned before this point.
+  // Later calls to expand_by() check this flag and fail if it is false.
+  _mapping_succeeded = true;
+  _special = true;
+  os::close(_fd);
+  return true;
+}
+
+// Same as above, defaulting the alignment to the VM page size.
+PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, const char* path) :
+  PSVirtualSpace(rs, os::vm_page_size()), _file_path(path), _fd(-1), _mapping_succeeded(false) {
+  assert(!rs.special(), "ReservedSpace passed to PSFileBackedVirtualSpace cannot be special");
+}
+
+bool PSFileBackedVirtualSpace::expand_by(size_t bytes) {
+  assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");
+
+  // If the mapping did not succeed during initialization, return false.
+  if (!_mapping_succeeded) {
+    return false;
+  }
+  return PSVirtualSpace::expand_by(bytes);
+}
+
+bool PSFileBackedVirtualSpace::shrink_by(size_t bytes) {
+  assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");
+  return PSVirtualSpace::shrink_by(bytes);
+}
+
+size_t PSFileBackedVirtualSpace::expand_into(PSVirtualSpace* space, size_t bytes) {
+  // Not supported: doing this would change the page mapping, which would incur large TLB penalties.
+  assert(false, "expand_into() should not be called for PSFileBackedVirtualSpace");
+  return 0;
+}
+
+void PSFileBackedVirtualSpace::release() {
+  os::close(_fd);
+  _fd = -1;
+  _file_path = NULL;
+
+  PSVirtualSpace::release();
+}
+
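
PSFileBackedVirtualSpace commits the entire old-gen range up front by mapping it to a file, which is why _special is set and expand_by()/shrink_by() only adjust bookkeeping. os::replace_existing_mapping_with_file_mapping() is HotSpot-internal; the Linux-flavored POSIX sketch below only illustrates the underlying technique of reserving an address range and then mapping a file over it at the same address (the path and sizes are arbitrary assumptions):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      const size_t size = 64 * 1024 * 1024;
      // 1. Reserve the address range (what Universe::reserve_heap() does for the real heap).
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) { perror("reserve"); return 1; }

      // 2. Create and size the backing file (cf. os::create_file_for_heap()).
      int fd = open("/mnt/nvdimm/oldgen.heap", O_RDWR | O_CREAT, 0600);
      if (fd < 0 || ftruncate(fd, (off_t)size) != 0) { perror("backing file"); return 1; }

      // 3. Replace the anonymous mapping with a file mapping at the same address.
      void* mapped = mmap(base, size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_FIXED, fd, 0);
      if (mapped != base) { perror("remap"); return 1; }

      close(fd);  // The mapping remains valid after the descriptor is closed.
      printf("old gen would live at %p, backed by the file\n", base);
      munmap(base, size);
      return 0;
    }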
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psFileBackedVirtualspace.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
+#define SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
+
+#include "gc/parallel/psVirtualspace.hpp"
+
+class PSFileBackedVirtualSpace : public PSVirtualSpace {
+private:
+  const char* _file_path;
+  int _fd;
+  bool _mapping_succeeded;
+public:
+  PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* file_path);
+  PSFileBackedVirtualSpace(ReservedSpace rs, const char* file_path);
+
+  bool   initialize();
+  bool   expand_by(size_t bytes);
+  bool   shrink_by(size_t bytes);
+  size_t expand_into(PSVirtualSpace* space, size_t bytes);
+  void   release();
+};
+#endif // SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
+
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -27,6 +27,7 @@
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psCardTable.hpp"
+#include "gc/parallel/psFileBackedVirtualspace.hpp"
 #include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
@@ -71,7 +72,14 @@
 
 void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
 
-  _virtual_space = new PSVirtualSpace(rs, alignment);
+  if (ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
+    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
+    if (!static_cast<PSFileBackedVirtualSpace*>(_virtual_space)->initialize()) {
+      vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
+    }
+  } else {
+    _virtual_space = new PSVirtualSpace(rs, alignment);
+  }
   if (!_virtual_space->expand_by(_init_gen_size)) {
     vm_exit_during_initialization("Could not reserve enough space for "
                                   "object heap");
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1995,7 +1995,10 @@
   assert(young_gen->virtual_space()->alignment() ==
          old_gen->virtual_space()->alignment(), "alignments do not match");
 
-  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
+  // We also return false for a heterogeneous heap, because the old generation cannot
+  // absorb data from eden when it is allocated on different memory (for example,
+  // NV-DIMM) than the young generation.
+  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) ||
+      ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
     return false;
   }
 
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -48,11 +48,11 @@
 #ifdef SUPPORT_BARRIER_ON_PRIMITIVES
 #define ACCESS_PRIMITIVE_SUPPORT INTERNAL_BT_BARRIER_ON_PRIMITIVES
 #else
-#define ACCESS_PRIMITIVE_SUPPORT INTERNAL_EMPTY
+#define ACCESS_PRIMITIVE_SUPPORT DECORATORS_NONE
 #endif
 
 #ifdef SUPPORT_NOT_TO_SPACE_INVARIANT
-#define ACCESS_TO_SPACE_INVARIANT_SUPPORT INTERNAL_EMPTY
+#define ACCESS_TO_SPACE_INVARIANT_SUPPORT DECORATORS_NONE
 #else
 #define ACCESS_TO_SPACE_INVARIANT_SUPPORT INTERNAL_BT_TO_SPACE_INVARIANT
 #endif
--- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,6 @@
 }
 
 void ConcurrentGCThread::initialize_in_thread() {
-  this->initialize_named_thread();
   this->set_active_handles(JNIHandleBlock::allocate_block());
   // From this time Thread::current() should be working.
   assert(this == Thread::current(), "just checking");
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -28,6 +28,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
+#include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
 
 void GCArguments::initialize() {
@@ -53,4 +54,28 @@
     // If class unloading is disabled, also disable concurrent class unloading.
     FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
   }
+
+  if (!FLAG_IS_DEFAULT(AllocateOldGenAt)) {
+    // CompressedOops is not supported when AllocateOldGenAt is set.
+    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
+    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
+    // When AllocateOldGenAt is set, we cannot use large pages for the entire heap.
+    // Only the young gen, which is allocated in DRAM, could use large pages, but we
+    // don't support that yet.
+    FLAG_SET_DEFAULT(UseLargePages, false);
+  }
 }
+
+bool GCArguments::check_args_consistency() {
+  bool status = true;
+  if (!FLAG_IS_DEFAULT(AllocateHeapAt) && !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
+    jio_fprintf(defaultStream::error_stream(),
+      "AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
+    status = false;
+  }
+  if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) {
+    jio_fprintf(defaultStream::error_stream(),
+      "AllocateOldGenAt is not supported for selected GC.\n");
+    status = false;
+  }
+  return status;
+}
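
Taken together with initialize() above: setting AllocateOldGenAt turns off compressed oops, compressed class pointers and large pages, and check_args_consistency() rejects the flag both in combination with AllocateHeapAt and under Serial, CMS, Epsilon and Z. A hedged usage sketch (whether -XX:+UnlockExperimentalVMOptions is required depends on how the flag is declared, which is outside this hunk):

    java -XX:+UnlockExperimentalVMOptions -XX:+UseParallelGC \
         -XX:AllocateOldGenAt=/mnt/pmem0 MyApp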
--- a/src/hotspot/share/gc/shared/gcArguments.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -39,6 +39,7 @@
   virtual void initialize();
   virtual size_t conservative_max_heap_alignment() = 0;
   virtual CollectedHeap* create_heap() = 0;
+  static bool check_args_consistency();
 };
 
 #endif // SHARE_GC_SHARED_GCARGUMENTS_HPP
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -54,7 +54,7 @@
       // No work to do.
       qset()->deallocate_buffer(node);
     } else {
-      qset()->enqueue_complete_buffer(node);
+      qset()->enqueue_completed_buffer(node);
     }
     _buf = NULL;
     set_index(0);
@@ -165,11 +165,11 @@
   _completed_buffers_tail(NULL),
   _n_completed_buffers(0),
   _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
-  _process_completed(false),
-  _all_active(false),
+  _process_completed_buffers(false),
   _notify_when_complete(notify_when_complete),
   _max_completed_buffers(MaxCompletedBuffersUnlimited),
-  _completed_buffers_padding(0)
+  _completed_buffers_padding(0),
+  _all_active(false)
 {}
 
 PtrQueueSet::~PtrQueueSet() {
@@ -211,11 +211,11 @@
       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       _buf = NULL;         // clear shared _buf field
 
-      qset()->enqueue_complete_buffer(node);
+      qset()->enqueue_completed_buffer(node);
       assert(_buf == NULL, "multiple enqueuers appear to be racing");
     } else {
       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
-      if (qset()->process_or_enqueue_complete_buffer(node)) {
+      if (qset()->process_or_enqueue_completed_buffer(node)) {
         // Recycle the buffer. No allocation.
         assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
         assert(capacity() == qset()->buffer_size(), "invariant");
@@ -231,7 +231,7 @@
   reset();
 }
 
-bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
+bool PtrQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
   if (Thread::current()->is_Java_thread()) {
     // If the number of buffers exceeds the limit, make this Java
     // thread do the processing itself.  We don't lock to access
@@ -246,11 +246,11 @@
     }
   }
   // The buffer will be enqueued. The caller will have to get a new one.
-  enqueue_complete_buffer(node);
+  enqueue_completed_buffer(node);
   return false;
 }
 
-void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
+void PtrQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
   cbn->set_next(NULL);
   if (_completed_buffers_tail == NULL) {
@@ -263,36 +263,73 @@
   }
   _n_completed_buffers++;
 
-  if (!_process_completed &&
+  if (!_process_completed_buffers &&
       (_n_completed_buffers > _process_completed_buffers_threshold)) {
-    _process_completed = true;
+    _process_completed_buffers = true;
     if (_notify_when_complete) {
       _cbl_mon->notify();
     }
   }
-  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
+  assert_completed_buffers_list_len_correct_locked();
 }
 
-size_t PtrQueueSet::completed_buffers_list_length() {
-  size_t n = 0;
-  BufferNode* cbn = _completed_buffers_head;
-  while (cbn != NULL) {
-    n++;
-    cbn = cbn->next();
+BufferNode* PtrQueueSet::get_completed_buffer(size_t stop_at) {
+  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+
+  if (_n_completed_buffers <= stop_at) {
+    return NULL;
   }
-  return n;
+
+  assert(_n_completed_buffers > 0, "invariant");
+  assert(_completed_buffers_head != NULL, "invariant");
+  assert(_completed_buffers_tail != NULL, "invariant");
+
+  BufferNode* bn = _completed_buffers_head;
+  _n_completed_buffers--;
+  _completed_buffers_head = bn->next();
+  if (_completed_buffers_head == NULL) {
+    assert(_n_completed_buffers == 0, "invariant");
+    _completed_buffers_tail = NULL;
+    _process_completed_buffers = false;
+  }
+  assert_completed_buffers_list_len_correct_locked();
+  bn->set_next(NULL);
+  return bn;
 }
 
-void PtrQueueSet::assert_completed_buffer_list_len_correct() {
-  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  assert_completed_buffer_list_len_correct_locked();
+void PtrQueueSet::abandon_completed_buffers() {
+  BufferNode* buffers_to_delete = NULL;
+  {
+    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+    buffers_to_delete = _completed_buffers_head;
+    _completed_buffers_head = NULL;
+    _completed_buffers_tail = NULL;
+    _n_completed_buffers = 0;
+    _process_completed_buffers = false;
+  }
+  while (buffers_to_delete != NULL) {
+    BufferNode* bn = buffers_to_delete;
+    buffers_to_delete = bn->next();
+    bn->set_next(NULL);
+    deallocate_buffer(bn);
+  }
 }
 
-void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
-  guarantee(completed_buffers_list_length() ==  _n_completed_buffers,
-            "Completed buffer length is wrong.");
+#ifdef ASSERT
+
+void PtrQueueSet::assert_completed_buffers_list_len_correct_locked() {
+  assert_lock_strong(_cbl_mon);
+  size_t n = 0;
+  for (BufferNode* bn = _completed_buffers_head; bn != NULL; bn = bn->next()) {
+    ++n;
+  }
+  assert(n == _n_completed_buffers,
+         "Completed buffer length is wrong: counted: " SIZE_FORMAT
+         ", expected: " SIZE_FORMAT, n, _n_completed_buffers);
 }
 
+#endif // ASSERT
+
 // Merge lists of buffers. Notify the processing threads.
 // The source queue is emptied as a result. The queues
 // must share the monitor.
@@ -315,16 +352,18 @@
   src->_n_completed_buffers = 0;
   src->_completed_buffers_head = NULL;
   src->_completed_buffers_tail = NULL;
+  src->_process_completed_buffers = false;
 
   assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
          _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
          "Sanity");
+  assert_completed_buffers_list_len_correct_locked();
 }
 
 void PtrQueueSet::notify_if_necessary() {
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
   if (_n_completed_buffers > _process_completed_buffers_threshold) {
-    _process_completed = true;
+    _process_completed_buffers = true;
     if (_notify_when_complete)
       _cbl_mon->notify();
   }
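
One detail of the new abandon_completed_buffers() worth noting: the whole completed list is detached while _cbl_mon is held, and the nodes are deallocated only after the lock is released, keeping the critical section short. A minimal sketch of the pattern (std::mutex standing in for HotSpot's MutexLockerEx; not HotSpot code):

    #include <mutex>

    struct Node { Node* next; };

    std::mutex list_mutex;
    Node* list_head = NULL;

    void abandon_all() {
      Node* to_delete = NULL;
      {
        std::lock_guard<std::mutex> guard(list_mutex);
        to_delete = list_head;   // Detach the entire list under the lock...
        list_head = NULL;
      }
      while (to_delete != NULL) {  // ...and free it outside the critical section.
        Node* n = to_delete;
        to_delete = n->next;
        delete n;
      }
    }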
--- a/src/hotspot/share/gc/shared/ptrQueue.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/ptrQueue.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -275,19 +275,16 @@
 // A PtrQueueSet represents resources common to a set of pointer queues.
 // In particular, the individual queues allocate buffers from this shared
 // set, and return completed buffers to the set.
-// All these variables are are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet {
   BufferNode::Allocator* _allocator;
 
-protected:
   Monitor* _cbl_mon;  // Protects the fields below.
   BufferNode* _completed_buffers_head;
   BufferNode* _completed_buffers_tail;
   size_t _n_completed_buffers;
+
   size_t _process_completed_buffers_threshold;
-  volatile bool _process_completed;
-
-  bool _all_active;
+  volatile bool _process_completed_buffers;
 
   // If true, notify_all on _cbl_mon when the threshold is reached.
   bool _notify_when_complete;
@@ -297,11 +294,11 @@
   size_t _max_completed_buffers;
   size_t _completed_buffers_padding;
 
-  size_t completed_buffers_list_length();
-  void assert_completed_buffer_list_len_correct_locked();
-  void assert_completed_buffer_list_len_correct();
+  void assert_completed_buffers_list_len_correct_locked() NOT_DEBUG_RETURN;
 
 protected:
+  bool _all_active;
+
   // A mutator thread does the work of processing a buffer.
   // Returns "true" iff the work is complete (and the buffer may be
   // deallocated).
@@ -318,6 +315,12 @@
   // arguments.
   void initialize(Monitor* cbl_mon, BufferNode::Allocator* allocator);
 
+  // For (unlocked!) iteration over the completed buffers.
+  BufferNode* completed_buffers_head() const { return _completed_buffers_head; }
+
+  // Deallocate all of the completed buffers.
+  void abandon_completed_buffers();
+
 public:
 
   // Return the buffer for a BufferNode of size buffer_size().
@@ -327,18 +330,21 @@
   // to have been allocated with a size of buffer_size().
   void deallocate_buffer(BufferNode* node);
 
-  // Declares that "buf" is a complete buffer.
-  void enqueue_complete_buffer(BufferNode* node);
+  // A completed buffer is a buffer the mutator is finished with, and
+  // is ready to be processed by the collector.  It need not be full.
+
+  // Adds node to the completed buffer list.
+  void enqueue_completed_buffer(BufferNode* node);
+
+  // If the number of completed buffers is > stop_at, then remove and
+  // return a completed buffer from the list.  Otherwise, return NULL.
+  BufferNode* get_completed_buffer(size_t stop_at = 0);
 
   // To be invoked by the mutator.
-  bool process_or_enqueue_complete_buffer(BufferNode* node);
+  bool process_or_enqueue_completed_buffer(BufferNode* node);
 
-  bool completed_buffers_exist_dirty() {
-    return _n_completed_buffers > 0;
-  }
-
-  bool process_completed_buffers() { return _process_completed; }
-  void set_process_completed(bool x) { _process_completed = x; }
+  bool process_completed_buffers() { return _process_completed_buffers; }
+  void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
 
   bool is_active() { return _all_active; }
 
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -180,17 +180,7 @@
 }
 
 bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
-  BufferNode* nd = NULL;
-  {
-    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-    if (_completed_buffers_head != NULL) {
-      nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next();
-      if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
-      _n_completed_buffers--;
-      if (_n_completed_buffers == 0) _process_completed = false;
-    }
-  }
+  BufferNode* nd = get_completed_buffer();
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);
     size_t index = nd->index();
@@ -216,7 +206,7 @@
   tty->cr();
   tty->print_cr("SATB BUFFERS [%s]", msg);
 
-  BufferNode* nd = _completed_buffers_head;
+  BufferNode* nd = completed_buffers_head();
   int i = 0;
   while (nd != NULL) {
     void** buf = BufferNode::make_buffer_from_node(nd);
@@ -238,24 +228,7 @@
 #endif // PRODUCT
 
 void SATBMarkQueueSet::abandon_partial_marking() {
-  BufferNode* buffers_to_delete = NULL;
-  {
-    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-    while (_completed_buffers_head != NULL) {
-      BufferNode* nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next();
-      nd->set_next(buffers_to_delete);
-      buffers_to_delete = nd;
-    }
-    _completed_buffers_tail = NULL;
-    _n_completed_buffers = 0;
-    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
-  }
-  while (buffers_to_delete != NULL) {
-    BufferNode* nd = buffers_to_delete;
-    buffers_to_delete = nd->next();
-    deallocate_buffer(nd);
-  }
+  abandon_completed_buffers();
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   // So we can safely manipulate these queues.
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
--- a/src/hotspot/share/gc/shared/workgroup.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/shared/workgroup.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -297,7 +297,6 @@
 }
 
 void AbstractGangWorker::initialize() {
-  this->initialize_named_thread();
   assert(_gang != NULL, "No gang to run in");
   os::set_priority(this, NearMaxPriority);
   log_develop_trace(gc, workgang)("Running gang worker for gang %s id %u", gang()->name(), id());
--- a/src/hotspot/share/gc/z/zArguments.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/z/zArguments.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -19,7 +19,6 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */
 
 #include "precompiled.hpp"
@@ -91,6 +90,9 @@
   // Verification of stacks not (yet) supported, for the same reason
   // we need fixup_partial_loads
   DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
+
+  // Initialize platform specific arguments
+  initialize_platform();
 }
 
 CollectedHeap* ZArguments::create_heap() {
--- a/src/hotspot/share/gc/z/zArguments.hpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/z/zArguments.hpp	Thu Jan 03 21:25:54 2019 +0100
@@ -29,6 +29,9 @@
 class CollectedHeap;
 
 class ZArguments : public GCArguments {
+private:
+  void initialize_platform();
+
 public:
   virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -128,7 +128,8 @@
 }
 
 uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
-  assert(ZThread::is_java() || ZThread::is_vm(), "Should be a Java or VM thread");
+  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
+         "Should be a Java, VM or Runtime worker thread");
 
   // Non-worker small page allocation can never use the reserve
   flags.set_no_reserve();
@@ -193,7 +194,8 @@
 }
 
 uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
-  assert(ZThread::is_java() || ZThread::is_worker() || ZThread::is_vm(), "Unknown thread");
+  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_worker() || ZThread::is_runtime_worker(),
+         "Unknown thread");
 
   ZAllocationFlags flags;
   flags.set_relocation();
--- a/src/hotspot/share/gc/z/zRuntimeWorkers.cpp	Wed Dec 19 12:33:25 2018 -0500
+++ b/src/hotspot/share/gc/z/zRuntimeWorkers.cpp	Thu Jan 03 21:25:54 2019 +0100
@@ -22,7 +22,43 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shared/workgroup.hpp"
 #include "gc/z/zRuntimeWorkers.hpp"
+#include "gc/z/zThread.hpp"
+#include "runtime/mutexLocker.hpp"
+
+class ZRuntimeWorkersInitializeTask : public AbstractGangTask {
+private:
+  const uint _nworkers;
+  uint       _started;
+  Monitor    _monitor;
+
+public:
+  ZRuntimeWorkersInitializeTask(uint nworkers) :
+      AbstractGangTask("ZRuntimeWorkersInitializeTask"),
+