changeset 53361:585902b2bfcb

Merge
author henryjen
date Tue, 15 Jan 2019 10:55:26 -0800
parents b94283cb226b 8ce4083fc831
children 36ca868f266f
files make/autoconf/flags-cflags.m4 src/hotspot/share/classfile/dictionary.hpp src/hotspot/share/classfile/systemDictionary.cpp src/hotspot/share/prims/jvm.cpp src/java.base/unix/native/libnet/net_util_md.c test/jdk/java/lang/String/AlignIndent.java test/jdk/java/net/MulticastSocket/PromiscuousIPv6.java test/jdk/java/nio/channels/DatagramChannel/PromiscuousIPv6.java test/langtools/tools/javac/RawStringLiteralLang.java test/langtools/tools/javac/RawStringLiteralLangAPI.java test/langtools/tools/javac/diags/examples/RawStringLiteral.java
diffstat 383 files changed, 8505 insertions(+), 3199 deletions(-)
--- a/.hgtags	Thu Dec 13 11:51:06 2018 -0800
+++ b/.hgtags	Tue Jan 15 10:55:26 2019 -0800
@@ -526,3 +526,7 @@
 f8fb0c86f2b3d24294d39c5685a628e1beb14ba7 jdk-12+21
 732bec44c89e8b93a38296bf690f97b7230c5b6d jdk-12+22
 eef755718cb24813031a842bbfc716a6cea18e9a jdk-12+23
+7d4397b43fa305806160785a4c7210600d59581a jdk-12+24
+7496df94b3b79f3da53925d2d137317715f11d97 jdk-12+25
+de9fd809bb475401aad188eab2264226788aad81 jdk-12+26
+f15d443f97318e9b40e6f451e327ff69ed4ec361 jdk-12+27
--- a/make/Docs.gmk	Thu Dec 13 11:51:06 2018 -0800
+++ b/make/Docs.gmk	Tue Jan 15 10:55:26 2019 -0800
@@ -517,7 +517,7 @@
   ) \
 )
 
-ifneq ($(PANDOC), )
+ifeq ($(ENABLE_PANDOC), true)
   # For all markdown files in $module/share/specs directories, convert them to
   # html, if we have pandoc (otherwise we'll just skip this).
 
--- a/make/RunTestsPrebuilt.gmk	Thu Dec 13 11:51:06 2018 -0800
+++ b/make/RunTestsPrebuilt.gmk	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -237,7 +237,8 @@
 else ifeq ($(OPENJDK_TARGET_OS), windows)
   NUM_CORES := $(NUMBER_OF_PROCESSORS)
   MEMORY_SIZE := $(shell \
-      $(EXPR) `wmic computersystem get totalphysicalmemory -value | $(GREP) = \
+      $(EXPR) `wmic computersystem get totalphysicalmemory -value \
+          | $(GREP) = | $(SED) 's/\\r//g' \
           | $(CUT) -d "=" -f 2-` / 1024 / 1024 \
   )
 endif
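
Note on the hunk above: wmic prints CRLF-terminated output, so the captured value carried a trailing \r that broke the EXPR arithmetic; the added SED stage strips it. A minimal C++ sketch of the same cleanup (illustrative only; the build does this with sed):

#include <algorithm>
#include <cassert>
#include <string>

// Drop every '\r' so the remaining digits parse cleanly as a number.
std::string strip_cr(std::string s) {
  s.erase(std::remove(s.begin(), s.end(), '\r'), s.end());
  return s;
}

int main() {
  assert(strip_cr("TotalPhysicalMemory=17179869184\r\n")
         == "TotalPhysicalMemory=17179869184\n");
}
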
--- a/make/autoconf/basics.m4	Thu Dec 13 11:51:06 2018 -0800
+++ b/make/autoconf/basics.m4	Tue Jan 15 10:55:26 2019 -0800
@@ -610,7 +610,14 @@
   BASIC_PATH_PROGS(DF, df)
   BASIC_PATH_PROGS(CPIO, [cpio bsdcpio])
   BASIC_PATH_PROGS(NICE, nice)
+
   BASIC_PATH_PROGS(PANDOC, pandoc)
+  if test -n "$PANDOC"; then
+    ENABLE_PANDOC="true"
+  else
+    ENABLE_PANDOC="false"
+  fi
+  AC_SUBST(ENABLE_PANDOC)
 ])
 
 ###############################################################################
--- a/make/autoconf/flags-cflags.m4	Thu Dec 13 11:51:06 2018 -0800
+++ b/make/autoconf/flags-cflags.m4	Tue Jan 15 10:55:26 2019 -0800
@@ -183,7 +183,8 @@
 
       # Additional warnings that are not activated by -Wall and -Wextra
       WARNINGS_ENABLE_ADDITIONAL="-Wpointer-arith -Wsign-compare \
-          -Wunused-function -Wundef -Wunused-value -Wreturn-type"
+          -Wunused-function -Wundef -Wunused-value -Wreturn-type \
+          -Wtrampolines"
       WARNINGS_ENABLE_ADDITIONAL_CXX="-Woverloaded-virtual -Wreorder"
       WARNINGS_ENABLE_ALL_CFLAGS="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
       WARNINGS_ENABLE_ALL_CXXFLAGS="$WARNINGS_ENABLE_ALL_CFLAGS $WARNINGS_ENABLE_ADDITIONAL_CXX"
--- a/make/autoconf/spec.gmk.in	Thu Dec 13 11:51:06 2018 -0800
+++ b/make/autoconf/spec.gmk.in	Tue Jan 15 10:55:26 2019 -0800
@@ -761,6 +761,7 @@
 MSVCP_DLL:=@MSVCP_DLL@
 UCRT_DLL_DIR:=@UCRT_DLL_DIR@
 STLPORT_LIB:=@STLPORT_LIB@
+ENABLE_PANDOC:=@ENABLE_PANDOC@
 
 ####################################################
 #
--- a/make/launcher/LauncherCommon.gmk	Thu Dec 13 11:51:06 2018 -0800
+++ b/make/launcher/LauncherCommon.gmk	Tue Jan 15 10:55:26 2019 -0800
@@ -203,7 +203,7 @@
 
   ifneq ($(MAN_FILES_MD), )
     # If we got markdown files, ignore the troff files
-    ifeq ($(PANDOC), )
+    ifeq ($(ENABLE_PANDOC), false)
       $(info Warning: pandoc not found. Not generating man pages)
     else
       # Create dynamic man pages from markdown using pandoc. We need
--- a/make/scripts/pandoc-html-manpage-filter.js	Thu Dec 13 11:51:06 2018 -0800
+++ b/make/scripts/pandoc-html-manpage-filter.js	Tue Jan 15 10:55:26 2019 -0800
@@ -86,7 +86,7 @@
 function change_title(type, value) {
     if (type === 'MetaInlines') {
         if (value[0].t === 'Str') {
-            var match = value[0].c.match(/^([A-Z]+)\([0-9]+\)$/);
+            var match = value[0].c.match(/^([A-Z0-9]+)\([0-9]+\)$/);
             if (match) {
                 return MetaInlines([
                         Str("The"), Space(),
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Tue Jan 15 10:55:26 2019 -0800
@@ -2133,7 +2133,12 @@
 }
 
 const uint Matcher::vector_shift_count_ideal_reg(int size) {
-  return Op_VecX;
+  switch(size) {
+    case  8: return Op_VecD;
+    case 16: return Op_VecX;
+  }
+  ShouldNotReachHere();
+  return 0;
 }
 
 // AES support not yet implemented
@@ -16524,32 +16529,32 @@
 %}
 
 // ------------------------------ Shift ---------------------------------------
-
-instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
+instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
+  predicate(n->as_Vector()->length_in_bytes() == 8);
   match(Set dst (LShiftCntV cnt));
-  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
+  match(Set dst (RShiftCntV cnt));
+  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
+  ins_encode %{
+    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
+  %}
+  ins_pipe(vdup_reg_reg64);
+%}
+
+instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
+  predicate(n->as_Vector()->length_in_bytes() == 16);
+  match(Set dst (LShiftCntV cnt));
+  match(Set dst (RShiftCntV cnt));
+  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
   ins_encode %{
     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
   %}
   ins_pipe(vdup_reg_reg128);
 %}
 
-// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
-instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
-  match(Set dst (RShiftCntV cnt));
-  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
-  ins_encode %{
-    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
-    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
-  %}
-  ins_pipe(vdup_reg_reg128);
-%}
-
-instruct vsll8B(vecD dst, vecD src, vecX shift) %{
+instruct vsll8B(vecD dst, vecD src, vecD shift) %{
   predicate(n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8);
   match(Set dst (LShiftVB src shift));
-  match(Set dst (RShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
   ins_encode %{
@@ -16563,7 +16568,6 @@
 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 16);
   match(Set dst (LShiftVB src shift));
-  match(Set dst (RShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
   ins_encode %{
@@ -16574,29 +16578,93 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
+// Right shifts with vector shift count on aarch64 SIMD are implemented
+// as left shift by negative shift count.
+// There are two cases for vector shift count.
+//
+// Case 1: The vector shift count is from replication.
+//        |            |
+//    LoadVector  RShiftCntV
+//        |       /
+//     RShiftVI
+// Note: In inner loop, multiple neg instructions are used, which can be
+// moved to outer loop and merge into one neg instruction.
+//
+// Case 2: The vector shift count is from loading.
+// This case isn't supported by the middle end yet, but it is supported by
+// panama/vectorIntrinsics (JEP 338: Vector API).
+//        |            |
+//    LoadVector  LoadVector
+//        |       /
+//     RShiftVI
+//
+
+instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (RShiftVB src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift64);
+%}
+
+instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 16);
+  match(Set dst (RShiftVB src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
   predicate(n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8);
   match(Set dst (URShiftVB src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift64);
 %}
 
-instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
+instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 16);
   match(Set dst (URShiftVB src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
@@ -16708,11 +16776,10 @@
   ins_pipe(vshift128_imm);
 %}
 
-instruct vsll4S(vecD dst, vecD src, vecX shift) %{
+instruct vsll4S(vecD dst, vecD src, vecD shift) %{
   predicate(n->as_Vector()->length() == 2 ||
             n->as_Vector()->length() == 4);
   match(Set dst (LShiftVS src shift));
-  match(Set dst (RShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
   ins_encode %{
@@ -16726,7 +16793,6 @@
 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (LShiftVS src shift));
-  match(Set dst (RShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
   ins_encode %{
@@ -16737,29 +16803,72 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
+instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (RShiftVS src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift64);
+%}
+
+instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 8);
+  match(Set dst (RShiftVS src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
   predicate(n->as_Vector()->length() == 2 ||
             n->as_Vector()->length() == 4);
   match(Set dst (URShiftVS src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift64);
 %}
 
-instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
+instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (URShiftVS src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
@@ -16871,10 +16980,9 @@
   ins_pipe(vshift128_imm);
 %}
 
-instruct vsll2I(vecD dst, vecD src, vecX shift) %{
+instruct vsll2I(vecD dst, vecD src, vecD shift) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (LShiftVI src shift));
-  match(Set dst (RShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
   ins_encode %{
@@ -16888,7 +16996,6 @@
 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (LShiftVI src shift));
-  match(Set dst (RShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
   ins_encode %{
@@ -16899,28 +17006,70 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
+instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (RShiftVI src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift64);
+%}
+
+instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 4);
+  match(Set dst (RShiftVI src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (URShiftVI src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift64);
 %}
 
-instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
+instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (URShiftVI src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
@@ -17006,7 +17155,6 @@
 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (LShiftVL src shift));
-  match(Set dst (RShiftVL src shift));
   ins_cost(INSN_COST);
   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
   ins_encode %{
@@ -17017,15 +17165,36 @@
   ins_pipe(vshift128);
 %}
 
-instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
+instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (RShiftVL src shift));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
+    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(vshift128);
+%}
+
+instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
   predicate(n->as_Vector()->length() == 2);
   match(Set dst (URShiftVL src shift));
   ins_cost(INSN_COST);
-  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
-  ins_encode %{
+  effect(TEMP tmp);
+  format %{ "negr  $tmp,$shift\t"
+            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
+  ins_encode %{
+    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
+            as_FloatRegister($shift$$reg));
     __ ushl(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg),
-            as_FloatRegister($shift$$reg));
+            as_FloatRegister($tmp$$reg));
   %}
   ins_pipe(vshift128);
 %}
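
Note on the aarch64.ad changes above: per the new comment block, AArch64 SSHL/USHL shift left for a positive per-lane count and right for a negative one, so the vsra/vsrl rules negate the shift vector into a TEMP register and reuse the left-shift instruction. A simplified scalar model of the per-lane semantics (the real instructions take the count from the bottom byte of each lane; signed right shift is assumed arithmetic, as on mainstream compilers):

#include <cassert>
#include <cstdint>

// Per-lane model: a positive count shifts left, a negative count shifts right.
int8_t sshl_lane(int8_t x, int8_t count) {            // signed lanes
  return count >= 0 ? int8_t(x << count) : int8_t(x >> -count);
}
uint8_t ushl_lane(uint8_t x, int8_t count) {          // unsigned lanes
  return count >= 0 ? uint8_t(x << count) : uint8_t(x >> -count);
}

int main() {
  assert(sshl_lane(-32, -2) == -8);     // RShiftVB lane: -32 >> 2 (arithmetic)
  assert(ushl_lane(0x80, -3) == 0x10);  // URShiftVB lane: 0x80 >> 3 (logical)
}
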
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -4896,7 +4896,7 @@
 
   // A very short string
   cmpw(cnt2, minCharsInWord);
-  br(Assembler::LT, SHORT_STRING);
+  br(Assembler::LE, SHORT_STRING);
 
   // Compare longwords
   // load first parts of strings and finish initialization while loading
@@ -4920,8 +4920,7 @@
       ldr(tmp2, Address(str2));
       cmp(cnt2, STUB_THRESHOLD);
       br(GE, STUB);
-      subsw(cnt2, cnt2, 4);
-      br(EQ, TAIL_CHECK);
+      subw(cnt2, cnt2, 4);
       eor(vtmpZ, T16B, vtmpZ, vtmpZ);
       lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
       lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
@@ -4937,8 +4936,7 @@
       ldrs(vtmp, Address(str2));
       cmp(cnt2, STUB_THRESHOLD);
       br(GE, STUB);
-      subsw(cnt2, cnt2, 4);
-      br(EQ, TAIL_CHECK);
+      subw(cnt2, cnt2, 4);
       lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
       eor(vtmpZ, T16B, vtmpZ, vtmpZ);
       lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
@@ -5650,12 +5648,12 @@
           orr(v5, T16B, Vtmp3, Vtmp4);
           uzp1(Vtmp1, T16B, Vtmp1, Vtmp2);
           uzp1(Vtmp3, T16B, Vtmp3, Vtmp4);
-          stpq(Vtmp1, Vtmp3, dst);
           uzp2(v5, T16B, v4, v5); // high bytes
           umov(tmp2, v5, D, 1);
           fmovd(tmp1, v5);
           orr(tmp1, tmp1, tmp2);
           cbnz(tmp1, LOOP_8);
+          stpq(Vtmp1, Vtmp3, dst);
           sub(len, len, 32);
           add(dst, dst, 32);
           add(src, src, 64);
@@ -5673,7 +5671,6 @@
       prfm(Address(src, SoftwarePrefetchHintDistance));
       uzp1(v4, T16B, Vtmp1, Vtmp2);
       uzp1(v5, T16B, Vtmp3, Vtmp4);
-      stpq(v4, v5, dst);
       orr(Vtmp1, T16B, Vtmp1, Vtmp2);
       orr(Vtmp3, T16B, Vtmp3, Vtmp4);
       uzp2(Vtmp1, T16B, Vtmp1, Vtmp3); // high bytes
@@ -5681,6 +5678,7 @@
       fmovd(tmp1, Vtmp1);
       orr(tmp1, tmp1, tmp2);
       cbnz(tmp1, LOOP_8);
+      stpq(v4, v5, dst);
       sub(len, len, 32);
       add(dst, dst, 32);
       add(src, src, 64);
@@ -5695,9 +5693,9 @@
       ld1(Vtmp1, T8H, src);
       uzp1(Vtmp2, T16B, Vtmp1, Vtmp1); // low bytes
       uzp2(Vtmp3, T16B, Vtmp1, Vtmp1); // high bytes
-      strd(Vtmp2, dst);
       fmovd(tmp1, Vtmp3);
       cbnz(tmp1, NEXT_1);
+      strd(Vtmp2, dst);
 
       sub(len, len, 8);
       add(dst, dst, 8);
@@ -5710,9 +5708,9 @@
     cbz(len, DONE);
     BIND(NEXT_1);
       ldrh(tmp1, Address(post(src, 2)));
-      strb(tmp1, Address(post(dst, 1)));
       tst(tmp1, 0xff00);
       br(NE, SET_RESULT);
+      strb(tmp1, Address(post(dst, 1)));
       subs(len, len, 1);
       br(GT, NEXT_1);
 
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -105,8 +105,8 @@
     // compiled code in threads for which the event is enabled.  Check here for
     // interp_only_mode if these events CAN be enabled.
 
-    __ ldrb(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
-    __ cbnz(rscratch1, run_compiled_code);
+    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
+    __ cbzw(rscratch1, run_compiled_code);
     __ ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
     __ br(rscratch1);
     __ BIND(run_compiled_code);
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1886,6 +1886,7 @@
   __ restore_locals();
   __ restore_constant_pool_cache();
   __ get_method(rmethod);
+  __ get_dispatch();
 
   // The method data pointer was incremented already during
   // call profiling. We have to restore the mdp for the current bcp.
--- a/src/hotspot/cpu/arm/arm.ad	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/arm/arm.ad	Tue Jan 15 10:55:26 2019 -0800
@@ -8945,9 +8945,10 @@
 instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch )
 %{
   match(Set pcc (FastLock object box));
+  predicate(!(UseBiasedLocking && !UseOptoBiasInlining));
 
   effect(TEMP scratch, TEMP scratch2);
-  ins_cost(100);
+  ins_cost(DEFAULT_COST*3);
 
   format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2" %}
   ins_encode %{
@@ -8956,6 +8957,21 @@
   ins_pipe(long_memory_op);
 %}
 
+instruct cmpFastLock_noBiasInline(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2,
+                                  iRegP scratch, iRegP scratch3) %{
+  match(Set pcc (FastLock object box));
+  predicate(UseBiasedLocking && !UseOptoBiasInlining);
+
+  effect(TEMP scratch, TEMP scratch2, TEMP scratch3);
+  ins_cost(DEFAULT_COST*5);
+
+  format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2, $scratch3" %}
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register, $scratch3$$Register);
+  %}
+  ins_pipe(long_memory_op);
+%}
+
 
 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch ) %{
   match(Set pcc (FastUnlock object box));
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1971,7 +1971,7 @@
 
 
 #ifdef COMPILER2
-void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2)
+void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2, Register scratch3)
 {
   assert(VM_Version::supports_ldrex(), "unsupported, yet?");
 
@@ -1985,11 +1985,13 @@
   Label fast_lock, done;
 
   if (UseBiasedLocking && !UseOptoBiasInlining) {
-    Label failed;
-    biased_locking_enter(Roop, Rmark, Rscratch, false, noreg, done, failed);
-    bind(failed);
+    assert(scratch3 != noreg, "need extra temporary for -XX:-UseOptoBiasInlining");
+    biased_locking_enter(Roop, Rmark, Rscratch, false, scratch3, done, done);
+    // Fall through if the lock is not biased; otherwise branch to done
   }
 
+  // Invariant: Rmark loaded below does not contain biased lock pattern
+
   ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
   tst(Rmark, markOopDesc::unlocked_value);
   b(fast_lock, ne);
@@ -2016,6 +2018,9 @@
 
   bind(done);
 
+  // At this point flags are set as follows:
+  //  EQ -> Success
+  //  NE -> Failure, branch to slow path
 }
 
 void MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2)
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -371,10 +371,10 @@
   // lock_reg and obj_reg must be loaded up with the appropriate values.
   // swap_reg must be supplied.
   // tmp_reg must be supplied.
-  // Optional slow case is for implementations (interpreter and C1) which branch to
-  // slow case directly. If slow_case is NULL, then leaves condition
-  // codes set (for C2's Fast_Lock node) and jumps to done label.
-  // Falls through for the fast locking attempt.
+  // Done label is branched to with condition code EQ set if the lock is
+  // biased and we acquired it. Slow case label is branched to with
+  // condition code NE set if the lock is biased but we failed to acquire
+  // it. Otherwise fall through.
   // Returns offset of first potentially-faulting instruction for null
   // check info (currently consumed only by C1). If
   // swap_reg_contains_mark is true then returns -1 as it is assumed
@@ -1073,7 +1073,7 @@
   void restore_default_fp_mode();
 
 #ifdef COMPILER2
-  void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
+  void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3 = noreg);
   void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
 #endif
 
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -269,7 +269,7 @@
       // when called via a c2i.
 
       // Pass initial_caller_sp to framemanager.
-      __ mr(R21_tmp1, R1_SP);
+      __ mr(R21_sender_SP, R1_SP);
 
       // Do a light-weight C-call here, r_new_arg_entry holds the address
       // of the interpreter entry point (frame manager or native entry)
--- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -532,14 +532,8 @@
   // these parameters the pre-barrier does not generate
   // the load of the previous value.
 
-  // Restore caller sp for c2i case.
-#ifdef ASSERT
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x544);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
+  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
 
   __ blr();
 
@@ -835,8 +829,13 @@
   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
   __ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
   __ mtctr(Rscratch1);
-  // Restore caller_sp.
+  // Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
 #ifdef ASSERT
+  Label frame_not_shrunk;
+  __ cmpld(CCR0, R1_SP, R21_sender_SP);
+  __ ble(CCR0, frame_not_shrunk);
+  __ stop("frame shrunk", 0x546);
+  __ bind(frame_not_shrunk);
   __ ld(Rscratch1, 0, R1_SP);
   __ ld(R0, 0, R21_sender_SP);
   __ cmpd(CCR0, R0, Rscratch1);
@@ -1155,15 +1154,6 @@
     }
   }
 
-  // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
   if (use_instruction) {
     switch (kind) {
       case Interpreter::java_lang_math_sqrt: __ fsqrt(F1_RET, F1);          break;
@@ -1188,6 +1178,8 @@
     __ restore_LR_CR(R0);
   }
 
+  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
+  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
   __ blr();
 
   __ flush();
@@ -1843,8 +1835,8 @@
     StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
     __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);
 
-    // Restore caller sp for c2i case and return.
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+    // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
+    __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
     __ blr();
 
     // Generate a vanilla native entry as the slow path.
@@ -1931,8 +1923,8 @@
     // code compactness.
     __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);
 
-    // Restore caller sp for c2i case and return.
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+    // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
+    __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
     __ blr();
 
     // Generate a vanilla native entry as the slow path.
@@ -2019,8 +2011,8 @@
     // code compactness.
     __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);
 
-    // Restore caller sp for c2i case and return.
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+    // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
+    __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
     __ blr();
 
     BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -649,7 +649,7 @@
 
     case T_FLOAT: {
       if (dest->is_single_xmm()) {
-        if (LP64_ONLY(UseAVX < 2 &&) c->is_zero_float()) {
+        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
           __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
         } else {
           __ movflt(dest->as_xmm_float_reg(),
@@ -671,7 +671,7 @@
 
     case T_DOUBLE: {
       if (dest->is_double_xmm()) {
-        if (LP64_ONLY(UseAVX < 2 &&) c->is_zero_double()) {
+        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
           __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
         } else {
           __ movdbl(dest->as_xmm_double_reg(),
--- a/src/hotspot/cpu/x86/x86.ad	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/x86/x86.ad	Tue Jan 15 10:55:26 2019 -0800
@@ -2924,11 +2924,11 @@
   match(Set dst src);
   format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
   ins_encode %{
-    if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
-      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
-    } else {
+    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
       int vector_len = 2;
       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+    } else {
+      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
     }
   %}
   ins_pipe( fpu_reg_reg );
@@ -2939,11 +2939,11 @@
   match(Set dst src);
   format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
   ins_encode %{
-    if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
-      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
-    } else {
+    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
       int vector_len = 2;
       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+    } else {
+      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
     }
   %}
   ins_pipe( fpu_reg_reg );
@@ -2966,11 +2966,11 @@
   match(Set dst src);
   format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
   ins_encode %{
-    if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
-      __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
-    } else {
+    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
       int vector_len = 2;
       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+    } else {
+      __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
     }
   %}
   ins_pipe( fpu_reg_reg );
@@ -2981,11 +2981,11 @@
   match(Set dst src);
   format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
   ins_encode %{
-    if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
-      __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
-    } else {
+    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
       int vector_len = 2;
       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+    } else {
+      __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
     }
   %}
   ins_pipe( fpu_reg_reg );
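
Note on the x86.ad changes above: the rewritten conditions matter for CPUs with UseAVX == 2 and no AVX-512. The old test (UseAVX < 2 || supports_avx512vl()) fell through to the EVEX-encoded evmovdquq on such machines, which cannot execute it; the new form selects the EVEX encoding only when AVX-512 is present without AVX512VL. A small sketch of the corrected predicate:

#include <cassert>

// EVEX-only evmovdquq is selected exactly when AVX-512 is available
// (UseAVX > 2) but AVX512VL is not; every other machine takes movdqu.
bool use_evex_move(int use_avx, bool avx512vl) {
  return use_avx > 2 && !avx512vl;
}

int main() {
  assert(!use_evex_move(2, false)); // AVX2-only CPU: EVEX would be illegal
  assert( use_evex_move(3, false)); // AVX-512 without VL: EVEX needed
  assert(!use_evex_move(3, true));  // AVX-512 with VL: movdqu suffices
}
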
--- a/src/hotspot/cpu/x86/x86_32.ad	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/x86/x86_32.ad	Tue Jan 15 10:55:26 2019 -0800
@@ -7760,9 +7760,9 @@
   match(Set dst (MulAddS2I (Binary dst src1) (Binary src2 src3)));
   effect(KILL cr, KILL src2);
 
-  expand %{ mulI_rReg(dst, src1, cr);
-           mulI_rReg(src2, src3, cr);
-           addI_rReg(dst, src2, cr); %}
+  expand %{ mulI_eReg(dst, src1, cr);
+           mulI_eReg(src2, src3, cr);
+           addI_eReg(dst, src2, cr); %}
 %}
 
 // Multiply Register Int to Long
--- a/src/hotspot/cpu/x86/x86_64.ad	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/cpu/x86/x86_64.ad	Tue Jan 15 10:55:26 2019 -0800
@@ -4265,132 +4265,196 @@
 
 // Operands for bound floating pointer register arguments
 operand rxmm0() %{
-  constraint(ALLOC_IN_RC(xmm0_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX<= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm0_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm1() %{
-  constraint(ALLOC_IN_RC(xmm1_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm1_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm2() %{
-  constraint(ALLOC_IN_RC(xmm2_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm2_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm3() %{
-  constraint(ALLOC_IN_RC(xmm3_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm3_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm4() %{
-  constraint(ALLOC_IN_RC(xmm4_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm4_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm5() %{
-  constraint(ALLOC_IN_RC(xmm5_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm5_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm6() %{
-  constraint(ALLOC_IN_RC(xmm6_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm6_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm7() %{
-  constraint(ALLOC_IN_RC(xmm7_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm7_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm8() %{
-  constraint(ALLOC_IN_RC(xmm8_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm8_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm9() %{
-  constraint(ALLOC_IN_RC(xmm9_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm9_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm10() %{
-  constraint(ALLOC_IN_RC(xmm10_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm10_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm11() %{
-  constraint(ALLOC_IN_RC(xmm11_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm11_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm12() %{
-  constraint(ALLOC_IN_RC(xmm12_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm12_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm13() %{
-  constraint(ALLOC_IN_RC(xmm13_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm13_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm14() %{
-  constraint(ALLOC_IN_RC(xmm14_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm14_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm15() %{
-  constraint(ALLOC_IN_RC(xmm15_reg));  match(VecX);
-  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm15_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm16() %{
-  constraint(ALLOC_IN_RC(xmm16_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm16_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm17() %{
-  constraint(ALLOC_IN_RC(xmm17_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm17_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm18() %{
-  constraint(ALLOC_IN_RC(xmm18_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm18_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm19() %{
-  constraint(ALLOC_IN_RC(xmm19_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm19_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm20() %{
-  constraint(ALLOC_IN_RC(xmm20_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm20_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm21() %{
-  constraint(ALLOC_IN_RC(xmm21_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm21_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm22() %{
-  constraint(ALLOC_IN_RC(xmm22_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm22_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm23() %{
-  constraint(ALLOC_IN_RC(xmm23_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm23_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm24() %{
-  constraint(ALLOC_IN_RC(xmm24_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm24_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm25() %{
-  constraint(ALLOC_IN_RC(xmm25_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm25_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm26() %{
-  constraint(ALLOC_IN_RC(xmm26_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm26_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm27() %{
-  constraint(ALLOC_IN_RC(xmm27_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm27_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm28() %{
-  constraint(ALLOC_IN_RC(xmm28_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm28_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm29() %{
-  constraint(ALLOC_IN_RC(xmm29_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm29_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm30() %{
-  constraint(ALLOC_IN_RC(xmm30_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm30_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 operand rxmm31() %{
-  constraint(ALLOC_IN_RC(xmm31_reg));  match(VecX);
-  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+  constraint(ALLOC_IN_RC(xmm31_reg));
+  match(VecX);
+  format%{%}
+  interface(REG_INTER);
 %}
 
 //----------OPERAND CLASSES----------------------------------------------------
@@ -12651,33 +12715,6 @@
 // Execute ZGC load barrier (strong) slow path
 //
 
-// When running without XMM regs
-instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
-
-  match(Set dst (LoadBarrierSlowReg mem));
-  predicate(MaxVectorSize < 16);
-
-  effect(DEF dst, KILL cr);
-
-  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d, $mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
 // For XMM and YMM enabled processors
 instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
                                      rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
@@ -12686,7 +12723,7 @@
                                      rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 
   match(Set dst (LoadBarrierSlowReg mem));
-  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+  predicate(UseAVX <= 2);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
@@ -12694,7 +12731,7 @@
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+  format %{"LoadBarrierSlowRegXmmAndYmm $dst, $mem" %}
   ins_encode %{
 #if INCLUDE_ZGC
     Register d = $dst$$Register;
@@ -12725,7 +12762,7 @@
                                rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
   match(Set dst (LoadBarrierSlowReg mem));
-  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+  predicate(UseAVX == 3);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
@@ -12760,33 +12797,6 @@
 // Execute ZGC load barrier (weak) slow path
 //
 
-// When running without XMM regs
-instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
-
-  match(Set dst (LoadBarrierSlowReg mem));
-  predicate(MaxVectorSize < 16);
-
-  effect(DEF dst, KILL cr);
-
-  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d, $mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
 // For XMM and YMM enabled processors
 instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
                                          rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
@@ -12795,7 +12805,7 @@
                                          rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 
   match(Set dst (LoadBarrierWeakSlowReg mem));
-  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+  predicate(UseAVX <= 2);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
@@ -12803,7 +12813,7 @@
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+  format %{"LoadBarrierWeakSlowRegXmmAndYmm $dst, $mem" %}
   ins_encode %{
 #if INCLUDE_ZGC
     Register d = $dst$$Register;
@@ -12834,7 +12844,7 @@
                                    rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
   match(Set dst (LoadBarrierWeakSlowReg mem));
-  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+  predicate(UseAVX == 3);
 
   effect(DEF dst, KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
--- a/src/hotspot/os/linux/os_linux.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/os/linux/os_linux.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -5073,7 +5073,7 @@
   // initialize thread priority policy
   prio_init();
 
-  if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
+  if (!FLAG_IS_DEFAULT(AllocateHeapAt) || !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
     set_coredump_filter(DAX_SHARED_BIT);
   }
 
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -370,7 +370,6 @@
       if (thread->on_local_stack(addr)) {
         // stack overflow
         if (thread->in_stack_yellow_reserved_zone(addr)) {
-          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
             if (thread->in_stack_reserved_zone(addr)) {
               frame fr;
@@ -392,9 +391,11 @@
             }
             // Throw a stack overflow exception.  Guard pages will be reenabled
             // while unwinding the stack.
+            thread->disable_stack_yellow_reserved_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code.  Return and try to finish.
+            thread->disable_stack_yellow_reserved_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zArguments_linux_x86.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArguments.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "utilities/debug.hpp"
+
+void ZArguments::initialize_platform() {
+#ifdef COMPILER2
+  // The C2 barrier slow path expects vector registers to be at least
+  // 16 bytes wide, which is the minimum width available on all
+  // x86-64 systems. However, the user could have specified a lower
+  // number on the command-line, in which case we print a warning
+  // and raise it to 16.
+  if (MaxVectorSize < 16) {
+    warning("ZGC requires MaxVectorSize to be at least 16");
+    FLAG_SET_DEFAULT(MaxVectorSize, 16);
+  }
+#endif
+}
--- a/src/hotspot/share/ci/ciEnv.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/ci/ciEnv.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -46,6 +46,7 @@
 
   friend class CompileBroker;
   friend class Dependencies;  // for get_object, during logging
+  friend class PrepareExtraDataClosure;
 
 private:
   Arena*           _arena;       // Alias for _ciEnv_arena except in init_shared_objects()
@@ -188,6 +189,10 @@
     }
   }
 
+  ciMetadata* cached_metadata(Metadata* o) {
+    return _factory->cached_metadata(o);
+  }
+
   ciInstance* get_instance(oop o) {
     if (o == NULL) return NULL;
     return get_object(o)->as_instance();
--- a/src/hotspot/share/ci/ciMethodData.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/ci/ciMethodData.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -78,10 +78,81 @@
   _parameters = NULL;
 }
 
+// Check for entries that reference an unloaded method
+class PrepareExtraDataClosure : public CleanExtraDataClosure {
+  MethodData*            _mdo;
+  uint64_t               _safepoint_counter;
+  GrowableArray<Method*> _uncached_methods;
+
+public:
+  PrepareExtraDataClosure(MethodData* mdo)
+    : _mdo(mdo),
+      _safepoint_counter(SafepointSynchronize::safepoint_counter()),
+      _uncached_methods()
+  { }
+
+  bool is_live(Method* m) {
+    if (!m->method_holder()->is_loader_alive()) {
+      return false;
+    }
+    if (CURRENT_ENV->cached_metadata(m) == NULL) {
+      // Uncached entries need to be pre-populated.
+      _uncached_methods.append(m);
+    }
+    return true;
+  }
+
+  bool has_safepointed() {
+    return SafepointSynchronize::safepoint_counter() != _safepoint_counter;
+  }
+
+  bool finish() {
+    if (_uncached_methods.length() == 0) {
+      // Preparation finished iff all Methods* were already cached.
+      return true;
+    }
+    // Holding locks through safepoints is bad practice.
+    MutexUnlocker mu(_mdo->extra_data_lock());
+    for (int i = 0; i < _uncached_methods.length(); ++i) {
+      if (has_safepointed()) {
+        // The metadata in the growable array might contain stale
+        // entries after a safepoint.
+        return false;
+      }
+      Method* method = _uncached_methods.at(i);
+      // Populating ciEnv caches may cause safepoints due
+      // to taking the Compile_lock with safepoint checks.
+      (void)CURRENT_ENV->get_method(method);
+    }
+    return false;
+  }
+};
+
+void ciMethodData::prepare_metadata() {
+  MethodData* mdo = get_MethodData();
+
+  for (;;) {
+    ResourceMark rm;
+    PrepareExtraDataClosure cl(mdo);
+    mdo->clean_extra_data(&cl);
+    if (cl.finish()) {
+      // When encountering uncached metadata, the Compile_lock might be
+      // acquired when creating ciMetadata handles, causing safepoints,
+      // which require a new round of preparation to clean out potentially
+      // new unloading metadata.
+      return;
+    }
+  }
+}
+
 void ciMethodData::load_extra_data() {
   MethodData* mdo = get_MethodData();
-
   MutexLocker ml(mdo->extra_data_lock());
+  // Deferred metadata cleaning due to concurrent class unloading.
+  prepare_metadata();
+  // After metadata preparation, there is no stale metadata,
+  // and no safepoints can introduce more stale metadata.
+  NoSafepointVerifier no_safepoint;
 
   // speculative trap entries also hold a pointer to a Method so need to be translated
   DataLayout* dp_src  = mdo->extra_data_base();
@@ -94,22 +165,21 @@
     // New traps in the MDO may have been added since we copied the
     // data (concurrent deoptimizations before we acquired
     // extra_data_lock above) or can be removed (a safepoint may occur
-    // in the translate_from call below) as we translate the copy:
+    // in the prepare_metadata call above) as we translate the copy:
     // update the copy as we go.
     int tag = dp_src->tag();
-    if (tag != DataLayout::arg_info_data_tag) {
-      memcpy(dp_dst, dp_src, ((intptr_t)MethodData::next_extra(dp_src)) - ((intptr_t)dp_src));
+    size_t entry_size = DataLayout::header_size_in_bytes();
+    if (tag != DataLayout::no_tag) {
+      ProfileData* src_data = dp_src->data_in();
+      entry_size = src_data->size_in_bytes();
     }
+    memcpy(dp_dst, dp_src, entry_size);
 
     switch(tag) {
     case DataLayout::speculative_trap_data_tag: {
       ciSpeculativeTrapData data_dst(dp_dst);
       SpeculativeTrapData   data_src(dp_src);
-
-      { // During translation a safepoint can happen or VM lock can be taken (e.g., Compile_lock).
-        MutexUnlocker ml(mdo->extra_data_lock());
-        data_dst.translate_from(&data_src);
-      }
+      data_dst.translate_from(&data_src);
       break;
     }
     case DataLayout::bit_data_tag:
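
Note on the ciMethodData changes above: prepare_metadata() can only trust a scan that no safepoint interrupted, which is what PrepareExtraDataClosure's safepoint-counter check enforces. A minimal sketch of that pattern with assumed names (the real query is SafepointSynchronize::safepoint_counter(); the stub here never advances):

#include <cstdint>

// Stand-in for the VM's safepoint counter (assumed, see above).
static uint64_t g_safepoint_counter = 0;
static uint64_t current_safepoint_counter() { return g_safepoint_counter; }

struct SafepointSnapshot {
  uint64_t counter = current_safepoint_counter();
  bool has_safepointed() const {
    return current_safepoint_counter() != counter;
  }
};

// Retry until a full pass completes with no intervening safepoint;
// any safepoint may have unloaded classes, invalidating the pass.
void clean_until_stable() {
  for (;;) {
    SafepointSnapshot snap;
    // ... scan extra data; populating caches may drop the lock
    //     and safepoint, like PrepareExtraDataClosure::finish() ...
    if (!snap.has_safepointed()) {
      return; // scan results are still valid
    }
  }
}

int main() { clean_until_stable(); }
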
--- a/src/hotspot/share/ci/ciMethodData.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/ci/ciMethodData.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -475,6 +475,7 @@
     return (address) _data;
   }
 
+  void prepare_metadata();
   void load_extra_data();
   ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots);
 
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -266,6 +266,24 @@
 }
 
 // ------------------------------------------------------------------
+// ciObjectFactory::cached_metadata
+//
+// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has
+// already been created, it is returned. Otherwise, null is returned.
+ciMetadata* ciObjectFactory::cached_metadata(Metadata* key) {
+  ASSERT_IN_VM;
+
+  bool found = false;
+  int index = _ci_metadata->find_sorted<Metadata*, ciObjectFactory::metadata_compare>(key, found);
+
+  if (!found) {
+    return NULL;
+  }
+  return _ci_metadata->at(index)->as_metadata();
+}
+
+
+// ------------------------------------------------------------------
 // ciObjectFactory::get_metadata
 //
 // Get the ciMetadata corresponding to some Metadata. If the ciMetadata has
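
Note on the hunk above: cached_metadata() is a non-creating probe — it binary-searches the sorted _ci_metadata array and returns NULL on a miss, unlike get_metadata(), which would instantiate the handle. A minimal sketch of that contract over a sorted cache (illustrative types, not the ciObjectFactory API):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Entry { std::uintptr_t key; int value; };

// Probe a sorted cache; return null when absent, never creating an entry.
const Entry* cached_lookup(const std::vector<Entry>& sorted,
                           std::uintptr_t key) {
  auto it = std::lower_bound(
      sorted.begin(), sorted.end(), key,
      [](const Entry& e, std::uintptr_t k) { return e.key < k; });
  return (it != sorted.end() && it->key == key) ? &*it : nullptr;
}

int main() {
  std::vector<Entry> cache = {{1, 10}, {5, 50}};
  assert(cached_lookup(cache, 5) != nullptr);
  assert(cached_lookup(cache, 3) == nullptr); // miss: null, nothing created
}
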
--- a/src/hotspot/share/ci/ciObjectFactory.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/ci/ciObjectFactory.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -100,6 +100,7 @@
   // Get the ciObject corresponding to some oop.
   ciObject* get(oop key);
   ciMetadata* get_metadata(Metadata* key);
+  ciMetadata* cached_metadata(Metadata* key);
   ciSymbol* get_symbol(Symbol* key);
 
   // Get the ciSymbol corresponding to one of the vmSymbols.
--- a/src/hotspot/share/classfile/dictionary.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/classfile/dictionary.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -170,7 +170,7 @@
     for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
                                 current != NULL;
                                 current = current->_next) {
-      guarantee(oopDesc::is_oop(current->_pd_cache->object_no_keepalive()), "Invalid oop");
+      guarantee(oopDesc::is_oop_or_null(current->_pd_cache->object_no_keepalive()), "Invalid oop");
     }
   }
 
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1795,14 +1795,17 @@
   assert(k != NULL, "just checking");
   assert_locked_or_safepoint(Compile_lock);
 
-  // Link into hierachy. Make sure the vtables are initialized before linking into
+  k->set_init_state(InstanceKlass::loaded);
+  // Make sure the init_state store is already done.
+  // The compiler reads the hierarchy outside of the Compile_lock.
+  // Access ordering is used to add to the hierarchy.
+
+  // Link into hierarchy.
   k->append_to_sibling_list();                    // add to superklass/sibling list
   k->process_interfaces(THREAD);                  // handle all "implements" declarations
-  k->set_init_state(InstanceKlass::loaded);
+
   // Now flush all code that depended on old class hierarchy.
   // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
-  // Also, first reinitialize vtable because it may have gotten out of synch
-  // while the new class wasn't connected to the class hierarchy.
   CodeCache::flush_dependents_on(k);
 }
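
The reordering above depends on memory ordering: init_state must be stored before the class becomes reachable through the sibling list, because compiler threads read the hierarchy without taking Compile_lock. A standalone sketch of that publish pattern (plain C++11 atomics rather than HotSpot's ordered accessors; names are illustrative):

    #include <atomic>

    struct Klass {
        std::atomic<int>    init_state{0};      // 0 = allocated, 1 = loaded
        std::atomic<Klass*> next_sibling{nullptr};
    };

    // Publish k on a lock-free sibling list so any reader that finds k
    // (via an acquire load of the list head) also sees init_state == 1.
    void publish(Klass* k, std::atomic<Klass*>& sibling_list_head) {
        k->init_state.store(1, std::memory_order_relaxed);
        Klass* head = sibling_list_head.load(std::memory_order_relaxed);
        do {
            k->next_sibling.store(head, std::memory_order_relaxed);
        } while (!sibling_list_head.compare_exchange_weak(head, k,
                                                          std::memory_order_release));
    }
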
 
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -503,26 +503,33 @@
 Handle SystemDictionaryShared::get_shared_protection_domain(Handle class_loader,
                                                             ModuleEntry* mod, TRAPS) {
   ClassLoaderData *loader_data = mod->loader_data();
-  Handle protection_domain;
   if (mod->shared_protection_domain() == NULL) {
     Symbol* location = mod->location();
     if (location != NULL) {
-      Handle url_string = java_lang_String::create_from_symbol(
-                                 location, CHECK_(protection_domain));
+      Handle location_string = java_lang_String::create_from_symbol(
+                                     location, CHECK_NH);
+      Handle url;
       JavaValue result(T_OBJECT);
-      Klass* classLoaders_klass =
-        SystemDictionary::jdk_internal_loader_ClassLoaders_klass();
-      JavaCalls::call_static(&result, classLoaders_klass, vmSymbols::toFileURL_name(),
+      if (location->starts_with("jrt:/")) {
+        url = JavaCalls::construct_new_instance(SystemDictionary::URL_klass(),
+                                                vmSymbols::string_void_signature(),
+                                                location_string, CHECK_NH);
+      } else {
+        Klass* classLoaders_klass =
+          SystemDictionary::jdk_internal_loader_ClassLoaders_klass();
+        JavaCalls::call_static(&result, classLoaders_klass, vmSymbols::toFileURL_name(),
                                vmSymbols::toFileURL_signature(),
-                               url_string, CHECK_(protection_domain));
-      Handle url = Handle(THREAD, (oop)result.get_jobject());
+                               location_string, CHECK_NH);
+        url = Handle(THREAD, (oop)result.get_jobject());
+      }
 
-      Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
+      Handle pd = get_protection_domain_from_classloader(class_loader, url,
+                                                         CHECK_NH);
       mod->set_shared_protection_domain(loader_data, pd);
     }
   }
 
-  protection_domain = Handle(THREAD, mod->shared_protection_domain());
+  Handle protection_domain(THREAD, mod->shared_protection_domain());
   assert(protection_domain.not_null(), "sanity");
   return protection_domain;
 }
--- a/src/hotspot/share/code/icBuffer.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/code/icBuffer.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -53,29 +53,29 @@
     _refill_remembered(false)
 {
   Thread* thread = Thread::current();
-  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
-  thread->set_missed_ic_stub_refill_mark(this);
+  assert(thread->missed_ic_stub_refill_verifier() == NULL, "nesting not supported");
+  thread->set_missed_ic_stub_refill_verifier(this);
 }
 
 ICRefillVerifier::~ICRefillVerifier() {
   assert(!_refill_requested || _refill_remembered,
          "Forgot to refill IC stubs after failed IC transition");
-  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
+  Thread::current()->set_missed_ic_stub_refill_verifier(NULL);
 }
 
 ICRefillVerifierMark::ICRefillVerifierMark(ICRefillVerifier* verifier) {
   Thread* thread = Thread::current();
-  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
-  thread->set_missed_ic_stub_refill_mark(this);
+  assert(thread->missed_ic_stub_refill_verifier() == NULL, "nesting not supported");
+  thread->set_missed_ic_stub_refill_verifier(verifier);
 }
 
 ICRefillVerifierMark::~ICRefillVerifierMark() {
-  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
+  Thread::current()->set_missed_ic_stub_refill_verifier(NULL);
 }
 
 static ICRefillVerifier* current_ic_refill_verifier() {
   Thread* current = Thread::current();
-  ICRefillVerifier* verifier = reinterpret_cast<ICRefillVerifier*>(current->missed_ic_stub_refill_mark());
+  ICRefillVerifier* verifier = current->missed_ic_stub_refill_verifier();
   assert(verifier != NULL, "need a verifier for safety");
   return verifier;
 }
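
Beyond the rename, the hunk above fixes a real bug: ICRefillVerifierMark used to store this (the mark itself) in the thread's slot, and current_ic_refill_verifier() then reinterpret_cast the mark back as a verifier. Storing the typed verifier pointer removes both the cast and the mismatch. A standalone sketch of the corrected shape (plain C++, not HotSpot code; a thread_local stands in for the Thread field):

    #include <cassert>

    struct ICRefillVerifier;  // opaque in this sketch

    thread_local ICRefillVerifier* g_current_verifier = nullptr;

    struct ICRefillVerifierMark {
        explicit ICRefillVerifierMark(ICRefillVerifier* verifier) {
            assert(g_current_verifier == nullptr && "nesting not supported");
            g_current_verifier = verifier;  // the verifier, not this mark object
        }
        ~ICRefillVerifierMark() { g_current_verifier = nullptr; }
    };

    // Typed accessor: no reinterpret_cast needed anymore.
    ICRefillVerifier* current_ic_refill_verifier() {
        assert(g_current_verifier != nullptr && "need a verifier for safety");
        return g_current_verifier;
    }
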
--- a/src/hotspot/share/code/nmethod.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/code/nmethod.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1159,6 +1159,19 @@
   }
 }
 
+void nmethod::unlink_from_method(bool acquire_lock) {
+  // We need to check if both the _code and _from_compiled_code_entry_point
+  // refer to this nmethod because there is a race in setting these two fields
+  // in Method* as seen in bugid 4947125.
+  // If the vep() points to the zombie nmethod, the memory for the nmethod
+  // could be flushed and the compiler and vtable stubs could still call
+  // through it.
+  if (method() != NULL && (method()->code() == this ||
+                           method()->from_compiled_entry() == verified_entry_point())) {
+    method()->clear_code(acquire_lock);
+  }
+}
+
 /**
  * Common functionality for both make_not_entrant and make_zombie
  */
@@ -1246,17 +1259,7 @@
     JVMCI_ONLY(maybe_invalidate_installed_code());
 
     // Remove nmethod from method.
-    // We need to check if both the _code and _from_compiled_code_entry_point
-    // refer to this nmethod because there is a race in setting these two fields
-    // in Method* as seen in bugid 4947125.
-    // If the vep() points to the zombie nmethod, the memory for the nmethod
-    // could be flushed and the compiler and vtable stubs could still call
-    // through it.
-    if (method() != NULL && (method()->code() == this ||
-                             method()->from_compiled_entry() == verified_entry_point())) {
-      HandleMark hm;
-      method()->clear_code(false /* already owns Patching_lock */);
-    }
+    unlink_from_method(false /* already owns Patching_lock */);
   } // leave critical region under Patching_lock
 
 #ifdef ASSERT
@@ -1283,6 +1286,13 @@
       flush_dependencies(/*delete_immediately*/true);
     }
 
+    // Clear ICStubs to prevent back patching stubs of zombie or flushed
+    // nmethods during the next safepoint (see ICStub::finalize).
+    {
+      CompiledICLocker ml(this);
+      clear_ic_stubs();
+    }
+
     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
     // event and it hasn't already been reported for this nmethod then
     // report it now. The event may have been reported earlier if the GC
@@ -2533,6 +2543,7 @@
         case relocInfo::section_word_type:     return "section_word";
         case relocInfo::poll_type:             return "poll";
         case relocInfo::poll_return_type:      return "poll_return";
+        case relocInfo::trampoline_stub_type:  return "trampoline_stub";
         case relocInfo::type_mask:             return "type_bit_mask";
 
         default:
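
The extraction above gives the duplicated "does the Method still point at this nmethod?" check one name, unlink_from_method(), with a flag for callers that already hold Patching_lock. A standalone model of that shape (plain C++, not HotSpot code; NMethodModel and the std::mutex are illustrative stand-ins):

    #include <mutex>

    struct Method {
        const void* code = nullptr;
        const void* from_compiled_entry = nullptr;
        void clear_code() { code = nullptr; from_compiled_entry = nullptr; }
    };

    struct NMethodModel {
        Method* method = nullptr;
        const void* verified_entry_point = nullptr;
        std::mutex patching_lock;

        // One entry point for the formerly duplicated unlink check;
        // callers that already hold the lock pass acquire_lock = false.
        void unlink_from_method(bool acquire_lock) {
            if (method == nullptr) return;
            if (method->code == static_cast<const void*>(this) ||
                method->from_compiled_entry == verified_entry_point) {
                if (acquire_lock) {
                    std::lock_guard<std::mutex> guard(patching_lock);
                    method->clear_code();
                } else {
                    method->clear_code();  // caller already owns the lock
                }
            }
        }
    };
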
--- a/src/hotspot/share/code/nmethod.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/code/nmethod.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -376,6 +376,8 @@
 
   int   comp_level() const                        { return _comp_level; }
 
+  void unlink_from_method(bool acquire_lock);
+
   // Support for oops in scopes and relocs:
   // Note: index 0 is reserved for null.
   oop   oop_at(int index) const;
--- a/src/hotspot/share/code/scopeDesc.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/code/scopeDesc.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -51,9 +51,9 @@
 }
 
 
-ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
+void ScopeDesc::initialize(const ScopeDesc* parent, int decode_offset) {
   _code          = parent->_code;
-  _decode_offset = parent->_sender_decode_offset;
+  _decode_offset = decode_offset;
   _objects       = parent->_objects;
   _reexecute     = false; //reexecute only applies to the first scope
   _rethrow_exception = false;
@@ -61,6 +61,14 @@
   decode_body();
 }
 
+ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
+  initialize(parent, parent->_sender_decode_offset);
+}
+
+ScopeDesc::ScopeDesc(const ScopeDesc* parent, int decode_offset) {
+  initialize(parent, decode_offset);
+}
+
 
 void ScopeDesc::decode_body() {
   if (decode_offset() == DebugInformationRecorder::serialized_null) {
--- a/src/hotspot/share/code/scopeDesc.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/code/scopeDesc.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -67,6 +67,9 @@
   // avoid a .hpp-.hpp dependency.)
   ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
 
+  // Direct access to scope
+  ScopeDesc* at_offset(int decode_offset) { return new ScopeDesc(this, decode_offset); }
+
   // JVM state
   Method* method()      const { return _method; }
   int          bci()      const { return _bci;    }
@@ -85,12 +88,16 @@
   // Returns where the scope was decoded
   int decode_offset() const { return _decode_offset; }
 
+  int sender_decode_offset() const { return _sender_decode_offset; }
+
   // Tells whether sender() returns NULL
   bool is_top() const;
 
  private:
-  // Alternative constructor
+  void initialize(const ScopeDesc* parent, int decode_offset);
+  // Alternative constructors
   ScopeDesc(const ScopeDesc* parent);
+  ScopeDesc(const ScopeDesc* parent, int decode_offset);
 
   // JVM state
   Method*       _method;
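
The new at_offset()/sender_decode_offset() pair lets a client remember where a scope was decoded and jump back to it directly, instead of re-walking sender() from the top scope each time. A standalone model of offset-addressed scope records (plain C++, not HotSpot code; DebugInfo and ScopeRecord are illustrative):

    #include <map>

    struct ScopeRecord {
        int method_id;              // stand-in for Method*
        int bci;
        int sender_decode_offset;   // 0 means "no sender" in this model
    };

    struct DebugInfo {
        std::map<int, ScopeRecord> records;  // decode offset -> decoded scope

        // Analogue of the new ScopeDesc::at_offset(): direct access by a
        // previously remembered decode offset, no sender() walk needed.
        const ScopeRecord* at_offset(int decode_offset) const {
            auto it = records.find(decode_offset);
            return it == records.end() ? nullptr : &it->second;
        }
    };
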
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -97,7 +97,7 @@
   }
 
   _archive_check_enabled = true;
-  size_t length = Universe::heap()->max_capacity();
+  size_t length = G1CollectedHeap::heap()->max_reserved_capacity();
   _closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                         (HeapWord*)Universe::heap()->base() + length,
                                         HeapRegion::GrainBytes);
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -28,6 +28,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/gcArguments.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -156,5 +157,9 @@
 }
 
 CollectedHeap* G1Arguments::create_heap() {
-  return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
+  if (AllocateOldGenAt != NULL) {
+    return create_heap_with_policy<G1CollectedHeap, G1HeterogeneousCollectorPolicy>();
+  } else {
+    return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
+  }
 }
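
For context: AllocateOldGenAt names a path on an alternative memory device (e.g. an NV-DIMM mount), and only when it is set does create_heap() pick the heterogeneous policy above. An illustrative invocation, assuming the flag is experimental and using a hypothetical mount point:

    java -XX:+UnlockExperimentalVMOptions -XX:AllocateOldGenAt=/mnt/pmem0 -XX:+UseG1GC ...
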
--- a/src/hotspot/share/gc/g1/g1CardCounts.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -63,7 +63,7 @@
 }
 
 void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
-  assert(_g1h->max_capacity() > 0, "initialization order");
+  assert(_g1h->max_reserved_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
   if (G1ConcRSHotCardLimit > 0) {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -161,12 +161,12 @@
 
 // Private methods.
 
-HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
+HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res = _hrm.allocate_free_region(is_old);
+  HeapRegion* res = _hrm->allocate_free_region(type);
 
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
@@ -183,7 +183,7 @@
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
       // In either case allocate_free_region() will check for NULL.
-      res = _hrm.allocate_free_region(is_old);
+      res = _hrm->allocate_free_region(type);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -330,16 +330,16 @@
     // Only one region to allocate, try to use a fast path by directly allocating
     // from the free lists. Do not try to expand here, we will potentially do that
     // later.
-    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
+    HeapRegion* hr = new_region(word_size, HeapRegionType::Humongous, false /* do_expand */);
     if (hr != NULL) {
       first = hr->hrm_index();
     }
   } else {
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
-    first = _hrm.find_contiguous_only_empty(obj_regions);
+    first = _hrm->find_contiguous_only_empty(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     }
   }
 
@@ -347,14 +347,14 @@
     // Policy: We could not find enough regions for the humongous object in the
     // free list. Look through the heap to find a mix of free and uncommitted regions.
     // If so, try expansion.
-    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                     word_size * HeapWordSize);
 
-      _hrm.expand_at(first, obj_regions, workers());
+      _hrm->expand_at(first, obj_regions, workers());
       g1_policy()->record_new_heap_size(num_regions());
 
 #ifdef ASSERT
@@ -365,7 +365,7 @@
         assert(is_on_master_free_list(hr), "sanity");
       }
 #endif
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     } else {
       // Policy: Potentially trigger a defragmentation GC.
     }
@@ -554,7 +554,7 @@
 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   for (size_t i = 0; i < count; i++) {
     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
       return false;
@@ -571,7 +571,7 @@
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
 
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -605,7 +605,7 @@
     // range ended, and adjust the start address so we don't try to allocate
     // the same region again. If the current range is entirely within that
     // region, skip it, just adjusting the recorded top.
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
       start_address = start_region->end();
       if (start_address > last_address) {
@@ -615,12 +615,12 @@
       }
       start_region->set_top(start_address);
       curr_range = MemRegion(start_address, last_address + 1);
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
 
     // Perform the actual region allocation, exiting if it fails.
     // Then note how much new space we have allocated.
-    if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
+    if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
       return false;
     }
     increase_used(word_size * HeapWordSize);
@@ -632,8 +632,8 @@
 
     // Mark each G1 region touched by the range as archive, add it to
     // the old set, and set top.
-    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* curr_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     prev_last_region = last_region;
 
     while (curr_region != NULL) {
@@ -650,7 +650,7 @@
       HeapRegion* next_region;
       if (curr_region != last_region) {
         top = curr_region->end();
-        next_region = _hrm.next_region_in_heap(curr_region);
+        next_region = _hrm->next_region_in_heap(curr_region);
       } else {
         top = last_address + 1;
         next_region = NULL;
@@ -671,7 +671,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord *prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -691,8 +691,8 @@
            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
            p2i(start_address), p2i(prev_last_addr));
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     HeapWord* bottom_address = start_region->bottom();
 
     // Check for a range beginning in the same region in which the
@@ -708,7 +708,7 @@
       guarantee(curr_region->is_archive(),
                 "Expected archive region at index %u", curr_region->hrm_index());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
@@ -757,7 +757,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
   size_t size_used = 0;
@@ -779,8 +779,8 @@
     size_used += ranges[i].byte_size();
     prev_last_addr = last_address;
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
 
     // Check for ranges that start in the same G1 region in which the previous
     // range ended, and adjust the start address so we don't try to free
@@ -791,7 +791,7 @@
       if (start_address > last_address) {
         continue;
       }
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
     prev_last_region = last_region;
 
@@ -806,11 +806,11 @@
       curr_region->set_free();
       curr_region->set_top(curr_region->bottom());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
-      _hrm.shrink_at(curr_index, 1);
+      _hrm->shrink_at(curr_index, 1);
       uncommitted_regions++;
     }
 
@@ -1024,6 +1024,8 @@
   abandon_collection_set(collection_set());
 
   tear_down_region_sets(false /* free_list_only */);
+
+  hrm()->prepare_for_full_collection_start();
 }
 
 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
@@ -1035,6 +1037,8 @@
 }
 
 void G1CollectedHeap::prepare_heap_for_mutators() {
+  hrm()->prepare_for_full_collection_end();
+
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
   MetaspaceUtils::verify_metrics();
@@ -1071,7 +1075,7 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
   // Clear the previous marking bitmap, if needed for bitmap verification.
@@ -1325,7 +1329,7 @@
 
 
   if (expand(expand_bytes, _workers)) {
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                            false /* expect_null_mutator_alloc_region */);
@@ -1350,7 +1354,7 @@
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
   assert(regions_to_expand > 0, "Must expand by at least one region");
 
-  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
+  uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
   if (expand_time_ms != NULL) {
     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
   }
@@ -1365,7 +1369,7 @@
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _hrm.available() >= regions_to_expand) {
+        _hrm->available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
@@ -1380,7 +1384,7 @@
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
-  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
+  uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
 
@@ -1408,7 +1412,7 @@
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
 }
 
@@ -1486,7 +1490,7 @@
   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
   _bot(NULL),
   _listener(),
-  _hrm(),
+  _hrm(NULL),
   _allocator(NULL),
   _verifier(NULL),
   _summary_bytes_used(0),
@@ -1505,7 +1509,7 @@
   _survivor(),
   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
-  _g1_policy(new G1Policy(_gc_timer_stw)),
+  _g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
   _heap_sizing_policy(NULL),
   _collection_set(this, _g1_policy),
   _hot_card_cache(NULL),
@@ -1632,7 +1636,7 @@
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
-  size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
   size_t heap_alignment = collector_policy()->heap_alignment();
 
   // Ensure that the sizes are properly aligned.
@@ -1692,12 +1696,17 @@
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
   size_t page_size = actual_reserved_page_size(heap_rs);
   G1RegionToSpaceMapper* heap_storage =
-    G1RegionToSpaceMapper::create_mapper(g1_rs,
-                                         g1_rs.size(),
-                                         page_size,
-                                         HeapRegion::GrainBytes,
-                                         1,
-                                         mtJavaHeap);
+    G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
+                                              g1_rs.size(),
+                                              page_size,
+                                              HeapRegion::GrainBytes,
+                                              1,
+                                              mtJavaHeap);
+  if (heap_storage == NULL) {
+    vm_shutdown_during_initialization("Could not initialize G1 heap");
+    return JNI_ERR;
+  }
+
   os::trace_page_sizes("Heap",
                        collector_policy()->min_heap_byte_size(),
                        max_byte_size,
@@ -1728,7 +1737,9 @@
   G1RegionToSpaceMapper* next_bitmap_storage =
     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
 
-  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  _hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
+
+  _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   _card_table->initialize(cardtable_storage);
   // Do later initialization work for concurrent refinement.
   _hot_card_cache->initialize(card_counts_storage);
@@ -1743,20 +1754,20 @@
   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
   // Also create a G1 rem set.
   _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
-  _g1_rem_set->initialize(max_capacity(), max_regions());
+  _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
             "too many cards per region");
 
-  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
+  FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
 
   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
 
   {
-    HeapWord* start = _hrm.reserved().start();
-    HeapWord* end = _hrm.reserved().end();
+    HeapWord* start = _hrm->reserved().start();
+    HeapWord* end = _hrm->reserved().end();
     size_t granularity = HeapRegion::GrainBytes;
 
     _in_cset_fast_test.initialize(start, end, granularity);
@@ -1807,7 +1818,7 @@
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
-  HeapRegion* dummy_region = _hrm.get_dummy_region();
+  HeapRegion* dummy_region = _hrm->get_dummy_region();
 
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
@@ -1927,16 +1938,20 @@
   return _collector_policy;
 }
 
+G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
+  return _collector_policy;
+}
+
 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
   return &_soft_ref_policy;
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _hrm.length() * HeapRegion::GrainBytes;
+  return _hrm->length() * HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
-  return _hrm.total_free_bytes();
+  return _hrm->total_free_bytes();
 }
 
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
@@ -2002,6 +2017,18 @@
   }
 }
 
+bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
+  if (g1_policy()->force_upgrade_to_full()) {
+    return true;
+  } else if (should_do_concurrent_full_gc(_gc_cause)) {
+    return false;
+  } else if (has_regions_left_for_allocation()) {
+    return false;
+  } else {
+    return true;
+  }
+}
+
 #ifndef PRODUCT
 void G1CollectedHeap::allocate_dummy_regions() {
   // Let's fill up most of the region
@@ -2152,7 +2179,7 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrm.reserved().contains(p)) {
+  if (_hrm->reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
     // heap_region_containing() should successfully
     // return the containing region.
@@ -2166,7 +2193,7 @@
 #ifdef ASSERT
 bool G1CollectedHeap::is_in_exact(const void* p) const {
   bool contains = reserved_region().contains(p);
-  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
+  bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
   if (contains && available) {
     return true;
   } else {
@@ -2197,18 +2224,18 @@
 }
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
-  _hrm.iterate(cl);
+  _hrm->iterate(cl);
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                                  HeapRegionClaimer *hrclaimer,
                                                                  uint worker_id) const {
-  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
+  _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                                          HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, hrclaimer, 0);
+  _hrm->par_iterate(cl, hrclaimer, 0);
 }
 
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
@@ -2257,7 +2284,11 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _hrm.reserved().byte_size();
+  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
+}
+
+size_t G1CollectedHeap::max_reserved_capacity() const {
+  return _hrm->max_length() * HeapRegion::GrainBytes;
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -2347,8 +2378,8 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(_hrm.reserved().start()),
-            p2i(_hrm.reserved().end()));
+            p2i(_hrm->reserved().start()),
+            p2i(_hrm->reserved().end()));
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = young_regions_count();
@@ -3131,7 +3162,7 @@
     // output from the concurrent mark thread interfering with this
     // logging output either.
 
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
 
     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
@@ -3947,7 +3978,7 @@
                                   bool locked) {
   assert(!hr->is_free(), "the region should not be free");
   assert(!hr->is_empty(), "the region should not be empty");
-  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
+  assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
@@ -3988,7 +4019,7 @@
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _hrm.insert_list_into_free_list(list);
+    _hrm->insert_list_into_free_list(list);
   }
 }
 
@@ -4521,7 +4552,7 @@
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _hrm.remove_all_free_regions();
+  _hrm->remove_all_free_regions();
 }
 
 void G1CollectedHeap::increase_used(size_t bytes) {
@@ -4596,7 +4627,7 @@
     _survivor.clear();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
@@ -4623,7 +4654,7 @@
   bool should_allocate = g1_policy()->should_allocate_mutator_region();
   if (force || should_allocate) {
     HeapRegion* new_alloc_region = new_region(word_size,
-                                              false /* is_old */,
+                                              HeapRegionType::Eden,
                                               false /* do_expand */);
     if (new_alloc_region != NULL) {
       set_region_short_lived_locked(new_alloc_region);
@@ -4667,13 +4698,19 @@
     return NULL;
   }
 
-  const bool is_survivor = dest.is_young();
+  HeapRegionType type;
+  if (dest.is_young()) {
+    type = HeapRegionType::Survivor;
+  } else {
+    type = HeapRegionType::Old;
+  }
 
   HeapRegion* new_alloc_region = new_region(word_size,
-                                            !is_survivor,
+                                            type,
                                             true /* do_expand */);
+
   if (new_alloc_region != NULL) {
-    if (is_survivor) {
+    if (type.is_survivor()) {
       new_alloc_region->set_survivor();
       _survivor.add(new_alloc_region);
       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
@@ -4705,14 +4742,14 @@
 
 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
   bool expanded = false;
-  uint index = _hrm.find_highest_free(&expanded);
+  uint index = _hrm->find_highest_free(&expanded);
 
   if (index != G1_NO_HRM_INDEX) {
     if (expanded) {
       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
                                 HeapRegion::GrainWords * HeapWordSize);
     }
-    _hrm.allocate_free_regions_starting_at(index, 1);
+    _hrm->allocate_free_regions_starting_at(index, 1);
     return region_at(index);
   }
   return NULL;
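
Most of the churn above is _hrm. becoming _hrm->: the region manager is now a heap-allocated polymorphic object chosen at runtime by HeapRegionManager::create_manager() from the collector policy. A standalone sketch of that factory shape (plain C++, not HotSpot code; class names abbreviated from the ones in this change):

    struct Policy {
        virtual bool is_hetero_heap() const { return false; }
        virtual ~Policy() {}
    };
    struct HeteroPolicy : Policy {
        bool is_hetero_heap() const override { return true; }
    };

    struct RegionManager { virtual ~RegionManager() {} };
    struct HeterogeneousRegionManager : RegionManager {};

    // Mirrors HeapRegionManager::create_manager(): the concrete manager
    // type is chosen once, at heap initialization, from the policy.
    RegionManager* create_manager(const Policy& policy) {
        if (policy.is_hetero_heap()) {
            return new HeterogeneousRegionManager();  // dram + nvdimm aware
        }
        return new RegionManager();
    }
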
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -45,6 +45,7 @@
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/heapRegionManager.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
@@ -194,7 +195,7 @@
   G1RegionMappingChangedListener _listener;
 
   // The sequence of all heap regions in the heap.
-  HeapRegionManager _hrm;
+  HeapRegionManager* _hrm;
 
   // Manages all allocations with regions except humongous object allocations.
   G1Allocator* _allocator;
@@ -267,6 +268,9 @@
   // (e) cause == _wb_conc_mark
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
+  // Return true if we should upgrade to a full gc after an incremental one.
+  bool should_upgrade_to_full_gc(GCCause::Cause cause);
+
   // indicates whether we are in young or mixed GC mode
   G1CollectorState _collector_state;
 
@@ -369,9 +373,9 @@
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
-  // request. If the region is to be used as an old region or for a
-  // humongous object, set is_old to true. If not, to false.
-  HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
+  // request. 'type' gives the type of region to be allocated. (Use the
+  // constants Old, Eden, Humongous, Survivor defined in HeapRegionType.)
+  HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
@@ -957,10 +961,13 @@
   // The current policy object for the collector.
   G1Policy* g1_policy() const { return _g1_policy; }
 
+  HeapRegionManager* hrm() const { return _hrm; }
+
   const G1CollectionSet* collection_set() const { return &_collection_set; }
   G1CollectionSet* collection_set() { return &_collection_set; }
 
   virtual CollectorPolicy* collector_policy() const;
+  virtual G1CollectorPolicy* g1_collector_policy() const;
 
   virtual SoftRefPolicy* soft_ref_policy();
 
@@ -1009,7 +1016,7 @@
   // But G1CollectedHeap doesn't yet support this.
 
   virtual bool is_maximal_no_gc() const {
-    return _hrm.available() == 0;
+    return _hrm->available() == 0;
   }
 
   // Returns whether there are any regions left in the heap for allocation.
@@ -1018,19 +1025,22 @@
   }
 
   // The current number of regions in the heap.
-  uint num_regions() const { return _hrm.length(); }
+  uint num_regions() const { return _hrm->length(); }
 
   // The max number of regions in the heap.
-  uint max_regions() const { return _hrm.max_length(); }
+  uint max_regions() const { return _hrm->max_length(); }
+
+  // Max number of regions that can be committed.
+  uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
 
   // The number of regions that are completely free.
-  uint num_free_regions() const { return _hrm.num_free_regions(); }
+  uint num_free_regions() const { return _hrm->num_free_regions(); }
 
   // The number of regions that can be allocated into.
-  uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }
+  uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
 
   MemoryUsage get_auxiliary_data_memory_usage() const {
-    return _hrm.get_auxiliary_data_memory_usage();
+    return _hrm->get_auxiliary_data_memory_usage();
   }
 
   // The number of regions that are not completely free.
@@ -1038,7 +1048,7 @@
 
 #ifdef ASSERT
   bool is_on_master_free_list(HeapRegion* hr) {
-    return _hrm.is_free(hr);
+    return _hrm->is_free(hr);
   }
 #endif // ASSERT
 
@@ -1095,13 +1105,13 @@
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
-    return _hrm.reserved().contains(p);
+    return _hrm->reserved().contains(p);
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // reserved for the heap
   MemRegion g1_reserved() const {
-    return _hrm.reserved();
+    return _hrm->reserved();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
@@ -1227,6 +1237,9 @@
   // Print the maximum heap capacity.
   virtual size_t max_capacity() const;
 
+  // Return the size of reserved memory. Returns a different value from max_capacity() when AllocateOldGenAt is used.
+  virtual size_t max_reserved_capacity() const;
+
   virtual jlong millis_since_last_gc();
 
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -57,13 +57,13 @@
 // Inline functions for G1CollectedHeap
 
 // Return the region with the given index. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }
 
 // Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
+inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }
 
 inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
-  return _hrm.next_region_in_humongous(hr);
+  return _hrm->next_region_in_humongous(hr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
@@ -74,7 +74,7 @@
 }
 
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
-  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
+  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
 }
 
 template <class T>
@@ -83,7 +83,7 @@
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
-  return _hrm.addr_to_region((HeapWord*) addr);
+  return _hrm->addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
@@ -266,12 +266,12 @@
 }
 
 inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   _humongous_reclaim_candidates.set_candidate(region, value);
 }
 
 inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   return _humongous_reclaim_candidates.is_candidate(region);
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectorPolicy.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,3 +55,11 @@
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
+
+size_t G1CollectorPolicy::heap_reserved_size_bytes() const {
+  return _max_heap_byte_size;
+}
+
+bool G1CollectorPolicy::is_hetero_heap() const {
+  return false;
+}
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectorPolicy.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
 
 public:
   G1CollectorPolicy();
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
 };
-
 #endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -603,14 +603,14 @@
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _g1h->_hrm.verify();
+  _g1h->_hrm->verify();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
   _g1h->heap_region_iterate(&cl);
-  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
 }
 
 void G1HeapVerifier::prepare_for_verify() {
@@ -851,7 +851,7 @@
 
 bool G1HeapVerifier::check_cset_fast_test() {
   G1CheckCSetFastTableClosure cl;
-  _g1h->_hrm.iterate(&cl);
+  _g1h->_hrm->iterate(&cl);
   return !cl.failures();
 }
 #endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/os.hpp"
+#include "utilities/formatBuffer.hpp"
+
+const double G1HeterogeneousCollectorPolicy::MaxRamFractionForYoung = 0.8;
+size_t G1HeterogeneousCollectorPolicy::MaxMemoryForYoung;
+
+static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
+  julong phys_mem;
+  // If MaxRam is specified, we use that as maximum physical memory available.
+  if (FLAG_IS_DEFAULT(MaxRAM)) {
+    phys_mem = os::physical_memory();
+    calc_str.append("Physical_Memory");
+  } else {
+    phys_mem = (julong)MaxRAM;
+    calc_str.append("MaxRAM");
+  }
+
+  julong reasonable_max = phys_mem;
+
+  // If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
+  // reasonable max size of young generation.
+  if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
+    reasonable_max = (julong)(phys_mem / MaxRAMFraction);
+    calc_str.append(" / MaxRAMFraction");
+  } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
+    reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+    calc_str.append(" * MaxRAMPercentage / 100");
+  } else {
+    // We use our own fraction to calculate max size of young generation.
+    reasonable_max = phys_mem * max_ram_fraction_for_young;
+    calc_str.append(" * %0.2f", max_ram_fraction_for_young);
+  }
+
+  return (size_t)reasonable_max;
+}
+
+void G1HeterogeneousCollectorPolicy::initialize_flags() {
+
+  FormatBuffer<100> calc_str("");
+
+  MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
+
+  if (MaxNewSize > MaxMemoryForYoung) {
+    if (FLAG_IS_CMDLINE(MaxNewSize)) {
+      log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            MaxMemoryForYoung, calc_str.buffer());
+    } else {
+      log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
+                         "Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
+    }
+    MaxNewSize = MaxMemoryForYoung;
+  }
+  if (NewSize > MaxMemoryForYoung) {
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            MaxMemoryForYoung, calc_str.buffer());
+    }
+    NewSize = MaxMemoryForYoung;
+  }
+
+  // After setting new size flags, call base class initialize_flags()
+  G1CollectorPolicy::initialize_flags();
+}
+
+size_t G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() {
+  return MaxMemoryForYoung;
+}
+
+size_t G1HeterogeneousCollectorPolicy::heap_reserved_size_bytes() const {
+  return 2 * _max_heap_byte_size;
+}
+
+bool G1HeterogeneousCollectorPolicy::is_hetero_heap() const {
+  return true;
+}
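
heap_reserved_size_bytes() doubling the maximum heap size is what makes max_reserved_capacity() diverge from max_capacity() in g1CollectedHeap.cpp above. As a worked example (hypothetical numbers): for a -Xmx4g run with AllocateOldGenAt set, G1 reserves 2 x 4 GB = 8 GB of address space, presumably half backed by dram and half by the nvdimm device, while max_capacity() still reports at most the 4 GB the user asked for.
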
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
+
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
+
+class G1HeterogeneousCollectorPolicy : public G1CollectorPolicy {
+private:
+  // Max fraction of dram to use for the young generation when MaxRAMFraction
+  // and MaxRAMPercentage are not specified on the command line.
+  static const double MaxRamFractionForYoung;
+  static size_t MaxMemoryForYoung;
+
+protected:
+  virtual void initialize_flags();
+
+public:
+  G1HeterogeneousCollectorPolicy() {}
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
+  static size_t reasonable_max_memory_for_young();
+};
+
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+
+G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
+  G1Policy(policy, gc_timer), _manager(NULL) {}
+
+// We call the superclass init(), after which we provision young_list_target_length() regions in dram.
+void G1HeterogeneousHeapPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
+  G1Policy::init(g1h, collection_set);
+  _manager = HeterogeneousHeapRegionManager::manager();
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+// After a collection pause, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
+void G1HeterogeneousHeapPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
+  G1Policy::record_collection_pause_end(pause_time_ms, cards_scanned, heap_used_bytes_before_gc);
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+// After a full collection, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
+void G1HeterogeneousHeapPolicy::record_full_collection_end() {
+  G1Policy::record_full_collection_end();
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+bool G1HeterogeneousHeapPolicy::force_upgrade_to_full() {
+  if (_manager->has_borrowed_regions()) {
+    return true;
+  }
+  return false;
+}
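
The pattern above repeats at every point where the young list target can change (init, pause end, full collection end): delegate to the base policy first, then resize the dram region pool to the new target. A standalone sketch of that hook shape (plain C++, not HotSpot code; names abbreviated from the ones above):

    struct Manager {
        void adjust_dram_regions(unsigned target) { /* resize dram pool */ }
    };

    struct BasePolicy {
        virtual void record_collection_pause_end() { /* recomputes target */ }
        virtual unsigned young_list_target_length() const { return 0; }
        virtual ~BasePolicy() {}
    };

    struct HeteroHeapPolicy : BasePolicy {
        Manager* manager;
        explicit HeteroHeapPolicy(Manager* m) : manager(m) {}
        void record_collection_pause_end() override {
            BasePolicy::record_collection_pause_end();  // target updated here
            manager->adjust_dram_regions(young_list_target_length());
        }
    };
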
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
+
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+
+class G1HeterogeneousHeapPolicy : public G1Policy {
+  // Stash a pointer to the hrm.
+  HeterogeneousHeapRegionManager* _manager;
+
+public:
+  G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
+
+  // initialize policy
+  virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
+  // Record end of an evacuation pause.
+  virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
+  // Record the end of full collection.
+  virtual void record_full_collection_end();
+
+  virtual bool force_upgrade_to_full();
+};
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
+#include "gc/g1/heapRegion.hpp"
+
+G1HeterogeneousHeapYoungGenSizer::G1HeterogeneousHeapYoungGenSizer() : G1YoungGenSizer() {
+  // Will be used later when the min and max young sizes are calculated.
+  _max_young_length = (uint)(G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
+}
+
+// Since the heap is potentially sized to a larger value to account for dram + nvdimm, we need to
+// limit the max young gen size to the available dram.
+// Call the parent class method first, then adjust the sizes based on the available dram.
+void G1HeterogeneousHeapYoungGenSizer::adjust_max_new_size(uint number_of_heap_regions) {
+  G1YoungGenSizer::adjust_max_new_size(number_of_heap_regions);
+  adjust_lengths_based_on_dram_memory();
+}
+
+void G1HeterogeneousHeapYoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
+  G1YoungGenSizer::heap_size_changed(new_number_of_heap_regions);
+  adjust_lengths_based_on_dram_memory();
+}
+
+void G1HeterogeneousHeapYoungGenSizer::adjust_lengths_based_on_dram_memory() {
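+  // Example (illustrative numbers, not from the source): with 32 MB regions and 10 GB of
+  // dram, the default cap of 80% of dram (see the class comment in the .hpp file) gives
+  // _max_young_length == 256, so both desired lengths below are clamped to at most 256.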
+  _min_desired_young_length = MIN2(_min_desired_young_length, _max_young_length);
+  _max_desired_young_length = MIN2(_max_desired_young_length, _max_young_length);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
+
+#include "gc/g1/g1YoungGenSizer.hpp"
+
+// This class prevents the young generation of the G1 heap from growing beyond the
+// available dram memory. If set on the command line, MaxRAM and
+// MaxRAMFraction/MaxRAMPercentage are used to determine the maximum size the young
+// generation can grow to. Otherwise, the maximum size is set to 80% of the dram
+// available in the system.
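+//
+// For example (hypothetical invocation, assuming the usual flag spelling): running with
+// -XX:AllocateOldGenAt=<path> and no explicit MaxRAM settings caps the young generation
+// at 80% of the dram present in the system.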
+
+class G1HeterogeneousHeapYoungGenSizer : public G1YoungGenSizer {
+private:
+  // Maximum number of regions that the young generation can grow to. Calculated in the constructor.
+  uint _max_young_length;
+  void adjust_lengths_based_on_dram_memory();
+
+public:
+  G1HeterogeneousHeapYoungGenSizer();
+
+  // Calculate the maximum length of the young gen given the number of regions
+  // depending on the sizing algorithm.
+  virtual void adjust_max_new_size(uint number_of_heap_regions);
+
+  virtual void heap_size_changed(uint new_number_of_heap_regions);
+};
+
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -100,6 +100,12 @@
   return reserved_size() - committed_size();
 }
 
+void G1PageBasedVirtualSpace::commit_and_set_special() {
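+  // Commit every page between the low and high boundary in one shot, mark the space as
+  // special so it is treated as permanently committed, and size the dirty map to cover
+  // all pages of the reservation.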
+  commit_internal(addr_to_page_index(_low_boundary), addr_to_page_index(_high_boundary));
+  _special = true;
+  _dirty.initialize(reserved_size() / _page_size);
+}
+
 size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
   return (addr - _low_boundary) / _page_size;
 }
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -136,6 +136,8 @@
   // Memory left to use/expand in this virtual space.
   size_t uncommitted_size() const;
 
+  void commit_and_set_special();
+
   bool contains(const void* p) const;
 
   MemRegion reserved() {
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -29,6 +29,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -46,7 +47,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/pair.hpp"
 
-G1Policy::G1Policy(STWGCTimer* gc_timer) :
+G1Policy::G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
   _predictor(G1ConfidencePercent / 100.0),
   _analytics(new G1Analytics(&_predictor)),
   _remset_tracker(),
@@ -62,7 +63,7 @@
   _survivor_surv_rate_group(new SurvRateGroup()),
   _reserve_factor((double) G1ReservePercent / 100.0),
   _reserve_regions(0),
-  _young_gen_sizer(),
+  _young_gen_sizer(G1YoungGenSizer::create_gen_sizer(policy)),
   _free_regions_at_end_of_collection(0),
   _max_rs_lengths(0),
   _rs_lengths_prediction(0),
@@ -83,6 +84,15 @@
 
 G1Policy::~G1Policy() {
   delete _ihop_control;
+  delete _young_gen_sizer;
+}
+
+G1Policy* G1Policy::create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw) {
+  if (policy->is_hetero_heap()) {
+    return new G1HeterogeneousHeapPolicy(policy, gc_timer_stw);
+  } else {
+    return new G1Policy(policy, gc_timer_stw);
+  }
 }
 
 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
@@ -94,9 +104,9 @@
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 
   if (!adaptive_young_list_length()) {
-    _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
+    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
+  _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
 
   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 
@@ -176,7 +186,7 @@
   // smaller than 1.0) we'll get 1.
   _reserve_regions = (uint) ceil(reserve_regions_d);
 
-  _young_gen_sizer.heap_size_changed(new_number_of_regions);
+  _young_gen_sizer->heap_size_changed(new_number_of_regions);
 
   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 }
@@ -195,14 +205,14 @@
   }
   desired_min_length += base_min_length;
   // make sure we don't go below any user-defined minimum bound
-  return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
+  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 }
 
 uint G1Policy::calculate_young_list_desired_max_length() const {
   // Here, we might want to also take into account any additional
   // constraints (i.e., user-defined minimum bound). Currently, we
   // effectively don't set this bound.
-  return _young_gen_sizer.max_desired_young_length();
+  return _young_gen_sizer->max_desired_young_length();
 }
 
 uint G1Policy::update_young_list_max_and_target_length() {
@@ -218,6 +228,7 @@
 uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
   _young_list_target_length = young_lengths.first;
+
   return young_lengths.second;
 }
 
@@ -900,7 +911,7 @@
 }
 
 bool G1Policy::adaptive_young_list_length() const {
-  return _young_gen_sizer.adaptive_young_list_length();
+  return _young_gen_sizer->adaptive_young_list_length();
 }
 
 size_t G1Policy::desired_survivor_size(uint max_regions) const {
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1POLICY_HPP
 #define SHARE_VM_GC_G1_G1POLICY_HPP
 
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1InCSetState.hpp"
@@ -91,7 +92,7 @@
   // for the first time during initialization.
   uint   _reserve_regions;
 
-  G1YoungGenSizer _young_gen_sizer;
+  G1YoungGenSizer* _young_gen_sizer;
 
   uint _free_regions_at_end_of_collection;
 
@@ -282,10 +283,12 @@
   void abort_time_to_mixed_tracking();
 public:
 
-  G1Policy(STWGCTimer* gc_timer);
+  G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
 
   virtual ~G1Policy();
 
+  static G1Policy* create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw);
+
   G1CollectorState* collector_state() const;
 
   G1GCPhaseTimes* phase_times() const { return _phase_times; }
@@ -298,7 +301,7 @@
   // This should be called after the heap is resized.
   void record_new_heap_size(uint new_number_of_regions);
 
-  void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
+  virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
 
   void note_gc_start();
 
@@ -308,11 +311,11 @@
 
   // Record the start and end of an evacuation pause.
   void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
+  virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
 
   // Record the start and end of a full collection.
   void record_full_collection_start();
-  void record_full_collection_end();
+  virtual void record_full_collection_end();
 
   // Must currently be called while the world is stopped.
   void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
@@ -432,6 +435,10 @@
   void update_max_gc_locker_expansion();
 
   void update_survivors_policy();
+
+  virtual bool force_upgrade_to_full() {
+    return false;
+  }
 };
 
 #endif // SHARE_VM_GC_G1_G1POLICY_HPP
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,15 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/virtualspace.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/formatBuffer.hpp"
 
 G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                              size_t used_size,
@@ -170,16 +174,156 @@
   }
 }
 
+static bool map_nvdimm_space(ReservedSpace rs) {
+  assert(AllocateOldGenAt != NULL, "AllocateOldGenAt must be set when mapping nv-dimm space");
+  int _backing_fd = os::create_file_for_heap(AllocateOldGenAt);
+  if (_backing_fd == -1) {
+    log_error(gc, init)("Could not create file for Old generation at location %s", AllocateOldGenAt);
+    return false;
+  }
+  // commit this memory in nv-dimm
+  char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), _backing_fd);
+
+  if (ret != rs.base()) {
+    if (ret != NULL) {
+      os::unmap_memory(rs.base(), rs.size());
+    }
+    log_error(gc, init)("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt);
+    os::close(_backing_fd);
+    return false;
+  }
+
+  os::close(_backing_fd);
+  return true;
+}
+
+G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
+                                                         size_t actual_size,
+                                                         size_t page_size,
+                                                         size_t alloc_granularity,
+                                                         size_t commit_factor,
+                                                         MemoryType type) :
+  G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
+  _rs(rs),
+  _num_committed_dram(0),
+  _num_committed_nvdimm(0),
+  _page_size(page_size),
+  _commit_factor(commit_factor),
+  _type(type) {
+  assert(actual_size == 2 * MaxHeapSize, "For a 2-way heterogeneous heap, the reserved space is two times MaxHeapSize");
+}
+
+bool G1RegionToHeteroSpaceMapper::initialize() {
+  // Since we need to re-map the reserved space ('Xmx' to nv-dimm and 'Xmx' to dram), we need to release
+  // the reserved memory first, because on some OSes (e.g. Windows) you cannot do a file mapping on memory
+  // reserved with a regular mapping.
+  os::release_memory(_rs.base(), _rs.size());
+  // First half of size Xmx is for nv-dimm.
+  ReservedSpace rs_nvdimm = _rs.first_part(MaxHeapSize);
+  assert(rs_nvdimm.base() == _rs.base(), "We should get the same base address");
+
+  // Second half of reserved memory is mapped to dram.
+  ReservedSpace rs_dram = _rs.last_part(MaxHeapSize);
+
+  assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "They all should be same");
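+  // Resulting layout of the single 2 * MaxHeapSize reservation (derived from the splits above):
+  //
+  //   base()                  base() + MaxHeapSize          base() + 2 * MaxHeapSize
+  //     |<------- nv-dimm ------->|<---------- dram ---------->|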
+
+  // Reserve dram memory
+  char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
+  if (base != rs_dram.base()) {
+    if (base != NULL) {
+      os::release_memory(base, rs_dram.size());
+    }
+    log_error(gc, init)("Error in re-mapping memory on dram during G1 heterogenous memory initialization");
+    return false;
+  }
+
+  // We reserve and commit this entire space to NV-DIMM.
+  if (!map_nvdimm_space(rs_nvdimm)) {
+    log_error(gc, init)("Error in re-mapping memory to nv-dimm during G1 heterogenous memory initialization");
+    return false;
+  }
+
+  if (_region_granularity >= (_page_size * _commit_factor)) {
+    _dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
+  } else {
+    _dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
+  }
+
+  _start_index_of_nvdimm = 0;
+  _start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
+  return true;
+}
+
+void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
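+
+  // Example (illustrative): with _start_index_of_dram == 512, a request with start_idx == 510
+  // and num_regions == 4 covers end_idx == 513, so num_dram == 2 (regions 512-513) and
+  // num_nvdimm == 2 (regions 510-511).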
+
+  if (num_nvdimm > 0) {
+    // We do not need to commit nv-dimm regions, since they are committed in the beginning.
+    _num_committed_nvdimm += num_nvdimm;
+  }
+  if (num_dram > 0) {
+    _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
+    _num_committed_dram += num_dram;
+  }
+}
+
+void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
+
+  if (num_nvdimm > 0) {
+    // We do not uncommit memory for nv-dimm regions.
+    _num_committed_nvdimm -= num_nvdimm;
+  }
+
+  if (num_dram > 0) {
+    _dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
+    _num_committed_dram -= num_dram;
+  }
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_dram() const {
+  return _num_committed_dram;
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() const {
+  return _num_committed_nvdimm;
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
+                                                                 size_t actual_size,
+                                                                 size_t page_size,
+                                                                 size_t region_granularity,
+                                                                 size_t commit_factor,
+                                                                 MemoryType type) {
+  if (AllocateOldGenAt != NULL) {
+    G1RegionToHeteroSpaceMapper* mapper = new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+    if (!mapper->initialize()) {
+      delete mapper;
+      return NULL;
+    }
+    return (G1RegionToSpaceMapper*)mapper;
+  } else {
+    return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+  }
+}
+
 G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                             size_t actual_size,
                                                             size_t page_size,
                                                             size_t region_granularity,
                                                             size_t commit_factor,
                                                             MemoryType type) {
-
   if (region_granularity >= (page_size * commit_factor)) {
     return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   } else {
     return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   }
 }
+
+void G1RegionToSpaceMapper::commit_and_set_special() {
+  _storage.commit_and_set_special();
+}
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -70,6 +70,7 @@
     return _commit_map.at(idx);
   }
 
+  void commit_and_set_special();
   virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;
   virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
 
@@ -87,6 +88,37 @@
                                               size_t region_granularity,
                                               size_t byte_translation_factor,
                                               MemoryType type);
+
+  static G1RegionToSpaceMapper* create_heap_mapper(ReservedSpace rs,
+                                                   size_t actual_size,
+                                                   size_t page_size,
+                                                   size_t region_granularity,
+                                                   size_t byte_translation_factor,
+                                                   MemoryType type);
 };
 
+// G1RegionToSpaceMapper implementation where
+// part of space is mapped to dram and part to nv-dimm
+class G1RegionToHeteroSpaceMapper : public G1RegionToSpaceMapper {
+private:
+  size_t _pages_per_region;
+  ReservedSpace _rs;
+  G1RegionToSpaceMapper* _dram_mapper;
+  uint _num_committed_dram;
+  uint _num_committed_nvdimm;
+  uint _start_index_of_nvdimm;
+  uint _start_index_of_dram;
+  size_t _page_size;
+  size_t _commit_factor;
+  MemoryType _type;
+
+public:
+  G1RegionToHeteroSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
+  bool initialize();
+  uint num_committed_dram() const;
+  uint num_committed_nvdimm() const;
+
+  virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1);
+};
 #endif // SHARE_VM_GC_G1_G1REGIONTOSPACEMAPPER_HPP
--- a/src/hotspot/share/gc/g1/g1VMOperations.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -138,8 +138,8 @@
       // kind of GC.
       _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
     } else {
-      bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
-                                    !g1h->has_regions_left_for_allocation();
+      bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause);
+
       if (should_upgrade_to_full) {
         // There has been a request to perform a GC to free some space. We have no
         // information on how much memory has been asked for. In case there are
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "logging/log.hpp"
 
 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
-  _min_desired_young_length(0), _max_desired_young_length(0), _adaptive_size(true) {
+  _adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {
 
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
@@ -127,3 +129,11 @@
   recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
           &_max_desired_young_length);
 }
+
+G1YoungGenSizer* G1YoungGenSizer::create_gen_sizer(G1CollectorPolicy* policy) {
+  if (policy->is_hetero_heap()) {
+    return new G1HeterogeneousHeapYoungGenSizer();
+  } else {
+    return new G1YoungGenSizer();
+  }
+}
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
 #define SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
 
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // There are three command line options related to the young gen size:
@@ -63,7 +64,7 @@
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
-class G1YoungGenSizer {
+class G1YoungGenSizer : public CHeapObj<mtGC> {
 private:
   enum SizerKind {
     SizerDefaults,
@@ -73,8 +74,6 @@
     SizerNewRatio
   };
   SizerKind _sizer_kind;
-  uint _min_desired_young_length;
-  uint _max_desired_young_length;
 
   // False when using a fixed young generation size due to command-line options,
   // true otherwise.
@@ -87,13 +86,17 @@
   // given the number of heap regions depending on the kind of sizing algorithm.
   void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
 
+protected:
+  uint _min_desired_young_length;
+  uint _max_desired_young_length;
+
 public:
   G1YoungGenSizer();
   // Calculate the maximum length of the young gen given the number of regions
   // depending on the sizing algorithm.
-  void adjust_max_new_size(uint number_of_heap_regions);
+  virtual void adjust_max_new_size(uint number_of_heap_regions);
 
-  void heap_size_changed(uint new_number_of_heap_regions);
+  virtual void heap_size_changed(uint new_number_of_heap_regions);
   uint min_desired_young_length() const {
     return _min_desired_young_length;
   }
@@ -104,6 +107,8 @@
   bool adaptive_young_list_length() const {
     return _adaptive_size;
   }
+
+  static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
 };
 
 #endif // SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -71,9 +71,9 @@
 
   // Check if load is lower than max.
   double recent_load;
-  if ((G1PeriodicGCSystemLoadThreshold > 0) &&
+  if ((G1PeriodicGCSystemLoadThreshold > 0.0f) &&
       (os::loadavg(&recent_load, 1) == -1 || recent_load > G1PeriodicGCSystemLoadThreshold)) {
-    log_debug(gc, periodic)("Load %1.2f is higher than threshold " UINTX_FORMAT ". Skipping.",
+    log_debug(gc, periodic)("Load %1.2f is higher than threshold %1.2f. Skipping.",
                             recent_load, G1PeriodicGCSystemLoadThreshold);
     return false;
   }
--- a/src/hotspot/share/gc/g1/g1_globals.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -311,10 +311,21 @@
           "perform a concurrent GC as periodic GC, otherwise use a STW "    \
           "Full GC.")                                                       \
                                                                             \
-  manageable(uintx, G1PeriodicGCSystemLoadThreshold, 0,                     \
-          "Maximum recent system wide system load as returned by the 1m "   \
-          "value of getloadavg() at which G1 triggers a periodic GC. A "    \
-          "load above this value cancels a given periodic GC. A value of "  \
-          "zero disables this check.")                                      \
+  manageable(double, G1PeriodicGCSystemLoadThreshold, 0.0,                  \
+          "Maximum recent system wide load as returned by the 1m value "    \
+          "of getloadavg() at which G1 triggers a periodic GC. A load "     \
+          "above this value cancels a given periodic GC. A value of zero "  \
+          "disables this check.")                                           \
+          range(0.0, (double)max_uintx)                                     \
+                                                                            \
+  experimental(uintx, G1YoungExpansionBufferPercent, 10,                    \
+               "When heterogenous heap is enabled by AllocateOldGenAt "     \
+               "option, after every GC, young gen is re-sized which "       \
+               "involves system calls to commit/uncommit memory. To "       \
+               "reduce these calls, we keep a buffer of extra regions to "  \
+               "absorb small changes in young gen length. This flag takes " \
+               "the buffer size as an percentage of young gen length")      \
+               range(0, 100)                                                \
+
 
 #endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -28,6 +28,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+#include "gc/shared/collectorPolicy.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -54,18 +56,25 @@
 };
 
 HeapRegionManager::HeapRegionManager() :
+  _bot_mapper(NULL),
+  _cardtable_mapper(NULL),
+  _card_counts_mapper(NULL),
+  _available_map(mtGC),
+  _num_committed(0),
+  _allocated_heapregions_length(0),
   _regions(), _heap_mapper(NULL),
   _prev_bitmap_mapper(NULL),
   _next_bitmap_mapper(NULL),
-  _bot_mapper(NULL),
-  _cardtable_mapper(NULL),
-  _card_counts_mapper(NULL),
-  _free_list("Free list", new MasterFreeRegionListChecker()),
-  _available_map(mtGC),
-  _num_committed(0),
-  _allocated_heapregions_length(0)
+  _free_list("Free list", new MasterFreeRegionListChecker())
 { }
 
+HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy) {
+  if (policy->is_hetero_heap()) {
+    return new HeterogeneousHeapRegionManager((uint)(policy->max_heap_byte_size() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
+  }
+  return new HeapRegionManager();
+}
+
 void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                G1RegionToSpaceMapper* prev_bitmap,
                                G1RegionToSpaceMapper* next_bitmap,
@@ -514,7 +523,7 @@
 #endif // PRODUCT
 
 HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
-    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
+    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
   assert(n_workers > 0, "Need at least one worker.");
   uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
   memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -26,8 +26,10 @@
 #define SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP
 
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/shared/collectorPolicy.hpp"
 #include "services/memoryUsage.hpp"
 
 class HeapRegion;
@@ -71,17 +73,10 @@
   friend class VMStructs;
   friend class HeapRegionClaimer;
 
-  G1HeapRegionTable _regions;
-
-  G1RegionToSpaceMapper* _heap_mapper;
-  G1RegionToSpaceMapper* _prev_bitmap_mapper;
-  G1RegionToSpaceMapper* _next_bitmap_mapper;
   G1RegionToSpaceMapper* _bot_mapper;
   G1RegionToSpaceMapper* _cardtable_mapper;
   G1RegionToSpaceMapper* _card_counts_mapper;
 
-  FreeRegionList _free_list;
-
   // Each bit in this bitmap indicates that the corresponding region is available
   // for allocation.
   CHeapBitMap _available_map;
@@ -95,11 +90,8 @@
   HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
-  void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
-
   // Pass down commit calls to the VirtualSpace.
   void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
-  void uncommit_regions(uint index, size_t num_regions = 1);
 
   // Notify other data structures about change in the heap layout.
   void update_committed_space(HeapWord* old_end, HeapWord* new_end);
@@ -117,6 +109,16 @@
   // the heap. Returns the length of the sequence found. If this value is zero, no
   // sequence could be found, otherwise res_idx contains the start index of this range.
   uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+
+protected:
+  G1HeapRegionTable _regions;
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  FreeRegionList _free_list;
+
+  void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
+  void uncommit_regions(uint index, size_t num_regions = 1);
   // Allocate a new HeapRegion for the given index.
   HeapRegion* new_heap_region(uint hrm_index);
 #ifdef ASSERT
@@ -127,18 +129,25 @@
   // Empty constructor, we'll initialize it with the initialize() method.
   HeapRegionManager();
 
-  void initialize(G1RegionToSpaceMapper* heap_storage,
-                  G1RegionToSpaceMapper* prev_bitmap,
-                  G1RegionToSpaceMapper* next_bitmap,
-                  G1RegionToSpaceMapper* bot,
-                  G1RegionToSpaceMapper* cardtable,
-                  G1RegionToSpaceMapper* card_counts);
+  static HeapRegionManager* create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy);
+
+  virtual void initialize(G1RegionToSpaceMapper* heap_storage,
+                          G1RegionToSpaceMapper* prev_bitmap,
+                          G1RegionToSpaceMapper* next_bitmap,
+                          G1RegionToSpaceMapper* bot,
+                          G1RegionToSpaceMapper* cardtable,
+                          G1RegionToSpaceMapper* card_counts);
+
+  // Prepare heap regions before and after full collection.
+  // Nothing to be done in this class.
+  virtual void prepare_for_full_collection_start() {}
+  virtual void prepare_for_full_collection_end() {}
 
   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
   // the heap from the lowest address, this region (and its associated data
   // structures) are available and we do not need to check further.
-  HeapRegion* get_dummy_region() { return new_heap_region(0); }
+  virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -167,8 +176,8 @@
     _free_list.add_ordered(list);
   }
 
-  HeapRegion* allocate_free_region(bool is_old) {
-    HeapRegion* hr = _free_list.remove_region(is_old);
+  virtual HeapRegion* allocate_free_region(HeapRegionType type) {
+    HeapRegion* hr = _free_list.remove_region(!type.is_young());
 
     if (hr != NULL) {
       assert(hr->next() == NULL, "Single region should not have next");
@@ -202,6 +211,9 @@
   // Return the maximum number of regions in the heap.
   uint max_length() const { return (uint)_regions.length(); }
 
+  // Return maximum number of regions that heap can expand to.
+  virtual uint max_expandable_length() const { return (uint)_regions.length(); }
+
   MemoryUsage get_auxiliary_data_memory_usage() const;
 
   MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
@@ -210,26 +222,26 @@
   // HeapRegions, or re-use existing ones. Returns the number of regions the
   // sequence was expanded by. If a HeapRegion allocation fails, the resulting
   // number of regions might be smaller than what's desired.
-  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
+  virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers);
 
   // Makes sure that the regions from start to start+num_regions-1 are available
   // for allocation. Returns the number of regions that were committed to achieve
   // this.
-  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
+  virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
 
   // Find a contiguous set of empty regions of length num. Returns the start index of
   // that set, or G1_NO_HRM_INDEX.
-  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+  virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
   // Find a contiguous set of empty or unavailable regions of length num. Returns the
   // start index of that set, or G1_NO_HRM_INDEX.
-  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
+  virtual uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
 
   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 
   // Find the highest free or uncommitted region in the reserved heap,
   // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
   // Set the 'expanded' boolean true if a new region was committed.
-  uint find_highest_free(bool* expanded);
+  virtual uint find_highest_free(bool* expanded);
 
   // Allocate the regions that contain the address range specified, committing the
   // regions if necessary. Return false if any of the regions is already committed
@@ -244,13 +256,13 @@
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.
-  uint shrink_by(uint num_regions_to_remove);
+  virtual uint shrink_by(uint num_regions_to_remove);
 
   // Uncommit a number of regions starting at the specified index, which must be available,
   // empty, and free.
   void shrink_at(uint index, size_t num_regions);
 
-  void verify();
+  virtual void verify();
 
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -234,6 +234,21 @@
   verify_optional();
 }
 
+uint FreeRegionList::num_of_regions_in_range(uint start, uint end) const {
+  HeapRegion* cur = _head;
+  uint num = 0;
+  while (cur != NULL) {
+    uint index = cur->hrm_index();
+    if (index > end) {
+      break;
+    } else if (index >= start) {
+      num++;
+    }
+    cur = cur->next();
+  }
+  return num;
+}
+
 void FreeRegionList::verify() {
   // See comment in HeapRegionSetBase::verify() about MT safety and
   // verification.
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -194,6 +194,8 @@
   void remove_starting_at(HeapRegion* first, uint num_regions);
 
   virtual void verify();
+
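+  // Returns the number of regions in this free list whose index is in the range [start, end].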
+  uint num_of_regions_in_range(uint start, uint end) const;
 };
 
 // Iterator class that provides a convenient way to iterate over the
--- a/src/hotspot/share/gc/g1/heapRegionType.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionType.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,11 @@
 #include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "gc/g1/heapRegionType.hpp"
 
+const HeapRegionType HeapRegionType::Eden      = HeapRegionType(EdenTag);
+const HeapRegionType HeapRegionType::Survivor  = HeapRegionType(SurvTag);
+const HeapRegionType HeapRegionType::Old       = HeapRegionType(OldTag);
+const HeapRegionType HeapRegionType::Humongous = HeapRegionType(StartsHumongousTag);
+
 bool HeapRegionType::is_valid(Tag tag) {
   switch (tag) {
     case FreeTag:
--- a/src/hotspot/share/gc/g1/heapRegionType.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionType.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -117,6 +117,9 @@
     _tag = tag;
   }
 
+  // Private constructor used by the static constants below.
+  HeapRegionType(Tag t) : _tag(t) { hrt_assert_is_valid(_tag); }
+
 public:
   // Queries
 
@@ -186,6 +189,11 @@
   G1HeapRegionTraceType::Type get_trace_type();
 
   HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
+
+  static const HeapRegionType Eden;
+  static const HeapRegionType Survivor;
+  static const HeapRegionType Old;
+  static const HeapRegionType Humongous;
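+
+  // These constants let callers request a free region of a particular kind, e.g.
+  // allocate_free_region(HeapRegionType::Eden) in HeapRegionManager.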
 };
 
 #endif // SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegionManager.inline.hpp"
+#include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+#include "memory/allocation.hpp"
+
+
+HeterogeneousHeapRegionManager* HeterogeneousHeapRegionManager::manager() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(g1h != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
+
+  HeapRegionManager* hrm = g1h->hrm();
+  assert(hrm != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
+  return (HeterogeneousHeapRegionManager*)hrm;
+}
+
+void HeterogeneousHeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
+                                                G1RegionToSpaceMapper* prev_bitmap,
+                                                G1RegionToSpaceMapper* next_bitmap,
+                                                G1RegionToSpaceMapper* bot,
+                                                G1RegionToSpaceMapper* cardtable,
+                                                G1RegionToSpaceMapper* card_counts) {
+  HeapRegionManager::initialize(heap_storage, prev_bitmap, next_bitmap, bot, cardtable, card_counts);
+
+  // We commit bitmap for all regions during initialization and mark the bitmap space as special.
+  // This allows regions to be un-committed while concurrent-marking threads are still accessing the bitmap.
+  _prev_bitmap_mapper->commit_and_set_special();
+  _next_bitmap_mapper->commit_and_set_special();
+}
+
+// expand_by() is called to grow the heap. We grow into nvdimm now.
+// Dram regions are committed later as needed, during mutator region allocation or
+// when the young list target length is determined after a gc cycle.
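+// For example (illustrative), a request to expand by 5 regions when only 3 regions of headroom
+// remain (max_expandable_length() - total_regions_committed() == 3) commits at most 3 nv-dimm
+// regions.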
+uint HeterogeneousHeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
+  uint num_regions_possible = total_regions_committed() >= max_expandable_length() ? 0 : max_expandable_length() - total_regions_committed();
+  uint num_expanded = expand_nvdimm(MIN2(num_regions, num_regions_possible), pretouch_workers);
+  return num_expanded;
+}
+
+// Expands the heap starting from the 'start' index. The question is whether we should expand from one memory
+// (e.g. nvdimm) to another (e.g. dram). In practice, expand_at() is called for humongous allocation, where
+// 'start' is in nv-dimm, so we only allocate regions in the same kind of memory as 'start'.
+uint HeterogeneousHeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
+  if (num_regions == 0) {
+    return 0;
+  }
+  uint target_num_regions = MIN2(num_regions, max_expandable_length() - total_regions_committed());
+  uint end = is_in_nvdimm(start) ? end_index_of_nvdimm() : end_index_of_dram();
+
+  uint num_expanded = expand_in_range(start, end, target_num_regions, pretouch_workers);
+  assert(total_regions_committed() <= max_expandable_length(), "must be");
+  return num_expanded;
+}
+
+// This function ensures that there are 'expected_num_regions' committed regions in dram.
+// If new regions are committed, it un-commits that many regions from nv-dimm.
+// If there are already more regions committed in dram, extra regions are un-committed.
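+//
+// Example (illustrative): with expected_num_regions == 100 and G1YoungExpansionBufferPercent == 10,
+// expansion targets 110 dram regions, and shrinking is skipped when fewer than 10 extra dram
+// regions are free.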
+void HeterogeneousHeapRegionManager::adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers) {
+
+  // Release back the extra regions allocated in evacuation failure scenario.
+  if (_no_borrowed_regions > 0) {
+    _no_borrowed_regions -= shrink_dram(_no_borrowed_regions);
+    _no_borrowed_regions -= shrink_nvdimm(_no_borrowed_regions);
+  }
+
+  if (expected_num_regions > free_list_dram_length()) {
+    // If we are going to expand DRAM, we expand a little more so that we can absorb small variations in Young gen sizing.
+    uint targeted_dram_regions = (uint)(expected_num_regions * (1 + (double)G1YoungExpansionBufferPercent / 100));
+    uint to_be_made_available = targeted_dram_regions - free_list_dram_length();
+
+#ifdef ASSERT
+    uint total_committed_before = total_regions_committed();
+#endif
+    uint can_be_made_available = shrink_nvdimm(to_be_made_available);
+    uint ret = expand_dram(can_be_made_available, pretouch_workers);
+#ifdef ASSERT
+    assert(ret == can_be_made_available, "should be equal");
+    assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+  } else {
+    uint to_be_released = free_list_dram_length() - expected_num_regions;
+    // if number of extra DRAM regions is small, do not shrink.
+    if (to_be_released < expected_num_regions * G1YoungExpansionBufferPercent / 100) {
+      return;
+    }
+
+#ifdef ASSERT
+    uint total_committed_before = total_regions_committed();
+#endif
+    uint ret = shrink_dram(to_be_released);
+    assert(ret == to_be_released, "Should be able to shrink by given amount");
+    ret = expand_nvdimm(to_be_released, pretouch_workers);
+#ifdef ASSERT
+    assert(ret == to_be_released, "Should be able to expand by given amount");
+    assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+  }
+}
+
+uint HeterogeneousHeapRegionManager::total_regions_committed() const {
+  return num_committed_dram() + num_committed_nvdimm();
+}
+
+uint HeterogeneousHeapRegionManager::num_committed_dram() const {
+  // This class does not keep count of committed regions in dram and nv-dimm.
+  // G1RegionToHeteroSpaceMapper keeps this information.
+  return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_dram();
+}
+
+uint HeterogeneousHeapRegionManager::num_committed_nvdimm() const {
+  // See comment for num_committed_dram()
+  return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_nvdimm();
+}
+
+// Return maximum number of regions that heap can expand to.
+uint HeterogeneousHeapRegionManager::max_expandable_length() const {
+  return _max_regions;
+}
+
+uint HeterogeneousHeapRegionManager::find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx <= (max_length() + 1), "checking");
+
+  uint num_regions = 0;
+
+  uint cur = start_idx;
+  while (cur <= end_idx && is_available(cur)) {
+    cur++;
+  }
+  if (cur == end_idx + 1) {
+    return num_regions;
+  }
+  *res_idx = cur;
+  while (cur <= end_idx && !is_available(cur)) {
+    cur++;
+  }
+  num_regions = cur - *res_idx;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+    assert(!is_available(i), "just checking");
+  }
+  assert(cur == end_idx + 1 || num_regions == 0 || is_available(cur),
+    "The region at the current position %u must be available or at the end", cur);
+#endif
+  return num_regions;
+}
+
+uint HeterogeneousHeapRegionManager::expand_dram(uint num_regions, WorkGang* pretouch_workers) {
+  return expand_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, pretouch_workers);
+}
+
+uint HeterogeneousHeapRegionManager::expand_nvdimm(uint num_regions, WorkGang* pretouch_workers) {
+  return expand_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, pretouch_workers);
+}
+
+// Follows the same logic as expand_at() from HeapRegionManager.
+uint HeterogeneousHeapRegionManager::expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_gang) {
+
+  uint so_far = 0;
+  uint chunk_start = 0;
+  uint num_last_found = 0;
+  while (so_far < num_regions &&
+         (num_last_found = find_unavailable_in_range(start, end, &chunk_start)) > 0) {
+    uint to_commit = MIN2(num_regions - so_far, num_last_found);
+    make_regions_available(chunk_start, to_commit, pretouch_gang);
+    so_far += to_commit;
+    start = chunk_start + to_commit + 1;
+  }
+
+  return so_far;
+}
+
+// Shrink in the range of indexes which are reserved for dram.
+uint HeterogeneousHeapRegionManager::shrink_dram(uint num_regions, bool update_free_list) {
+  return shrink_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, update_free_list);
+}
+
+// Shrink in the range of indexes which are reserved for nv-dimm.
+uint HeterogeneousHeapRegionManager::shrink_nvdimm(uint num_regions, bool update_free_list) {
+  return shrink_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, update_free_list);
+}
+
+// Find empty regions in given range, un-commit them and return the count.
+uint HeterogeneousHeapRegionManager::shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list) {
+
+  if (num_regions == 0) {
+    return 0;
+  }
+  uint so_far = 0;
+  uint idx_last_found = 0;
+  uint num_last_found;
+  while (so_far < num_regions &&
+         (num_last_found = find_empty_in_range_reverse(start, end, &idx_last_found)) > 0) {
+    uint to_uncommit = MIN2(num_regions - so_far, num_last_found);
+    if (update_free_list) {
+      _free_list.remove_starting_at(at(idx_last_found + num_last_found - to_uncommit), to_uncommit);
+    }
+    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
+    so_far += to_uncommit;
+    end = idx_last_found;
+  }
+  return so_far;
+}
+
+uint HeterogeneousHeapRegionManager::find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx) {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx < max_length(), "checking");
+  guarantee(end_idx < max_length(), "checking");
+  if (start_idx > end_idx) {
+    return 0;
+  }
+
+  uint num_regions_found = 0;
+
+  jlong cur = end_idx;
+  while (cur >= start_idx && !(is_available(cur) && at(cur)->is_empty())) {
+    cur--;
+  }
+  // No empty region found: 'cur' dropped below 'start_idx'. Compare as signed, since
+  // 'start_idx - 1' would underflow for the unsigned start_idx == 0.
+  if (cur < (jlong)start_idx) {
+    return num_regions_found;
+  }
+  jlong old_cur = cur;
+  // cur indexes the first empty region
+  while (cur >= start_idx && is_available(cur) && at(cur)->is_empty()) {
+    cur--;
+  }
+  *res_idx = cur + 1;
+  num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+    assert(at(i)->is_empty(), "just checking");
+  }
+#endif
+  return num_regions_found;
+}
+
+HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(HeapRegionType type) {
+
+  // We want to prevent mutators from proceeding when we have borrowed regions from the last collection. This
+  // will force a full collection to remedy the situation.
+  // Free region requests from GC threads can proceed.
+  if (type.is_eden() || type.is_humongous()) {
+    if (has_borrowed_regions()) {
+      return NULL;
+    }
+  }
+
+  // Old and humongous regions are allocated from nv-dimm; eden and survivor regions are allocated from dram.
+  // Assumption: dram regions take the higher indexes.
+  bool from_nvdimm = type.is_old() || type.is_humongous();
+  bool from_head = from_nvdimm;
+  HeapRegion* hr = _free_list.remove_region(from_head);
+
+  if (hr != NULL && ( (from_nvdimm && !is_in_nvdimm(hr->hrm_index())) || (!from_nvdimm && !is_in_dram(hr->hrm_index())) ) ) {
+    _free_list.add_ordered(hr);
+    hr = NULL;
+  }
+
+#ifdef ASSERT
+  uint total_committed_before = total_regions_committed();
+#endif
+
+  if (hr == NULL) {
+    if (!from_nvdimm) {
+      uint ret = shrink_nvdimm(1);
+      if (ret == 1) {
+        ret = expand_dram(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    } else { // is_old
+      uint ret = shrink_dram(1);
+      if (ret == 1) {
+        ret = expand_nvdimm(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    }
+  }
+#ifdef ASSERT
+  assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+
+  // When an old region is requested (which happens during a collection pause) and we can't find any empty region
+  // in the set of available regions (an evacuation failure scenario), we borrow (or pre-allocate) an unavailable
+  // region from nv-dimm. This region is used to evacuate surviving objects from eden, survivor or old.
+  if (hr == NULL && type.is_old()) {
+    hr = borrow_old_region_for_gc();
+  }
+
+  if (hr != NULL) {
+    assert(hr->next() == NULL, "Single region should not have next");
+    assert(is_available(hr->hrm_index()), "Must be committed");
+  }
+  return hr;
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous_only_empty(size_t num) {
+  if (has_borrowed_regions()) {
+    return G1_NO_HRM_INDEX;
+  }
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, true);
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous_empty_or_unavailable(size_t num) {
+  if (has_borrowed_regions()) {
+    return G1_NO_HRM_INDEX;
+  }
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, false);
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous(size_t start, size_t end, size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = (uint)start;
+  uint length_unavailable = 0;
+
+  while (length_found < num && cur <= end) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      if (!is_available(cur)) {
+        if (shrink_dram(1) == 1) {
+          uint ret = expand_in_range(cur, cur, 1, NULL);
+          assert(ret == 1, "We should be able to expand at this index");
+        } else {
+          length_unavailable++;
+        }
+      }
+      length_found++;
+    } else {
+      // This region is not a candidate. The next region is the next possible one.
+      found = cur + 1;
+      length_found = 0;
+    }
+    cur++;
+  }
+
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
+    }
+    if (!empty_only && length_unavailable > (max_expandable_length() - total_regions_committed())) {
+      // If 'length_unavailable' regions were made available, we would exceed the max region count.
+      return G1_NO_HRM_INDEX;
+    }
+    return found;
+  } else {
+    return G1_NO_HRM_INDEX;
+  }
+}
+
+uint HeterogeneousHeapRegionManager::find_highest_free(bool* expanded) {
+  // Loop downwards from the highest dram region index, looking for an
+  // entry which is either free or not yet committed.  If not yet
+  // committed, expand at that index (via expand_in_range()).
+  uint curr = end_index_of_dram();
+  while (true) {
+    HeapRegion* hr = _regions.get_by_index(curr);
+    if (hr == NULL) {
+      // Region not yet committed: commit it here by shrinking nv-dimm by one region.
+      if (!(total_regions_committed() < _max_regions)) {
+        uint res = shrink_nvdimm(1);
+        if (res == 1) {
+          res = expand_in_range(curr, curr, 1, NULL);
+          assert(res == 1, "We should be able to expand since shrink was successful");
+          *expanded = true;
+          return curr;
+        }
+      }
+    } else if (hr->is_free()) {
+      *expanded = false;
+      return curr;
+    }
+    if (curr == start_index_of_dram()) {
+      return G1_NO_HRM_INDEX;
+    }
+    curr--;
+  }
+}
+
+// We need to override this since region 0, which serves as the dummy region in the base class, may not be available here.
+// This is a corner case when the number of regions is small. When adaptive sizing is used, the initial heap size
+// could be just one region. That region is committed in dram to be used for the young generation, leaving region 0 (which is in nv-dimm)
+// unavailable.
+HeapRegion* HeterogeneousHeapRegionManager::get_dummy_region() {
+  uint curr = 0;
+
+  while (curr < _regions.length()) {
+    if (is_available(curr)) {
+      return new_heap_region(curr);
+    }
+    curr++;
+  }
+  assert(false, "We should always find a region available for dummy region");
+  return NULL;
+}
+
+// First shrink in dram, then in nv-dimm.
+uint HeterogeneousHeapRegionManager::shrink_by(uint num_regions) {
+  // This call is made at the end of a full collection. Before making this call the region sets are torn down (tear_down_region_sets()).
+  // So the shrink() calls below do not need to remove uncommitted regions from the free list.
+  uint ret = shrink_dram(num_regions, false /* update_free_list */);
+  ret += shrink_nvdimm(num_regions - ret, false /* update_free_list */);
+  return ret;
+}
+
+void HeterogeneousHeapRegionManager::verify() {
+  HeapRegionManager::verify();
+}
+
+uint HeterogeneousHeapRegionManager::free_list_dram_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_dram(), end_index_of_dram());
+}
+
+uint HeterogeneousHeapRegionManager::free_list_nvdimm_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_nvdimm(), end_index_of_nvdimm());
+}
+
+bool HeterogeneousHeapRegionManager::is_in_nvdimm(uint index) const {
+  return index >= start_index_of_nvdimm() && index <= end_index_of_nvdimm();
+}
+
+bool HeterogeneousHeapRegionManager::is_in_dram(uint index) const {
+  return index >= start_index_of_dram() && index <= end_index_of_dram();
+}
+
+// We have to make sure full collection copies all surviving objects to NV-DIMM.
+// We might not have enough regions in nvdimm_set, so we need to make more regions on NV-DIMM available for full collection.
+// Note: by doing this we are breaking the invariant that the total number of committed regions is equal to the current heap size.
+// After the full collection ends, we will re-establish this invariant by freeing DRAM regions.
+void HeterogeneousHeapRegionManager::prepare_for_full_collection_start() {
+  _total_commited_before_full_gc = total_regions_committed() - _no_borrowed_regions;
+  _no_borrowed_regions = 0;
+  expand_nvdimm(num_committed_dram(), NULL);
+  remove_all_free_regions();
+}
+
+// We need to bring the total committed region count back to what it was before the full collection started.
+// Unless we are close to OOM, all regular (not pinned) regions in DRAM should be free.
+// We shrink all free regions in DRAM and, if needed, from NV-DIMM (when there are pinned DRAM regions).
+// If we can't bring the committed region count back to _total_commited_before_full_gc, we keep the extra count in _no_borrowed_regions.
+// When this GC finishes, new regions won't be allocated since has_borrowed_regions() is true. The VM will be forced to re-try GC
+// with clear soft references, followed by an OOM error in the worst case.
+void HeterogeneousHeapRegionManager::prepare_for_full_collection_end() {
+  uint shrink_size = total_regions_committed() - _total_commited_before_full_gc;
+  uint so_far = 0;
+  uint idx_last_found = 0;
+  uint num_last_found;
+  uint end = (uint)_regions.length() - 1;
+  while (so_far < shrink_size &&
+         (num_last_found = find_empty_in_range_reverse(0, end, &idx_last_found)) > 0) {
+    uint to_uncommit = MIN2(shrink_size - so_far, num_last_found);
+    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
+    so_far += to_uncommit;
+    end = idx_last_found;
+  }
+  // See comment above the function.
+  _no_borrowed_regions = shrink_size - so_far;
+}
+
+uint HeterogeneousHeapRegionManager::start_index_of_dram() const { return _max_regions; }
+
+uint HeterogeneousHeapRegionManager::end_index_of_dram() const { return 2*_max_regions - 1; }
+
+uint HeterogeneousHeapRegionManager::start_index_of_nvdimm() const { return 0; }
+
+uint HeterogeneousHeapRegionManager::end_index_of_nvdimm() const { return _max_regions - 1; }
+
+// This function is called when there are no free nv-dimm regions.
+// It borrows a region from the set of unavailable regions in nv-dimm for GC purposes.
+HeapRegion* HeterogeneousHeapRegionManager::borrow_old_region_for_gc() {
+  assert(free_list_nvdimm_length() == 0, "this function should be called only when there are no nv-dimm regions in free list");
+
+  uint ret = expand_nvdimm(1, NULL);
+  if (ret != 1) {
+    return NULL;
+  }
+  HeapRegion* hr = _free_list.remove_region(true /*from_head*/);
+  assert(is_in_nvdimm(hr->hrm_index()), "allocated region should be in nv-dimm");
+  _no_borrowed_regions++;
+  return hr;
+}
+
+bool HeterogeneousHeapRegionManager::has_borrowed_regions() const {
+  return _no_borrowed_regions > 0;
+}
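
Annotation: the borrow accounting implemented above is easiest to follow in isolation. The following standalone sketch is illustrative only and not part of this changeset; BorrowModel and its members are hypothetical stand-ins for total_regions_committed(), the current heap size in regions, and _no_borrowed_regions.

    #include <cassert>

    // Minimal model of the borrow protocol in the hunk above.
    struct BorrowModel {
      unsigned committed;   // stands in for total_regions_committed()
      unsigned heap_size;   // current heap size, in regions
      unsigned borrowed;    // mirrors _no_borrowed_regions

      bool has_borrowed() const { return borrowed > 0; }

      // Mutator request: refused while regions are borrowed, forcing a
      // full collection, as in allocate_free_region().
      bool mutator_alloc() const { return !has_borrowed(); }

      // GC request with an empty free list: commit one extra region and
      // remember that committed now exceeds the heap size.
      void borrow_for_gc() {
        committed++;
        borrowed++;
      }

      // prepare_for_full_collection_end() analogue: uncommit what we can
      // and carry the remainder forward as a borrow.
      void settle(unsigned uncommittable) {
        unsigned excess = committed - heap_size;
        unsigned freed = excess < uncommittable ? excess : uncommittable;
        committed -= freed;
        borrowed = committed - heap_size;
      }
    };

    int main() {
      BorrowModel m{8, 8, 0};
      assert(m.mutator_alloc());
      m.borrow_for_gc();            // evacuation-failure path
      assert(!m.mutator_alloc());   // mutators now stall
      m.settle(1);                  // the full GC frees one region
      assert(m.borrowed == 0 && m.mutator_alloc());
    }
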
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
+#define SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
+
+#include "gc/g1/heapRegionManager.hpp"
+
+// This class manages heap regions on heterogeneous memory comprising dram and nv-dimm.
+// Regions in dram (dram_set) are used for young objects and archive regions (CDS).
+// Regions in nv-dimm (nvdimm_set) are used for old objects and humongous objects.
+// At any point there are some regions committed on dram and some on nv-dimm with the following guarantees:
+//   1. The total number of regions committed in dram and nv-dimm equals the current size of heap.
+//   2. Consequently, total number of regions committed is less than or equal to Xmx.
+//   3. To maintain guarantee 1, whenever one set grows (new regions committed), the other set shrinks (regions un-committed).
+//      3a. If more dram regions are needed (young generation expansion), a corresponding number of nv-dimm regions is un-committed.
+//      3b. When the old generation or humongous set grows and new regions need to be committed to nv-dimm, a corresponding number
+//          of regions is un-committed in dram.
+class HeterogeneousHeapRegionManager : public HeapRegionManager {
+  const uint _max_regions;
+  uint _max_dram_regions;
+  uint _max_nvdimm_regions;
+  uint _start_index_of_nvdimm;
+  uint _total_commited_before_full_gc;
+  uint _no_borrowed_regions;
+
+  uint total_regions_committed() const;
+  uint num_committed_dram() const;
+  uint num_committed_nvdimm() const;
+
+  // Similar to find_unavailable_from_idx() in the base class; the difference is that this function searches in the range [start, end].
+  uint find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const;
+
+  // Expand into dram. Maintains the invariant that the total number of committed regions does not exceed the current heap size.
+  uint expand_dram(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand into nv-dimm.
+  uint expand_nvdimm(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand by finding unavailable regions in [start, end] range.
+  uint expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_workers);
+
+  // Shrink dram set of regions.
+  uint shrink_dram(uint num_regions, bool update_free_list = true);
+
+  // Shrink nv-dimm set of regions.
+  uint shrink_nvdimm(uint num_regions, bool update_free_list = true);
+
+  // Shrink regions from [start, end] range.
+  uint shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list = true);
+
+  // Similar to find_empty_from_idx_reverse() in the base class, except that it searches in the range [start, end].
+  uint find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx);
+
+  // Similar to find_contiguous() in the base class, restricted to the [start, end] range.
+  uint find_contiguous(size_t start, size_t end, size_t num, bool empty_only);
+
+  // This function is called when there are no free nv-dimm regions.
+  // It borrows a region from the set of unavailable regions in nv-dimm for GC purposes.
+  HeapRegion* borrow_old_region_for_gc();
+
+  uint free_list_dram_length() const;
+  uint free_list_nvdimm_length() const;
+
+  // Is the region with the given index in nv-dimm?
+  bool is_in_nvdimm(uint index) const;
+  bool is_in_dram(uint index) const;
+
+public:
+
+  // Minimal constructor; full setup happens in the initialize() method.
+  HeterogeneousHeapRegionManager(uint num_regions) : _max_regions(num_regions), _max_dram_regions(0),
+                                                     _max_nvdimm_regions(0), _start_index_of_nvdimm(0),
+                                                     _total_commited_before_full_gc(0), _no_borrowed_regions(0)
+  {}
+
+  static HeterogeneousHeapRegionManager* manager();
+
+  virtual void initialize(G1RegionToSpaceMapper* heap_storage,
+                          G1RegionToSpaceMapper* prev_bitmap,
+                          G1RegionToSpaceMapper* next_bitmap,
+                          G1RegionToSpaceMapper* bot,
+                          G1RegionToSpaceMapper* cardtable,
+                          G1RegionToSpaceMapper* card_counts);
+
+  uint start_index_of_nvdimm() const;
+  uint start_index_of_dram() const;
+  uint end_index_of_nvdimm() const;
+  uint end_index_of_dram() const;
+
+  // Override.
+  HeapRegion* get_dummy_region();
+
+  // Adjust dram_set to provision 'expected_num_regions' regions.
+  void adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers);
+
+  // Prepare heap regions before and after full collection.
+  void prepare_for_full_collection_start();
+  void prepare_for_full_collection_end();
+
+  virtual HeapRegion* allocate_free_region(HeapRegionType type);
+
+  // Return the maximum number of regions that the heap can expand to.
+  uint max_expandable_length() const;
+
+  // Override. Expand in nv-dimm.
+  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
+
+  // Override.
+  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
+
+  // Override. This function is called for humongous allocation, so we need to find empty regions in nv-dimm.
+  uint find_contiguous_only_empty(size_t num);
+
+  // Override. This function is called for humongous allocation, so we need to find empty or unavailable regions in nv-dimm.
+  uint find_contiguous_empty_or_unavailable(size_t num);
+
+  // Overrides base class implementation to find highest free region in dram.
+  uint find_highest_free(bool* expanded);
+
+  // Override. This function is called to shrink the heap; we shrink in dram first, then in nv-dimm.
+  uint shrink_by(uint num_regions_to_remove);
+
+  bool has_borrowed_regions() const;
+
+  void verify();
+};
+
+#endif // SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
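
Annotation: the split index space declared by start_index_of_nvdimm()/end_index_of_dram() above is easy to make concrete. The standalone sketch below assumes _max_regions == 4, an illustrative value only; in_nvdimm/in_dram are hypothetical mirrors of the member functions in the header.

    #include <cassert>

    // With _max_regions == 4 the manager addresses 2 * 4 = 8 region slots:
    //   nv-dimm: indexes [0, 3]   (old + humongous regions)
    //   dram:    indexes [4, 7]   (eden + survivor regions)
    static const unsigned kMaxRegions = 4;

    static bool in_nvdimm(unsigned i) { return i <= kMaxRegions - 1; }
    static bool in_dram(unsigned i)   { return i >= kMaxRegions && i <= 2 * kMaxRegions - 1; }

    int main() {
      assert(in_nvdimm(0) && in_nvdimm(3) && !in_nvdimm(4));
      assert(in_dram(4) && in_dram(7) && !in_dram(3));
    }
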
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -53,7 +53,7 @@
   nonstatic_field(HeapRegionManager, _num_committed,    uint)                 \
                                                                               \
   nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
-  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager)    \
+  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager*)    \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
   nonstatic_field(G1CollectedHeap, _archive_set,        HeapRegionSetBase)    \
--- a/src/hotspot/share/gc/parallel/adjoiningGenerations.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerations.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
+#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
 #include "gc/parallel/generationSizer.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
@@ -40,8 +41,8 @@
 AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
                                            GenerationSizer* policy,
                                            size_t alignment) :
-  _virtual_spaces(old_young_rs, policy->min_old_size(),
-                  policy->min_young_size(), alignment) {
+  _virtual_spaces(new AdjoiningVirtualSpaces(old_young_rs, policy->min_old_size(),
+                                             policy->min_young_size(), alignment)) {
   size_t init_low_byte_size = policy->initial_old_size();
   size_t min_low_byte_size = policy->min_old_size();
   size_t max_low_byte_size = policy->max_old_size();
@@ -61,21 +62,21 @@
     // generation.
 
     // Does the actual creation of the virtual spaces
-    _virtual_spaces.initialize(max_low_byte_size,
-                               init_low_byte_size,
-                               init_high_byte_size);
+    _virtual_spaces->initialize(max_low_byte_size,
+                                init_low_byte_size,
+                                init_high_byte_size);
 
     // Place the young gen at the high end.  Passes in the virtual space.
-    _young_gen = new ASPSYoungGen(_virtual_spaces.high(),
-                                  _virtual_spaces.high()->committed_size(),
+    _young_gen = new ASPSYoungGen(_virtual_spaces->high(),
+                                  _virtual_spaces->high()->committed_size(),
                                   min_high_byte_size,
-                                  _virtual_spaces.high_byte_size_limit());
+                                  _virtual_spaces->high_byte_size_limit());
 
     // Place the old gen at the low end. Passes in the virtual space.
-    _old_gen = new ASPSOldGen(_virtual_spaces.low(),
-                              _virtual_spaces.low()->committed_size(),
+    _old_gen = new ASPSOldGen(_virtual_spaces->low(),
+                              _virtual_spaces->low()->committed_size(),
                               min_low_byte_size,
-                              _virtual_spaces.low_byte_size_limit(),
+                              _virtual_spaces->low_byte_size_limit(),
                               "old", 1);
 
     young_gen()->initialize_work();
@@ -92,8 +93,9 @@
   } else {
 
     // Layout the reserved space for the generations.
+    // If OldGen is allocated on nv-dimm, we need to split the reservation (this is required on Windows).
     ReservedSpace old_rs   =
-      virtual_spaces()->reserved_space().first_part(max_low_byte_size);
+      virtual_spaces()->reserved_space().first_part(max_low_byte_size, policy->is_hetero_heap() /* split */);
     ReservedSpace heap_rs  =
       virtual_spaces()->reserved_space().last_part(max_low_byte_size);
     ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
@@ -117,6 +119,8 @@
   }
 }
 
+AdjoiningGenerations::AdjoiningGenerations() { }
+
 size_t AdjoiningGenerations::reserved_byte_size() {
   return virtual_spaces()->reserved_space().size();
 }
@@ -279,3 +283,13 @@
     }
   }
 }
+
+AdjoiningGenerations* AdjoiningGenerations::create_adjoining_generations(ReservedSpace old_young_rs,
+                                                                         GenerationSizer* policy,
+                                                                         size_t alignment) {
+  if (policy->is_hetero_heap() && UseAdaptiveGCBoundary) {
+    return new AdjoiningGenerationsForHeteroHeap(old_young_rs, policy, alignment);
+  } else {
+    return new AdjoiningGenerations(old_young_rs, policy, alignment);
+  }
+}
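
Annotation: the ReservedSpace carve-up in the hunk above (first_part()/last_part() around max_low_byte_size) reduces to simple offset arithmetic. A minimal sketch under assumed, illustrative sizes; Range and its helpers are hypothetical stand-ins, and the 'split' flag is not modeled.

    #include <cassert>
    #include <cstddef>

    // Toy stand-in for the reservation split: only offsets and sizes.
    struct Range { size_t base, size; };

    static Range first_part(Range r, size_t n) { return {r.base, n}; }
    static Range last_part(Range r, size_t n)  { return {r.base + n, r.size - n}; }

    int main() {
      const size_t M = 1024 * 1024;
      Range reservation{0, 96 * M};             // illustrative numbers only
      size_t max_old = 64 * M, max_young = 32 * M;

      Range old_rs   = first_part(reservation, max_old);
      Range heap_rs  = last_part(reservation, max_old);
      Range young_rs = first_part(heap_rs, max_young);

      assert(old_rs.size + young_rs.size == reservation.size);
      assert(young_rs.base == old_rs.base + old_rs.size);  // adjoining
    }
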
--- a/src/hotspot/share/gc/parallel/adjoiningGenerations.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerations.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,27 +43,29 @@
 class AdjoiningGenerations : public CHeapObj<mtGC> {
   friend class VMStructs;
  private:
-  // The young generation and old generation, respectively
-  PSYoungGen* _young_gen;
-  PSOldGen* _old_gen;
-
-  // The spaces used by the two generations.
-  AdjoiningVirtualSpaces _virtual_spaces;
-
   // Move boundary up to expand old gen.  Checks are made to
   // determine if the move can be done with specified limits.
   void request_old_gen_expansion(size_t desired_change_in_bytes);
   // Move boundary down to expand young gen.
   bool request_young_gen_expansion(size_t desired_change_in_bytes);
 
+ protected:
+   // The young generation and old generation, respectively
+   PSYoungGen* _young_gen;
+   PSOldGen* _old_gen;
+
+   // The spaces used by the two generations.
+   AdjoiningVirtualSpaces* _virtual_spaces;
+
  public:
   AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
+  AdjoiningGenerations();
 
   // Accessors
   PSYoungGen* young_gen() { return _young_gen; }
   PSOldGen* old_gen() { return _old_gen; }
 
-  AdjoiningVirtualSpaces* virtual_spaces() { return &_virtual_spaces; }
+  AdjoiningVirtualSpaces* virtual_spaces() { return _virtual_spaces; }
 
   // Additional space is needed in the old generation.  Check
   // the available space and attempt to move the boundary if more space
@@ -74,7 +76,9 @@
 
   // Return the total byte size of the reserved space
   // for the adjoining generations.
-  size_t reserved_byte_size();
+  virtual size_t reserved_byte_size();
+
+  // Return a new AdjoiningGenerations instance based on the collector policy (specifically, whether the heap is heterogeneous).
+  static AdjoiningGenerations* create_adjoining_generations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
 };
-
 #endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
+#include "gc/parallel/adjoiningVirtualSpaces.hpp"
+#include "gc/parallel/generationSizer.hpp"
+#include "gc/parallel/parallelScavengeHeap.hpp"
+#include "gc/parallel/psFileBackedVirtualspace.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "memory/resourceArea.hpp"
+#include "utilities/align.hpp"
+#include "utilities/ostream.hpp"
+
+// Create two virtual spaces (HeteroVirtualSpaces): low() on nv-dimm memory, high() on dram.
+// Create ASPSOldGen and ASPSYoungGen the same way as in the base class.
+
+AdjoiningGenerationsForHeteroHeap::AdjoiningGenerationsForHeteroHeap(ReservedSpace old_young_rs, GenerationSizer* policy, size_t alignment) :
+  _total_size_limit(policy->max_heap_byte_size()) {
+  size_t init_old_byte_size = policy->initial_old_size();
+  size_t min_old_byte_size = policy->min_old_size();
+  size_t max_old_byte_size = policy->max_old_size();
+  size_t init_young_byte_size = policy->initial_young_size();
+  size_t min_young_byte_size = policy->min_young_size();
+  size_t max_young_byte_size = policy->max_young_size();
+  // Create HeteroVirtualSpaces, which is composed of non-overlapping virtual spaces.
+  HeteroVirtualSpaces* hetero_virtual_spaces = new HeteroVirtualSpaces(old_young_rs, min_old_byte_size,
+                                                                       min_young_byte_size, _total_size_limit, alignment);
+
+  assert(min_old_byte_size <= init_old_byte_size &&
+         init_old_byte_size <= max_old_byte_size, "Parameter check");
+  assert(min_young_byte_size <= init_young_byte_size &&
+         init_young_byte_size <= max_young_byte_size, "Parameter check");
+
+  assert(UseAdaptiveGCBoundary, "Should be used only when UseAdaptiveGCBoundary is true");
+
+  // Initialize the virtual spaces. Then pass a virtual space to each generation
+  // for initialization of the generation.
+
+  // Does the actual creation of the virtual spaces
+  hetero_virtual_spaces->initialize(max_old_byte_size, init_old_byte_size, init_young_byte_size);
+
+  _young_gen = new ASPSYoungGen(hetero_virtual_spaces->high(),
+                                hetero_virtual_spaces->high()->committed_size() /* initial_size */,
+                                min_young_byte_size,
+                                hetero_virtual_spaces->max_young_size());
+
+  _old_gen = new ASPSOldGen(hetero_virtual_spaces->low(),
+                            hetero_virtual_spaces->low()->committed_size() /* initial_size */,
+                            min_old_byte_size,
+                            hetero_virtual_spaces->max_old_size(), "old", 1);
+
+  young_gen()->initialize_work();
+  assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(), "Consistency check");
+  assert(old_young_rs.size() >= young_gen()->gen_size_limit(), "Consistency check");
+
+  old_gen()->initialize_work("old", 1);
+  assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(), "Consistency check");
+  assert(old_young_rs.size() >= old_gen()->gen_size_limit(), "Consistency check");
+
+  _virtual_spaces = hetero_virtual_spaces;
+}
+
+size_t AdjoiningGenerationsForHeteroHeap::required_reserved_memory(GenerationSizer* policy) {
+  // This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
+  size_t max_yg_size = policy->max_heap_byte_size() - policy->min_old_size();
+  // This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
+  size_t max_old_size = policy->max_heap_byte_size() - policy->min_young_size();
+
+  return max_yg_size + max_old_size;
+}
+
+// We override this function since the size of the reserved space here is larger than the heap size, and
+// callers expect this function to return the heap size.
+size_t AdjoiningGenerationsForHeteroHeap::reserved_byte_size() {
+  return total_size_limit();
+}
+
+AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::HeteroVirtualSpaces(ReservedSpace rs, size_t min_old_byte_size, size_t min_yg_byte_size, size_t max_total_size, size_t alignment) :
+                                                                            AdjoiningVirtualSpaces(rs, min_old_byte_size, min_yg_byte_size, alignment),
+                                                                            _max_total_size(max_total_size),
+                                                                            _min_old_byte_size(min_old_byte_size), _min_young_byte_size(min_yg_byte_size),
+                                                                            _max_old_byte_size(_max_total_size - _min_young_byte_size),
+                                                                            _max_young_byte_size(_max_total_size - _min_old_byte_size) {
+}
+
+void AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::initialize(size_t initial_old_reserved_size, size_t init_old_byte_size,
+                                                                        size_t init_young_byte_size) {
+
+  // This is the reserved space exclusively for old generation.
+  ReservedSpace low_rs = _reserved_space.first_part(_max_old_byte_size, true);
+  // Initially we assign only 'initial_old_reserved_size' of the reserved space to the old virtual space.
+  low_rs = low_rs.first_part(initial_old_reserved_size);
+
+  // This is the reserved space exclusively for young generation.
+  ReservedSpace high_rs = _reserved_space.last_part(_max_old_byte_size).first_part(_max_young_byte_size);
+
+  // Carve out 'initial_young_reserved_size' of reserved space.
+  size_t initial_young_reserved_size = _max_total_size - initial_old_reserved_size;
+  high_rs = high_rs.last_part(_max_young_byte_size - initial_young_reserved_size);
+
+  _low = new PSFileBackedVirtualSpace(low_rs, alignment(), AllocateOldGenAt);
+  if (!static_cast <PSFileBackedVirtualSpace*>(_low)->initialize()) {
+    vm_exit_during_initialization("Could not map space for old generation at given AllocateOldGenAt path");
+  }
+
+  if (!_low->expand_by(init_old_byte_size)) {
+    vm_exit_during_initialization("Could not reserve enough space for object heap");
+  }
+
+  _high = new PSVirtualSpaceHighToLow(high_rs, alignment());
+  if (!_high->expand_by(init_young_byte_size)) {
+    vm_exit_during_initialization("Could not reserve enough space for object heap");
+  }
+}
+
+// Since the virtual spaces are non-overlapping, there is no boundary as such.
+// We replicate the same behavior and maintain the same invariants as the base class 'AdjoiningVirtualSpaces' by
+// increasing the old generation size and decreasing the young generation size by the same amount.
+bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) {
+  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
+  DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
+
+  size_t bytes_needed = change_in_bytes;
+  size_t uncommitted_in_old = MIN2(old_vs()->uncommitted_size(), bytes_needed);
+  bool old_expanded = false;
+
+  // 1. Try to expand old within its reserved space.
+  if (uncommitted_in_old != 0) {
+    if (!old_vs()->expand_by(uncommitted_in_old)) {
+      return false;
+    }
+    old_expanded = true;
+    bytes_needed -= uncommitted_in_old;
+    if (bytes_needed == 0) {
+      return true;
+    }
+  }
+
+  size_t bytes_to_add_in_old = 0;
+
+  // 2. Get uncommitted memory from the young virtual space.
+  size_t young_uncommitted = MIN2(young_vs()->uncommitted_size(), bytes_needed);
+  if (young_uncommitted > 0) {
+    young_vs()->set_reserved(young_vs()->reserved_low_addr() + young_uncommitted,
+                             young_vs()->reserved_high_addr(),
+                             young_vs()->special());
+    bytes_needed -= young_uncommitted;
+    bytes_to_add_in_old = young_uncommitted;
+  }
+
+  // 3. Get committed memory from the young virtual space.
+  if (bytes_needed > 0) {
+    size_t shrink_size = align_down(bytes_needed, young_vs()->alignment());
+    bool ret = young_vs()->shrink_by(shrink_size);
+    assert(ret, "We should be able to shrink young space");
+    young_vs()->set_reserved(young_vs()->reserved_low_addr() + shrink_size,
+                             young_vs()->reserved_high_addr(),
+                             young_vs()->special());
+
+    bytes_to_add_in_old += shrink_size;
+  }
+
+  // 4. Increase size of old space
+  old_vs()->set_reserved(old_vs()->reserved_low_addr(),
+                         old_vs()->reserved_high_addr() + bytes_to_add_in_old,
+                         old_vs()->special());
+  if (!old_vs()->expand_by(bytes_to_add_in_old) && !old_expanded) {
+    return false;
+  }
+
+  DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
+  assert(total_size_after == total_size_before, "should be equal");
+
+  return true;
+}
+
+// See the comment for adjust_boundary_up().
+// Increase the young generation size and decrease the old generation size by the same amount.
+bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_down(size_t change_in_bytes) {
+  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
+  DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
+
+  size_t bytes_needed = change_in_bytes;
+  size_t uncommitted_in_young = MIN2(young_vs()->uncommitted_size(), bytes_needed);
+  bool young_expanded = false;
+
+  // 1. Try to expand young within its reserved space.
+  if (uncommitted_in_young > 0) {
+    if (!young_vs()->expand_by(uncommitted_in_young)) {
+      return false;
+    }
+    young_expanded = true;
+    bytes_needed -= uncommitted_in_young;
+    if (bytes_needed == 0) {
+      return true;
+    }
+  }
+
+  size_t bytes_to_add_in_young = 0;
+
+  // 2. Get uncommitted memory from the old virtual space.
+  size_t old_uncommitted = MIN2(old_vs()->uncommitted_size(), bytes_needed);
+  if (old_uncommitted > 0) {
+    old_vs()->set_reserved(old_vs()->reserved_low_addr(),
+                           old_vs()->reserved_high_addr() - old_uncommitted,
+                           old_vs()->special());
+    bytes_needed -= old_uncommitted;
+    bytes_to_add_in_young = old_uncommitted;
+  }
+
+  // 3. Get committed memory from the old virtual space.
+  if (bytes_needed > 0) {
+    size_t shrink_size = align_down(bytes_needed, old_vs()->alignment());
+    bool ret = old_vs()->shrink_by(shrink_size);
+    assert(ret, "We should be able to shrink old space");
+    old_vs()->set_reserved(old_vs()->reserved_low_addr(),
+                           old_vs()->reserved_high_addr() - shrink_size,
+                           old_vs()->special());
+
+    bytes_to_add_in_young += shrink_size;
+  }
+
+  assert(bytes_to_add_in_young <= change_in_bytes, "should not be more than requested size");
+  // 4. Increase size of young space
+  young_vs()->set_reserved(young_vs()->reserved_low_addr() - bytes_to_add_in_young,
+                           young_vs()->reserved_high_addr(),
+                           young_vs()->special());
+  if (!young_vs()->expand_by(bytes_to_add_in_young) && !young_expanded) {
+    return false;
+  }
+
+  DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
+  assert(total_size_after == total_size_before, "should be equal");
+
+  return true;
+}
+
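
Annotation: adjust_boundary_up() and adjust_boundary_down() above follow the same three-step transfer: commit within the grower's own reservation, then take uncommitted reservation from the donor, then shrink the donor's committed memory. The standalone model below covers the 'up' direction only; Space and grow_old are hypothetical names, and alignment is ignored for brevity.

    #include <cassert>
    #include <cstddef>

    struct Space {
      size_t reserved, committed;
      size_t uncommitted() const { return reserved - committed; }
    };

    static bool grow_old(Space& old_s, Space& young_s, size_t need) {
      // 1. Commit within old's existing reservation.
      size_t take = need < old_s.uncommitted() ? need : old_s.uncommitted();
      old_s.committed += take;
      need -= take;
      if (need == 0) return true;

      // 2. Move uncommitted reservation from young to old.
      size_t from_reserve = need < young_s.uncommitted() ? need : young_s.uncommitted();
      young_s.reserved -= from_reserve;
      need -= from_reserve;

      // 3. Shrink young's committed memory (and reservation) for the rest.
      young_s.committed -= need;
      young_s.reserved -= need;

      // 4. Extend and commit old by the total taken from young.
      old_s.reserved += from_reserve + need;
      old_s.committed += from_reserve + need;
      return true;
    }

    int main() {
      Space old_s{100, 90}, young_s{100, 80};
      size_t total = old_s.reserved + young_s.reserved;
      assert(grow_old(old_s, young_s, 40));                // 10 + 20 + 10
      assert(old_s.reserved + young_s.reserved == total);  // invariant kept
      assert(old_s.committed == 130);
    }
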
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
+#define SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
+
+#include "gc/parallel/adjoiningGenerations.hpp"
+
+class AdjoiningGenerationsForHeteroHeap : public AdjoiningGenerations {
+  friend class VMStructs;
+private:
+  // Maximum total size of the generations. This is equal to the heap size specified by the user.
+  // When adjusting young and old generation sizes, we need to ensure that the sum of the generation sizes does not exceed this.
+  size_t _total_size_limit;
+
+  size_t total_size_limit() const {
+    return _total_size_limit;
+  }
+
+  // HeteroVirtualSpaces creates non-overlapping virtual spaces. Here _low and _high do not share a reserved space, i.e. there is no boundary
+  // separating the two virtual spaces.
+  class HeteroVirtualSpaces : public AdjoiningVirtualSpaces {
+    size_t _max_total_size;
+    size_t _min_old_byte_size;
+    size_t _min_young_byte_size;
+    size_t _max_old_byte_size;
+    size_t _max_young_byte_size;
+
+    // Internally we access the virtual spaces through these methods; it improves readability, since we are not really
+    // dealing with adjoining virtual spaces separated by a boundary as in the base class.
+    // Externally they are accessed using the low() and high() methods of the base class.
+    PSVirtualSpace* young_vs() { return high(); }
+    PSVirtualSpace* old_vs() { return low(); }
+
+  public:
+    HeteroVirtualSpaces(ReservedSpace rs,
+                        size_t min_old_byte_size,
+                        size_t min_young_byte_size, size_t max_total_size,
+                        size_t alignment);
+
+    // Increase the old generation size and decrease the young generation size by the same amount.
+    bool adjust_boundary_up(size_t size_in_bytes);
+    // Increase the young generation size and decrease the old generation size by the same amount.
+    bool adjust_boundary_down(size_t size_in_bytes);
+
+    size_t max_young_size() const { return _max_young_byte_size; }
+    size_t max_old_size() const { return _max_old_byte_size; }
+
+    void initialize(size_t initial_old_reserved_size, size_t init_low_byte_size,
+                    size_t init_high_byte_size);
+  };
+
+public:
+  AdjoiningGenerationsForHeteroHeap(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
+
+  // Given the size policy, calculate the total amount of memory that needs to be reserved.
+  // We need to reserve more memory than Xmx, since we use non-overlapping virtual spaces for the young and old generations.
+  static size_t required_reserved_memory(GenerationSizer* policy);
+
+  // Return the total byte size of the reserved space
+  size_t reserved_byte_size();
+};
+#endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
+
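
Annotation: required_reserved_memory() above reserves more address space than Xmx because the two generations no longer share one reservation. A worked numeric example (all sizes are illustrative only):

    #include <cassert>

    int main() {
      const unsigned long long G = 1024ULL * 1024 * 1024;
      unsigned long long xmx = 8 * G, min_old = 1 * G, min_young = 1 * G;
      unsigned long long max_young = xmx - min_old;    // 7 GB: how far young can grow
      unsigned long long max_old   = xmx - min_young;  // 7 GB: how far old can grow
      // Reserve 14 GB of address space for an 8 GB heap.
      assert(max_young + max_old == 14 * G);
    }
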
--- a/src/hotspot/share/gc/parallel/adjoiningVirtualSpaces.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/adjoiningVirtualSpaces.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,8 @@
 // moved up consistently.  AdjoiningVirtualSpaces provide the
 // interfaces for moving the this boundary.
 
-class AdjoiningVirtualSpaces {
+class AdjoiningVirtualSpaces : public CHeapObj<mtGC> {
+protected:
   // space at the high end and the low end, respectively
   PSVirtualSpace*    _high;
   PSVirtualSpace*    _low;
@@ -84,17 +85,17 @@
                          size_t alignment);
 
   // accessors
-  PSVirtualSpace* high() { return _high; }
-  PSVirtualSpace* low()  { return _low; }
+  virtual PSVirtualSpace* high() { return _high; }
+  virtual PSVirtualSpace* low()  { return _low; }
   ReservedSpace reserved_space() { return _reserved_space; }
   size_t min_low_byte_size() { return _min_low_byte_size; }
   size_t min_high_byte_size() { return _min_high_byte_size; }
   size_t alignment() const { return _alignment; }
 
   // move boundary between the two spaces up
-  bool adjust_boundary_up(size_t size_in_bytes);
+  virtual bool adjust_boundary_up(size_t size_in_bytes);
   // and down
-  bool adjust_boundary_down(size_t size_in_bytes);
+  virtual bool adjust_boundary_down(size_t size_in_bytes);
 
   // Maximum byte size for the high space.
   size_t high_byte_size_limit() {
@@ -107,9 +108,8 @@
 
   // Sets the boundaries for the virtual spaces and commits and
   // initial size;
-  void initialize(size_t max_low_byte_size,
+  virtual void initialize(size_t max_low_byte_size,
                   size_t init_low_byte_size,
                   size_t init_high_byte_size);
 };
-
 #endif // SHARE_VM_GC_PARALLEL_ADJOININGVIRTUALSPACES_HPP
--- a/src/hotspot/share/gc/parallel/generationSizer.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/generationSizer.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,3 +67,11 @@
   }
   GenCollectorPolicy::initialize_size_info();
 }
+
+bool GenerationSizer::is_hetero_heap() const {
+  return false;
+}
+
+size_t GenerationSizer::heap_reserved_size_bytes() const {
+  return _max_heap_byte_size;
+}
--- a/src/hotspot/share/gc/parallel/generationSizer.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/generationSizer.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,6 @@
 
 class GenerationSizer : public GenCollectorPolicy {
  private:
-
   // The alignment used for boundary between young gen and old gen
   static size_t default_gen_alignment() { return 64 * K * HeapWordSize; }
 
@@ -41,5 +40,9 @@
   void initialize_alignments();
   void initialize_flags();
   void initialize_size_info();
+
+ public:
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
 };
 #endif // SHARE_VM_GC_PARALLEL_GENERATIONSIZER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/heterogeneousGenerationSizer.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/heterogeneousGenerationSizer.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+const double HeterogeneousGenerationSizer::MaxRamFractionForYoung = 0.8;
+
+// Check the available dram memory to limit NewSize and MaxNewSize before
+// calling base class initialize_flags().
+void HeterogeneousGenerationSizer::initialize_flags() {
+  FormatBuffer<100> calc_str("");
+
+  julong phys_mem;
+  // If MaxRAM is specified, we use that as the maximum physical memory available.
+  if (FLAG_IS_DEFAULT(MaxRAM)) {
+    phys_mem = os::physical_memory();
+    calc_str.append("Physical_Memory");
+  } else {
+    phys_mem = (julong)MaxRAM;
+    calc_str.append("MaxRAM");
+  }
+
+  julong reasonable_max = phys_mem;
+
+  // If either MaxRAMFraction or MaxRAMPercentage is specified, we use it to calculate
+  // a reasonable max size for the young generation.
+  if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
+    reasonable_max = (julong)(phys_mem / MaxRAMFraction);
+    calc_str.append(" / MaxRAMFraction");
+  } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
+    reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+    calc_str.append(" * MaxRAMPercentage / 100");
+  } else {
+    // We use our own fraction to calculate the max size of the young generation.
+    reasonable_max = phys_mem * MaxRamFractionForYoung;
+    calc_str.append(" * %0.2f", MaxRamFractionForYoung);
+  }
+  reasonable_max = align_up(reasonable_max, _gen_alignment);
+
+  if (MaxNewSize > reasonable_max) {
+    if (FLAG_IS_CMDLINE(MaxNewSize)) {
+      log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            (size_t)reasonable_max, calc_str.buffer());
+    } else {
+      log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
+                         "Dram usage can be lowered by setting MaxNewSize to a lower value", (size_t)reasonable_max, calc_str.buffer());
+    }
+    MaxNewSize = reasonable_max;
+  }
+  if (NewSize > reasonable_max) {
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            (size_t)reasonable_max, calc_str.buffer());
+    }
+    NewSize = reasonable_max;
+  }
+
+  // After setting new size flags, call base class initialize_flags()
+  GenerationSizer::initialize_flags();
+}
+
+bool HeterogeneousGenerationSizer::is_hetero_heap() const {
+  return true;
+}
+
+size_t HeterogeneousGenerationSizer::heap_reserved_size_bytes() const {
+  if (UseAdaptiveGCBoundary) {
+    // This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
+    size_t max_yg_size = _max_heap_byte_size - _min_old_size;
+    // This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
+    size_t max_old_size = _max_heap_byte_size - _min_young_size;
+
+    return max_yg_size + max_old_size;
+  } else {
+    return _max_heap_byte_size;
+  }
+}
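
Annotation: the default clamp in initialize_flags() above, when neither MaxRAMFraction nor MaxRAMPercentage is set, is reasonable_max = physical memory * MaxRamFractionForYoung (0.8). A small numeric sketch; the 16 GB figure is illustrative only:

    #include <cassert>

    int main() {
      const unsigned long long G = 1024ULL * 1024 * 1024;
      unsigned long long phys_mem = 16 * G;                 // illustrative dram size
      unsigned long long reasonable_max =
          (unsigned long long)(phys_mem * 0.8);             // 12.8 GB of dram for young
      assert(reasonable_max < phys_mem);
      // Any MaxNewSize above this value would be lowered, with a log message.
    }
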
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/heterogeneousGenerationSizer.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
+#define SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
+
+#include "gc/parallel/generationSizer.hpp"
+
+// There is a nice batch of tested generation sizing code in
+// GenCollectorPolicy. Let's reuse it!
+
+class HeterogeneousGenerationSizer : public GenerationSizer {
+private:
+  // Max fraction of dram to use for the young generation when MaxRAMFraction and
+  // MaxRAMPercentage are not specified on the command line.
+  static const double MaxRamFractionForYoung;
+
+protected:
+  virtual void initialize_flags();
+
+public:
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
+};
+#endif // SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/parallel/heterogeneousGenerationSizer.hpp"
 #include "gc/parallel/parallelArguments.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
@@ -93,5 +94,9 @@
 }
 
 CollectedHeap* ParallelArguments::create_heap() {
-  return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
+  if (AllocateOldGenAt != NULL) {
+    return create_heap_with_policy<ParallelScavengeHeap, HeterogeneousGenerationSizer>();
+  } else {
+    return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
+  }
 }
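
Annotation: the dispatch above keys off the AllocateOldGenAt flag introduced by this changeset. For reference, a launch line of the following shape would select HeterogeneousGenerationSizer (the path and class name MyApp are illustrative only):

    java -XX:+UseParallelGC -XX:AllocateOldGenAt=/mnt/pmem0 -Xmx8g MyApp
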
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
+#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/generationSizer.hpp"
@@ -58,7 +59,7 @@
 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
 
 jint ParallelScavengeHeap::initialize() {
-  const size_t heap_size = _collector_policy->max_heap_byte_size();
+  size_t heap_size = _collector_policy->heap_reserved_size_bytes();
 
   ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
 
@@ -86,7 +87,7 @@
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
 
-  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
+  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs, _collector_policy, generation_alignment());
 
   _old_gen = _gens->old_gen();
   _young_gen = _gens->young_gen();
@@ -104,7 +105,7 @@
                              GCTimeRatio
                              );
 
-  assert(!UseAdaptiveGCBoundary ||
+  assert(_collector_policy->is_hetero_heap() || !UseAdaptiveGCBoundary ||
     (old_gen()->virtual_space()->high_boundary() ==
      young_gen()->virtual_space()->low_boundary()),
     "Boundaries must meet");
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -111,6 +111,8 @@
 
   virtual CollectorPolicy* collector_policy() const { return _collector_policy; }
 
+  virtual GenerationSizer* ps_collector_policy() const { return _collector_policy; }
+
   virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }
 
   virtual GrowableArray<GCMemoryManager*> memory_managers();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psFileBackedVirtualspace.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/psFileBackedVirtualspace.hpp"
+#include "memory/virtualspace.hpp"
+#include "runtime/os.inline.hpp"
+
+PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* path) : PSVirtualSpace(rs, alignment),
+                                                   _file_path(path), _fd(-1), _mapping_succeeded(false) {
+  assert(!rs.special(), "ReservedSpace passed to PSFileBackedVirtualSpace cannot be special");
+}
+
+bool PSFileBackedVirtualSpace::initialize() {
+  _fd = os::create_file_for_heap(_file_path);
+  if (_fd == -1) {
+    return false;
+  }
+  // We map the reserved space to a file at initialization.
+  char* ret = os::replace_existing_mapping_with_file_mapping(reserved_low_addr(), reserved_size(), _fd);
+  if (ret != reserved_low_addr()) {
+    os::close(_fd);
+    return false;
+  }
+  // _mapping_succeeded is false if we return before this point.
+  // Later expand calls check this flag and return an error if it is false.
+  _mapping_succeeded = true;
+  _special = true;
+  os::close(_fd);
+  return true;
+}
+
+// Delegate to the main constructor, using the default page-size alignment.
+PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, const char* path) :
+  PSFileBackedVirtualSpace(rs, os::vm_page_size(), path) {
+}
+
+bool PSFileBackedVirtualSpace::expand_by(size_t bytes) {
+  assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");
+
+  // If mapping did not succeed during initialization, return false.
+  if (!_mapping_succeeded) {
+    return false;
+  }
+  return PSVirtualSpace::expand_by(bytes);
+
+}
+
+bool PSFileBackedVirtualSpace::shrink_by(size_t bytes) {
+  assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");
+  return PSVirtualSpace::shrink_by(bytes);
+}
+
+size_t PSFileBackedVirtualSpace::expand_into(PSVirtualSpace* space, size_t bytes) {
+  // Not supported, since doing this would change the page mapping, leading to large TLB penalties.
+  assert(false, "expand_into() should not be called for PSFileBackedVirtualSpace");
+  return 0;
+}
+
+void PSFileBackedVirtualSpace::release() {
+  os::close(_fd);
+  _fd = -1;
+  _file_path = NULL;
+
+  PSVirtualSpace::release();
+}
+
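
Annotation: initialize() above replaces an anonymous reservation with a file mapping and then treats the space as fully committed (_special). The real work lives in os::create_file_for_heap() and os::replace_existing_mapping_with_file_mapping(); the POSIX-flavoured sketch below only illustrates the replace-mapping idea and is not the HotSpot implementation (map_file_over is a hypothetical helper).

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstddef>

    // Hypothetical sketch: map a file over an existing reservation at 'base'.
    // Error handling is reduced to returning nullptr.
    static char* map_file_over(char* base, size_t size, const char* path) {
      int fd = open(path, O_RDWR | O_CREAT, 0600);
      if (fd < 0) return nullptr;
      if (ftruncate(fd, (off_t)size) != 0) { close(fd); return nullptr; }
      // MAP_FIXED atomically replaces the anonymous reservation at 'base'.
      void* p = mmap(base, size, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_FIXED, fd, 0);
      close(fd);  // the mapping keeps the file alive, as in initialize()
      return p == MAP_FAILED ? nullptr : (char*)p;
    }
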
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psFileBackedVirtualspace.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
+#define SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
+
+#include "gc/parallel/psVirtualspace.hpp"
+
+class PSFileBackedVirtualSpace : public PSVirtualSpace {
+private:
+  const char* _file_path;
+  int _fd;
+  bool _mapping_succeeded;
+public:
+  PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* file_path);
+  PSFileBackedVirtualSpace(ReservedSpace rs, const char* file_path);
+
+  bool   initialize();
+  bool   expand_by(size_t bytes);
+  bool   shrink_by(size_t bytes);
+  size_t expand_into(PSVirtualSpace* space, size_t bytes);
+  void   release();
+};
+#endif // SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
+
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -27,6 +27,7 @@
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psCardTable.hpp"
+#include "gc/parallel/psFileBackedVirtualspace.hpp"
 #include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
@@ -71,7 +72,14 @@
 
 void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
 
-  _virtual_space = new PSVirtualSpace(rs, alignment);
+  if (ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
+    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
+    if (!static_cast<PSFileBackedVirtualSpace*>(_virtual_space)->initialize()) {
+      vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
+    }
+  } else {
+    _virtual_space = new PSVirtualSpace(rs, alignment);
+  }
   if (!_virtual_space->expand_by(_init_gen_size)) {
     vm_exit_during_initialization("Could not reserve enough space for "
                                   "object heap");
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1995,7 +1995,10 @@
   assert(young_gen->virtual_space()->alignment() ==
          old_gen->virtual_space()->alignment(), "alignments do not match");
 
-  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
+  // We also return false for a heterogeneous heap: the old generation cannot absorb
+  // eden's data when it lives on different memory (e.g. NV-DIMM) than the young gen.
+  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) ||
+      ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
     return false;
   }
 
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -275,11 +275,7 @@
     template <typename T>
     static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
-                                      size_t length) {
-      return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
-                                dst_obj, dst_offset_in_bytes, dst_raw,
-                                length);
-    }
+                                      size_t length);
 
     // Off-heap oop accesses. These accessors get resolved when
     // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/barrierSet.inline.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
+#define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
+
+#include "gc/shared/barrierSet.hpp"
+#include "oops/accessDecorators.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/objArrayOop.inline.hpp"
+#include "oops/oop.hpp"
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline bool BarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                                                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                                                                      size_t length) {
+  T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
+  T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
+
+  if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
+    // Covariant, copy without checks
+    return Raw::oop_arraycopy(NULL, 0, src, NULL, 0, dst, length);
+  }
+
+  // Copy each element with checking casts
+  Klass* const dst_klass = objArrayOop(dst_obj)->element_klass();
+  for (const T* const end = src + length; src < end; src++, dst++) {
+    const T elem = *src;
+    if (!oopDesc::is_instanceof_or_null(CompressedOops::decode(elem), dst_klass)) {
+      return false;
+    }
+    *dst = elem;
+  }
+
+  return true;
+}
+
+#endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -28,6 +28,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
+#include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
 
 void GCArguments::initialize() {
@@ -53,4 +54,31 @@
     // If class unloading is disabled, also disable concurrent class unloading.
     FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
   }
+
+  if (!FLAG_IS_DEFAULT(AllocateOldGenAt)) {
+    // CompressedOops not supported when AllocateOldGenAt is set.
+    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
+    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
+    // When AllocateOldGenAt is set, large pages cannot be used for the entire heap.
+    // Only the young gen, which stays in DRAM, could use them, but that is not yet supported.
+    FLAG_SET_DEFAULT(UseLargePages, false);
+  }
 }
+
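+// For example (hypothetical paths), "-XX:AllocateHeapAt=/mnt/pmem0
+// -XX:AllocateOldGenAt=/mnt/pmem1" fails the first check below, while
+// "-XX:+UseSerialGC -XX:AllocateOldGenAt=/mnt/pmem0" fails the second.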
+bool GCArguments::check_args_consistency() {
+  bool status = true;
+  if (!FLAG_IS_DEFAULT(AllocateHeapAt) && !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
+    jio_fprintf(defaultStream::error_stream(),
+      "AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
+    status = false;
+  }
+  if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) {
+    jio_fprintf(defaultStream::error_stream(),
+      "AllocateOldGenAt is not supported for selected GC.\n");
+    status = false;
+  }
+  return status;
+}
--- a/src/hotspot/share/gc/shared/gcArguments.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -39,6 +39,7 @@
   virtual void initialize();
   virtual size_t conservative_max_heap_alignment() = 0;
   virtual CollectedHeap* create_heap() = 0;
+  static bool check_args_consistency();
 };
 
 #endif // SHARE_GC_SHARED_GCARGUMENTS_HPP
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -983,7 +983,7 @@
   Node* c = new ProjNode(call,TypeFunc::Control);
   c = igvn.transform(c);
   Node* m = new ProjNode(call, TypeFunc::Memory);
-  c = igvn.transform(m);
+  m = igvn.transform(m);
 
   Node* dest = ac->in(ArrayCopyNode::Dest);
   assert(dest->is_AddP(), "bad input");
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,6 +189,15 @@
   bs->register_potential_barrier_node(this);
 }
 
+uint LoadBarrierNode::size_of() const {
+  return sizeof(*this);
+}
+
+uint LoadBarrierNode::cmp(const Node& n) const {
+  ShouldNotReachHere();
+  return 0;
+}
+
 const Type *LoadBarrierNode::bottom_type() const {
   const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
   Node* in_oop = in(Oop);
@@ -198,6 +207,11 @@
   return TypeTuple::make(Number_of_Outputs, floadbarrier);
 }
 
+const TypePtr* LoadBarrierNode::adr_type() const {
+  ShouldNotReachHere();
+  return NULL;
+}
+
 const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
   const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
   const Type* val_t = phase->type(in(Oop));
@@ -441,6 +455,11 @@
   return NULL;
 }
 
+uint LoadBarrierNode::match_edge(uint idx) const {
+  ShouldNotReachHere();
+  return 0;
+}
+
 void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
   Node* out_res = proj_out_or_null(Oop);
   if (out_res == NULL) {
@@ -1151,7 +1170,7 @@
   if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
     Node* oop_phi = lb->in(LoadBarrierNode::Oop);
 
-    if (oop_phi->in(2) == oop_phi) {
+    if ((oop_phi->req() != 3) || (oop_phi->in(2) == oop_phi)) {
       // Ignore phis with only one input
       return false;
     }
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,10 +62,14 @@
                   bool oop_reload_allowed);
 
   virtual int Opcode() const;
+  virtual uint size_of() const;
+  virtual uint cmp(const Node& n) const;
   virtual const Type *bottom_type() const;
+  virtual const TypePtr* adr_type() const;
   virtual const Type *Value(PhaseGVN *phase) const;
   virtual Node *Identity(PhaseGVN *phase);
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint match_edge(uint idx) const;
 
   LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
                                           bool linear_only,
--- a/src/hotspot/share/gc/z/zArguments.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zArguments.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -19,7 +19,6 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */
 
 #include "precompiled.hpp"
@@ -91,6 +90,9 @@
   // Verification of stacks not (yet) supported, for the same reason
   // we need fixup_partial_loads
   DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
+
+  // Initialize platform specific arguments
+  initialize_platform();
 }
 
 CollectedHeap* ZArguments::create_heap() {
--- a/src/hotspot/share/gc/z/zArguments.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zArguments.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -29,6 +29,9 @@
 class CollectedHeap;
 
 class ZArguments : public GCArguments {
+private:
+  void initialize_platform();
+
 public:
   virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -42,6 +42,11 @@
   }
 
   if (nm->is_unloading()) {
+    // We don't need to take the lock when unlinking nmethods from
+    // the Method, because it is only concurrently unlinked by
+    // the entry barrier, which acquires the per nmethod lock.
+    nm->unlink_from_method(false /* acquire_lock */);
+
     // We can end up calling nmethods that are unloading
     // since we clear compiled ICs lazily. Returning false
     // will re-resolve the call and update the compiled IC.
--- a/src/hotspot/share/gc/z/zNMethodTable.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zNMethodTable.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -611,15 +611,20 @@
       return;
     }
 
+    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
+
     if (nm->is_unloading()) {
       // Unlinking of the dependencies must happen before the
       // handshake separating unlink and purge.
       nm->flush_dependencies(false /* delete_immediately */);
+
+      // We don't need to take the lock when unlinking nmethods from
+      // the Method, because it is only concurrently unlinked by
+      // the entry barrier, which acquires the per nmethod lock.
+      nm->unlink_from_method(false /* acquire_lock */);
       return;
     }
 
-    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
-
     // Heal oops and disarm
     ZNMethodOopClosure cl;
     ZNMethodTable::entry_oops_do(entry, &cl);
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -128,7 +128,8 @@
 }
 
 uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
-  assert(ZThread::is_java() || ZThread::is_vm(), "Should be a Java or VM thread");
+  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
+         "Should be a Java, VM or Runtime worker thread");
 
   // Non-worker small page allocation can never use the reserve
   flags.set_no_reserve();
@@ -193,7 +194,8 @@
 }
 
 uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
-  assert(ZThread::is_java() || ZThread::is_worker() || ZThread::is_vm(), "Unknown thread");
+  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_worker() || ZThread::is_runtime_worker(),
+         "Unknown thread");
 
   ZAllocationFlags flags;
   flags.set_relocation();
--- a/src/hotspot/share/gc/z/zRuntimeWorkers.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zRuntimeWorkers.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -22,7 +22,43 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shared/workgroup.hpp"
 #include "gc/z/zRuntimeWorkers.hpp"
+#include "gc/z/zThread.hpp"
+#include "runtime/mutexLocker.hpp"
+
+class ZRuntimeWorkersInitializeTask : public AbstractGangTask {
+private:
+  const uint _nworkers;
+  uint       _started;
+  Monitor    _monitor;
+
+public:
+  ZRuntimeWorkersInitializeTask(uint nworkers) :
+      AbstractGangTask("ZRuntimeWorkersInitializeTask"),
+      _nworkers(nworkers),
+      _started(0),
+      _monitor(Monitor::leaf,
+               "ZRuntimeWorkersInitialize",
+               false /* allow_vm_block */,
+               Monitor::_safepoint_check_never) {}
+
+  virtual void work(uint worker_id) {
+    // Register as runtime worker
+    ZThread::set_runtime_worker();
+
+    // Wait for all threads to start
+    MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+    if (++_started == _nworkers) {
+      // All threads started
+      ml.notify_all();
+    } else {
+      while (_started != _nworkers) {
+        ml.wait(Monitor::_no_safepoint_check_flag);
+      }
+    }
+  }
+};
 
 ZRuntimeWorkers::ZRuntimeWorkers() :
     _workers("RuntimeWorker",
@@ -35,6 +71,15 @@
   // Initialize worker threads
   _workers.initialize_workers();
   _workers.update_active_workers(nworkers());
+  if (_workers.active_workers() != nworkers()) {
+    vm_exit_during_initialization("Failed to create ZRuntimeWorkers");
+  }
+
+  // Execute task to register threads as runtime workers. This also
+  // helps reduce latency in early safepoints, which otherwise would
+  // have to take on any warmup costs.
+  ZRuntimeWorkersInitializeTask task(nworkers());
+  _workers.run_task(&task);
 }
 
 uint ZRuntimeWorkers::nworkers() const {
--- a/src/hotspot/share/gc/z/zThread.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zThread.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -31,6 +31,7 @@
 __thread bool      ZThread::_is_vm;
 __thread bool      ZThread::_is_java;
 __thread bool      ZThread::_is_worker;
+__thread bool      ZThread::_is_runtime_worker;
 __thread uint      ZThread::_worker_id;
 
 void ZThread::initialize() {
@@ -40,7 +41,8 @@
   _id = (uintptr_t)thread;
   _is_vm = thread->is_VM_thread();
   _is_java = thread->is_Java_thread();
-  _is_worker = thread->is_Worker_thread();
+  _is_worker = false;
+  _is_runtime_worker = false;
   _worker_id = (uint)-1;
 }
 
@@ -56,6 +58,16 @@
   return "Unknown";
 }
 
+void ZThread::set_worker() {
+  ensure_initialized();
+  _is_worker = true;
+}
+
+void ZThread::set_runtime_worker() {
+  ensure_initialized();
+  _is_runtime_worker = true;
+}
+
 bool ZThread::has_worker_id() {
   return _initialized &&
          _is_worker &&
--- a/src/hotspot/share/gc/z/zThread.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zThread.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -29,6 +29,8 @@
 
 class ZThread : public AllStatic {
   friend class ZTask;
+  friend class ZWorkersInitializeTask;
+  friend class ZRuntimeWorkersInitializeTask;
 
 private:
   static __thread bool      _initialized;
@@ -36,6 +38,7 @@
   static __thread bool      _is_vm;
   static __thread bool      _is_java;
   static __thread bool      _is_worker;
+  static __thread bool      _is_runtime_worker;
   static __thread uint      _worker_id;
 
   static void initialize();
@@ -46,6 +49,9 @@
     }
   }
 
+  static void set_worker();
+  static void set_runtime_worker();
+
   static bool has_worker_id();
   static void set_worker_id(uint worker_id);
   static void clear_worker_id();
@@ -73,6 +79,11 @@
     return _is_worker;
   }
 
+  static bool is_runtime_worker() {
+    ensure_initialized();
+    return _is_runtime_worker;
+  }
+
   static uint worker_id() {
     assert(has_worker_id(), "Worker id not initialized");
     return _worker_id;
--- a/src/hotspot/share/gc/z/zWorkers.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/gc/z/zWorkers.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zTask.hpp"
+#include "gc/z/zThread.hpp"
 #include "gc/z/zWorkers.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -64,20 +65,26 @@
   return calculate_nworkers(12.5);
 }
 
-class ZWorkersWarmupTask : public ZTask {
+class ZWorkersInitializeTask : public ZTask {
 private:
   const uint _nworkers;
   uint       _started;
   Monitor    _monitor;
 
 public:
-  ZWorkersWarmupTask(uint nworkers) :
-      ZTask("ZWorkersWarmupTask"),
+  ZWorkersInitializeTask(uint nworkers) :
+      ZTask("ZWorkersInitializeTask"),
       _nworkers(nworkers),
       _started(0),
-      _monitor(Monitor::leaf, "ZWorkersWarmup", false, Monitor::_safepoint_check_never) {}
+      _monitor(Monitor::leaf,
+               "ZWorkersInitialize",
+               false /* allow_vm_block */,
+               Monitor::_safepoint_check_never) {}
 
   virtual void work() {
+    // Register as worker
+    ZThread::set_worker();
+
     // Wait for all threads to start
     MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
     if (++_started == _nworkers) {
@@ -107,10 +114,10 @@
     vm_exit_during_initialization("Failed to create ZWorkers");
   }
 
-  // Warm up worker threads by having them execute a dummy task.
-  // This helps reduce latency in early GC pauses, which otherwise
-  // would have to take on any warmup costs.
-  ZWorkersWarmupTask task(nworkers());
+  // Execute task to register threads as workers. This also helps
+  // reduce latency in early GC pauses, which otherwise would have
+  // to take on any warmup costs.
+  ZWorkersInitializeTask task(nworkers());
   run(&task, nworkers());
 }
 
--- a/src/hotspot/share/include/jvm.h	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/include/jvm.h	Tue Jan 15 10:55:26 2019 -0800
@@ -625,6 +625,22 @@
 JNIEXPORT jobject JNICALL
 JVM_GetInheritedAccessControlContext(JNIEnv *env, jclass cls);
 
+/*
+ * Ensure that code doing a stackwalk and using javaVFrame::locals() to
+ * get the value will see a materialized value and not a scalar-replaced
+ * null value.
+ */
+#define JVM_EnsureMaterializedForStackWalk(env, value) \
+    do {} while(0) // Nothing to do.  The fact that the value escaped
+                   // through a native method is enough.
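+/*
+ * Hypothetical usage sketch: a native entry point that will walk the stack
+ * and read a caller's locals via javaVFrame::locals() calls
+ *     JVM_EnsureMaterializedForStackWalk(env, value);
+ * first. Here the macro expands to nothing, since passing 'value' through
+ * the native boundary is already enough to force materialization.
+ */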
+
 JNIEXPORT jobject JNICALL
 JVM_GetStackAccessControlContext(JNIEnv *env, jclass cls);
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -164,7 +164,7 @@
 uintptr_t JfrSymbolId::unsafe_anonymous_klass_name_hash_code(const InstanceKlass* ik) {
   assert(ik != NULL, "invariant");
   assert(ik->is_unsafe_anonymous(), "invariant");
-  const oop mirror = ik->java_mirror();
+  const oop mirror = ik->java_mirror_no_keepalive();
   assert(mirror != NULL, "invariant");
   return (uintptr_t)mirror->identity_hash();
 }
@@ -174,7 +174,7 @@
   assert(ik->is_unsafe_anonymous(), "invariant");
   assert(0 == hashcode, "invariant");
   char* anonymous_symbol = NULL;
-  const oop mirror = ik->java_mirror();
+  const oop mirror = ik->java_mirror_no_keepalive();
   assert(mirror != NULL, "invariant");
   char hash_buf[40];
   hashcode = unsafe_anonymous_klass_name_hash_code(ik);
--- a/src/hotspot/share/oops/access.inline.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/oops/access.inline.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_OOPS_ACCESS_INLINE_HPP
 #define SHARE_OOPS_ACCESS_INLINE_HPP
 
+#include "gc/shared/barrierSet.inline.hpp"
 #include "gc/shared/barrierSetConfig.inline.hpp"
 #include "oops/access.hpp"
 #include "oops/accessBackend.inline.hpp"
--- a/src/hotspot/share/oops/instanceKlass.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2186,6 +2186,7 @@
   for (int m = 0; m < methods()->length(); m++) {
     MethodData* mdo = methods()->at(m)->method_data();
     if (mdo != NULL) {
+      MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : mdo->extra_data_lock());
       mdo->clean_method_data(/*always_clean*/false);
     }
   }
@@ -3662,14 +3663,14 @@
   }
 }
 
+void InstanceKlass::set_init_state(ClassState state) {
 #ifdef ASSERT
-void InstanceKlass::set_init_state(ClassState state) {
   bool good_state = is_shared() ? (_init_state <= state)
                                                : (_init_state < state);
   assert(good_state || state == allocated, "illegal state transition");
+#endif
   _init_state = (u1)state;
 }
-#endif
 
 #if INCLUDE_JVMTI
 
--- a/src/hotspot/share/oops/instanceKlass.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1242,11 +1242,7 @@
 
 private:
   // initialization state
-#ifdef ASSERT
   void set_init_state(ClassState state);
-#else
-  void set_init_state(ClassState state) { _init_state = (u1)state; }
-#endif
   void set_rewritten()                  { _misc_flags |= _misc_rewritten; }
   void set_init_thread(Thread *thread)  { _init_thread = thread; }
 
--- a/src/hotspot/share/oops/methodData.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/oops/methodData.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1653,11 +1653,6 @@
   }
 }
 
-class CleanExtraDataClosure : public StackObj {
-public:
-  virtual bool is_live(Method* m) = 0;
-};
-
 // Check for entries that reference an unloaded method
 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
   bool _always_clean;
--- a/src/hotspot/share/oops/methodData.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/oops/methodData.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1943,7 +1943,11 @@
 // adjusted in the event of a change in control flow.
 //
 
-class CleanExtraDataClosure;
+class CleanExtraDataClosure : public StackObj {
+public:
+  virtual bool is_live(Method* m) = 0;
+};
+
 
 class MethodData : public Metadata {
   friend class VMStructs;
@@ -2116,11 +2120,12 @@
   static bool profile_parameters_jsr292_only();
   static bool profile_all_parameters();
 
-  void clean_extra_data(CleanExtraDataClosure* cl);
   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
   void verify_extra_data_clean(CleanExtraDataClosure* cl);
 
 public:
+  void clean_extra_data(CleanExtraDataClosure* cl);
+
   static int header_size() {
     return sizeof(MethodData)/wordSize;
   }
--- a/src/hotspot/share/opto/callnode.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/callnode.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1271,6 +1271,17 @@
   return (TypeFunc::Parms == idx);
 }
 
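+// Disconnect the precedence edge from root to this safepoint, if any. Root
+// keeps such edges to safepoints in infinite loops (see add_safepoint());
+// they must be dropped when the safepoint dies or is subsumed.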
+void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
+  assert(Opcode() == Op_SafePoint, "only valid for safepoints in loops");
+  int nb = igvn->C->root()->find_prec_edge(this);
+  if (nb != -1) {
+    igvn->C->root()->rm_prec(nb);
+  }
+}
+
 //==============  SafePointScalarObjectNode  ==============
 
 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
--- a/src/hotspot/share/opto/callnode.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/callnode.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -462,6 +462,8 @@
     return !_replaced_nodes.is_empty();
   }
 
+  void disconnect_from_root(PhaseIterGVN *igvn);
+
   // Standard Node stuff
   virtual int            Opcode() const;
   virtual bool           pinned() const { return true; }
--- a/src/hotspot/share/opto/compile.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/compile.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -2184,6 +2184,23 @@
   return true;
 }
 
+// Remove edges from "root" to each SafePoint at a backward branch.
+// They were inserted during parsing (see add_safepoint()) to make
+// infinite loops without calls or exceptions visible to root, i.e.,
+// useful.
+void Compile::remove_root_to_sfpts_edges() {
+  Node *r = root();
+  if (r != NULL) {
+    for (uint i = r->req(); i < r->len(); ++i) {
+      Node *n = r->in(i);
+      if (n != NULL && n->is_SafePoint()) {
+        r->rm_prec(i);
+        --i;
+      }
+    }
+  }
+}
+
 //------------------------------Optimize---------------------------------------
 // Given a graph, optimize it.
 void Compile::Optimize() {
@@ -2244,6 +2261,10 @@
     if (failing())  return;
   }
 
+  // Now that all inlining is over, cut edge from root to loop
+  // safepoints
+  remove_root_to_sfpts_edges();
+
   // Remove the speculative part of types and clean up the graph from
   // the extra CastPP nodes whose only purpose is to carry them. Do
   // that early so that optimizations are not disrupted by the extra
@@ -3248,8 +3269,10 @@
             break;
           }
         }
-        assert(proj != NULL, "must be found");
-        p->subsume_by(proj, this);
+        assert(proj != NULL || p->_con == TypeFunc::I_O, "io may be dropped at an infinite loop");
+        if (proj != NULL) {
+          p->subsume_by(proj, this);
+        }
       }
     }
     break;
@@ -3469,8 +3492,7 @@
   }
   case Op_CmpUL: {
     if (!Matcher::has_match_rule(Op_CmpUL)) {
-      // We don't support unsigned long comparisons. Set 'max_idx_expr'
-      // to max_julong if < 0 to make the signed comparison fail.
+      // No support for unsigned long comparisons
       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
--- a/src/hotspot/share/opto/compile.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/compile.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1088,6 +1088,7 @@
   void inline_string_calls(bool parse_time);
   void inline_boxing_calls(PhaseIterGVN& igvn);
   bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
+  void remove_root_to_sfpts_edges();
 
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*         cfg()                       { return _cfg; }
--- a/src/hotspot/share/opto/escape.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/escape.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1727,7 +1727,8 @@
     //
     Node* n = field->ideal_node();
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-      if (n->fast_out(i)->is_LoadStore()) {
+      Node* u = n->fast_out(i);
+      if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
         jobj->set_scalar_replaceable(false);
         return;
       }
--- a/src/hotspot/share/opto/loopPredicate.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/loopPredicate.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1308,7 +1308,7 @@
 // range checks between the pre and main loop to validate the value
 // of the main loop induction variable. Make a copy of the predicates
 // here with an opaque node as a place holder for the value (will be
-// updated by PhaseIdealLoop::update_skeleton_predicate()).
+// updated by PhaseIdealLoop::clone_skeleton_predicate()).
 ProjNode* PhaseIdealLoop::insert_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
                                                     ProjNode* proj, ProjNode *predicate_proj,
                                                     ProjNode* upper_bound_proj,
--- a/src/hotspot/share/opto/loopTransform.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/loopTransform.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -354,6 +354,11 @@
   // check for vectorized loops, any peeling done was already applied
   if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
 
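+  // A counted loop known to execute exactly once has nothing to peel.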
+  if (_head->is_CountedLoop() && _head->as_CountedLoop()->trip_count() == 1) {
+    return false;
+  }
+
   while( test != _head ) {      // Scan till run off top of loop
     if( test->is_If() ) {       // Test?
       Node *ctrl = phase->get_ctrl(test->in(1));
@@ -1063,8 +1067,9 @@
 // CastII/ConvI2L nodes cause some data paths to die. For consistency,
 // the control paths must die too but the range checks were removed by
 // predication. The range checks that we add here guarantee that they do.
-void PhaseIdealLoop::duplicate_predicates_helper(Node* predicate, Node* castii, IdealLoopTree* outer_loop,
-                                                 LoopNode* outer_main_head, uint dd_main_head) {
+void PhaseIdealLoop::duplicate_predicates_helper(Node* predicate, Node* start, Node* end,
+                                                 IdealLoopTree* outer_loop, LoopNode* outer_main_head,
+                                                 uint dd_main_head) {
   if (predicate != NULL) {
     IfNode* iff = predicate->in(0)->as_If();
     ProjNode* uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con);
@@ -1080,13 +1085,14 @@
       if (uncommon_proj->unique_ctrl_out() != rgn)
         break;
       if (iff->in(1)->Opcode() == Op_Opaque4) {
+        assert(skeleton_predicate_has_opaque(iff), "unexpected");
         // Clone the predicate twice and initialize one with the initial
         // value of the loop induction variable. Leave the other predicate
         // to be initialized when increasing the stride during loop unrolling.
-        prev_proj = update_skeleton_predicate(iff, castii, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
-        Node* value = new Opaque1Node(C, castii);
-        register_new_node(value, current_proj);
-        prev_proj = update_skeleton_predicate(iff, value, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
+        prev_proj = clone_skeleton_predicate(iff, start, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
+        assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()) == (start->Opcode() == Op_Opaque1), "");
+        prev_proj = clone_skeleton_predicate(iff, end, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
+        assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()) == (end->Opcode() == Op_Opaque1), "");
         // Remove the skeleton predicate from the pre-loop
         _igvn.replace_input_of(iff, 1, _igvn.intcon(1));
       }
@@ -1097,9 +1103,51 @@
   }
 }
 
-Node* PhaseIdealLoop::update_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj,
-                                                Node* current_proj, IdealLoopTree* outer_loop, Node* prev_proj) {
-  bool clone = (outer_loop != NULL); // Clone the predicate?
+static bool skeleton_follow_inputs(Node* n, int op) {
+  return (n->is_Bool() ||
+          n->is_Cmp() ||
+          op == Op_AndL ||
+          op == Op_OrL ||
+          op == Op_RShiftL ||
+          op == Op_LShiftL ||
+          op == Op_AddL ||
+          op == Op_AddI ||
+          op == Op_MulL ||
+          op == Op_MulI ||
+          op == Op_SubL ||
+          op == Op_SubI ||
+          op == Op_ConvI2L);
+}
+
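+// Walk the expression feeding a skeleton predicate's Opaque4 node (through
+// the shapes accepted by skeleton_follow_inputs) and report whether an
+// Opaque1 placeholder is reachable, i.e. whether the predicate is still a
+// template that can be re-instantiated for a new stride or initial value.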
+bool PhaseIdealLoop::skeleton_predicate_has_opaque(IfNode* iff) {
+  ResourceMark rm;
+  Unique_Node_List wq;
+  wq.push(iff->in(1)->in(1));
+  for (uint i = 0; i < wq.size(); i++) {
+    Node* n = wq.at(i);
+    int op = n->Opcode();
+    if (skeleton_follow_inputs(n, op)) {
+      for (uint j = 1; j < n->req(); j++) {
+        Node* m = n->in(j);
+        if (m != NULL) {
+          wq.push(m);
+        }
+      }
+      continue;
+    }
+    if (op == Op_Opaque1) {
+      return true;
+    }
+  }
+  return false;
+}
+
+Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj,
+                                               Node* current_proj, IdealLoopTree* outer_loop, Node* prev_proj) {
   Node_Stack to_clone(2);
   to_clone.push(iff->in(1), 1);
   uint current = C->unique();
@@ -1114,28 +1158,11 @@
     uint i = to_clone.index();
     Node* m = n->in(i);
     int op = m->Opcode();
-    if (m->is_Bool() ||
-        m->is_Cmp() ||
-        op == Op_AndL ||
-        op == Op_OrL ||
-        op == Op_RShiftL ||
-        op == Op_LShiftL ||
-        op == Op_AddL ||
-        op == Op_AddI ||
-        op == Op_MulL ||
-        op == Op_MulI ||
-        op == Op_SubL ||
-        op == Op_SubI ||
-        op == Op_ConvI2L) {
+    if (skeleton_follow_inputs(m, op)) {
         to_clone.push(m, 1);
         continue;
     }
     if (op == Op_Opaque1) {
-      if (!clone) {
-        // Update the input of the Opaque1Node and exit
-        _igvn.replace_input_of(m, 1, value);
-        return prev_proj;
-      }
       if (n->_idx < current) {
         n = n->clone();
       }
@@ -1157,20 +1184,17 @@
       }
       Node* next = to_clone.node();
       j = to_clone.index();
-      if (clone && cur->_idx >= current) {
+      if (next->in(j) != cur) {
+        assert(cur->_idx >= current || next->in(j)->Opcode() == Op_Opaque1, "new node or Opaque1 being replaced");
         if (next->_idx < current) {
           next = next->clone();
           register_new_node(next, current_proj);
           to_clone.set_node(next);
         }
-        assert(next->in(j) != cur, "input should have been cloned");
         next->set_req(j, cur);
       }
     }
   } while (result == NULL);
-  if (!clone) {
-    return NULL;
-  }
   assert(result->_idx >= current, "new node expected");
 
   Node* proj = predicate->clone();
@@ -1193,8 +1217,9 @@
   return proj;
 }
 
-void PhaseIdealLoop::duplicate_predicates(CountedLoopNode* pre_head, Node* castii, IdealLoopTree* outer_loop,
-                                          LoopNode* outer_main_head, uint dd_main_head) {
+void PhaseIdealLoop::duplicate_predicates(CountedLoopNode* pre_head, Node* start, Node* end,
+                                          IdealLoopTree* outer_loop, LoopNode* outer_main_head,
+                                          uint dd_main_head) {
   if (UseLoopPredicate) {
     Node* entry = pre_head->in(LoopNode::EntryControl);
     Node* predicate = NULL;
@@ -1210,8 +1235,8 @@
       }
     }
     predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
-    duplicate_predicates_helper(predicate, castii, outer_loop, outer_main_head, dd_main_head);
-    duplicate_predicates_helper(profile_predicate, castii, outer_loop, outer_main_head, dd_main_head);
+    duplicate_predicates_helper(predicate, start, end, outer_loop, outer_main_head, dd_main_head);
+    duplicate_predicates_helper(profile_predicate, start, end, outer_loop, outer_main_head, dd_main_head);
   }
 }
 
@@ -1358,7 +1383,9 @@
   // CastII for the main loop:
   Node* castii = cast_incr_before_loop( pre_incr, min_taken, main_head );
   assert(castii != NULL, "no castII inserted");
-  duplicate_predicates(pre_head, castii, outer_loop, outer_main_head, dd_main_head);
+  Node* opaque_castii = new Opaque1Node(C, castii);
+  register_new_node(opaque_castii, outer_main_head->in(LoopNode::EntryControl));
+  duplicate_predicates(pre_head, castii, opaque_castii, outer_loop, outer_main_head, dd_main_head);
 
   // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
   // RCE and alignment may change this later.
@@ -1637,6 +1664,49 @@
   return !is_member(_phase->get_loop(n_c));
 }
 
+void PhaseIdealLoop::update_skeleton_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con) {
+  // Search for skeleton predicates and update them according to the new stride
+  Node* entry = ctrl;
+  Node* prev_proj = ctrl;
+  LoopNode* outer_loop_head = loop_head->skip_strip_mined();
+  IdealLoopTree* outer_loop = get_loop(outer_loop_head);
+  while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
+    IfNode* iff = entry->in(0)->as_If();
+    ProjNode* proj = iff->proj_out(1 - entry->as_Proj()->_con);
+    if (proj->unique_ctrl_out()->Opcode() != Op_Halt) {
+      break;
+    }
+    if (iff->in(1)->Opcode() == Op_Opaque4) {
+      // Look for predicate with an Opaque1 node that can be used as a template
+      if (!skeleton_predicate_has_opaque(iff)) {
+        // No Opaque1 node? It's either the check for the first value
+        // of the first iteration or the check for the last value of
+        // the first iteration of an unrolled loop. We can't
+        // tell. Kill it in any case.
+        _igvn.replace_input_of(iff, 1, iff->in(1)->in(2));
+      } else {
+        // Add back the predicate for the value at the beginning of the first entry
+        prev_proj = clone_skeleton_predicate(iff, init, entry, proj, ctrl, outer_loop, prev_proj);
+        assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "unexpected");
+        // Compute the value of the loop induction variable at the end of the
+        // first iteration of the unrolled loop: init + new_stride_con - init_inc
+        int init_inc = stride_con/loop_head->unrolled_count();
+        assert(init_inc != 0, "invalid loop increment");
+        int new_stride_con = stride_con * 2;
+        Node* max_value = _igvn.intcon(new_stride_con - init_inc);
+        max_value = new AddINode(init, max_value);
+        register_new_node(max_value, get_ctrl(iff->in(1)));
+        prev_proj = clone_skeleton_predicate(iff, max_value, entry, proj, ctrl, outer_loop, prev_proj);
+        assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "unexpected");
+      }
+    }
+    entry = entry->in(0)->in(0);
+  }
+  if (prev_proj != ctrl) {
+    _igvn.replace_input_of(outer_loop_head, LoopNode::EntryControl, prev_proj);
+    set_idom(outer_loop_head, prev_proj, dom_depth(outer_loop_head));
+  }
+}
 
 //------------------------------do_unroll--------------------------------------
 // Unroll the loop body one step - make each trip do 2 iterations.
@@ -1702,29 +1772,7 @@
   assert(old_trip_count > 1 &&
       (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");
 
-  if (UseLoopPredicate) {
-    // Search for skeleton predicates and update them according to the new stride
-    Node* entry = ctrl;
-    while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
-      IfNode* iff = entry->in(0)->as_If();
-      ProjNode* proj = iff->proj_out(1 - entry->as_Proj()->_con);
-      if (proj->unique_ctrl_out()->Opcode() != Op_Halt) {
-        break;
-      }
-      if (iff->in(1)->Opcode() == Op_Opaque4) {
-        // Compute the value of the loop induction variable at the end of the
-        // first iteration of the unrolled loop: init + new_stride_con - init_inc
-        int init_inc = stride_con/loop_head->unrolled_count();
-        assert(init_inc != 0, "invalid loop increment");
-        int new_stride_con = stride_con * 2;
-        Node* max_value = _igvn.intcon(new_stride_con - init_inc);
-        max_value = new AddINode(init, max_value);
-        register_new_node(max_value, get_ctrl(iff->in(1)));
-        update_skeleton_predicate(iff, max_value);
-      }
-      entry = entry->in(0)->in(0);
-    }
-  }
+  update_skeleton_predicates(ctrl, loop_head, init, stride_con);
 
   // Adjust loop limit to keep valid iterations number after unroll.
   // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
@@ -2035,13 +2083,22 @@
 
 //------------------------------adjust_limit-----------------------------------
 // Helper function for add_constraint().
-Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
+Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl, bool round_up) {
   // Compute "I :: (limit-offset)/scale"
   Node *con = new SubINode(rc_limit, offset);
   register_new_node(con, pre_ctrl);
   Node *X = new DivINode(0, con, scale);
   register_new_node(X, pre_ctrl);
 
+  // When the absolute value of scale is greater than one, the integer
+  // division may round the limit down, so add one to the limit.
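+  // For example, with scale == 2 and (rc_limit - offset) == 7, X == 7/2 == 3
+  // while the exact bound is 3.5; adding one restores a conservative 4.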
+  if (round_up) {
+    X = new AddINode(X, _igvn.intcon(1));
+    register_new_node(X, pre_ctrl);
+  }
+
   // Adjust loop limit
   loop_limit = (stride_con > 0)
                ? (Node*)(new MinINode(loop_limit, X))
@@ -2082,7 +2137,7 @@
     // (upper_limit-offset) may overflow or underflow.
     // But it is fine since main loop will either have
     // less iterations or will be skipped in such case.
-    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
+    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl, false);
 
     // The underflow limit: low_limit <= scale*I+offset.
     // For pre-loop compute
@@ -2117,7 +2172,8 @@
       // max(pre_limit, original_limit) is used in do_range_check().
     }
     // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
-    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
+    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl,
+                              scale_con > 1 && stride_con > 0);
 
   } else { // stride_con*scale_con < 0
     // For negative stride*scale pre-loop checks for overflow and
@@ -2143,7 +2199,8 @@
     Node *plus_one = new AddINode(offset, one);
     register_new_node( plus_one, pre_ctrl );
     // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
-    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
+    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl,
+                              scale_con < -1 && stride_con > 0);
 
     if (low_limit->get_int() == -max_jint) {
       // We need this guard when scale*main_limit+offset >= limit
@@ -2177,7 +2234,8 @@
     //       I > (low_limit-(offset+1))/scale
     //   )
 
-    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
+    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl,
+                               false);
   }
 }
 
@@ -2282,16 +2340,16 @@
 // eliminated by iteration splitting.
 Node* PhaseIdealLoop::add_range_check_predicate(IdealLoopTree* loop, CountedLoopNode* cl,
                                                 Node* predicate_proj, int scale_con, Node* offset,
-                                                Node* limit, jint stride_con) {
+                                                Node* limit, jint stride_con, Node* value) {
   bool overflow = false;
-  BoolNode* bol = rc_predicate(loop, predicate_proj, scale_con, offset, cl->init_trip(), NULL, stride_con, limit, (stride_con > 0) != (scale_con > 0), overflow);
+  BoolNode* bol = rc_predicate(loop, predicate_proj, scale_con, offset, value, NULL, stride_con, limit, (stride_con > 0) != (scale_con > 0), overflow);
   Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1));
   register_new_node(opaque_bol, predicate_proj);
   IfNode* new_iff = NULL;
   if (overflow) {
-    new_iff = new IfNode(predicate_proj, bol, PROB_MAX, COUNT_UNKNOWN);
+    new_iff = new IfNode(predicate_proj, opaque_bol, PROB_MAX, COUNT_UNKNOWN);
   } else {
-    new_iff = new RangeCheckNode(predicate_proj, bol, PROB_MAX, COUNT_UNKNOWN);
+    new_iff = new RangeCheckNode(predicate_proj, opaque_bol, PROB_MAX, COUNT_UNKNOWN);
   }
   register_control(new_iff, loop->_parent, predicate_proj);
   Node* iffalse = new IfFalseNode(new_iff);
@@ -2483,7 +2541,23 @@
           add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
           // (0-offset)/scale could be outside of loop iterations range.
           conditional_rc = true;
-          predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con);
+          Node* init = cl->init_trip();
+          Node* opaque_init = new Opaque1Node(C, init);
+          register_new_node(opaque_init, predicate_proj);
+          // template predicate so it can be updated on next unrolling
+          predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con, opaque_init);
+          assert(skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
+          // predicate on first value of first iteration
+          predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con, init);
+          assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
+          int init_inc = stride_con/cl->unrolled_count();
+          assert(init_inc != 0, "invalid loop increment");
+          Node* max_value = _igvn.intcon(stride_con - init_inc);
+          max_value = new AddINode(init, max_value);
+          register_new_node(max_value, predicate_proj);
+          // predicate on last value of first iteration (in case unrolling has already happened)
+          predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con, max_value);
+          assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
         } else {
           if (PrintOpto) {
             tty->print_cr("missed RCE opportunity");
--- a/src/hotspot/share/opto/loopnode.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/loopnode.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -302,7 +302,7 @@
   void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
   int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }
 
-  virtual LoopNode* skip_strip_mined(int expect_opaq = 1);
+  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1);
   OuterStripMinedLoopNode* outer_loop() const;
   virtual IfTrueNode* outer_loop_tail() const;
   virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
@@ -747,12 +747,14 @@
   }
 
   Node* cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);
-  void duplicate_predicates_helper(Node* predicate, Node* castii, IdealLoopTree* outer_loop,
+  void duplicate_predicates_helper(Node* predicate, Node* start, Node* end, IdealLoopTree* outer_loop,
                                    LoopNode* outer_main_head, uint dd_main_head);
-  void duplicate_predicates(CountedLoopNode* pre_head, Node* castii, IdealLoopTree* outer_loop,
+  void duplicate_predicates(CountedLoopNode* pre_head, Node* start, Node* end, IdealLoopTree* outer_loop,
                             LoopNode* outer_main_head, uint dd_main_head);
-  Node* update_skeleton_predicate(Node* iff, Node* value, Node* predicate = NULL, Node* uncommon_proj = NULL,
-                                  Node* current_proj = NULL, IdealLoopTree* outer_loop = NULL, Node* prev_proj = NULL);
+  Node* clone_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj,
+                                  Node* current_proj, IdealLoopTree* outer_loop, Node* prev_proj);
+  bool skeleton_predicate_has_opaque(IfNode* iff);
+  void update_skeleton_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con);
   void insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol);
 
 public:
@@ -1128,7 +1130,7 @@
                                       Deoptimization::DeoptReason reason);
   Node* add_range_check_predicate(IdealLoopTree* loop, CountedLoopNode* cl,
                                   Node* predicate_proj, int scale_con, Node* offset,
-                                  Node* limit, jint stride_con);
+                                  Node* limit, jint stride_con, Node* value);
 
   // Helper function to collect predicate for eliminating the useless ones
   void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
@@ -1190,7 +1192,7 @@
   // loop.  Scale_con, offset and limit are all loop invariant.
   void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
   // Helper function for add_constraint().
-  Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl );
+  Node* adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl, bool round_up);
 
   // Partially peel loop up through last_peel node.
   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
@@ -1304,6 +1306,16 @@
   bool identical_backtoback_ifs(Node *n);
   bool can_split_if(Node *n_ctrl);
 
+  // Determine if a method is too big for a/another round of split-if, based on
+  // a magic (approximate) ratio derived from the equally magic constant 35000,
+  // previously used for this purpose (but without relating to the node limit).
+  bool must_throttle_split_if() {
+    uint threshold = C->max_node_limit() * 2 / 5;
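+    // With the default -XX:MaxNodeLimit of 80000 the threshold is 32000,
+    // in the same ballpark as the old hard-coded 35000 cutoff.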
+    return C->live_nodes() > threshold;
+  }
+
   bool _created_loop_node;
 public:
   void set_created_loop_node() { _created_loop_node = true; }
--- a/src/hotspot/share/opto/loopopts.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/loopopts.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -1024,8 +1024,7 @@
     }
   }
 
-  // Use same limit as split_if_with_blocks_post
-  if( C->live_nodes() > 35000 ) return n; // Method too big
+  if (must_throttle_split_if()) return n;
 
   // Split 'n' through the merge point if it is profitable
   Node *phi = split_thru_phi( n, n_blk, policy );
@@ -1143,9 +1142,10 @@
   return true;
 }
 
-bool PhaseIdealLoop::can_split_if(Node *n_ctrl) {
-  if (C->live_nodes() > 35000) {
-    return false; // Method too big
+
+bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
+  if (must_throttle_split_if()) {
+    return false;
   }
 
   // Do not do 'split-if' if irreducible loops are present.
@@ -1462,12 +1462,13 @@
 // Check for aggressive application of 'split-if' optimization,
 // using basic block level info.
 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack, bool last_round) {
-  Node *n = C->root();
-  visited.set(n->_idx); // first, mark node as visited
+  Node* root = C->root();
+  visited.set(root->_idx); // first, mark root as visited
   // Do pre-visit work for root
-  n = split_if_with_blocks_pre( n );
-  uint cnt = n->outcnt();
-  uint i   = 0;
+  Node* n   = split_if_with_blocks_pre(root);
+  uint  cnt = n->outcnt();
+  uint  i   = 0;
+
   while (true) {
     // Visit all children
     if (i < cnt) {
@@ -1475,7 +1476,7 @@
       ++i;
       if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
         // Now do pre-visit work for this use
-        use = split_if_with_blocks_pre( use );
+        use = split_if_with_blocks_pre(use);
         nstack.push(n, i); // Save parent and next use's index.
         n   = use;         // Process all children of current use.
         cnt = use->outcnt();
@@ -1486,7 +1487,10 @@
       // All of n's children have been processed, complete post-processing.
       if (cnt != 0 && !n->is_Con()) {
         assert(has_node(n), "no dead nodes");
-        split_if_with_blocks_post( n, last_round );
+        split_if_with_blocks_post(n, last_round);
+      }
+      if (must_throttle_split_if()) {
+        nstack.clear();
       }
       if (nstack.is_empty()) {
         // Finished all nodes on stack.
--- a/src/hotspot/share/opto/node.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/node.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -37,6 +37,7 @@
 #include "opto/node.hpp"
 #include "opto/opcodes.hpp"
 #include "opto/regmask.hpp"
+#include "opto/rootnode.hpp"
 #include "opto/type.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/macros.hpp"
@@ -1310,6 +1311,9 @@
 
   while (nstack.size() > 0) {
     dead = nstack.pop();
+    if (dead->Opcode() == Op_SafePoint) {
+      dead->as_SafePoint()->disconnect_from_root(igvn);
+    }
     if (dead->outcnt() > 0) {
       // Keep dead node on stack until all uses are processed.
       nstack.push(dead);
@@ -1367,7 +1371,7 @@
         igvn->C->remove_range_check_cast(cast);
       }
       if (dead->Opcode() == Op_Opaque4) {
-        igvn->C->remove_range_check_cast(dead);
+        igvn->C->remove_opaque4_node(dead);
       }
       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
       bs->unregister_potential_barrier_node(dead);
--- a/src/hotspot/share/opto/phaseX.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/opto/phaseX.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -430,20 +430,6 @@
 
   // Disconnect 'useless' nodes that are adjacent to useful nodes
   C->remove_useless_nodes(_useful);
-
-  // Remove edges from "root" to each SafePoint at a backward branch.
-  // They were inserted during parsing (see add_safepoint()) to make infinite
-  // loops without calls or exceptions visible to root, i.e., useful.
-  Node *root = C->root();
-  if( root != NULL ) {
-    for( uint i = root->req(); i < root->len(); ++i ) {
-      Node *n = root->in(i);
-      if( n != NULL && n->is_SafePoint() ) {
-        root->rm_prec(i);
-        --i;
-      }
-    }
-  }
 }
 
 //=============================================================================
@@ -1354,6 +1340,9 @@
 
   while (_stack.is_nonempty()) {
     dead = _stack.node();
+    if (dead->Opcode() == Op_SafePoint) {
+      dead->as_SafePoint()->disconnect_from_root(this);
+    }
     uint progress_state = _stack.index();
     assert(dead != C->root(), "killing root, eh?");
     assert(!dead->is_top(), "add check for top when pushing");
@@ -1456,6 +1445,9 @@
 //------------------------------subsume_node-----------------------------------
 // Remove users from node 'old' and add them to node 'nn'.
 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
+  if (old->Opcode() == Op_SafePoint) {
+    old->as_SafePoint()->disconnect_from_root(this);
+  }
   assert( old != hash_find(old), "should already been removed" );
   assert( old != C->top(), "cannot subsume top node");
   // Copy debug or profile information to the new version:
--- a/src/hotspot/share/prims/jni.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/prims/jni.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -823,9 +823,7 @@
 
   HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(env, r1, r2);
 
-  oop a = JNIHandles::resolve(r1);
-  oop b = JNIHandles::resolve(r2);
-  jboolean ret = oopDesc::equals(a, b) ? JNI_TRUE : JNI_FALSE;
+  jboolean ret = JNIHandles::is_same_object(r1, r2) ? JNI_TRUE : JNI_FALSE;
 
   HOTSPOT_JNI_ISSAMEOBJECT_RETURN(ret);
   return ret;
--- a/src/hotspot/share/prims/jvm.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/prims/jvm.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -61,6 +61,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
@@ -1229,11 +1230,10 @@
   oop protection_domain = NULL;
 
   // Iterate through Java frames
-  RegisterMap reg_map(thread);
-  javaVFrame *vf = thread->last_java_vframe(&reg_map);
-  for (; vf != NULL; vf = vf->java_sender()) {
+  vframeStream vfst(thread);
+  for (; !vfst.at_end(); vfst.next()) {
     // get method of frame
-    Method* method = vf->method();
+    Method* method = vfst.method();
 
     // stop at the first privileged frame
     if (method->method_holder() == SystemDictionary::AccessController_klass() &&
@@ -1242,13 +1242,15 @@
       // this frame is privileged
       is_privileged = true;
 
-      javaVFrame *priv = vf;                        // executePrivileged
-      javaVFrame *caller_fr = priv->java_sender();  // doPrivileged
-      caller_fr = caller_fr->java_sender();         // caller
+      javaVFrame *priv = vfst.asJavaVFrame();       // executePrivileged
 
       StackValueCollection* locals = priv->locals();
-      privileged_context = locals->obj_at(1);
-      Handle caller      = locals->obj_at(2);
+      StackValue* ctx_sv = locals->at(1); // AccessControlContext context
+      StackValue* clr_sv = locals->at(2); // Class<?> caller
+      assert(!ctx_sv->obj_is_scalar_replaced(), "found scalar-replaced object");
+      assert(!clr_sv->obj_is_scalar_replaced(), "found scalar-replaced object");
+      privileged_context    = ctx_sv->get_obj();
+      Handle caller         = clr_sv->get_obj();
 
       Klass *caller_klass = java_lang_Class::as_Klass(caller());
       protection_domain  = caller_klass->protection_domain();
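
The rewrite above walks frames with a vframeStream and only materializes a javaVFrame (via the new asJavaVFrame(), defined later in this changeset in vframe.cpp) once the executePrivileged frame has been located. As a rough user-level analogy only, not the VM-internal mechanism, java.lang.StackWalker expresses the same walk-until-marker-frame pattern:

    import java.lang.StackWalker.StackFrame;
    import java.util.Optional;

    // Rough user-level analogy to the frame walk above: scan Java frames
    // top-down and stop at the first frame belonging to a marker method.
    // MARKER_CLASS/MARKER_METHOD are illustrative constants.
    final class PrivilegedFrameScan {
        static final String MARKER_CLASS  = "java.security.AccessController";
        static final String MARKER_METHOD = "executePrivileged";

        static Optional<StackFrame> firstPrivilegedFrame() {
            StackWalker walker = StackWalker.getInstance();
            return walker.walk(frames ->
                frames.filter(f -> f.getClassName().equals(MARKER_CLASS)
                                && f.getMethodName().equals(MARKER_METHOD))
                      .findFirst());
        }
    }

The VM code additionally reads the located frame's locals to recover the context and caller arguments, which has no StackWalker equivalent.
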
--- a/src/hotspot/share/prims/whitebox.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/prims/whitebox.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -82,6 +82,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
 #endif // INCLUDE_G1GC
 #if INCLUDE_PARALLELGC
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
@@ -499,6 +500,113 @@
 
 #endif // INCLUDE_G1GC
 
+#if INCLUDE_G1GC || INCLUDE_PARALLELGC
+WB_ENTRY(jlong, WB_DramReservedStart(JNIEnv* env, jobject o))
+#if INCLUDE_G1GC
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint start_region = HeterogeneousHeapRegionManager::manager()->start_index_of_dram();
+      return (jlong)(g1h->base() + start_region * HeapRegion::GrainBytes);
+    } else {
+      return (jlong)g1h->base();
+    }
+  }
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
+    ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
+    if (AllocateOldGenAt != NULL) {
+      MemRegion reserved = ps_heap->young_gen()->reserved();
+      return (jlong)reserved.start();
+    } else {
+      return (jlong)ps_heap->base();
+    }
+  }
+#endif // INCLUDE_PARALLELGC
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_DramReservedStart: enabled only for G1 and Parallel GC");
+WB_END
+
+WB_ENTRY(jlong, WB_DramReservedEnd(JNIEnv* env, jobject o))
+#if INCLUDE_G1GC
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint end_region = HeterogeneousHeapRegionManager::manager()->end_index_of_dram();
+      return (jlong)(g1h->base() + (end_region + 1) * HeapRegion::GrainBytes - 1);
+    } else {
+      return (jlong)g1h->base() + g1h->collector_policy()->max_heap_byte_size();
+    }
+  }
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
+    ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
+    if (AllocateOldGenAt != NULL) {
+      MemRegion reserved = ps_heap->young_gen()->reserved();
+      return (jlong)reserved.end();
+    } else {
+      return (jlong)ps_heap->reserved_region().end();
+    }
+  }
+#endif // INCLUDE_PARALLELGC
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_DramReservedEnd: enabled only for G1 and Parallel GC");
+WB_END
+
+WB_ENTRY(jlong, WB_NvdimmReservedStart(JNIEnv* env, jobject o))
+#if INCLUDE_G1GC
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint start_region = HeterogeneousHeapRegionManager::manager()->start_index_of_nvdimm();
+      return (jlong)(g1h->base() + start_region * HeapRegion::GrainBytes);
+    } else {
+      THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
+    }
+  }
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
+    ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
+    if (AllocateOldGenAt != NULL) {
+      MemRegion reserved = ps_heap->old_gen()->reserved();
+      return (jlong)reserved.start();
+    } else {
+      THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
+    }
+  }
+#endif // INCLUDE_PARALLELGC
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: enabled only for G1 and Parallel GC");
+WB_END
+
+WB_ENTRY(jlong, WB_NvdimmReservedEnd(JNIEnv* env, jobject o))
+#if INCLUDE_G1GC
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint end_region = HeterogeneousHeapRegionManager::manager()->end_index_of_nvdimm();
+      return (jlong)(g1h->base() + (end_region + 1) * HeapRegion::GrainBytes - 1);
+    } else {
+      THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
+    }
+  }
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
+    ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
+    if (AllocateOldGenAt != NULL) {
+      MemRegion reserved = ps_heap->old_gen()->reserved();
+      return (jlong)reserved.end();
+    } else {
+      THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
+    }
+  }
+#endif // INCLUDE_PARALLELGC
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: enabled only for G1 and Parallel GC");
+WB_END
+
+#endif // INCLUDE_G1GC || INCLUDE_PARALLELGC
+
 #if INCLUDE_PARALLELGC
 
 WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
@@ -2054,6 +2162,12 @@
                                                       (void*)&WB_G1AuxiliaryMemoryUsage  },
   {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
 #endif // INCLUDE_G1GC
+#if INCLUDE_G1GC || INCLUDE_PARALLELGC
+  {CC"dramReservedStart",   CC"()J",                  (void*)&WB_DramReservedStart },
+  {CC"dramReservedEnd",     CC"()J",                  (void*)&WB_DramReservedEnd },
+  {CC"nvdimmReservedStart", CC"()J",                  (void*)&WB_NvdimmReservedStart },
+  {CC"nvdimmReservedEnd",   CC"()J",                  (void*)&WB_NvdimmReservedEnd },
+#endif // INCLUDE_G1GC || INCLUDE_PARALLELGC
 #if INCLUDE_PARALLELGC
   {CC"psVirtualSpaceAlignment",CC"()J",               (void*)&WB_PSVirtualSpaceAlignment},
   {CC"psHeapGenerationAlignment",CC"()J",             (void*)&WB_PSHeapGenerationAlignment},
--- a/src/hotspot/share/runtime/arguments.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/arguments.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -2062,6 +2062,9 @@
       log_warning(arguments) ("NUMA support for Heap depends on the file system when AllocateHeapAt option is used.\n");
     }
   }
+
+  status = status && GCArguments::check_args_consistency();
+
   return status;
 }
 
@@ -2953,6 +2956,7 @@
   }
 #endif // LINUX
   fix_appclasspath();
+
   return JNI_OK;
 }
 
@@ -3107,6 +3111,10 @@
       BytecodeVerificationRemote = true;
       log_info(cds)("All non-system classes will be verified (-Xverify:remote) during CDS dump time.");
     }
+
+    // Compilation is already disabled if the user specifies -Xshare:dump.
+    // Disable compilation in case user specifies -XX:+DumpSharedSpaces instead of -Xshare:dump.
+    set_mode_flags(_int);
   }
   if (UseSharedSpaces && patch_mod_javabase) {
     no_shared_spaces("CDS is disabled when " JAVA_BASE_NAME " module is patched.");
@@ -3811,6 +3819,7 @@
 
 #if defined(AIX)
   UNSUPPORTED_OPTION(AllocateHeapAt);
+  UNSUPPORTED_OPTION(AllocateOldGenAt);
 #endif
 
   ArgumentsExt::report_unsupported_options();
--- a/src/hotspot/share/runtime/globals.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/globals.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -2575,6 +2575,12 @@
           "Path to the directoy where a temporary file will be created "    \
           "to use as the backing store for Java Heap.")                     \
                                                                             \
+  experimental(ccstr, AllocateOldGenAt, NULL,                               \
+          "Path to the directoy where a temporary file will be "            \
+          "created to use as the backing store for old generation."         \
+          "File of size Xmx is pre-allocated for performance reason, so"    \
+          "we need that much space available")                              \
+                                                                            \
   develop(bool, VerifyMetaspace, false,                                     \
           "Verify metaspace on chunk movements.")                           \
                                                                             \
--- a/src/hotspot/share/runtime/jniHandles.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -152,17 +152,11 @@
 oop JNIHandles::resolve_external_guard(jobject handle) {
   oop result = NULL;
   if (handle != NULL) {
-    result = resolve_impl<true /* external_guard */ >(handle);
+    result = resolve_impl<0 /* decorators */, true /* external_guard */>(handle);
   }
   return result;
 }
 
-oop JNIHandles::resolve_jweak(jweak handle) {
-  assert(handle != NULL, "precondition");
-  assert(is_jweak(handle), "precondition");
-  return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(jweak_ptr(handle));
-}
-
 bool JNIHandles::is_global_weak_cleared(jweak handle) {
   assert(handle != NULL, "precondition");
   assert(is_jweak(handle), "not a weak handle");
@@ -318,7 +312,7 @@
 class VerifyJNIHandles: public OopClosure {
 public:
   virtual void do_oop(oop* root) {
-    guarantee(oopDesc::is_oop(RawAccess<>::oop_load(root)), "Invalid oop");
+    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
   }
   virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
 };
--- a/src/hotspot/share/runtime/jniHandles.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/jniHandles.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -42,8 +42,10 @@
   inline static oop* jobject_ptr(jobject handle); // NOT jweak!
   inline static oop* jweak_ptr(jobject handle);
 
-  template<bool external_guard> inline static oop resolve_impl(jobject handle);
-  static oop resolve_jweak(jweak handle);
+  template <DecoratorSet decorators, bool external_guard> inline static oop resolve_impl(jobject handle);
+
+  // Resolve handle into oop, without keeping the object alive
+  inline static oop resolve_no_keepalive(jobject handle);
 
   // This method is not inlined in order to avoid circular includes between
   // this header file and thread.hpp.
@@ -70,6 +72,9 @@
   // Resolve externally provided handle into oop with some guards
   static oop resolve_external_guard(jobject handle);
 
+  // Check for equality without keeping objects alive
+  static bool is_same_object(jobject handle1, jobject handle2);
+
   // Local handles
   static jobject make_local(oop obj);
   static jobject make_local(JNIEnv* env, oop obj);    // Fast version when env is known
--- a/src/hotspot/share/runtime/jniHandles.inline.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/jniHandles.inline.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -49,15 +49,15 @@
 }
 
 // external_guard is true if called from resolve_external_guard.
-template<bool external_guard>
+template <DecoratorSet decorators, bool external_guard>
 inline oop JNIHandles::resolve_impl(jobject handle) {
   assert(handle != NULL, "precondition");
   assert(!current_thread_in_native(), "must not be in native");
   oop result;
   if (is_jweak(handle)) {       // Unlikely
-    result = resolve_jweak(handle);
+    result = NativeAccess<ON_PHANTOM_OOP_REF|decorators>::oop_load(jweak_ptr(handle));
   } else {
-    result = NativeAccess<>::oop_load(jobject_ptr(handle));
+    result = NativeAccess<decorators>::oop_load(jobject_ptr(handle));
     // Construction of jobjects canonicalize a null value into a null
     // jobject, so for non-jweak the pointee should never be null.
     assert(external_guard || result != NULL, "Invalid JNI handle");
@@ -68,14 +68,28 @@
 inline oop JNIHandles::resolve(jobject handle) {
   oop result = NULL;
   if (handle != NULL) {
-    result = resolve_impl<false /* external_guard */ >(handle);
+    result = resolve_impl<0 /* decorators */, false /* external_guard */>(handle);
   }
   return result;
 }
 
+inline oop JNIHandles::resolve_no_keepalive(jobject handle) {
+  oop result = NULL;
+  if (handle != NULL) {
+    result = resolve_impl<AS_NO_KEEPALIVE, false /* external_guard */>(handle);
+  }
+  return result;
+}
+
+inline bool JNIHandles::is_same_object(jobject handle1, jobject handle2) {
+  oop obj1 = resolve_no_keepalive(handle1);
+  oop obj2 = resolve_no_keepalive(handle2);
+  return oopDesc::equals(obj1, obj2);
+}
+
 inline oop JNIHandles::resolve_non_null(jobject handle) {
   assert(handle != NULL, "JNI handle should not be null");
-  oop result = resolve_impl<false /* external_guard */ >(handle);
+  oop result = resolve_impl<0 /* decorators */, false /* external_guard */>(handle);
   assert(result != NULL, "NULL read from jni handle");
   return result;
 }
--- a/src/hotspot/share/runtime/sweeper.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/sweeper.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -149,7 +149,7 @@
 long     NMethodSweeper::_last_sweep                   = 0;    // Value of _time_counter when the last sweep happened
 int      NMethodSweeper::_seen                         = 0;    // Nof. nmethod we have currently processed in current pass of CodeCache
 
-volatile bool NMethodSweeper::_should_sweep            = true; // Indicates if we should invoke the sweeper
+volatile bool NMethodSweeper::_should_sweep            = false;// Indicates if we should invoke the sweeper
 volatile bool NMethodSweeper::_force_sweep             = false;// Indicates if we should force a sweep
 volatile int  NMethodSweeper::_bytes_changed           = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                                //   1) alive       -> not_entrant
@@ -717,12 +717,6 @@
     // stack we can safely convert it to a zombie method
     OrderAccess::loadload(); // _stack_traversal_mark and _state
     if (cm->can_convert_to_zombie()) {
-      // Clear ICStubs to prevent back patching stubs of zombie or flushed
-      // nmethods during the next safepoint (see ICStub::finalize).
-      {
-        CompiledICLocker ml(cm);
-        cm->clear_ic_stubs();
-      }
       // Code cache state change is tracked in make_zombie()
       cm->make_zombie();
       SWEEP(cm);
--- a/src/hotspot/share/runtime/thread.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/thread.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -231,7 +231,7 @@
   set_active_handles(NULL);
   set_free_handle_block(NULL);
   set_last_handle_mark(NULL);
-  DEBUG_ONLY(_missed_ic_stub_refill_mark = NULL);
+  DEBUG_ONLY(_missed_ic_stub_refill_verifier = NULL);
 
   // This initial value ==> never claimed.
   _oops_do_parity = 0;
@@ -2603,8 +2603,7 @@
 }
 
 void JavaThread::enable_stack_reserved_zone() {
-  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
-  assert(_stack_guard_state != stack_guard_enabled, "already enabled");
+  assert(_stack_guard_state == stack_guard_reserved_disabled, "inconsistent state");
 
   // The base notation is from the stack's point of view, growing downward.
   // We need to adjust it to work correctly with guard_memory()
@@ -2622,11 +2621,10 @@
 }
 
 void JavaThread::disable_stack_reserved_zone() {
-  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
-  assert(_stack_guard_state != stack_guard_reserved_disabled, "already disabled");
+  assert(_stack_guard_state == stack_guard_enabled, "inconsistent state");
 
   // Simply return if called for a thread that does not use guard pages.
-  if (_stack_guard_state == stack_guard_unused) return;
+  if (_stack_guard_state != stack_guard_enabled) return;
 
   // The base notation is from the stack's point of view, growing downward.
   // We need to adjust it to work correctly with guard_memory()
--- a/src/hotspot/share/runtime/thread.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/thread.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -81,6 +81,7 @@
 
 class GCTaskQueue;
 class ThreadClosure;
+class ICRefillVerifier;
 class IdealGraphPrinter;
 
 class Metadata;
@@ -329,15 +330,15 @@
  private:
 
 #ifdef ASSERT
-  void* _missed_ic_stub_refill_mark;
+  ICRefillVerifier* _missed_ic_stub_refill_verifier;
 
  public:
-  void* missed_ic_stub_refill_mark() {
-    return _missed_ic_stub_refill_mark;
+  ICRefillVerifier* missed_ic_stub_refill_verifier() {
+    return _missed_ic_stub_refill_verifier;
   }
 
-  void set_missed_ic_stub_refill_mark(void* mark) {
-    _missed_ic_stub_refill_mark = mark;
+  void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
+    _missed_ic_stub_refill_verifier = verifier;
   }
 #endif
 
--- a/src/hotspot/share/runtime/vframe.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/vframe.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -452,8 +452,10 @@
   _stop_at_java_call_stub = stop_at_java_call_stub;
 
   // skip top frame, as it may not be at safepoint
+  _prev_frame = top_frame;
   _frame  = top_frame.sender(&_reg_map);
   while (!fill_from_frame()) {
+    _prev_frame = _frame;
     _frame = _frame.sender(&_reg_map);
   }
 }
@@ -534,6 +536,37 @@
   }
 }
 
+javaVFrame* vframeStreamCommon::asJavaVFrame() {
+  javaVFrame* result = NULL;
+  if (_mode == compiled_mode) {
+    guarantee(_frame.is_compiled_frame(), "expected compiled Java frame");
+
+    // lazy update to register map
+    bool update_map = true;
+    RegisterMap map(_thread, update_map);
+    frame f = _prev_frame.sender(&map);
+
+    guarantee(f.is_compiled_frame(), "expected compiled Java frame");
+
+    compiledVFrame* cvf = compiledVFrame::cast(vframe::new_vframe(&f, &map, _thread));
+
+    guarantee(cvf->cb() == cb(), "wrong code blob");
+
+    // get the same scope as this stream
+    cvf = cvf->at_scope(_decode_offset, _vframe_id);
+
+    guarantee(cvf->scope()->decode_offset() == _decode_offset, "wrong scope");
+    guarantee(cvf->scope()->sender_decode_offset() == _sender_decode_offset, "wrong scope");
+    guarantee(cvf->vframe_id() == _vframe_id, "wrong vframe");
+
+    result = cvf;
+  } else {
+    result = javaVFrame::cast(vframe::new_vframe(&_frame, &_reg_map, _thread));
+  }
+  guarantee(result->method() == method(), "wrong method");
+  return result;
+}
+
 
 #ifndef PRODUCT
 void vframe::print() {
--- a/src/hotspot/share/runtime/vframe.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/vframe.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -278,12 +278,16 @@
 class vframeStreamCommon : StackObj {
  protected:
   // common
+  frame        _prev_frame;
   frame        _frame;
   JavaThread*  _thread;
   RegisterMap  _reg_map;
   enum { interpreted_mode, compiled_mode, at_end_mode } _mode;
 
+  // For compiled_mode
+  int _decode_offset;
   int _sender_decode_offset;
+  int _vframe_id;
 
   // Cached information
   Method* _method;
@@ -320,6 +324,8 @@
       return (CompiledMethod*) cb();
   }
 
+  javaVFrame* asJavaVFrame();
+
   // Frame type
   inline bool is_interpreted_frame() const;
   inline bool is_entry_frame() const;
--- a/src/hotspot/share/runtime/vframe.inline.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/vframe.inline.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -44,6 +44,7 @@
 
   // handle general case
   do {
+    _prev_frame = _frame;
     _frame = _frame.sender(&_reg_map);
   } while (!fill_from_frame());
 }
@@ -59,6 +60,7 @@
 
   _frame = _thread->last_frame();
   while (!fill_from_frame()) {
+    _prev_frame = _frame;
     _frame = _frame.sender(&_reg_map);
   }
 }
@@ -68,12 +70,14 @@
     return false;
   }
   fill_from_compiled_frame(_sender_decode_offset);
+  ++_vframe_id;
   return true;
 }
 
 
 inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
   _mode = compiled_mode;
+  _decode_offset = decode_offset;
 
   // Range check to detect ridiculous offsets.
   if (decode_offset == DebugInformationRecorder::serialized_null ||
@@ -118,6 +122,8 @@
 inline void vframeStreamCommon::fill_from_compiled_native_frame() {
   _mode = compiled_mode;
   _sender_decode_offset = DebugInformationRecorder::serialized_null;
+  _decode_offset = DebugInformationRecorder::serialized_null;
+  _vframe_id = 0;
   _method = nm()->method();
   _bci = 0;
 }
@@ -187,6 +193,7 @@
         decode_offset = pc_desc->scope_decode_offset();
       }
       fill_from_compiled_frame(decode_offset);
+      _vframe_id = 0;
     }
     return true;
   }
--- a/src/hotspot/share/runtime/vframe_hp.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/vframe_hp.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -252,6 +252,14 @@
   guarantee(_scope != NULL, "scope must be present");
 }
 
+compiledVFrame* compiledVFrame::at_scope(int decode_offset, int vframe_id) {
+  if (scope()->decode_offset() != decode_offset) {
+    ScopeDesc* scope = this->scope()->at_offset(decode_offset);
+    return new compiledVFrame(frame_pointer(), register_map(), thread(), scope, vframe_id);
+  }
+  assert(_vframe_id == vframe_id, "wrong frame id");
+  return this;
+}
 
 bool compiledVFrame::is_top() const {
   // FIX IT: Remove this when new native stubs are in place
--- a/src/hotspot/share/runtime/vframe_hp.hpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/vframe_hp.hpp	Tue Jan 15 10:55:26 2019 -0800
@@ -72,6 +72,9 @@
   // Returns the scopeDesc
   ScopeDesc* scope() const { return _scope; }
 
+  // Return the compiledVFrame for the desired scope
+  compiledVFrame* at_scope(int decode_offset, int vframe_id);
+
   // Returns SynchronizationEntryBCI or bci() (used for synchronization)
   int raw_bci() const;
 
--- a/src/hotspot/share/runtime/vmThread.cpp	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/hotspot/share/runtime/vmThread.cpp	Tue Jan 15 10:55:26 2019 -0800
@@ -206,7 +206,7 @@
   if (is_armed()) {
     jlong delay = (os::javaTimeMillis() - _arm_time);
     if (delay > AbortVMOnVMOperationTimeoutDelay) {
-      fatal("VM operation took too long: " SIZE_FORMAT " ms (timeout: " SIZE_FORMAT " ms)",
+      fatal("VM operation took too long: " JLONG_FORMAT " ms (timeout: " INTX_FORMAT " ms)",
             delay, AbortVMOnVMOperationTimeoutDelay);
     }
   }
--- a/src/java.base/share/classes/java/lang/Class.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/Class.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3420,8 +3420,8 @@
         StringBuilder sb = new StringBuilder();
         sb.append(getName() + "." + name + "(");
         if (argTypes != null) {
-            Stream.of(argTypes).map(c -> {return (c == null) ? "null" : c.getName();}).
-                collect(Collectors.joining(","));
+            sb.append(Stream.of(argTypes).map(c -> {return (c == null) ? "null" : c.getName();}).
+                      collect(Collectors.joining(",")));
         }
         sb.append(")");
         return sb.toString();
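
The fix above makes methodToString actually append the joined argument-type names; previously the Stream pipeline's result was computed and discarded, so the argument list was silently dropped. A standalone sketch of the repaired pattern (methodToString here is a free-standing illustration, not the private Class member):

    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    // The joined argument list is appended to the builder rather than discarded.
    final class MethodToStringDemo {
        static String methodToString(String className, String name, Class<?>... argTypes) {
            StringBuilder sb = new StringBuilder();
            sb.append(className).append('.').append(name).append('(');
            if (argTypes != null) {
                sb.append(Stream.of(argTypes)
                                .map(c -> c == null ? "null" : c.getName())
                                .collect(Collectors.joining(",")));
            }
            return sb.append(')').toString();
        }

        public static void main(String[] args) {
            // Prints "java.util.List.add(java.lang.Object,null)"
            System.out.println(methodToString("java.util.List", "add", Object.class, null));
        }
    }
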
--- a/src/java.base/share/classes/java/lang/ClassLoader.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/ClassLoader.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1864,12 +1864,12 @@
      * <p> The default system class loader is an implementation-dependent
      * instance of this class.
      *
-     * <p> If the system property "{@code java.system.class.loader}" is defined
-     * when this method is first invoked then the value of that property is
-     * taken to be the name of a class that will be returned as the system
-     * class loader.  The class is loaded using the default system class loader
-     * and must define a public constructor that takes a single parameter of
-     * type {@code ClassLoader} which is used as the delegation parent.  An
+     * <p> If the system property "{@systemProperty java.system.class.loader}"
+     * is defined when this method is first invoked then the value of that
+     * property is taken to be the name of a class that will be returned as the
+     * system class loader. The class is loaded using the default system class
+     * loader and must define a public constructor that takes a single parameter
+     * of type {@code ClassLoader} which is used as the delegation parent. An
      * instance is then created using this constructor with the default system
      * class loader as the parameter.  The resulting class loader is defined
      * to be the system class loader. During construction, the class loader
--- a/src/java.base/share/classes/java/lang/String.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/String.java	Tue Jan 15 10:55:26 2019 -0800
@@ -664,7 +664,7 @@
      *          object.
      */
     public int length() {
-        return isLatin1() ? value.length : value.length >> UTF16;
+        return value.length >> coder();
     }
 
     /**
@@ -2813,8 +2813,7 @@
      * lines are then concatenated and returned.
      * <p>
      * If {@code n > 0} then {@code n} spaces (U+0020) are inserted at the
-     * beginning of each line. {@link String#isBlank() Blank lines} are
-     * unaffected.
+     * beginning of each line.
      * <p>
      * If {@code n < 0} then up to {@code n}
      * {@link Character#isWhitespace(int) white space characters} are removed
@@ -2849,7 +2848,7 @@
                                              : lines();
         if (n > 0) {
             final String spaces = " ".repeat(n);
-            stream = stream.map(s -> s.isBlank() ? s : spaces + s);
+            stream = stream.map(s -> spaces + s);
         } else if (n == Integer.MIN_VALUE) {
             stream = stream.map(s -> s.stripLeading());
         } else if (n < 0) {
@@ -2869,119 +2868,12 @@
     }
 
     /**
-     * Removes vertical and horizontal white space margins from around the
-     * essential body of a multi-line string, while preserving relative
-     * indentation.
-     * <p>
-     * This string is first conceptually separated into lines as if by
-     * {@link String#lines()}.
-     * <p>
-     * Then, the <i>minimum indentation</i> (min) is determined as follows. For
-     * each non-blank line (as defined by {@link String#isBlank()}), the
-     * leading {@link Character#isWhitespace(int) white space} characters are
-     * counted. The <i>min</i> value is the smallest of these counts.
-     * <p>
-     * For each non-blank line, <i>min</i> leading white space characters are
-     * removed. Each white space character is treated as a single character. In
-     * particular, the tab character {@code "\t"} (U+0009) is considered a
-     * single character; it is not expanded.
-     * <p>
-     * Leading and trailing blank lines, if any, are removed. Trailing spaces are
-     * preserved.
-     * <p>
-     * Each line is suffixed with a line feed character {@code "\n"} (U+000A).
-     * <p>
-     * Finally, the lines are concatenated into a single string and returned.
-     *
-     * @apiNote
-     * This method's primary purpose is to shift a block of lines as far as
-     * possible to the left, while preserving relative indentation. Lines
-     * that were indented the least will thus have no leading white space.
-     *
-     * Example:
-     * <blockquote><pre>
-     * `
-     *      This is the first line
-     *          This is the second line
-     * `.align();
-     *
-     * returns
-     * This is the first line
-     *     This is the second line
-     * </pre></blockquote>
-     *
-     * @return string with margins removed and line terminators normalized
-     *
-     * @see String#lines()
-     * @see String#isBlank()
-     * @see String#indent(int)
-     * @see Character#isWhitespace(int)
-     *
-     * @since 12
-     */
-    public String align() {
-        return align(0);
-    }
-
-    /**
-     * Removes vertical and horizontal white space margins from around the
-     * essential body of a multi-line string, while preserving relative
-     * indentation and with optional indentation adjustment.
-     * <p>
-     * Invoking this method is equivalent to:
-     * <blockquote>
-     *  {@code this.align().indent(n)}
-     * </blockquote>
-     *
-     * @apiNote
-     * Examples:
-     * <blockquote><pre>
-     * `
-     *      This is the first line
-     *          This is the second line
-     * `.align(0);
-     *
-     * returns
-     * This is the first line
-     *     This is the second line
-     *
-     *
-     * `
-     *    This is the first line
-     *       This is the second line
-     * `.align(4);
-     * returns
-     *     This is the first line
-     *         This is the second line
-     * </pre></blockquote>
-     *
-     * @param n  number of leading white space characters
-     *           to add or remove
-     *
-     * @return string with margins removed, indentation adjusted and
-     *         line terminators normalized
-     *
-     * @see String#align()
-     *
-     * @since 12
-     */
-    public String align(int n) {
-        if (isEmpty()) {
-            return "";
-        }
-        int outdent = lines().filter(not(String::isBlank))
-                             .mapToInt(String::indexOfNonWhitespace)
-                             .min()
-                             .orElse(0);
-        // overflow-conscious code
-        int indent = n - outdent;
-        return indent(indent > n ? Integer.MIN_VALUE : indent, true);
-    }
-
-    /**
      * This method allows the application of a function to {@code this}
      * string. The function should expect a single String argument
      * and produce an {@code R} result.
+     * <p>
+     * Any exception thrown by {@code f()} will be propagated to the
+     * caller.
      *
     * @param f    functional interface to apply
      *
--- a/src/java.base/share/classes/java/lang/constant/ClassDesc.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/constant/ClassDesc.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,10 @@
      */
     static ClassDesc of(String packageName, String className) {
         ConstantUtils.validateBinaryClassName(requireNonNull(packageName));
-        validateMemberName(requireNonNull(className));
+        if (packageName.isEmpty()) {
+            return of(className);
+        }
+        validateMemberName(requireNonNull(className), false);
         return ofDescriptor(String.format("L%s%s%s;",
                                           binaryToInternal(packageName),
                                           (packageName.length() > 0 ? "/" : ""),
@@ -130,6 +133,9 @@
      */
     static ClassDesc ofDescriptor(String descriptor) {
         requireNonNull(descriptor);
+        if (descriptor.isEmpty()) {
+            throw new IllegalArgumentException(String.format("not a valid reference type descriptor: %s", descriptor));
+        }
         int depth = ConstantUtils.arrayDepth(descriptor);
         if (depth > ConstantUtils.MAX_ARRAY_TYPE_DESC_DIMENSIONS) {
             throw new IllegalArgumentException(String.format("Cannot create an array type descriptor with more than %d dimensions",
@@ -192,7 +198,7 @@
      * @throws IllegalArgumentException if the nested class name is invalid
      */
     default ClassDesc nested(String nestedName) {
-        validateMemberName(nestedName);
+        validateMemberName(nestedName, false);
         if (!isClassOrInterface())
             throw new IllegalStateException("Outer class is not a class or interface type");
         return ClassDesc.ofDescriptor(String.format("%s$%s;", dropLastChar(descriptorString()), nestedName));
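
Two refinements above: of(packageName, className) with an empty package now delegates to of(className) instead of formatting a descriptor around an empty package, and ofDescriptor rejects the empty string eagerly. A small sketch of the resulting behavior, assuming JDK 12's java.lang.constant API:

    import java.lang.constant.ClassDesc;

    // Sketch of the refined ClassDesc factory behavior after this change.
    public class ClassDescDemo {
        public static void main(String[] args) {
            // Empty package now routes through ClassDesc.of(String):
            ClassDesc d1 = ClassDesc.of("", "Foo");
            System.out.println(d1.descriptorString());   // "LFoo;"

            // Empty descriptors are rejected eagerly:
            try {
                ClassDesc.ofDescriptor("");
            } catch (IllegalArgumentException expected) {
                System.out.println("empty descriptor rejected");
            }
        }
    }
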
--- a/src/java.base/share/classes/java/lang/constant/ConstantUtils.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/constant/ConstantUtils.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
      * @return the name passed if valid
      * @throws IllegalArgumentException if the member name is invalid
      */
-    public static String validateMemberName(String name) {
+    public static String validateMemberName(String name, boolean method) {
         requireNonNull(name);
         if (name.length() == 0)
             throw new IllegalArgumentException("zero-length member name");
@@ -73,7 +73,7 @@
             char ch = name.charAt(i);
             if (ch == '.' || ch == ';' || ch == '[' || ch == '/')
                 throw new IllegalArgumentException("Invalid member name: " + name);
-            if (ch == '<' || ch == '>') {
+            if (method && (ch == '<' || ch == '>')) {
                 if (!pointyNames.contains(name))
                     throw new IllegalArgumentException("Invalid member name: " + name);
             }
@@ -126,8 +126,8 @@
 
         ++cur;  // skip '('
         while (cur < end && descriptor.charAt(cur) != ')') {
-            int len = matchSig(descriptor, cur, end);
-            if (len == 0 || descriptor.charAt(cur) == 'V')
+            int len = skipOverFieldSignature(descriptor, cur, end, false);
+            if (len == 0)
                 throw new IllegalArgumentException("Bad method descriptor: " + descriptor);
             ptypes.add(descriptor.substring(cur, cur + len));
             cur += len;
@@ -136,41 +136,103 @@
             throw new IllegalArgumentException("Bad method descriptor: " + descriptor);
         ++cur;  // skip ')'
 
-        int rLen = matchSig(descriptor, cur, end);
+        int rLen = skipOverFieldSignature(descriptor, cur, end, true);
         if (rLen == 0 || cur + rLen != end)
             throw new IllegalArgumentException("Bad method descriptor: " + descriptor);
         ptypes.add(0, descriptor.substring(cur, cur + rLen));
         return ptypes;
     }
 
+    private static final char JVM_SIGNATURE_ARRAY = '[';
+    private static final char JVM_SIGNATURE_BYTE = 'B';
+    private static final char JVM_SIGNATURE_CHAR = 'C';
+    private static final char JVM_SIGNATURE_CLASS = 'L';
+    private static final char JVM_SIGNATURE_ENDCLASS = ';';
+    private static final char JVM_SIGNATURE_ENUM = 'E';
+    private static final char JVM_SIGNATURE_FLOAT = 'F';
+    private static final char JVM_SIGNATURE_DOUBLE = 'D';
+    private static final char JVM_SIGNATURE_FUNC = '(';
+    private static final char JVM_SIGNATURE_ENDFUNC = ')';
+    private static final char JVM_SIGNATURE_INT = 'I';
+    private static final char JVM_SIGNATURE_LONG = 'J';
+    private static final char JVM_SIGNATURE_SHORT = 'S';
+    private static final char JVM_SIGNATURE_VOID = 'V';
+    private static final char JVM_SIGNATURE_BOOLEAN = 'Z';
+
     /**
      * Validates that the characters at [start, end) within the provided string
      * describe a valid field type descriptor.
-     *
-     * @param str the descriptor string
+     * @param descriptor the descriptor string
      * @param start the starting index into the string
      * @param end the ending index within the string
+     * @param voidOK is void acceptable?
      * @return the length of the descriptor, or 0 if it is not a descriptor
      * @throws IllegalArgumentException if the descriptor string is not valid
      */
-    static int matchSig(String str, int start, int end) {
-        if (start >= end || start >= str.length() || end > str.length())
-            return 0;
-        char c = str.charAt(start);
-        if (c == 'L') {
-            int endc = str.indexOf(';', start);
-            int badc = str.indexOf('.', start);
-            if (badc >= 0 && badc < endc)
-                return 0;
-            badc = str.indexOf('[', start);
-            if (badc >= 0 && badc < endc)
-                return 0;
-            return (endc < 0) ? 0 : endc - start + 1;
-        } else if (c == '[') {
-            int t = matchSig(str, start+1, end);
-            return (t > 0) ? t + 1 : 0;
-        } else {
-            return ("IJCSBFDZV".indexOf(c) >= 0) ? 1 : 0;
+    @SuppressWarnings("fallthrough")
+    static int skipOverFieldSignature(String descriptor, int start, int end, boolean voidOK) {
+        int arrayDim = 0;
+        int index = start;
+        while (index < end) {
+            switch (descriptor.charAt(index)) {
+                case JVM_SIGNATURE_VOID: if (!voidOK) { return 0; }
+                case JVM_SIGNATURE_BOOLEAN:
+                case JVM_SIGNATURE_BYTE:
+                case JVM_SIGNATURE_CHAR:
+                case JVM_SIGNATURE_SHORT:
+                case JVM_SIGNATURE_INT:
+                case JVM_SIGNATURE_FLOAT:
+                case JVM_SIGNATURE_LONG:
+                case JVM_SIGNATURE_DOUBLE:
+                    return index - start + 1;
+                case JVM_SIGNATURE_CLASS:
+                    // Skip leading 'L' and ignore first appearance of ';'
+                    index++;
+                    int indexOfSemi = descriptor.indexOf(';', index);
+                    if (indexOfSemi != -1) {
+                        String unqualifiedName = descriptor.substring(index, indexOfSemi);
+                        boolean legal = verifyUnqualifiedClassName(unqualifiedName);
+                        if (!legal) {
+                            return 0;
+                        }
+                        return index - start + unqualifiedName.length() + 1;
+                    }
+                    return 0;
+                case JVM_SIGNATURE_ARRAY:
+                    arrayDim++;
+                    if (arrayDim > MAX_ARRAY_TYPE_DESC_DIMENSIONS) {
+                        throw new IllegalArgumentException(String.format("Cannot create an array type descriptor with more than %d dimensions",
+                                ConstantUtils.MAX_ARRAY_TYPE_DESC_DIMENSIONS));
+                    }
+                    // The rest of what's there better be a legal descriptor
+                    index++;
+                    voidOK = false;
+                    break;
+                default:
+                    return 0;
+            }
         }
+        return 0;
+    }
+
+    static boolean verifyUnqualifiedClassName(String name) {
+        for (int index = 0; index < name.length(); index++) {
+            char ch = name.charAt(index);
+            if (ch < 128) {
+                if (ch == '.' || ch == ';' || ch == '[' ) {
+                    return false;   // do not permit '.', ';', or '['
+                }
+                if (ch == '/') {
+                    // check for '//' or leading or trailing '/' which are not legal
+                    // unqualified name must not be empty
+                    if (index == 0 || index + 1 >= name.length() || name.charAt(index + 1) == '/') {
+                        return false;
+                    }
+                }
+            } else {
+                index++;
+            }
+        }
+        return true;
     }
 }
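
skipOverFieldSignature replaces matchSig with a verifier-style scan: primitive descriptors are a single character, 'L...;' class names must pass the unqualified-name rules enforced by verifyUnqualifiedClassName (no '.', ';' or '[', and no empty or doubled '/'-separated segments), and each '[' bumps the array dimension up to MAX_ARRAY_TYPE_DESC_DIMENSIONS. A sketch of how a few descriptors fare when observed through the public ClassDesc.ofDescriptor entry point:

    import java.lang.constant.ClassDesc;

    // Sketch: reference/array descriptors accepted or rejected by the
    // verifier-style scan above, via the public ofDescriptor entry point.
    public class DescriptorScanDemo {
        public static void main(String[] args) {
            check("Ljava/lang/String;", true);   // valid class descriptor
            check("[[I", true);                  // valid 2-D int array
            check("Ljava.lang.String;", false);  // '.' not allowed in internal form
            check("L//;", false);                // empty segments rejected
            check("[V", false);                  // arrays of void are rejected
        }

        static void check(String desc, boolean expectValid) {
            try {
                ClassDesc.ofDescriptor(desc);
                System.out.println(desc + " -> accepted" + (expectValid ? "" : " (unexpected)"));
            } catch (IllegalArgumentException e) {
                System.out.println(desc + " -> rejected" + (expectValid ? " (unexpected)" : ""));
            }
        }
    }
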
--- a/src/java.base/share/classes/java/lang/constant/DirectMethodHandleDescImpl.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/constant/DirectMethodHandleDescImpl.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
 
         requireNonNull(kind);
         validateClassOrInterface(requireNonNull(owner));
-        validateMemberName(requireNonNull(name));
+        validateMemberName(requireNonNull(name), true);
         requireNonNull(type);
 
         switch (kind) {
--- a/src/java.base/share/classes/java/lang/constant/DynamicCallSiteDesc.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/constant/DynamicCallSiteDesc.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
                                 String invocationName,
                                 MethodTypeDesc invocationType,
                                 ConstantDesc[] bootstrapArgs) {
-        this.invocationName = validateMemberName(requireNonNull(invocationName));
+        this.invocationName = validateMemberName(requireNonNull(invocationName), true);
         this.invocationType = requireNonNull(invocationType);
         this.bootstrapMethod = requireNonNull(bootstrapMethod);
         this.bootstrapArgs = requireNonNull(bootstrapArgs.clone());
--- a/src/java.base/share/classes/java/lang/constant/DynamicConstantDesc.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/constant/DynamicConstantDesc.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,7 @@
                                   ClassDesc constantType,
                                   ConstantDesc... bootstrapArgs) {
         this.bootstrapMethod = requireNonNull(bootstrapMethod);
-        this.constantName = validateMemberName(requireNonNull(constantName));
+        this.constantName = validateMemberName(requireNonNull(constantName), true);
         this.constantType = requireNonNull(constantType);
         this.bootstrapArgs = requireNonNull(bootstrapArgs).clone();
 
--- a/src/java.base/share/classes/java/lang/constant/ReferenceClassDescImpl.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/constant/ReferenceClassDescImpl.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
      */
     ReferenceClassDescImpl(String descriptor) {
         requireNonNull(descriptor);
-        int len = ConstantUtils.matchSig(descriptor, 0, descriptor.length());
+        int len = ConstantUtils.skipOverFieldSignature(descriptor, 0, descriptor.length(), false);
         if (len == 0 || len == 1
             || len != descriptor.length())
             throw new IllegalArgumentException(String.format("not a valid reference type descriptor: %s", descriptor));
--- a/src/java.base/share/classes/java/lang/invoke/VarHandle.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/invoke/VarHandle.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1865,35 +1865,6 @@
     }
 
     /**
-     * Compare this {@linkplain VarHandle} with another object for equality.
-     * Two {@linkplain VarHandle}s are considered equal if they both describe the
-     * same instance field, both describe the same static field, both describe
-     * array elements for arrays with the same component type, or both describe
-     * the same component of an off-heap structure.
-     *
-     * @param o the other object
-     * @return Whether this {@linkplain VarHandle} is equal to the other object
-     */
-    @Override
-    public final boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        VarHandle that = (VarHandle) o;
-        return accessModeType(AccessMode.GET).equals(that.accessModeType(AccessMode.GET)) &&
-               internalEquals(that);
-    }
-
-    abstract boolean internalEquals(VarHandle vh);
-
-    @Override
-    public final int hashCode() {
-        return 31 * accessModeType(AccessMode.GET).hashCode() + internalHashCode();
-    }
-
-    abstract int internalHashCode();
-
-    /**
      * Returns a compact textual description of this {@linkplain VarHandle},
      * including the type of variable described, and a description of its coordinates.
      *
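
With the value-based equals/hashCode overrides removed here (and their internalEquals/internalHashCode implementations removed from the templates below), VarHandle falls back to Object's identity semantics: two handles describing the same field are no longer guaranteed to compare equal. A sketch of the observable consequence; Point is a hypothetical class with an instance int field x:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    // After this change, VarHandle equality is reference identity.
    public class VarHandleIdentityDemo {
        static class Point { int x; }

        public static void main(String[] args) throws ReflectiveOperationException {
            VarHandle a = MethodHandles.lookup().findVarHandle(Point.class, "x", int.class);
            VarHandle b = MethodHandles.lookup().findVarHandle(Point.class, "x", int.class);
            System.out.println(a.equals(b)); // may print false: identity, not structure
            System.out.println(a.equals(a)); // true
        }
    }
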
--- a/src/java.base/share/classes/java/lang/invoke/X-VarHandle.java.template	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/invoke/X-VarHandle.java.template	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,17 +64,6 @@
         }
 
         @Override
-        final boolean internalEquals(VarHandle vh) {
-            FieldInstanceReadOnly that = (FieldInstanceReadOnly) vh;
-            return fieldOffset == that.fieldOffset;
-        }
-
-        @Override
-        final int internalHashCode() {
-            return Long.hashCode(fieldOffset);
-        }
-
-        @Override
         public Optional<VarHandleDesc> describeConstable() {
             var receiverTypeRef = receiverType.describeConstable();
             var fieldTypeRef = {#if[Object]?fieldType:$type$.class}.describeConstable();
@@ -350,17 +339,6 @@
         }
 
         @Override
-        final boolean internalEquals(VarHandle vh) {
-            FieldStaticReadOnly that = (FieldStaticReadOnly) vh;
-            return base == that.base && fieldOffset == that.fieldOffset;
-        }
-
-        @Override
-        final int internalHashCode() {
-            return 31 * Long.hashCode(fieldOffset) + base.hashCode();
-        }
-
-        @Override
         public Optional<VarHandleDesc> describeConstable() {
             var fieldTypeRef = {#if[Object]?fieldType:$type$.class}.describeConstable();
             if (!fieldTypeRef.isPresent())
@@ -640,20 +618,6 @@
         }
 
         @Override
-        final boolean internalEquals(VarHandle vh) {
-            // Equality of access mode types of AccessMode.GET is sufficient for
-            // equality checks
-            return true;
-        }
-
-        @Override
-        final int internalHashCode() {
-            // The hash code of the access mode types of AccessMode.GET is
-            // sufficient for hash code generation
-            return 0;
-        }
-
-        @Override
         public Optional<VarHandleDesc> describeConstable() {
             var arrayTypeRef = {#if[Object]?arrayType:$type$[].class}.describeConstable();
             if (!arrayTypeRef.isPresent())
--- a/src/java.base/share/classes/java/lang/invoke/X-VarHandleByteArrayView.java.template	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/lang/invoke/X-VarHandleByteArrayView.java.template	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,17 +67,6 @@
             super(form);
             this.be = be;
         }
-
-        @Override
-        final boolean internalEquals(VarHandle vh) {
-            ByteArrayViewVarHandle that = (ByteArrayViewVarHandle) vh;
-            return be == that.be;
-        }
-
-        @Override
-        final int internalHashCode() {
-            return Boolean.hashCode(be);
-        }
     }
 
     static final class ArrayHandle extends ByteArrayViewVarHandle {
--- a/src/java.base/share/classes/java/net/URL.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/net/URL.java	Tue Jan 15 10:55:26 2019 -0800
@@ -304,7 +304,7 @@
      *     or all providers have been exhausted.
      * <li>If the previous step fails to find a protocol handler, the
      *     constructor reads the value of the system property:
-     *     <blockquote>{@code
+     *     <blockquote>{@systemProperty
      *         java.protocol.handler.pkgs
      *     }</blockquote>
      *     If the value of that system property is not {@code null},
--- a/src/java.base/share/classes/java/security/AccessController.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/security/AccessController.java	Tue Jan 15 10:55:26 2019 -0800
@@ -710,6 +710,13 @@
     }
 
     /**
+     * The value needs to be physically located in the frame, so that it
+     * can be found by a stack walk.
+     */
+    @Hidden
+    private static native void ensureMaterializedForStackWalk(Object o);
+
+    /**
      * Sanity check that the caller context is indeed privileged.
      *
      * Used by executePrivileged to make sure the frame is properly
@@ -734,6 +741,11 @@
                           AccessControlContext context,
                           Class<?> caller)
     {
+        // Ensure context has a physical value in the frame
+        if (context != null) {
+            ensureMaterializedForStackWalk(context);
+        }
+
         assert isPrivileged(); // sanity check invariant
         T result = action.run();
         assert isPrivileged(); // sanity check invariant
@@ -742,7 +754,6 @@
         // retrieved by getStackAccessControlContext().
         Reference.reachabilityFence(context);
         Reference.reachabilityFence(caller);
-        Reference.reachabilityFence(action);
         return result;
     }
 
@@ -761,6 +772,11 @@
                           Class<?> caller)
         throws Exception
     {
+        // Ensure context has a physical value in the frame
+        if (context != null) {
+            ensureMaterializedForStackWalk(context);
+        }
+
         assert isPrivileged(); // sanity check invariant
         T result = action.run();
         assert isPrivileged(); // sanity check invariant
@@ -769,7 +785,6 @@
         // retrieved by getStackAccessControlContext().
         Reference.reachabilityFence(context);
         Reference.reachabilityFence(caller);
-        Reference.reachabilityFence(action);
         return result;
     }
 
--- a/src/java.base/share/classes/java/time/chrono/JapaneseEra.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/time/chrono/JapaneseEra.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,7 +94,7 @@
  * dates before Meiji 6, January 1 are not supported.
  * The number of the valid eras may increase, as new eras may be
  * defined by the Japanese government. Once an era is defined,
- * subsequent versions of this class will add a singleton instance
+ * future versions of the platform may add a singleton instance
  * for it. The defined era is expected to have a consecutive integer
  * associated with it.
  *
--- a/src/java.base/share/classes/java/time/zone/ZoneRulesProvider.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/time/zone/ZoneRulesProvider.java	Tue Jan 15 10:55:26 2019 -0800
@@ -99,7 +99,7 @@
  * <p>
  * The Java virtual machine has a default provider that provides zone rules
  * for the time-zones defined by IANA Time Zone Database (TZDB). If the system
- * property {@code java.time.zone.DefaultZoneRulesProvider} is defined then
+ * property {@systemProperty java.time.zone.DefaultZoneRulesProvider} is defined then
  * it is taken to be the fully-qualified name of a concrete ZoneRulesProvider
  * class to be loaded as the default provider, using the system class loader.
  * If this system property is not defined, a system-default provider will be
--- a/src/java.base/share/classes/java/util/Currency.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/util/Currency.java	Tue Jan 15 10:55:26 2019 -0800
@@ -60,7 +60,7 @@
  * the <code>getInstance</code> methods.
  * <p>
  * Users can supersede the Java runtime currency data by means of the system
- * property {@code java.util.currency.data}. If this system property is
+ * property {@systemProperty java.util.currency.data}. If this system property is
  * defined then its value is the location of a properties file, the contents of
  * which are key/value pairs of the ISO 3166 country codes and the ISO 4217
  * currency data respectively.  The value part consists of three ISO 4217 values
--- a/src/java.base/share/classes/java/util/PropertyResourceBundle.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/util/PropertyResourceBundle.java	Tue Jan 15 10:55:26 2019 -0800
@@ -115,7 +115,7 @@
  * input stream, then the {@code PropertyResourceBundle} instance resets to the state
  * before the exception, re-reads the input stream in {@code ISO-8859-1}, and
  * continues reading. If the system property
- * {@code java.util.PropertyResourceBundle.encoding} is set to either
+ * {@systemProperty java.util.PropertyResourceBundle.encoding} is set to either
  * "ISO-8859-1" or "UTF-8", the input stream is solely read in that encoding,
  * and throws the exception if it encounters an invalid sequence.
  * If "ISO-8859-1" is specified, characters that cannot be represented in
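The surrounding javadoc describes the JDK 9+ loading strategy: property files are read as UTF-8 first and re-read as ISO-8859-1 on malformed input, unless the system property pins a single encoding. A standalone sketch of that fallback strategy, not the PropertyResourceBundle implementation itself:

    import java.nio.ByteBuffer;
    import java.nio.charset.CharacterCodingException;
    import java.nio.charset.CharsetDecoder;
    import java.nio.charset.CodingErrorAction;
    import java.nio.charset.StandardCharsets;

    public class FallbackDecode {
        // Try strict UTF-8 first; on malformed input fall back to
        // ISO-8859-1, which maps every byte and therefore cannot fail.
        static String decode(byte[] bytes) {
            CharsetDecoder utf8 = StandardCharsets.UTF_8.newDecoder()
                    .onMalformedInput(CodingErrorAction.REPORT)
                    .onUnmappableCharacter(CodingErrorAction.REPORT);
            try {
                return utf8.decode(ByteBuffer.wrap(bytes)).toString();
            } catch (CharacterCodingException e) {
                return new String(bytes, StandardCharsets.ISO_8859_1);
            }
        }

        public static void main(String[] args) {
            byte[] latin1 = { (byte) 0xE9 };     // 'é' in ISO-8859-1, invalid UTF-8
            System.out.println(decode(latin1));  // falls back and prints é
        }
    }
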
--- a/src/java.base/share/classes/java/util/jar/Manifest.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/util/jar/Manifest.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,14 +58,10 @@
     // associated JarVerifier, not null when called by JarFile::getManifest.
     private final JarVerifier jv;
 
-    // name of the corresponding jar archive if available.
-    private final String jarFilename;
-
     /**
      * Constructs a new, empty Manifest.
      */
     public Manifest() {
-        jarFilename = null;
         jv = null;
     }
 
@@ -84,7 +80,7 @@
      *
      * @param is the input stream containing manifest data
      * @param jarFilename the name of the corresponding jar archive if available
-     * @throws IOException if an I/O error has occured
+     * @throws IOException if an I/O error has occurred
      */
     Manifest(InputStream is, String jarFilename) throws IOException {
         this(null, is, jarFilename);
@@ -93,10 +89,14 @@
     /**
      * Constructs a new Manifest from the specified input stream
      * and associates it with a JarVerifier.
+     *
+     * @param jv the JarVerifier to use
+     * @param is the input stream containing manifest data
+     * @param jarFilename the name of the corresponding jar archive if available
+     * @throws IOException if an I/O error has occurred
      */
     Manifest(JarVerifier jv, InputStream is, String jarFilename) throws IOException {
-        read(is);
-        this.jarFilename = jarFilename;
+        read(is, jarFilename);
         this.jv = jv;
     }
 
@@ -108,7 +108,6 @@
     public Manifest(Manifest man) {
         attr.putAll(man.getMainAttributes());
         entries.putAll(man.getEntries());
-        jarFilename = null;
         jv = man.jv;
     }
 
@@ -250,6 +249,10 @@
      * @exception IOException if an I/O error has occurred
      */
     public void read(InputStream is) throws IOException {
+        read(is, null);
+    }
+
+    private void read(InputStream is, String jarFilename) throws IOException {
         // Buffered input stream for reading manifest data
         FastInputStream fis = new FastInputStream(is);
         // Line buffer
@@ -285,7 +288,7 @@
             if (name == null) {
                 name = parseName(lbuf, len);
                 if (name == null) {
-                    throw new IOException("invalid manifest format"
+                    throw new IOException("invalid manifest format ("
                               + getErrorPosition(jarFilename, lineNumber) + ")");
                 }
                 if (fis.peek() == ' ') {
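The refactoring above removes the stored jarFilename field and instead threads the name through a private overload, so the public read(InputStream) keeps its contract while parse errors can still report their source. A hedged sketch of the same delegation pattern; Parser and errorPosition are illustrative names, not the JDK code:

    import java.io.IOException;
    import java.io.InputStream;

    class Parser {
        public void read(InputStream in) throws IOException {
            read(in, null);                      // no source name available
        }

        // The overload carries diagnostic context as a parameter rather than
        // instance state, so the name is not retained after parsing.
        private void read(InputStream in, String sourceName) throws IOException {
            int lineNumber = 1;                  // trivial stand-in for real parsing
            if (in.read() == -1) {
                throw new IOException("invalid format ("
                        + errorPosition(sourceName, lineNumber) + ")");
            }
        }

        private static String errorPosition(String source, int line) {
            return (source == null ? "" : source + ", ") + "line " + line;
        }
    }
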
--- a/src/java.base/share/classes/java/util/jar/Pack200.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/util/jar/Pack200.java	Tue Jan 15 10:55:26 2019 -0800
@@ -112,7 +112,7 @@
     /**
      * Obtain new instance of a class that implements Packer.
      * <ul>
-     * <li><p>If the system property {@code java.util.jar.Pack200.Packer}
+     * <li><p>If the system property {@systemProperty java.util.jar.Pack200.Packer}
      * is defined, then the value is taken to be the fully-qualified name
      * of a concrete implementation class, which must implement Packer.
      * This class is loaded and instantiated.  If this process fails
@@ -138,7 +138,7 @@
     /**
      * Obtain new instance of a class that implements Unpacker.
      * <ul>
-     * <li><p>If the system property {@code java.util.jar.Pack200.Unpacker}
+     * <li><p>If the system property {@systemProperty java.util.jar.Pack200.Unpacker}
      * is defined, then the value is taken to be the fully-qualified
      * name of a concrete implementation class, which must implement Unpacker.
      * The class is loaded and instantiated.  If this process fails
--- a/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java	Tue Jan 15 10:55:26 2019 -0800
@@ -113,7 +113,7 @@
  * described above as if the locale was not supported.
  * <p>
  * The search order of locale sensitive services can
- * be configured by using the "java.locale.providers" system property.
+ * be configured by using the {@systemProperty java.locale.providers} system property.
  * This system property declares the user's preferred order for looking up
  * the locale sensitive services separated by a comma. It is only read at
  * the Java runtime startup, so the later call to System.setProperty() won't
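Since the property is consulted only at startup, the provider order must be set on the command line; a minimal illustration (LocaleOrder is invented):

    public class LocaleOrder {
        public static void main(String[] args) {
            // Launch as: java -Djava.locale.providers=SPI,CLDR,COMPAT LocaleOrder
            // Calling System.setProperty here instead would not change lookups.
            System.out.println(System.getProperty("java.locale.providers"));
        }
    }
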
--- a/src/java.base/share/classes/sun/security/provider/certpath/DistributionPointFetcher.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/sun/security/provider/certpath/DistributionPointFetcher.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -770,7 +770,7 @@
          *
          * In practice, conforming CAs MUST use the key identifier method,
          * and MUST include authority key identifier extension in all CRLs
-         * issued. [section 5.2.1, RFC 2459]
+         * issued. [section 5.2.1, RFC 5280]
          */
         AuthorityKeyIdentifierExtension crlAKID = crl.getAuthKeyIdExtension();
         issuerSelector.setSkiAndSerialNumber(crlAKID);
--- a/src/java.base/share/classes/sun/security/provider/certpath/ForwardBuilder.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/sun/security/provider/certpath/ForwardBuilder.java	Tue Jan 15 10:55:26 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -668,7 +668,7 @@
      * Verifies a matching certificate.
      *
      * This method executes the validation steps in the PKIX path
-     * validation algorithm <draft-ietf-pkix-new-part1-08.txt> which were
+     * validation algorithm, RFC 5280, which were
      * not satisfied by the selection criteria used by getCertificates()
      * to find the certs and only the steps that can be executed in a
      * forward direction (target to trust anchor). Those steps that can
--- a/src/java.base/share/classes/sun/security/ssl/Alert.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/sun/security/ssl/Alert.java	Tue Jan 15 10:55:26 2019 -0800
@@ -122,11 +122,17 @@
             reason = (cause != null) ? cause.getMessage() : "";
         }
 
-        SSLException ssle = (this == UNEXPECTED_MESSAGE) ?
-                new SSLProtocolException(reason) :
-                (handshakeOnly ?
-                        new SSLHandshakeException(reason) :
-                        new SSLException(reason));
+        SSLException ssle;
+        if ((cause != null) && (cause instanceof IOException)) {
+            ssle = new SSLException(reason);
+        } else if ((this == UNEXPECTED_MESSAGE)) {
+            ssle = new SSLProtocolException(reason);
+        } else if (handshakeOnly) {
+            ssle = new SSLHandshakeException(reason);
+        } else {
+            ssle = new SSLException(reason);
+        }
+
         if (cause != null) {
             ssle.initCause(cause);
         }
@@ -187,7 +193,7 @@
             //      AlertDescription description;
             //  } Alert;
             if (m.remaining() != 2) {
-                context.fatal(Alert.ILLEGAL_PARAMETER,
+                throw context.fatal(Alert.ILLEGAL_PARAMETER,
                     "Invalid Alert message: no sufficient data");
             }
 
@@ -241,14 +247,14 @@
                 if (tc.peerUserCanceled) {
                     tc.closeOutbound();
                 } else if (tc.handshakeContext != null) {
-                    tc.fatal(Alert.UNEXPECTED_MESSAGE,
+                    throw tc.fatal(Alert.UNEXPECTED_MESSAGE,
                             "Received close_notify during handshake");
                 }
             } else if (alert == Alert.USER_CANCELED) {
                 if (level == Level.WARNING) {
                     tc.peerUserCanceled = true;
                 } else {
-                    tc.fatal(alert,
+                    throw tc.fatal(alert,
                             "Received fatal close_notify alert", true, null);
                 }
             } else if ((level == Level.WARNING) && (alert != null)) {
@@ -263,7 +269,7 @@
                             alert != Alert.NO_CERTIFICATE ||
                             (tc.sslConfig.clientAuthType !=
                                     ClientAuthType.CLIENT_AUTH_REQUESTED)) {
-                        tc.fatal(Alert.HANDSHAKE_FAILURE,
+                        throw tc.fatal(Alert.HANDSHAKE_FAILURE,
                             "received handshake warning: " + alert.description);
                     }  // Otherwise, ignore the warning
                 }   // Otherwise, ignore the warning.
@@ -276,7 +282,7 @@
                     diagnostic = "Received fatal alert: " + alert.description;
                 }
 
-                tc.fatal(alert, diagnostic, true, null);
+                throw tc.fatal(alert, diagnostic, true, null);
             }
         }
     }
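The recurring change in this file, tc.fatal(...) becoming throw tc.fatal(...), works by giving the always-throwing helper a declared return type: call sites can then mark the path as terminated, which also removes the unreachable "make the compiler happy" returns deleted in the hunks below. A minimal sketch of the idiom, not the JDK's actual TransportContext:

    import javax.net.ssl.SSLException;

    class Transport {
        // Never returns normally; the SSLException return type exists only
        // so that callers can write `throw fatal(...)`.
        SSLException fatal(String reason) throws SSLException {
            // ... send the alert and close the transport here ...
            throw new SSLException(reason);
        }

        void demo(boolean bad) throws SSLException {
            if (bad) {
                throw fatal("Invalid message");  // no unreachable return needed
            }
            // The compiler now knows this point is reached only when !bad.
        }
    }
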
--- a/src/java.base/share/classes/sun/security/ssl/AlpnExtension.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/sun/security/ssl/AlpnExtension.java	Tue Jan 15 10:55:26 2019 -0800
@@ -174,7 +174,8 @@
                         SSLLogger.severe(
                                 "Application protocol name cannot be empty");
                     }
-                    chc.conContext.fatal(Alert.ILLEGAL_PARAMETER,
+
+                    throw chc.conContext.fatal(Alert.ILLEGAL_PARAMETER,
                             "Application protocol name cannot be empty");
                 }
 
@@ -189,7 +190,8 @@
                                 ") exceeds the size limit (" +
                                 MAX_AP_LENGTH + " bytes)");
                     }
-                    chc.conContext.fatal(Alert.ILLEGAL_PARAMETER,
+
+                    throw chc.conContext.fatal(Alert.ILLEGAL_PARAMETER,
                                 "Application protocol name (" + ap +
                                 ") exceeds the size limit (" +
                                 MAX_AP_LENGTH + " bytes)");
@@ -204,7 +206,8 @@
                                 ") exceed the size limit (" +
                                 MAX_AP_LIST_LENGTH + " bytes)");
                     }
-                    chc.conContext.fatal(Alert.ILLEGAL_PARAMETER,
+
+                    throw chc.conContext.fatal(Alert.ILLEGAL_PARAMETER,
                                 "The configured application protocols (" +
                                 Arrays.toString(laps) +
                                 ") exceed the size limit (" +
@@ -283,8 +286,7 @@
             try {
                 spec = new AlpnSpec(buffer);
             } catch (IOException ioe) {
-                shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
-                return;     // fatal() always throws, make the compiler happy.
+                throw shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
             }
 
             // Update the context.
@@ -302,7 +304,7 @@
                 }
 
                 if (!matched) {
-                    shc.conContext.fatal(Alert.NO_APPLICATION_PROTOCOL,
+                    throw shc.conContext.fatal(Alert.NO_APPLICATION_PROTOCOL,
                             "No matching application layer protocol values");
                 }
             }   // Otherwise, applicationProtocol will be set by the
@@ -379,7 +381,8 @@
                     if ((shc.applicationProtocol == null) ||
                             (!shc.applicationProtocol.isEmpty() &&
                             !alps.contains(shc.applicationProtocol))) {
-                        shc.conContext.fatal(Alert.NO_APPLICATION_PROTOCOL,
+                        throw shc.conContext.fatal(
+                            Alert.NO_APPLICATION_PROTOCOL,
                             "No matching application layer protocol values");
                     }
                 }
@@ -391,7 +394,8 @@
                     if ((shc.applicationProtocol == null) ||
                             (!shc.applicationProtocol.isEmpty() &&
                             !alps.contains(shc.applicationProtocol))) {
-                        shc.conContext.fatal(Alert.NO_APPLICATION_PROTOCOL,
+                        throw shc.conContext.fatal(
+                            Alert.NO_APPLICATION_PROTOCOL,
                             "No matching application layer protocol values");
                     }
                 }
@@ -454,7 +458,7 @@
             if (requestedAlps == null ||
                     requestedAlps.applicationProtocols == null ||
                     requestedAlps.applicationProtocols.isEmpty()) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
                     "Unexpected " + SSLExtension.CH_ALPN.name + " extension");
             }
 
@@ -463,13 +467,12 @@
             try {
                 spec = new AlpnSpec(buffer);
             } catch (IOException ioe) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
-                return;     // fatal() always throws, make the compiler happy.
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
             }
 
             // Only one application protocol is allowed.
             if (spec.applicationProtocols.size() != 1) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
                     "Invalid " + SSLExtension.CH_ALPN.name + " extension: " +
                     "Only one application protocol name " +
                     "is allowed in ServerHello message");
@@ -478,7 +481,7 @@
             // The application protocol in the response must be one of those requested.
             if (!requestedAlps.applicationProtocols.containsAll(
                     spec.applicationProtocols)) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
                     "Invalid " + SSLExtension.CH_ALPN.name + " extension: " +
                     "Only client specified application protocol " +
                     "is allowed in ServerHello message");
--- a/src/java.base/share/classes/sun/security/ssl/CertSignAlgsExtension.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/sun/security/ssl/CertSignAlgsExtension.java	Tue Jan 15 10:55:26 2019 -0800
@@ -153,8 +153,7 @@
             try {
                 spec = new SignatureSchemesSpec(buffer);
             } catch (IOException ioe) {
-                shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
-                return;     // fatal() always throws, make the compiler happy.
+                throw shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
             }
 
             // Update the context.
@@ -297,8 +296,7 @@
             try {
                 spec = new SignatureSchemesSpec(buffer);
             } catch (IOException ioe) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
-                return;     // fatal() always throws, make the compiler happy.
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
             }
 
             // Update the context.
--- a/src/java.base/share/classes/sun/security/ssl/CertStatusExtension.java	Thu Dec 13 11:51:06 2018 -0800
+++ b/src/java.base/share/classes/sun/security/ssl/CertStatusExtension.java	Tue Jan 15 10:55:26 2019 -0800
@@ -606,8 +606,7 @@
             try {
                 spec = new CertStatusRequestSpec(buffer);
             } catch (IOException ioe) {
-                shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
-                return;     // fatal() always throws, make the compiler happy.
+                throw shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
             }
 
             // Update the context.
@@ -711,13 +710,13 @@
             CertStatusRequestSpec requestedCsr = (CertStatusRequestSpec)
                     chc.handshakeExtensions.get(CH_STATUS_REQUEST);
             if (requestedCsr == null) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
                     "Unexpected status_request extension in ServerHello");
             }
 
             // Parse the extension.
             if (buffer.hasRemaining()) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
                   "Invalid status_request extension in ServerHello message: " +
                   "the extension data must be empty");
             }
@@ -964,8 +963,7 @@
             try {
                 spec = new CertStatusRequestV2Spec(buffer);
             } catch (IOException ioe) {
-                shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
-                return;     // fatal() always throws, make the compiler happy.
+                throw shc.conContext.fatal(Alert.UNEXPECTED_MESSAGE, ioe);
             }
 
             // Update the context.
@@ -1067,13 +1065,13 @@
             CertStatusRequestV2Spec requestedCsr = (CertStatusRequestV2Spec)
                     chc.handshakeExtensions.get(CH_STATUS_REQUEST_V2);
             if (requestedCsr == null) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
                     "Unexpected status_request_v2 extension in ServerHello");
             }
 
             // Parse the extension.
             if (buffer.hasRemaining()) {
-                chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
+                throw chc.conContext.fatal(Alert.UNEXPECTED_MESSAGE,
                   "Invalid status_request_v2 extension in ServerHello: " +
                   "the extension data must be empty");
             }
@@ -1157,10 +1155,10 @@
                                 respBytes);
                 producedData = certResp.toByteArray();
             } catch (CertificateException ce) {
-                shc.conContext.fatal(Alert.BAD_CERTIFICATE,
+                throw shc.conContext.fatal(Alert.BAD_CERTIFICATE,
                         "Failed to parse server certificates", ce);
             } catch (IOException ioe) {
-                shc.conContext.fatal(Alert.BAD_CERT_STATUS_RESPONSE,
+                throw shc.conContext.fatal(Alert.BAD_CERT_STATUS_RESPONSE,
                         "Failed to parse certificate status response", ioe);
             }
 
@@ -1188,8 +1186,7 @@
             try {
                 spec = new CertStatusResponseSpec(buffer);
             } catch (IOException ioe) {
-                chc.conContext.fatal(Alert.DECODE_ERROR, ioe);
-                return;     // fatal() always throws, make the compiler happy.