changeset 51287:4f11cac95184

Merge
author prr
date Tue, 12 Jun 2018 15:14:22 -0700
parents 11f36b771afd aeb7fb702890
children e88bc8f0321c
files test/jdk/java/lang/Thread/StopThrowable.java
diffstat 415 files changed, 31261 insertions(+), 566 deletions(-)
--- a/make/InitSupport.gmk	Tue Jun 12 14:53:57 2018 -0700
+++ b/make/InitSupport.gmk	Tue Jun 12 15:14:22 2018 -0700
@@ -34,6 +34,9 @@
 
 ifeq ($(HAS_SPEC),)
 
+  # COMMA is defined in spec.gmk, but that is not included yet
+  COMMA := ,
+
   # Include the corresponding closed file, if present.
   ifneq ($(CUSTOM_MAKE_DIR), )
     -include $(CUSTOM_MAKE_DIR)/InitSupport.gmk
@@ -531,8 +534,6 @@
 define ParseLogOption
   ifneq ($$(findstring $1, $$(LOG)),)
     override $2 := true
-    # COMMA is defined in spec.gmk, but that is not included yet
-    COMMA := ,
     # First try to remove ",<option>" if it exists, otherwise just remove "<option>"
     LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
     # We might have ended up with a leading comma. Remove it. Need override
@@ -550,8 +551,6 @@
     # Make words out of the comma-separated list and find the one with opt=val
     value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
     override $2 := $$(value)
-    # COMMA is defined in spec.gmk, but that is not included yet
-    COMMA := ,
     # First try to remove ",<option>" if it exists, otherwise just remove "<option>"
     LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
         $$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
--- a/make/autoconf/hotspot.m4	Tue Jun 12 14:53:57 2018 -0700
+++ b/make/autoconf/hotspot.m4	Tue Jun 12 15:14:22 2018 -0700
@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc nmt cds \
+    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
@@ -328,6 +328,19 @@
     fi
   fi
 
+  # Only enable ZGC on Linux x86_64
+  AC_MSG_CHECKING([if zgc should be built])
+  if HOTSPOT_CHECK_JVM_FEATURE(zgc); then
+    if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
+      AC_MSG_RESULT([yes])
+    else
+      DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
+      AC_MSG_RESULT([no, platform not supported])
+    fi
+  else
+    AC_MSG_RESULT([no])
+  fi
+
   # Turn on additional features based on other parts of configure
   if test "x$INCLUDE_DTRACE" = "xtrue"; then
     JVM_FEATURES="$JVM_FEATURES dtrace"
@@ -410,7 +423,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc jni-check jvmti management nmt services vm-structs"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc jni-check jvmti management nmt services vm-structs"
   if test "x$ENABLE_CDS" = "xtrue"; then
     NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cds"
   fi
--- a/make/autoconf/spec.gmk.in	Tue Jun 12 14:53:57 2018 -0700
+++ b/make/autoconf/spec.gmk.in	Tue Jun 12 15:14:22 2018 -0700
@@ -858,7 +858,12 @@
 else ifneq ($(DEBUG_LEVEL), release)
   DEBUG_PART := -$(DEBUG_LEVEL)
 endif
-JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
+ifeq ($(OPENJDK_TARGET_OS), windows)
+  JDK_BUNDLE_EXTENSION := zip
+else
+  JDK_BUNDLE_EXTENSION := tar.gz
+endif
+JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).$(JDK_BUNDLE_EXTENSION)
 JDK_SYMBOLS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
 TEST_DEMOS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests-demos$(DEBUG_PART).tar.gz
 TEST_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests$(DEBUG_PART).tar.gz
--- a/make/conf/jib-profiles.js	Tue Jun 12 14:53:57 2018 -0700
+++ b/make/conf/jib-profiles.js	Tue Jun 12 15:14:22 2018 -0700
@@ -272,13 +272,14 @@
      */
     common.main_profile_artifacts = function (o) {
         var jdk_subdir = (o.jdk_subdir != null ? o.jdk_subdir : "jdk-" + data.version);
+        var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz");
         var pf = o.platform
         return {
             artifacts: {
                 jdk: {
-                    local: "bundles/\\(jdk.*bin.tar.gz\\)",
+                    local: "bundles/\\(jdk.*bin." + jdk_suffix + "\\)",
                     remote: [
-                        "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin.tar.gz",
+                        "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin." + jdk_suffix,
                         "bundles/" + pf + "/\\1"
                     ],
                     subdir: jdk_subdir,
@@ -320,13 +321,14 @@
      */
     common.debug_profile_artifacts = function (o) {
         var jdk_subdir = "jdk-" + data.version + "/fastdebug";
+        var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz");
         var pf = o.platform
         return {
             artifacts: {
                 jdk: {
-                    local: "bundles/\\(jdk.*bin-debug.tar.gz\\)",
+                    local: "bundles/\\(jdk.*bin-debug." + jdk_suffix + "\\)",
                     remote: [
-                        "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-debug.tar.gz",
+                        "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-debug." + jdk_suffix,
                         "bundles/" + pf + "/\\1"
                     ],
                     subdir: jdk_subdir,
@@ -590,9 +592,11 @@
         },
         "windows-x64": {
             platform: "windows-x64",
+            jdk_suffix: "zip",
         },
         "windows-x86": {
             platform: "windows-x86",
+            jdk_suffix: "zip",
         },
        "linux-aarch64": {
             platform: "linux-aarch64",
@@ -690,6 +694,14 @@
                        profiles[openName].artifacts["jdk"].remote));
     });
 
+    // Enable ZGC in linux-x64-open builds
+    [ "linux-x64-open" ].forEach(function (name) {
+        var configureArgs = { configure_args: [ "--with-jvm-features=zgc" ] };
+        var debugName = name + common.debug_suffix;
+        profiles[name] = concatObjects(profiles[name], configureArgs);
+        profiles[debugName] = concatObjects(profiles[debugName], configureArgs);
+    });
+
     // Profiles used to run tests. Used in JPRT and Mach 5.
     var testOnlyProfiles = {
         "run-test-jprt": {
--- a/make/data/jdwp/jdwp.spec	Tue Jun 12 14:53:57 2018 -0700
+++ b/make/data/jdwp/jdwp.spec	Tue Jun 12 15:14:22 2018 -0700
@@ -1997,8 +1997,7 @@
         )
     )
     (Command Stop=10
-        "Stops the thread with an asynchronous exception, as if done by "
-        "java.lang.Thread.stop "
+        "Stops the thread with an asynchronous exception. "
         (Out
             (threadObject thread "The thread object ID. ")
             (object throwable "Asynchronous exception. This object must "
--- a/make/hotspot/lib/JvmFeatures.gmk	Tue Jun 12 14:53:57 2018 -0700
+++ b/make/hotspot/lib/JvmFeatures.gmk	Tue Jun 12 15:14:22 2018 -0700
@@ -155,6 +155,16 @@
   JVM_EXCLUDE_FILES += psMarkSweep.cpp psMarkSweepDecorator.cpp
 endif
 
+ifneq ($(call check-jvm-feature, epsilongc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_EPSILONGC=0
+  JVM_EXCLUDE_PATTERNS += gc/epsilon
+endif
+
+ifneq ($(call check-jvm-feature, zgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
+  JVM_EXCLUDE_PATTERNS += gc/z
+endif
+
 ifneq ($(call check-jvm-feature, jfr), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
   JVM_EXCLUDE_PATTERNS += jfr
--- a/make/lib/CoreLibraries.gmk	Tue Jun 12 14:53:57 2018 -0700
+++ b/make/lib/CoreLibraries.gmk	Tue Jun 12 15:14:22 2018 -0700
@@ -347,7 +347,8 @@
       EXCLUDE_FILES := $(LIBJLI_EXCLUDE_FILES), \
       EXTRA_FILES := $(LIBJLI_EXTRA_FILES), \
       OPTIMIZATION := HIGH, \
-      CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS), \
+      CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS) \
+          $(addprefix -I, $(LIBJLI_SRC_DIRS)), \
       ARFLAGS := $(ARFLAGS), \
       OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjli_static))
 
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -73,19 +73,20 @@
   }
 
   if (_index->is_cpu_register()) {
-    __ mov(r22, _index->as_register());
+    __ mov(rscratch1, _index->as_register());
   } else {
-    __ mov(r22, _index->as_jint());
+    __ mov(rscratch1, _index->as_jint());
   }
   Runtime1::StubID stub_id;
   if (_throw_index_out_of_bounds_exception) {
     stub_id = Runtime1::throw_index_exception_id;
   } else {
     assert(_array != NULL, "sanity");
-    __ mov(r23, _array->as_pointer_register());
+    __ mov(rscratch2, _array->as_pointer_register());
     stub_id = Runtime1::throw_range_check_failed_id;
   }
-  __ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
+  __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
+  __ blr(lr);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   debug_only(__ should_not_reach_here());
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -323,7 +323,7 @@
 
 
 // target: the entry point of the method that creates and posts the exception oop
-// has_argument: true if the exception needs arguments (passed in r22 and r23)
+// has_argument: true if the exception needs arguments (passed in rscratch1 and rscratch2)
 
 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
   // make a frame and preserve the caller's caller-save registers
@@ -332,7 +332,9 @@
   if (!has_argument) {
     call_offset = __ call_RT(noreg, noreg, target);
   } else {
-    call_offset = __ call_RT(noreg, noreg, target, r22, r23);
+    __ mov(c_rarg1, rscratch1);
+    __ mov(c_rarg2, rscratch2);
+    call_offset = __ call_RT(noreg, noreg, target);
   }
   OopMapSet* oop_maps = new OopMapSet();
   oop_maps->add_gc_map(call_offset, oop_map);
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -56,6 +56,15 @@
     }
     break;
   }
+  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
+  case T_BYTE:    __ load_signed_byte   (dst, src); break;
+  case T_CHAR:    __ load_unsigned_short(dst, src); break;
+  case T_SHORT:   __ load_signed_short  (dst, src); break;
+  case T_INT:     __ ldrw               (dst, src); break;
+  case T_LONG:    __ ldr                (dst, src); break;
+  case T_ADDRESS: __ ldr                (dst, src); break;
+  case T_FLOAT:   __ ldrs               (v0, src);  break;
+  case T_DOUBLE:  __ ldrd               (v0, src);  break;
   default: Unimplemented();
   }
 }
@@ -84,6 +93,18 @@
     }
     break;
   }
+  case T_BOOLEAN:
+    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
+    __ strb(val, dst);
+    break;
+  case T_BYTE:    __ strb(val, dst); break;
+  case T_CHAR:    __ strh(val, dst); break;
+  case T_SHORT:   __ strh(val, dst); break;
+  case T_INT:     __ strw(val, dst); break;
+  case T_LONG:    __ str (val, dst); break;
+  case T_ADDRESS: __ str (val, dst); break;
+  case T_FLOAT:   __ strs(v0,  dst); break;
+  case T_DOUBLE:  __ strd(v0,  dst); break;
   default: Unimplemented();
   }
 }
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -268,9 +268,6 @@
 void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                            Register result, Register index, Register tmp) {
   assert_different_registers(result, index);
-  // convert from field index to resolved_references() index and from
-  // word index to byte offset. Since this is a java object, it can be compressed
-  lslw(index, index, LogBytesPerHeapOop);
 
   get_constant_pool(result);
   // load pointer for resolved_references[] objArray
@@ -278,8 +275,8 @@
   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
   resolve_oop_handle(result, tmp);
   // Add in the index
-  add(result, result, index);
-  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
+  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
 }
 
 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
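
The rewrite above folds the array header offset into the element index so a
single scaled (uxtw) addressing mode can be used, instead of shifting the
index into a byte offset and adding the header as a separate displacement.
A minimal standalone C++ sketch of why the two computations agree (plain
integers stand in for registers; like the assembly, it assumes the header
size is a multiple of the element size):

    #include <cstdint>

    // Old sequence: shift index to a byte offset, add it to the base,
    // then load with the header offset as a displacement.
    uintptr_t old_address(uintptr_t base, uintptr_t index, int log, uintptr_t hdr) {
      index <<= log;                 // word index -> byte offset
      base += index;                 // add in the index
      return base + hdr;             // header applied on the load itself
    }

    // New sequence: fold the header into the element index up front,
    // then use one base + (index << log) addressing mode.
    uintptr_t new_address(uintptr_t base, uintptr_t index, int log, uintptr_t hdr) {
      index += hdr >> log;           // exact only when hdr % (1 << log) == 0
      return base + (index << log);
    }
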
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -2113,7 +2113,6 @@
 #endif
 
 void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
-  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
   Label done, not_weak;
   cbz(value, done);           // Use NULL as-is.
 
@@ -2121,15 +2120,15 @@
   tbz(r0, 0, not_weak);    // Test for jweak tag.
 
   // Resolve jweak.
-  bs->load_at(this, IN_ROOT | ON_PHANTOM_OOP_REF, T_OBJECT,
-                    value, Address(value, -JNIHandles::weak_tag_value), tmp, thread);
+  access_load_at(T_OBJECT, IN_ROOT | ON_PHANTOM_OOP_REF, value,
+                 Address(value, -JNIHandles::weak_tag_value), tmp, thread);
   verify_oop(value);
   b(done);
 
   bind(not_weak);
   // Resolve (untagged) jobject.
-  bs->load_at(this, IN_CONCURRENT_ROOT, T_OBJECT,
-                    value, Address(value, 0), tmp, thread);
+  access_load_at(T_OBJECT, IN_CONCURRENT_ROOT, value, Address(value, 0), tmp,
+                 thread);
   verify_oop(value);
   bind(done);
 }
@@ -3664,9 +3663,8 @@
 // ((OopHandle)result).resolve();
 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
   // OopHandle::resolve is an indirection.
-  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-  bs->load_at(this, IN_CONCURRENT_ROOT, T_OBJECT,
-                    result, Address(result, 0), tmp, rthread);
+  access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
+                 result, Address(result, 0), tmp, noreg);
 }
 
 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -141,7 +141,7 @@
   __ verify_oop(method_temp);
   __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
   __ verify_oop(method_temp);
-  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
+  __ access_load_at(T_ADDRESS, IN_HEAP, method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())), noreg, noreg);
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
@@ -340,7 +340,7 @@
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
       }
       __ load_heap_oop(rmethod, member_vmtarget);
-      __ ldr(rmethod, vmtarget_method);
+      __ access_load_at(T_ADDRESS, IN_HEAP, rmethod, vmtarget_method, noreg, noreg);
       break;
 
     case vmIntrinsics::_linkToStatic:
@@ -348,7 +348,7 @@
         verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
       }
       __ load_heap_oop(rmethod, member_vmtarget);
-      __ ldr(rmethod, vmtarget_method);
+      __ access_load_at(T_ADDRESS, IN_HEAP, rmethod, vmtarget_method, noreg, noreg);
       break;
 
     case vmIntrinsics::_linkToVirtual:
@@ -362,7 +362,7 @@
 
       // pick out the vtable index from the MemberName, and then we can discard it:
       Register temp2_index = temp2;
-      __ ldr(temp2_index, member_vmindex);
+      __ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg);
 
       if (VerifyMethodHandles) {
         Label L_index_ok;
@@ -394,7 +394,7 @@
       __ verify_klass_ptr(temp3_intf);
 
       Register rindex = rmethod;
-      __ ldr(rindex, member_vmindex);
+      __ access_load_at(T_ADDRESS, IN_HEAP, rindex, member_vmindex, noreg, noreg);
       if (VerifyMethodHandles) {
         Label L;
         __ cmpw(rindex, 0U);
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -760,8 +760,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1, Address(r0, r1, Address::uxtw(2)));
-  __ ldrw(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_INT)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
+  __ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
 }
 
 void TemplateTable::laload()
@@ -772,8 +772,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1, Address(r0, r1, Address::uxtw(3)));
-  __ ldr(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_LONG)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
+  __ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
 }
 
 void TemplateTable::faload()
@@ -784,8 +784,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1,  Address(r0, r1, Address::uxtw(2)));
-  __ ldrs(v0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
+  __ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
 }
 
 void TemplateTable::daload()
@@ -796,8 +796,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1,  Address(r0, r1, Address::uxtw(3)));
-  __ ldrd(v0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
+  __ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
 }
 
 void TemplateTable::aaload()
@@ -808,10 +808,9 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  int s = (UseCompressedOops ? 2 : 3);
-  __ lea(r1, Address(r0, r1, Address::uxtw(s)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
   do_oop_load(_masm,
-              Address(r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
+              Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
               r0,
               IN_HEAP_ARRAY);
 }
@@ -824,8 +823,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1,  Address(r0, r1, Address::uxtw(0)));
-  __ load_signed_byte(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_BYTE)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
+  __ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
 }
 
 void TemplateTable::caload()
@@ -836,8 +835,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1,  Address(r0, r1, Address::uxtw(1)));
-  __ load_unsigned_short(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
+  __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
 }
 
 // iload followed by caload frequent pair
@@ -853,8 +852,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1,  Address(r0, r1, Address::uxtw(1)));
-  __ load_unsigned_short(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
+  __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
 }
 
 void TemplateTable::saload()
@@ -865,8 +864,8 @@
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
-  __ lea(r1,  Address(r0, r1, Address::uxtw(1)));
-  __ load_signed_short(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_SHORT)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
+  __ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
 }
 
 void TemplateTable::iload(int n)
@@ -1059,9 +1058,8 @@
   // r1: index
   // r3: array
   index_check(r3, r1); // prefer index in r1
-  __ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
-  __ strw(r0, Address(rscratch1,
-                      arrayOopDesc::base_offset_in_bytes(T_INT)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
+  __ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
 }
 
 void TemplateTable::lastore() {
@@ -1072,9 +1070,8 @@
   // r1: index
   // r3: array
   index_check(r3, r1); // prefer index in r1
-  __ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
-  __ str(r0, Address(rscratch1,
-                      arrayOopDesc::base_offset_in_bytes(T_LONG)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
+  __ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
 }
 
 void TemplateTable::fastore() {
@@ -1085,9 +1082,8 @@
   // r1:  index
   // r3:  array
   index_check(r3, r1); // prefer index in r1
-  __ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
-  __ strs(v0, Address(rscratch1,
-                      arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
+  __ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
 }
 
 void TemplateTable::dastore() {
@@ -1098,9 +1094,8 @@
   // r1:  index
   // r3:  array
   index_check(r3, r1); // prefer index in r1
-  __ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
-  __ strd(v0, Address(rscratch1,
-                      arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
+  __ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
 }
 
 void TemplateTable::aastore() {
@@ -1111,10 +1106,10 @@
   __ ldr(r2, at_tos_p1()); // index
   __ ldr(r3, at_tos_p2()); // array
 
-  Address element_address(r4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
 
   index_check(r3, r2);     // kills r1
-  __ lea(r4, Address(r3, r2, Address::uxtw(UseCompressedOops? 2 : 3)));
+  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 
   // do array store check - check for NULL value first
   __ cbz(r0, is_null);
@@ -1176,9 +1171,8 @@
   __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
   __ bind(L_skip);
 
-  __ lea(rscratch1, Address(r3, r1, Address::uxtw(0)));
-  __ strb(r0, Address(rscratch1,
-                      arrayOopDesc::base_offset_in_bytes(T_BYTE)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
+  __ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
 }
 
 void TemplateTable::castore()
@@ -1190,9 +1184,8 @@
   // r1: index
   // r3: array
   index_check(r3, r1); // prefer index in r1
-  __ lea(rscratch1, Address(r3, r1, Address::uxtw(1)));
-  __ strh(r0, Address(rscratch1,
-                      arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
+  __ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
 }
 
 void TemplateTable::sastore()
@@ -2513,7 +2506,7 @@
   if (is_static) rc = may_not_rewrite;
 
   // btos
-  __ load_signed_byte(r0, field);
+  __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
   __ push(btos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2526,7 +2519,7 @@
   __ br(Assembler::NE, notBool);
 
   // ztos (same code as btos)
-  __ ldrsb(r0, field);
+  __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
   __ push(ztos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2550,7 +2543,7 @@
   __ cmp(flags, itos);
   __ br(Assembler::NE, notInt);
   // itos
-  __ ldrw(r0, field);
+  __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
   __ push(itos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2562,7 +2555,7 @@
   __ cmp(flags, ctos);
   __ br(Assembler::NE, notChar);
   // ctos
-  __ load_unsigned_short(r0, field);
+  __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
   __ push(ctos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2574,7 +2567,7 @@
   __ cmp(flags, stos);
   __ br(Assembler::NE, notShort);
   // stos
-  __ load_signed_short(r0, field);
+  __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
   __ push(stos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2586,7 +2579,7 @@
   __ cmp(flags, ltos);
   __ br(Assembler::NE, notLong);
   // ltos
-  __ ldr(r0, field);
+  __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
   __ push(ltos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2598,7 +2591,7 @@
   __ cmp(flags, ftos);
   __ br(Assembler::NE, notFloat);
   // ftos
-  __ ldrs(v0, field);
+  __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
   __ push(ftos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2612,7 +2605,7 @@
   __ br(Assembler::NE, notDouble);
 #endif
   // dtos
-  __ ldrd(v0, field);
+    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
   __ push(dtos);
   // Rewrite bytecode to be faster
   if (rc == may_rewrite) {
@@ -2750,7 +2743,7 @@
   {
     __ pop(btos);
     if (!is_static) pop_and_check_object(obj);
-    __ strb(r0, field);
+    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
     }
@@ -2765,8 +2758,7 @@
   {
     __ pop(ztos);
     if (!is_static) pop_and_check_object(obj);
-    __ andw(r0, r0, 0x1);
-    __ strb(r0, field);
+    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
     }
@@ -2797,7 +2789,7 @@
   {
     __ pop(itos);
     if (!is_static) pop_and_check_object(obj);
-    __ strw(r0, field);
+    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
     }
@@ -2812,7 +2804,7 @@
   {
     __ pop(ctos);
     if (!is_static) pop_and_check_object(obj);
-    __ strh(r0, field);
+    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
     }
@@ -2827,7 +2819,7 @@
   {
     __ pop(stos);
     if (!is_static) pop_and_check_object(obj);
-    __ strh(r0, field);
+    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
     }
@@ -2842,7 +2834,7 @@
   {
     __ pop(ltos);
     if (!is_static) pop_and_check_object(obj);
-    __ str(r0, field);
+    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
     }
@@ -2857,7 +2849,7 @@
   {
     __ pop(ftos);
     if (!is_static) pop_and_check_object(obj);
-    __ strs(v0, field);
+    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
     }
@@ -2874,7 +2866,7 @@
   {
     __ pop(dtos);
     if (!is_static) pop_and_check_object(obj);
-    __ strd(v0, field);
+    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
     }
@@ -3005,27 +2997,28 @@
     do_oop_store(_masm, field, r0, IN_HEAP);
     break;
   case Bytecodes::_fast_lputfield:
-    __ str(r0, field);
+    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
     break;
   case Bytecodes::_fast_iputfield:
-    __ strw(r0, field);
+    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
     break;
   case Bytecodes::_fast_zputfield:
-    __ andw(r0, r0, 0x1);  // boolean is true if LSB is 1
-    // fall through to bputfield
+    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
+    break;
   case Bytecodes::_fast_bputfield:
-    __ strb(r0, field);
+    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
     break;
   case Bytecodes::_fast_sputfield:
-    // fall through
+    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
+    break;
   case Bytecodes::_fast_cputfield:
-    __ strh(r0, field);
+    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
     break;
   case Bytecodes::_fast_fputfield:
-    __ strs(v0, field);
+    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
     break;
   case Bytecodes::_fast_dputfield:
-    __ strd(v0, field);
+    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
@@ -3098,25 +3091,25 @@
     __ verify_oop(r0);
     break;
   case Bytecodes::_fast_lgetfield:
-    __ ldr(r0, field);
+    __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
     break;
   case Bytecodes::_fast_igetfield:
-    __ ldrw(r0, field);
+    __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
     break;
   case Bytecodes::_fast_bgetfield:
-    __ load_signed_byte(r0, field);
+    __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
     break;
   case Bytecodes::_fast_sgetfield:
-    __ load_signed_short(r0, field);
+    __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
     break;
   case Bytecodes::_fast_cgetfield:
-    __ load_unsigned_short(r0, field);
+    __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
     break;
   case Bytecodes::_fast_fgetfield:
-    __ ldrs(v0, field);
+    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
     break;
   case Bytecodes::_fast_dgetfield:
-    __ ldrd(v0, field);
+    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
@@ -3161,14 +3154,14 @@
   __ null_check(r0);
   switch (state) {
   case itos:
-    __ ldrw(r0, Address(r0, r1, Address::lsl(0)));
+    __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
     break;
   case atos:
     do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
     __ verify_oop(r0);
     break;
   case ftos:
-    __ ldrs(v0, Address(r0, r1, Address::lsl(0)));
+    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
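
Every array load and store in templateTable_aarch64.cpp above now funnels
through access_load_at/access_store_at instead of raw ldr/str instructions,
so the active collector's BarrierSetAssembler can interpose on each heap
access. A rough standalone C++ model of that dispatch, using hypothetical
simplified types in place of the real HotSpot classes:

    #include <cstdio>

    // Stand-in for gc/shared's BarrierSetAssembler: the default just loads.
    struct BarrierSetAssembler {
      virtual void load_at(const char* access) {
        std::printf("plain load: %s\n", access);
      }
      virtual ~BarrierSetAssembler() = default;
    };

    // Stand-in for a GC-specific subclass that decorates the load.
    struct DecoratedBarrierSetAssembler : BarrierSetAssembler {
      void load_at(const char* access) override {
        std::printf("emit barrier, then ");
        BarrierSetAssembler::load_at(access);  // delegate for the actual load
      }
    };

    // Models MacroAssembler::access_load_at: look up the current collector's
    // assembler and delegate, rather than emitting ldr/ldrw/ldrs directly.
    void access_load_at(BarrierSetAssembler* bs, const char* access) {
      bs->load_at(access);
    }

    int main() {
      DecoratedBarrierSetAssembler gc;
      access_load_at(&gc, "T_INT array element");
      return 0;
    }
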
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -1367,9 +1367,12 @@
   // Bump total bytes allocated by this thread
   Label done;
 
-  ldr(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
+  // Borrow the Rthread for alloc counter
+  Register Ralloc = Rthread;
+  add(Ralloc, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
+  ldr(tmp, Address(Ralloc));
   adds(tmp, tmp, size_in_bytes);
-  str(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())), cc);
+  str(tmp, Address(Ralloc), cc);
   b(done, cc);
 
   // Increment the high word and store single-copy atomically (that is an unlikely scenario on typical embedded systems as it means >4GB has been allocated)
@@ -1387,14 +1390,17 @@
   }
   push(RegisterSet(low, high));
 
-  ldrd(low, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
+  ldrd(low, Address(Ralloc));
   adds(low, low, size_in_bytes);
   adc(high, high, 0);
-  strd(low, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
+  strd(low, Address(Ralloc));
 
   pop(RegisterSet(low, high));
 
   bind(done);
+
+  // Unborrow the Rthread
+  sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
 #endif // AARCH64
 }
 
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -1346,7 +1346,11 @@
       __ decode_heap_oop(dest->as_register());
     }
 #endif
-    __ verify_oop(dest->as_register());
+
+    // Load barrier has not yet been applied, so ZGC can't verify the oop here
+    if (!UseZGC) {
+      __ verify_oop(dest->as_register());
+    }
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
     if (UseCompressedClassPointers) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#endif // COMPILER1
+
+#undef __
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+static void call_vm(MacroAssembler* masm,
+                    address entry_point,
+                    Register arg0,
+                    Register arg1) {
+  // Setup arguments
+  if (arg1 == c_rarg0) {
+    if (arg0 == c_rarg1) {
+      __ xchgptr(c_rarg1, c_rarg0);
+    } else {
+      __ movptr(c_rarg1, arg1);
+      __ movptr(c_rarg0, arg0);
+    }
+  } else {
+    if (arg0 != c_rarg0) {
+      __ movptr(c_rarg0, arg0);
+    }
+    if (arg1 != c_rarg1) {
+      __ movptr(c_rarg1, arg1);
+    }
+  }
+
+  // Call VM
+  __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
+}
+
+void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
+                                   DecoratorSet decorators,
+                                   BasicType type,
+                                   Register dst,
+                                   Address src,
+                                   Register tmp1,
+                                   Register tmp_thread) {
+  if (!ZBarrierSet::barrier_needed(decorators, type)) {
+    // Barrier not needed
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+    return;
+  }
+
+  BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");
+
+  // Allocate scratch register
+  Register scratch = tmp1;
+  if (tmp1 == noreg) {
+    scratch = r12;
+    __ push(scratch);
+  }
+
+  assert_different_registers(dst, scratch);
+
+  Label done;
+
+  //
+  // Fast Path
+  //
+
+  // Load address
+  __ lea(scratch, src);
+
+  // Load oop at address
+  __ movptr(dst, Address(scratch, 0));
+
+  // Test address bad mask
+  __ testptr(dst, address_bad_mask_from_thread(r15_thread));
+  __ jcc(Assembler::zero, done);
+
+  //
+  // Slow path
+  //
+
+  // Save registers
+  __ push(rax);
+  __ push(rcx);
+  __ push(rdx);
+  __ push(rdi);
+  __ push(rsi);
+  __ push(r8);
+  __ push(r9);
+  __ push(r10);
+  __ push(r11);
+
+  // We may end up here from generate_native_wrapper, then the method may have
+  // floats as arguments, and we must spill them before calling the VM runtime
+  // leaf. From the interpreter all floats are passed on the stack.
+  assert(Argument::n_float_register_parameters_j == 8, "Assumption");
+  const int xmm_size = wordSize * 2;
+  const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
+  __ subptr(rsp, xmm_spill_size);
+  __ movdqu(Address(rsp, xmm_size * 7), xmm7);
+  __ movdqu(Address(rsp, xmm_size * 6), xmm6);
+  __ movdqu(Address(rsp, xmm_size * 5), xmm5);
+  __ movdqu(Address(rsp, xmm_size * 4), xmm4);
+  __ movdqu(Address(rsp, xmm_size * 3), xmm3);
+  __ movdqu(Address(rsp, xmm_size * 2), xmm2);
+  __ movdqu(Address(rsp, xmm_size * 1), xmm1);
+  __ movdqu(Address(rsp, xmm_size * 0), xmm0);
+
+  // Call VM
+  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
+
+  // Restore registers
+  __ movdqu(xmm0, Address(rsp, xmm_size * 0));
+  __ movdqu(xmm1, Address(rsp, xmm_size * 1));
+  __ movdqu(xmm2, Address(rsp, xmm_size * 2));
+  __ movdqu(xmm3, Address(rsp, xmm_size * 3));
+  __ movdqu(xmm4, Address(rsp, xmm_size * 4));
+  __ movdqu(xmm5, Address(rsp, xmm_size * 5));
+  __ movdqu(xmm6, Address(rsp, xmm_size * 6));
+  __ movdqu(xmm7, Address(rsp, xmm_size * 7));
+  __ addptr(rsp, xmm_spill_size);
+
+  __ pop(r11);
+  __ pop(r10);
+  __ pop(r9);
+  __ pop(r8);
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+
+  if (dst == rax) {
+    __ addptr(rsp, wordSize);
+  } else {
+    __ movptr(dst, rax);
+    __ pop(rax);
+  }
+
+  __ bind(done);
+
+  // Restore scratch register
+  if (tmp1 == noreg) {
+    __ pop(scratch);
+  }
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
+}
+
+#ifdef ASSERT
+
+void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
+                                    DecoratorSet decorators,
+                                    BasicType type,
+                                    Address dst,
+                                    Register src,
+                                    Register tmp1,
+                                    Register tmp2) {
+  BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");
+
+  // Verify oop store
+  if (type == T_OBJECT || type == T_ARRAY) {
+    // Note that src could be noreg, which means we
+    // are storing null and can skip verification.
+    if (src != noreg) {
+      Label done;
+      __ testptr(src, address_bad_mask_from_thread(r15_thread));
+      __ jcc(Assembler::zero, done);
+      __ stop("Verify oop store failed");
+      __ should_not_reach_here();
+      __ bind(done);
+    }
+  }
+
+  // Store value
+  BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
+}
+
+#endif // ASSERT
+
+void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
+                                              DecoratorSet decorators,
+                                              BasicType type,
+                                              Register src,
+                                              Register dst,
+                                              Register count) {
+  if (!ZBarrierSet::barrier_needed(decorators, type)) {
+    // Barrier not needed
+    return;
+  }
+
+  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");
+
+  // Save registers
+  __ pusha();
+
+  // Call VM
+  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
+
+  // Restore registers
+  __ popa();
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
+}
+
+void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
+                                                         Register jni_env,
+                                                         Register obj,
+                                                         Register tmp,
+                                                         Label& slowpath) {
+  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
+
+  // Resolve jobject
+  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
+
+  // Test address bad mask
+  __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
+  __ jcc(Assembler::notZero, slowpath);
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
+}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
+                                                         LIR_Opr ref) const {
+  __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
+}
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
+                                                         ZLoadBarrierStubC1* stub) const {
+  // Stub entry
+  __ bind(*stub->entry());
+
+  Register ref = stub->ref()->as_register();
+  Register ref_addr = noreg;
+
+  if (stub->ref_addr()->is_register()) {
+    // Address already in register
+    ref_addr = stub->ref_addr()->as_pointer_register();
+  } else {
+    // Load address into tmp register
+    ce->leal(stub->ref_addr(), stub->tmp(), stub->patch_code(), stub->patch_info());
+    ref_addr = stub->tmp()->as_pointer_register();
+  }
+
+  assert_different_registers(ref, ref_addr, noreg);
+
+  // Save rax unless it is the result register
+  if (ref != rax) {
+    __ push(rax);
+  }
+
+  // Setup arguments and call runtime stub
+  __ subptr(rsp, 2 * BytesPerWord);
+  ce->store_parameter(ref_addr, 1);
+  ce->store_parameter(ref, 0);
+  __ call(RuntimeAddress(stub->runtime_stub()));
+  __ addptr(rsp, 2 * BytesPerWord);
+
+  // Verify result
+  __ verify_oop(rax, "Bad oop");
+
+  // Restore rax unless it is the result register
+  if (ref != rax) {
+    __ movptr(ref, rax);
+    __ pop(rax);
+  }
+
+  // Stub exit
+  __ jmp(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+                                                                 DecoratorSet decorators) const {
+  // Enter and save registers
+  __ enter();
+  __ save_live_registers_no_oop_map(true /* save_fpu_registers */);
+
+  // Setup arguments
+  __ load_parameter(1, c_rarg1);
+  __ load_parameter(0, c_rarg0);
+
+  // Call VM
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+  // Restore registers and return
+  __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
+  __ leave();
+  __ ret(0);
+}
+
+#endif // COMPILER1
+
+#undef __
+#define __ cgen->assembler()->
+
+// Generates a register specific stub for calling
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+//
+// The raddr register serves as both input and output for this stub. When the stub is
+// called the raddr register contains the object field address (oop*) where the bad oop
+// was loaded from, which caused the slow path to be taken. On return from the stub the
+// raddr register contains the good/healed oop returned from
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
+  // Don't generate stub for invalid registers
+  if (raddr == rsp || raddr == r12 || raddr == r15) {
+    return NULL;
+  }
+
+  // Create stub name
+  char name[64];
+  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  os::snprintf(name, sizeof(name), "load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+
+  __ align(CodeEntryAlignment);
+  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
+  address start = __ pc();
+
+  // Save live registers
+  if (raddr != rax) {
+    __ push(rax);
+  }
+  if (raddr != rcx) {
+    __ push(rcx);
+  }
+  if (raddr != rdx) {
+    __ push(rdx);
+  }
+  if (raddr != rsi) {
+    __ push(rsi);
+  }
+  if (raddr != rdi) {
+    __ push(rdi);
+  }
+  if (raddr != r8) {
+    __ push(r8);
+  }
+  if (raddr != r9) {
+    __ push(r9);
+  }
+  if (raddr != r10) {
+    __ push(r10);
+  }
+  if (raddr != r11) {
+    __ push(r11);
+  }
+
+  // Setup arguments
+  if (c_rarg1 != raddr) {
+    __ movq(c_rarg1, raddr);
+  }
+  __ movq(c_rarg0, Address(raddr, 0));
+
+  // Call barrier function
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+  // Move result returned in rax to raddr, if needed
+  if (raddr != rax) {
+    __ movq(raddr, rax);
+  }
+
+  // Restore saved registers
+  if (raddr != r11) {
+    __ pop(r11);
+  }
+  if (raddr != r10) {
+    __ pop(r10);
+  }
+  if (raddr != r9) {
+    __ pop(r9);
+  }
+  if (raddr != r8) {
+    __ pop(r8);
+  }
+  if (raddr != rdi) {
+    __ pop(rdi);
+  }
+  if (raddr != rsi) {
+    __ pop(rsi);
+  }
+  if (raddr != rdx) {
+    __ pop(rdx);
+  }
+  if (raddr != rcx) {
+    __ pop(rcx);
+  }
+  if (raddr != rax) {
+    __ pop(rax);
+  }
+
+  __ ret(0);
+
+  return start;
+}
+
+#undef __
+
+void ZBarrierSetAssembler::barrier_stubs_init() {
+  // Load barrier stubs
+  int stub_code_size = 256 * 16; // Rough estimate of code size
+
+  ResourceMark rm;
+  BufferBlob* bb = BufferBlob::create("zgc_load_barrier_stubs", stub_code_size);
+  CodeBuffer buf(bb);
+  StubCodeGenerator cgen(&buf);
+
+  Register rr = as_Register(0);
+  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
+    _load_barrier_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_STRONG_OOP_REF);
+    _load_barrier_weak_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_WEAK_OOP_REF);
+    rr = rr->successor();
+  }
+}
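
The fast path emitted by ZBarrierSetAssembler::load_at above is the standard
ZGC load-barrier shape: load the oop, test its metadata bits against a
per-thread "bad mask", and call into the runtime only when the test fails.
A hedged C++ sketch of that logic (the helper names are illustrative
stand-ins, not the real HotSpot API):

    #include <cstdint>

    typedef uintptr_t oop;

    // Illustrative stand-ins: the real mask lives in ZThreadLocalData, and the
    // slow path is ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded().
    extern uintptr_t address_bad_mask;
    oop load_barrier_slow_path(oop o, oop* addr);  // heals *addr, returns good oop

    inline oop z_load_oop(oop* addr) {
      oop o = *addr;                          // movptr(dst, Address(scratch, 0))
      if ((o & address_bad_mask) != 0) {      // testptr + jcc(zero, done)
        o = load_barrier_slow_path(o, addr);  // spills + call_vm(..., dst, scratch)
      }
      return o;                               // bind(done)
    }
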
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+
+#ifdef COMPILER1
+class LIR_Assembler;
+class LIR_OprDesc;
+typedef LIR_OprDesc* LIR_Opr;
+class StubAssembler;
+class ZLoadBarrierStubC1;
+#endif // COMPILER1
+
+class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
+  address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
+  address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
+
+public:
+  ZBarrierSetAssembler() :
+    _load_barrier_slow_stub(),
+    _load_barrier_weak_slow_stub() {}
+
+  address load_barrier_slow_stub(Register reg) { return _load_barrier_slow_stub[reg->encoding()]; }
+  address load_barrier_weak_slow_stub(Register reg) { return _load_barrier_weak_slow_stub[reg->encoding()]; }
+
+  virtual void load_at(MacroAssembler* masm,
+                       DecoratorSet decorators,
+                       BasicType type,
+                       Register dst,
+                       Address src,
+                       Register tmp1,
+                       Register tmp_thread);
+
+#ifdef ASSERT
+  virtual void store_at(MacroAssembler* masm,
+                        DecoratorSet decorators,
+                        BasicType type,
+                        Address dst,
+                        Register src,
+                        Register tmp1,
+                        Register tmp2);
+#endif // ASSERT
+
+  virtual void arraycopy_prologue(MacroAssembler* masm,
+                                  DecoratorSet decorators,
+                                  BasicType type,
+                                  Register src,
+                                  Register dst,
+                                  Register count);
+
+  virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
+                                             Register jni_env,
+                                             Register obj,
+                                             Register tmp,
+                                             Label& slowpath);
+
+#ifdef COMPILER1
+  void generate_c1_load_barrier_test(LIR_Assembler* ce,
+                                     LIR_Opr ref) const;
+
+  void generate_c1_load_barrier_stub(LIR_Assembler* ce,
+                                     ZLoadBarrierStubC1* stub) const;
+
+  void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+                                             DecoratorSet decorators) const;
+#endif // COMPILER1
+
+  virtual void barrier_stubs_init();
+};
+
+#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -44,6 +44,9 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zThreadLocalData.hpp"
+#endif
 
 // Declaration and definition of StubGenerator (no .hpp file).
 // For a more detailed description of the stub routine structure
@@ -1026,6 +1029,15 @@
     // make sure object is 'reasonable'
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
+
+#if INCLUDE_ZGC
+    if (UseZGC) {
+      // Check if metadata bits indicate a bad oop
+      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+      __ jcc(Assembler::notZero, error);
+    }
+#endif
+
     // Check if the oop is in the right area of memory
     __ movptr(c_rarg2, rax);
     __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
--- a/src/hotspot/cpu/x86/x86.ad	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/x86/x86.ad	Tue Jun 12 15:14:22 2018 -0700
@@ -1067,6 +1067,138 @@
 #endif
                       );
 
+reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
+reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
+reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
+
+reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
+reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
+reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
+
+reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
+reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
+reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
+
+reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
+reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
+reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
+
+reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
+reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
+reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
+
+reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
+reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
+reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
+
+reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
+reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
+reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
+
+reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
+reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
+reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
+
+#ifdef _LP64
+
+reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
+reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
+reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
+
+reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
+reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
+reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
+
+reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
+reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
+reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
+
+reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
+reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
+reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
+
+reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
+reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
+reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
+
+reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
+reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
+reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
+
+reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
+reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
+reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
+
+reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
+reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
+reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
+
+reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
+reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
+reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
+
+reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
+reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
+reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
+
+reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
+reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
+reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
+
+reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
+reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
+reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
+
+reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
+reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
+reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
+
+reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
+reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
+reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
+
+reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
+reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
+reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
+
+reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
+reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
+reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
+
+reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
+reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
+reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
+
+reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
+reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
+reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
+
+reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
+reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
+reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
+
+reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
+reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
+reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
+
+reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
+reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
+reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
+
+reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
+reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
+reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
+
+reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
+reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
+reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
+
+reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
+reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
+reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
+
+#endif
+
 %}
 
 
--- a/src/hotspot/cpu/x86/x86_64.ad	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/cpu/x86/x86_64.ad	Tue Jun 12 15:14:22 2018 -0700
@@ -538,6 +538,12 @@
 
 %}
 
+source_hpp %{
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSetAssembler.hpp"
+#endif
+%}
+
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
@@ -4221,6 +4227,135 @@
   %}
 %}
 
+// Operands for bound floating pointer register arguments
+operand rxmm0() %{
+  constraint(ALLOC_IN_RC(xmm0_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm1() %{
+  constraint(ALLOC_IN_RC(xmm1_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm2() %{
+  constraint(ALLOC_IN_RC(xmm2_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm3() %{
+  constraint(ALLOC_IN_RC(xmm3_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm4() %{
+  constraint(ALLOC_IN_RC(xmm4_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm5() %{
+  constraint(ALLOC_IN_RC(xmm5_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm6() %{
+  constraint(ALLOC_IN_RC(xmm6_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm7() %{
+  constraint(ALLOC_IN_RC(xmm7_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm8() %{
+  constraint(ALLOC_IN_RC(xmm8_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm9() %{
+  constraint(ALLOC_IN_RC(xmm9_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm10() %{
+  constraint(ALLOC_IN_RC(xmm10_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm11() %{
+  constraint(ALLOC_IN_RC(xmm11_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm12() %{
+  constraint(ALLOC_IN_RC(xmm12_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm13() %{
+  constraint(ALLOC_IN_RC(xmm13_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm14() %{
+  constraint(ALLOC_IN_RC(xmm14_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm15() %{
+  constraint(ALLOC_IN_RC(xmm15_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm16() %{
+  constraint(ALLOC_IN_RC(xmm16_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm17() %{
+  constraint(ALLOC_IN_RC(xmm17_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm18() %{
+  constraint(ALLOC_IN_RC(xmm18_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm19() %{
+  constraint(ALLOC_IN_RC(xmm19_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm20() %{
+  constraint(ALLOC_IN_RC(xmm20_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm21() %{
+  constraint(ALLOC_IN_RC(xmm21_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm22() %{
+  constraint(ALLOC_IN_RC(xmm22_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm23() %{
+  constraint(ALLOC_IN_RC(xmm23_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm24() %{
+  constraint(ALLOC_IN_RC(xmm24_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm25() %{
+  constraint(ALLOC_IN_RC(xmm25_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm26() %{
+  constraint(ALLOC_IN_RC(xmm26_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm27() %{
+  constraint(ALLOC_IN_RC(xmm27_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm28() %{
+  constraint(ALLOC_IN_RC(xmm28_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm29() %{
+  constraint(ALLOC_IN_RC(xmm29_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm30() %{
+  constraint(ALLOC_IN_RC(xmm30_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm31() %{
+  constraint(ALLOC_IN_RC(xmm31_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
 
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used to simplify
@@ -11547,6 +11682,16 @@
   ins_pipe(ialu_cr_reg_mem);
 %}
 
+instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero)
+%{
+  match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero));
+
+  format %{ "testq   $src, $mem" %}
+  opcode(0x85);
+  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
+  ins_pipe(ialu_cr_reg_mem);
+%}
+
 // Manifest a CmpL result in an integer register.  Very painful.
 // This is the test to avoid.
 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
@@ -12320,6 +12465,223 @@
   ins_pipe(pipe_jmp);
 %}
 
+//
+// Execute ZGC load barrier (strong) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                     rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                               rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                               rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                               rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                               rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                               rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                               rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                               rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+//
+// Execute ZGC load barrier (weak) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                         rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                         rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                         rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                         rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                                   rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                   rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                   rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                                   rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                                   rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                                   rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                                   rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
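+
+// Summary of how the predicates above partition the CPU feature space, so
+// that exactly one variant per barrier flavor matches at code generation
+// time (an illustrative recap, not additional matching logic):
+//
+//   MaxVectorSize < 16                               -> *NoVec      (no vector kills)
+//   UseSSE > 0 && UseAVX <= 2 && MaxVectorSize >= 16 -> *XmmAndYmm  (kills XMM0-15)
+//   UseAVX == 3 && MaxVectorSize >= 16               -> *Zmm        (kills XMM0-31)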
 
 // ============================================================================
 // This name is KNOWN by the ADLC and cannot be changed.
--- a/src/hotspot/os/aix/os_aix.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/os/aix/os_aix.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -899,8 +899,12 @@
   // guard pages might not fit on the tiny stack created.
   int ret = pthread_attr_setstacksize(&attr, stack_size);
   if (ret != 0) {
-    log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
+    log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
+                            (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
                             stack_size / K);
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
   }
 
   // Save some cycles and a page by disabling OS guard pages where we have our own
--- a/src/hotspot/os/linux/os_linux.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/os/linux/os_linux.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -1988,6 +1988,8 @@
 
   os::Linux::print_full_memory_info(st);
 
+  os::Linux::print_proc_sys_info(st);
+
   os::Linux::print_container_info(st);
 }
 
@@ -2120,6 +2122,24 @@
   st->cr();
 }
 
+void os::Linux::print_proc_sys_info(outputStream* st) {
+  st->cr();
+  st->print_cr("/proc/sys/kernel/threads-max (system-wide limit on the number of threads):");
+  _print_ascii_file("/proc/sys/kernel/threads-max", st);
+  st->cr();
+  st->cr();
+
+  st->print_cr("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have):");
+  _print_ascii_file("/proc/sys/vm/max_map_count", st);
+  st->cr();
+  st->cr();
+
+  st->print_cr("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers):");
+  _print_ascii_file("/proc/sys/kernel/pid_max", st);
+  st->cr();
+  st->cr();
+}
+
 void os::Linux::print_full_memory_info(outputStream* st) {
   st->print("\n/proc/meminfo:\n");
   _print_ascii_file("/proc/meminfo", st);
@@ -3106,7 +3126,10 @@
 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
   int mincore_return_value;
   const size_t stripe = 1024;  // query this many pages each time
-  unsigned char vec[stripe];
+  unsigned char vec[stripe + 1];
+  // Set a guard byte one past the queried range; the assert below verifies
+  // that mincore() did not write past the expected number of pages
+  vec[stripe] = 'X';
+
   const size_t page_sz = os::vm_page_size();
   size_t pages = size / page_sz;
 
@@ -3118,7 +3141,9 @@
   int loops = (pages + stripe - 1) / stripe;
   int committed_pages = 0;
   address loop_base = start;
-  for (int index = 0; index < loops; index ++) {
+  bool found_range = false;
+
+  for (int index = 0; index < loops && !found_range; index ++) {
     assert(pages > 0, "Nothing to do");
     int pages_to_query = (pages >= stripe) ? stripe : pages;
     pages -= pages_to_query;
@@ -3133,12 +3158,14 @@
       return false;
     }
 
+    assert(vec[stripe] == 'X', "overflow guard");
     assert(mincore_return_value == 0, "Range must be valid");
     // Process this stripe
     for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
       if ((vec[vecIdx] & 0x01) == 0) { // not committed
         // End of current contiguous region
         if (committed_start != NULL) {
+          found_range = true;
           break;
         }
       } else { // committed
--- a/src/hotspot/os/linux/os_linux.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/os/linux/os_linux.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -113,6 +113,7 @@
   static void print_container_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
+  static void print_proc_sys_info(outputStream* st);
 
  public:
   static bool _stack_is_executable;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+#define OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+
+inline uintptr_t ZAddress::address(uintptr_t value) {
+  return value;
+}
+
+#endif // OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Filesystem names
+#define ZFILESYSTEM_TMPFS                "tmpfs"
+#define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"
+
+// Sysfs file for transparent huge page on tmpfs
+#define ZFILENAME_SHMEM_ENABLED          "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
+
+// Default mount points
+#define ZMOUNTPOINT_TMPFS                "/dev/shm"
+#define ZMOUNTPOINT_HUGETLBFS            "/hugepages"
+
+// Java heap filename
+#define ZFILENAME_HEAP                   "java_heap"
+
+// Support for building on older Linux systems
+#ifndef __NR_memfd_create
+#define __NR_memfd_create                319
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC                      0x0001U
+#endif
+#ifndef MFD_HUGETLB
+#define MFD_HUGETLB                      0x0004U
+#endif
+#ifndef O_CLOEXEC
+#define O_CLOEXEC                        02000000
+#endif
+#ifndef O_TMPFILE
+#define O_TMPFILE                        (020000000 | O_DIRECTORY)
+#endif
+
+// Filesystem types, see statfs(2)
+#ifndef TMPFS_MAGIC
+#define TMPFS_MAGIC                      0x01021994
+#endif
+#ifndef HUGETLBFS_MAGIC
+#define HUGETLBFS_MAGIC                  0x958458f6
+#endif
+
+static int z_memfd_create(const char *name, unsigned int flags) {
+  return syscall(__NR_memfd_create, name, flags);
+}
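+
+// Minimal standalone sketch of the same pattern (illustrative only, not
+// used by ZGC itself): create an anonymous memory file via the wrapper
+// above, size it, and map it.
+//
+//   const size_t size = 2 * 1024 * 1024;
+//   const int fd = z_memfd_create("example", MFD_CLOEXEC);
+//   if (fd != -1 && ftruncate(fd, size) == 0) {
+//     void* const addr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+//   }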
+
+ZBackingFile::ZBackingFile() :
+    _fd(-1),
+    _filesystem(0),
+    _initialized(false) {
+
+  // Create backing file
+  _fd = create_fd(ZFILENAME_HEAP);
+  if (_fd == -1) {
+    return;
+  }
+
+  // Get filesystem type
+  struct statfs statfs_buf;
+  if (fstatfs(_fd, &statfs_buf) == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
+    return;
+  }
+  _filesystem = statfs_buf.f_type;
+
+  // Make sure we're on a supported filesystem
+  if (!is_tmpfs() && !is_hugetlbfs()) {
+    log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  // Make sure the filesystem type matches requested large page type
+  if (ZLargePages::is_transparent() && !is_tmpfs()) {
+    log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
+    log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
+    log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
+    log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  // Successfully initialized
+  _initialized = true;
+}
+
+int ZBackingFile::create_mem_fd(const char* name) const {
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
+
+  // Create file
+  const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
+  const int fd = z_memfd_create(filename, MFD_CLOEXEC | extra_flags);
+  if (fd == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create memfd file (%s)",
+                        ((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
+    return -1;
+  }
+
+  log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
+
+  return fd;
+}
+
+int ZBackingFile::create_file_fd(const char* name) const {
+  const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
+  const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
+
+  // Find mountpoint
+  ZBackingPath path(filesystem, mountpoint);
+  if (path.get() == NULL) {
+    log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
+    return -1;
+  }
+
+  // Try to create an anonymous file using the O_TMPFILE flag. Note that this
+  // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
+  const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd_anon == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create anonymouns file in %s (%s)", path.get(),
+                        (err == EINVAL ? "Not supported" : err.to_string()));
+  } else {
+    // Get inode number for anonymous file
+    struct stat stat_buf;
+    if (fstat(fd_anon, &stat_buf) == -1) {
+      ZErrno err;
+      log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+      return -1;
+    }
+
+    log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
+
+    return fd_anon;
+  }
+
+  log_debug(gc, init)("Falling back to open/unlink");
+
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
+
+  // Create file
+  const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
+    return -1;
+  }
+
+  // Unlink file
+  if (unlink(filename) == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
+    return -1;
+  }
+
+  log_debug(gc, init)("Heap backed by file %s", filename);
+
+  return fd;
+}
+
+int ZBackingFile::create_fd(const char* name) const {
+  if (ZPath == NULL) {
+    // If the path is not explicitly specified, then we first try to create a memfd file
+    // instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
+    // not be supported at all (requires kernel >= 3.17), or it might not support large
+    // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
+    // file on an accessible tmpfs or hugetlbfs mount point.
+    const int fd = create_mem_fd(name);
+    if (fd != -1) {
+      return fd;
+    }
+
+    log_debug(gc, init)("Falling back to searching for an accessible moint point");
+  }
+
+  return create_file_fd(name);
+}
+
+bool ZBackingFile::is_initialized() const {
+  return _initialized;
+}
+
+int ZBackingFile::fd() const {
+  return _fd;
+}
+
+bool ZBackingFile::is_tmpfs() const {
+  return _filesystem == TMPFS_MAGIC;
+}
+
+bool ZBackingFile::is_hugetlbfs() const {
+  return _filesystem == HUGETLBFS_MAGIC;
+}
+
+bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
+  // If the shmem_enabled file exists and is readable then we
+  // know the kernel supports transparent huge pages for tmpfs.
+  return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
+}
+
+bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+  // Try first smaller part.
+  const size_t offset0 = offset;
+  const size_t length0 = align_up(length / 2, alignment);
+  if (!try_expand_tmpfs(offset0, length0, alignment)) {
+    return false;
+  }
+
+  // Try second smaller part.
+  const size_t offset1 = offset0 + length0;
+  const size_t length1 = length - length0;
+  if (!try_expand_tmpfs(offset1, length1, alignment)) {
+    return false;
+  }
+
+  return true;
+}
+
+bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+  assert(length > 0, "Invalid length");
+  assert(is_aligned(length, alignment), "Invalid length");
+
+  ZErrno err = posix_fallocate(_fd, offset, length);
+
+  if (err == EINTR && length > alignment) {
+    // Calling posix_fallocate() with a large length can take a long
+    // time to complete. When running profilers, such as VTune, this
+    // syscall will be constantly interrupted by signals. Expanding
+    // the file in smaller steps avoids this problem.
+    return try_split_and_expand_tmpfs(offset, length, alignment);
+  }
+
+  if (err) {
+    log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
+    return false;
+  }
+
+  return true;
+}
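+
+// Worked example (illustrative): a 512M expansion with 2M alignment first
+// calls posix_fallocate() on the full range; if that call is interrupted
+// (EINTR), it is retried as two 256M halves, each of which may split again
+// into 128M halves, and so on, until each piece either succeeds or is
+// reduced to the 2M alignment granule.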
+
+bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
+  assert(is_tmpfs(), "Wrong filesystem");
+  return try_expand_tmpfs(offset, length, os::vm_page_size());
+}
+
+bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
+  assert(is_hugetlbfs(), "Wrong filesystem");
+
+  // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
+  // Instead of posix_fallocate() we can use a well-known workaround,
+  // which involves truncating the file to the requested size and then
+  // trying to map it to verify that there are enough huge pages available to
+  // back it.
+  while (ftruncate(_fd, offset + length) == -1) {
+    ZErrno err;
+    if (err != EINTR) {
+      log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
+      return false;
+    }
+  }
+
+  // If we fail mapping during initialization, i.e. when we are pre-mapping
+  // the heap, then we wait and retry a few times before giving up. Otherwise
+  // there is a risk that running JVMs back-to-back will fail, since there
+  // is a delay between process termination and the huge pages owned by that
+  // process being returned to the huge page pool and made available for new
+  // allocations.
+  void* addr = MAP_FAILED;
+  const int max_attempts = 3;
+  for (int attempt = 1; attempt <= max_attempts; attempt++) {
+    addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+    if (addr != MAP_FAILED || is_init_completed()) {
+      // Mapping was successful or initialization phase has completed
+      break;
+    }
+
+    ZErrno err;
+    log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
+                  err.to_string(), attempt, max_attempts);
+
+    // Wait one second, then retry, in the hope that
+    // huge pages will be available by then.
+    sleep(1);
+  }
+
+  if (addr == MAP_FAILED) {
+    // Not enough huge pages left
+    ZErrno err;
+    log_error(gc)("Failed to map backing file (%s)", err.to_string());
+    return false;
+  }
+
+  // Successful mapping, unmap again. From now on the pages we mapped
+  // will be reserved for this file.
+  if (munmap(addr, length) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
+    return false;
+  }
+
+  return true;
+}
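+
+// The probe pattern above, reduced to its essentials (illustrative only):
+//
+//   ftruncate(fd, offset + length);  // grow the file to the requested size
+//   void* const p = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
+//   if (p != MAP_FAILED) {
+//     munmap(p, length);             // huge pages remain reserved for the file
+//   }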
+
+bool ZBackingFile::expand(size_t offset, size_t length) const {
+  return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+
+#include "memory/allocation.hpp"
+
+class ZBackingFile {
+private:
+  int      _fd;
+  uint64_t _filesystem;
+  bool     _initialized;
+
+  int create_mem_fd(const char* name) const;
+  int create_file_fd(const char* name) const;
+  int create_fd(const char* name) const;
+
+  bool is_tmpfs() const;
+  bool is_hugetlbfs() const;
+  bool tmpfs_supports_transparent_huge_pages() const;
+
+  bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+  bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+  bool expand_tmpfs(size_t offset, size_t length) const;
+
+  bool expand_hugetlbfs(size_t offset, size_t length) const;
+
+public:
+  ZBackingFile();
+
+  bool is_initialized() const;
+
+  int fd() const;
+  bool expand(size_t offset, size_t length) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "logging/log.hpp"
+
+#include <stdio.h>
+#include <unistd.h>
+
+// Mount information, see proc(5) for more details.
+#define PROC_SELF_MOUNTINFO        "/proc/self/mountinfo"
+
+ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
+  if (ZPath != NULL) {
+    // Use specified path
+    _path = strdup(ZPath);
+  } else {
+    // Find suitable path
+    _path = find_mountpoint(filesystem, preferred_path);
+  }
+}
+
+ZBackingPath::~ZBackingPath() {
+  free(_path);
+  _path = NULL;
+}
+
+char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
+  char* line_mountpoint = NULL;
+  char* line_filesystem = NULL;
+
+  // Parse line and return a newly allocated string containing the mountpoint if
+  // the line contains a matching filesystem and the mountpoint is accessible by
+  // the current user.
+  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
+      strcmp(line_filesystem, filesystem) != 0 ||
+      access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
+    // Not a matching or accessible filesystem
+    free(line_mountpoint);
+    line_mountpoint = NULL;
+  }
+
+  free(line_filesystem);
+
+  return line_mountpoint;
+}
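+
+// Example line from /proc/self/mountinfo (format per proc(5)). The sscanf
+// above skips the mount ID, parent ID, device numbers, and root fields,
+// captures the mount point ("/dev/shm"), skips the remaining fields up to
+// the "-" separator, and finally captures the filesystem type ("tmpfs"):
+//
+//   24 18 0:19 / /dev/shm rw,nosuid,nodev shared:9 - tmpfs tmpfs rw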
+
+void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
+  FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
+  if (fd == NULL) {
+    ZErrno err;
+    log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+    return;
+  }
+
+  char* line = NULL;
+  size_t length = 0;
+
+  while (getline(&line, &length, fd) != -1) {
+    char* const mountpoint = get_mountpoint(line, filesystem);
+    if (mountpoint != NULL) {
+      mountpoints->add(mountpoint);
+    }
+  }
+
+  free(line);
+  fclose(fd);
+}
+
+void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
+  ZArrayIterator<char*> iter(mountpoints);
+  for (char* mountpoint; iter.next(&mountpoint);) {
+    free(mountpoint);
+  }
+  mountpoints->clear();
+}
+
+char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
+  char* path = NULL;
+  ZArray<char*> mountpoints;
+
+  get_mountpoints(&mountpoints, filesystem);
+
+  if (mountpoints.size() == 0) {
+    // No filesystem found
+    log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
+  } else if (mountpoints.size() == 1) {
+    // One filesystem found
+    path = strdup(mountpoints.at(0));
+  } else if (mountpoints.size() > 1) {
+    // More than one filesystem found
+    ZArrayIterator<char*> iter(&mountpoints);
+    for (char* mountpoint; iter.next(&mountpoint);) {
+      if (!strcmp(mountpoint, preferred_mountpoint)) {
+        // Preferred mount point found
+        path = strdup(mountpoint);
+        break;
+      }
+    }
+
+    if (path == NULL) {
+      // Preferred mount point not found
+      log_error(gc, init)("More than one %s filesystem found:", filesystem);
+      ZArrayIterator<char*> iter2(&mountpoints);
+      for (char* mountpoint; iter2.next(&mountpoint);) {
+        log_error(gc, init)("  %s", mountpoint);
+      }
+    }
+  }
+
+  free_mountpoints(&mountpoints);
+
+  return path;
+}
+
+const char* ZBackingPath::get() const {
+  return _path;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.hpp"
+
+class ZBackingPath : public StackObj {
+private:
+  char* _path;
+
+  char* get_mountpoint(const char* line, const char* filesystem) const;
+  void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
+  void free_mountpoints(ZArray<char*>* mountpoints) const;
+  char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
+
+public:
+  ZBackingPath(const char* filesystem, const char* preferred_path);
+  ~ZBackingPath();
+
+  const char* get() const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
+
+uintptr_t ZAddressReservedStart() {
+  return ZAddressMetadataMarked0;
+}
+
+uintptr_t ZAddressReservedEnd() {
+  return ZAddressMetadataRemapped + ZAddressOffsetMax;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+
+//
+// Page Allocation Tiers
+// ---------------------
+//
+//  Page Type     Page Size     Object Size Limit     Object Alignment
+//  ------------------------------------------------------------------
+//  Small         2M            <= 256K               <MinObjAlignmentInBytes>
+//  Medium        32M           <= 4M                 4K
+//  Large         X*M           > 4M                  2M
+//  ------------------------------------------------------------------
+//
+//
+// Address Space & Pointer Layout
+// ------------------------------
+//
+//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+//  .                                .
+//  .                                .
+//  .                                .
+//  +--------------------------------+ 0x0000140000000000 (20TB)
+//  |         Remapped View          |
+//  +--------------------------------+ 0x0000100000000000 (16TB)
+//  |     (Reserved, but unused)     |
+//  +--------------------------------+ 0x00000c0000000000 (12TB)
+//  |         Marked1 View           |
+//  +--------------------------------+ 0x0000080000000000 (8TB)
+//  |         Marked0 View           |
+//  +--------------------------------+ 0x0000040000000000 (4TB)
+//  .                                .
+//  +--------------------------------+ 0x0000000000000000
+//
+//
+//   6                 4 4 4  4 4                                             0
+//   3                 7 6 5  2 1                                             0
+//  +-------------------+-+----+-----------------------------------------------+
+//  |00000000 00000000 0|0|1111|11 11111111 11111111 11111111 11111111 11111111|
+//  +-------------------+-+----+-----------------------------------------------+
+//  |                   | |    |
+//  |                   | |    * 41-0 Object Offset (42-bits, 4TB address space)
+//  |                   | |
+//  |                   | * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
+//  |                   |                                 0010 = Marked1      (Address view 8-12TB)
+//  |                   |                                 0100 = Remapped     (Address view 16-20TB)
+//  |                   |                                 1000 = Finalizable  (Address view N/A)
+//  |                   |
+//  |                   * 46-46 Unused (1-bit, always zero)
+//  |
+//  * 63-47 Fixed (17-bits, always zero)
+//
+
+const size_t    ZPlatformPageSizeSmallShift   = 21; // 2M
+
+const size_t    ZPlatformAddressOffsetBits    = 42; // 4TB
+
+const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
+
+const uintptr_t ZPlatformAddressSpaceStart    = (uintptr_t)1 << ZPlatformAddressOffsetBits;
+const uintptr_t ZPlatformAddressSpaceSize     = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
+
+const size_t    ZPlatformCacheLineSize        = 64;
+
+#endif // OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "runtime/globals.hpp"
+
+void ZLargePages::initialize_platform() {
+  if (UseLargePages) {
+    if (UseTransparentHugePages) {
+      _state = Transparent;
+    } else {
+      _state = Explicit;
+    }
+  } else {
+    _state = Disabled;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zCPU.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+
+#include <unistd.h>
+#include <sys/syscall.h>
+
+#ifndef MPOL_F_NODE
+#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
+#endif
+
+#ifndef MPOL_F_ADDR
+#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
+#endif
+
+static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
+  return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
+}
+
+void ZNUMA::initialize_platform() {
+  _enabled = UseNUMA;
+}
+
+uint32_t ZNUMA::count() {
+  if (!_enabled) {
+    // NUMA support not enabled
+    return 1;
+  }
+
+  return os::Linux::numa_max_node() + 1;
+}
+
+uint32_t ZNUMA::id() {
+  if (!_enabled) {
+    // NUMA support not enabled
+    return 0;
+  }
+
+  return os::Linux::get_node_by_cpu(ZCPU::id());
+}
+
+uint32_t ZNUMA::memory_id(uintptr_t addr) {
+  if (!_enabled) {
+    // NUMA support not enabled, assume everything belongs to node zero
+    return 0;
+  }
+
+  uint32_t id = (uint32_t)-1;
+
+  if (z_get_mempolicy(&id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
+    ZErrno err;
+    fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
+  }
+
+  assert(id < count(), "Invalid NUMA id");
+
+  return id;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zMemory.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// Support for building on older Linux systems
+#ifndef MADV_HUGEPAGE
+#define MADV_HUGEPAGE                        14
+#endif
+
+// Proc file entry for max map count
+#define ZFILENAME_PROC_MAX_MAP_COUNT         "/proc/sys/vm/max_map_count"
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
+    _manager(),
+    _file(),
+    _granule_size(granule_size) {
+
+  // Check and warn if max map count seems too low
+  check_max_map_count(max_capacity, granule_size);
+}
+
+void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
+  const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
+  FILE* const file = fopen(filename, "r");
+  if (file == NULL) {
+    // Failed to open file, skip check
+    log_debug(gc)("Failed to open %s", filename);
+    return;
+  }
+
+  size_t actual_max_map_count = 0;
+  const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
+  fclose(file);
+  if (result != 1) {
+    // Failed to read file, skip check
+    log_debug(gc)("Failed to read %s", filename);
+    return;
+  }
+
+  // The required max map count is impossible to calculate exactly since subsystems
+  // other than ZGC are also creating memory mappings, and we have no control over that.
+  // However, ZGC tends to create the most mappings and dominate the total count.
+  // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
+  // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
+  const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
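+  // For example (illustrative numbers only): a 128G max heap with 2M granules
+  // would require (128G / 2M) * 3 * 1.2 = 235929 mappings.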
+  if (actual_max_map_count < required_max_map_count) {
+    log_warning(gc)("The system limit on number of memory mappings "
+                    "per process might be too low for the given");
+    log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
+                    "adjust %s to allow for at least", max_capacity / M, filename);
+    log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
+                    "Continuing execution with the current limit could",
+                    required_max_map_count, actual_max_map_count);
+    log_warning(gc)("lead to a fatal error down the line, due to failed "
+                    "attempts to map memory.");
+  }
+}
+
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return _file.is_initialized();
+}
+
+bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
+  const size_t size = to - from;
+
+  // Expand
+  if (!_file.expand(from, size)) {
+    return false;
+  }
+
+  // Add expanded space to free list
+  _manager.free(from, size);
+
+  return true;
+}
+
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+  assert(is_aligned(size, _granule_size), "Invalid size");
+
+  ZPhysicalMemory pmem;
+
+  // Allocate segments
+  for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
+    const uintptr_t start = _manager.alloc_from_front(_granule_size);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+    pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
+  }
+
+  return pmem;
+}
+
+void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
+  const size_t nsegments = pmem.nsegments();
+
+  // Free segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment segment = pmem.segment(i);
+    _manager.free(segment.start(), segment.size());
+  }
+}
+
+void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
+  if (err == ENOMEM) {
+    fatal("Failed to map memory. Please check the system limit on number of "
+          "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
+  } else {
+    fatal("Failed to map memory (%s)", err.to_string());
+  }
+}
+
+void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
+  if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+  }
+}
+
+void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
+  const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
+  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
+  const size_t nsegments = pmem.nsegments();
+
+  // Map segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment segment = pmem.segment(i);
+    const size_t size = segment.size();
+    const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
+    if (res == MAP_FAILED) {
+      ZErrno err;
+      map_failed(err);
+    }
+
+    // Advise on use of transparent huge pages before touching it
+    if (ZLargePages::is_transparent()) {
+      advise_view(addr, size);
+    }
+
+    // NUMA interleave memory before touching it
+    ZNUMA::memory_interleave(addr, size);
+
+    if (pretouch) {
+      pretouch_view(addr, size);
+    }
+
+    addr += size;
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
+  // Note that we must keep the address space reservation intact and just detach
+  // the backing memory. For this reason we map a new anonymous, non-accessible
+  // and non-reserved page over the mapping instead of actually unmapping.
+  const size_t size = pmem.size();
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    map_failed(err);
+  }
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // From an NMT point of view we treat the first heap mapping (marked0) as committed
+  return ZAddress::marked0(offset);
+}
+
+void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
+  if (ZUnmapBadViews) {
+    // Only map the good view, for debugging only
+    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+  } else {
+    // Map all views
+    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+  if (ZUnmapBadViews) {
+    // Only unmap the good view, for debugging only
+    unmap_view(pmem, ZAddress::good(offset));
+  } else {
+    // Unmap all views
+    unmap_view(pmem, ZAddress::marked0(offset));
+    unmap_view(pmem, ZAddress::marked1(offset));
+    unmap_view(pmem, ZAddress::remapped(offset));
+  }
+}
+
+void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
+  assert(ZUnmapBadViews, "Should be enabled");
+  const uintptr_t addr_good = ZAddress::good(offset);
+  const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
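+  // When the good mask is one of the marked masks, the previously remapped
+  // view is the bad one to detach; otherwise the currently marked view is.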
+  // Map/Unmap views
+  map_view(pmem, addr_good, false /* pretouch */);
+  unmap_view(pmem, addr_bad);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zMemory.hpp"
+
+class ZErrno;
+class ZPhysicalMemory;
+
+class ZPhysicalMemoryBacking {
+private:
+  ZMemoryManager _manager;
+  ZBackingFile   _file;
+  const size_t   _granule_size;
+
+  void check_max_map_count(size_t max_capacity, size_t granule_size) const;
+  void map_failed(ZErrno err) const;
+
+  void advise_view(uintptr_t addr, size_t size) const;
+  void pretouch_view(uintptr_t addr, size_t size) const;
+  void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
+  void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
+
+public:
+  ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size);
+
+  bool is_initialized() const;
+
+  bool expand(size_t from, size_t to);
+  ZPhysicalMemory alloc(size_t size);
+  void free(ZPhysicalMemory pmem);
+
+  uintptr_t nmt_address(uintptr_t offset) const;
+
+  void map(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "logging/log.hpp"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+bool ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) {
+  // Reserve address space
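+  // Note: without MAP_FIXED the kernel is free to place the mapping at a
+  // different address; any result other than the requested start is treated
+  // as a reservation failure below.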
+  const uintptr_t actual_start = (uintptr_t)mmap((void*)start, size, PROT_NONE,
+                                                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (actual_start != start) {
+    log_error(gc)("Failed to reserve address space for Java heap");
+    return false;
+  }
+
+  return true;
+}
--- a/src/hotspot/share/adlc/formssel.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/adlc/formssel.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -2282,6 +2282,9 @@
   if (strcmp(name, "RegD") == 0) size = 2;
   if (strcmp(name, "RegL") == 0) size = 2;
   if (strcmp(name, "RegN") == 0) size = 1;
+  if (strcmp(name, "VecX") == 0) size = 4;
+  if (strcmp(name, "VecY") == 0) size = 8;
+  if (strcmp(name, "VecZ") == 0) size = 16;
   if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
   if (size == 0) {
     return false;
@@ -3509,6 +3512,7 @@
     "ClearArray",
     "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
     "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
+    "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
   };
   int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
   if( strcmp(_opType,"PrefetchAllocation")==0 )
--- a/src/hotspot/share/asm/codeBuffer.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/asm/codeBuffer.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -337,6 +337,7 @@
 
 class CodeBuffer: public StackObj {
   friend class CodeSection;
+  friend class StubCodeGenerator;
 
  private:
   // CodeBuffers must be allocated on the stack except for a single
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -756,6 +756,9 @@
 #endif // COMPILER1
 #ifdef COMPILER2
   case vmIntrinsics::_clone:
+#if INCLUDE_ZGC
+    if (UseZGC) return true;
+#endif
   case vmIntrinsics::_copyOf:
   case vmIntrinsics::_copyOfRange:
     // These intrinsics use both the objectcopy and the arraycopy
--- a/src/hotspot/share/code/codeCache.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/code/codeCache.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -1609,6 +1609,7 @@
 }
 
 void CodeCache::print_summary(outputStream* st, bool detailed) {
+  int full_count = 0;
   FOR_ALL_HEAPS(heap_iterator) {
     CodeHeap* heap = (*heap_iterator);
     size_t total = (heap->high_boundary() - heap->low_boundary());
@@ -1627,6 +1628,8 @@
                    p2i(heap->low_boundary()),
                    p2i(heap->high()),
                    p2i(heap->high_boundary()));
+
+      full_count += get_codemem_full_count(heap->code_blob_type());
     }
   }
 
@@ -1638,6 +1641,10 @@
                  "enabled" : Arguments::mode() == Arguments::_int ?
                  "disabled (interpreter mode)" :
                  "disabled (not enough contiguous free space left)");
+    st->print_cr("              stopped_count=%d, restarted_count=%d",
+                 CompileBroker::get_total_compiler_stopped_count(),
+                 CompileBroker::get_total_compiler_restarted_count());
+    st->print_cr(" full_count=%d", full_count);
   }
 }
 
--- a/src/hotspot/share/compiler/compileBroker.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -170,21 +170,23 @@
 elapsedTimer CompileBroker::_t_invalidated_compilation;
 elapsedTimer CompileBroker::_t_bailedout_compilation;
 
-int CompileBroker::_total_bailout_count          = 0;
-int CompileBroker::_total_invalidated_count      = 0;
-int CompileBroker::_total_compile_count          = 0;
-int CompileBroker::_total_osr_compile_count      = 0;
-int CompileBroker::_total_standard_compile_count = 0;
+int CompileBroker::_total_bailout_count            = 0;
+int CompileBroker::_total_invalidated_count        = 0;
+int CompileBroker::_total_compile_count            = 0;
+int CompileBroker::_total_osr_compile_count        = 0;
+int CompileBroker::_total_standard_compile_count   = 0;
+int CompileBroker::_total_compiler_stopped_count   = 0;
+int CompileBroker::_total_compiler_restarted_count = 0;
 
-int CompileBroker::_sum_osr_bytes_compiled       = 0;
-int CompileBroker::_sum_standard_bytes_compiled  = 0;
-int CompileBroker::_sum_nmethod_size             = 0;
-int CompileBroker::_sum_nmethod_code_size        = 0;
+int CompileBroker::_sum_osr_bytes_compiled         = 0;
+int CompileBroker::_sum_standard_bytes_compiled    = 0;
+int CompileBroker::_sum_nmethod_size               = 0;
+int CompileBroker::_sum_nmethod_code_size          = 0;
 
-long CompileBroker::_peak_compilation_time       = 0;
+long CompileBroker::_peak_compilation_time         = 0;
 
-CompileQueue* CompileBroker::_c2_compile_queue   = NULL;
-CompileQueue* CompileBroker::_c1_compile_queue   = NULL;
+CompileQueue* CompileBroker::_c2_compile_queue     = NULL;
+CompileQueue* CompileBroker::_c1_compile_queue     = NULL;
 
 
 
--- a/src/hotspot/share/compiler/compileBroker.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -219,6 +219,8 @@
   static int _total_native_compile_count;
   static int _total_osr_compile_count;
   static int _total_standard_compile_count;
+  static int _total_compiler_stopped_count;
+  static int _total_compiler_restarted_count;
   static int _sum_osr_bytes_compiled;
   static int _sum_standard_bytes_compiled;
   static int _sum_nmethod_size;
@@ -338,7 +340,15 @@
   static bool set_should_compile_new_jobs(jint new_state) {
     // Return success if the current caller set it
     jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
-    return (old == (1-new_state));
+    bool success = (old == (1-new_state));
+    if (success) {
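+      // Only the thread that actually flipped the flag (the cmpxchg winner)
+      // reaches this point, so each stop/restart transition is counted once.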
+      if (new_state == run_compilation) {
+        _total_compiler_restarted_count++;
+      } else {
+        _total_compiler_stopped_count++;
+      }
+    }
+    return success;
   }
 
   static void disable_compilation_forever() {
@@ -393,18 +403,20 @@
 
   static CompileLog* get_log(CompilerThread* ct);
 
-  static int get_total_compile_count() {          return _total_compile_count; }
-  static int get_total_bailout_count() {          return _total_bailout_count; }
-  static int get_total_invalidated_count() {      return _total_invalidated_count; }
-  static int get_total_native_compile_count() {   return _total_native_compile_count; }
-  static int get_total_osr_compile_count() {      return _total_osr_compile_count; }
-  static int get_total_standard_compile_count() { return _total_standard_compile_count; }
-  static int get_sum_osr_bytes_compiled() {       return _sum_osr_bytes_compiled; }
-  static int get_sum_standard_bytes_compiled() {  return _sum_standard_bytes_compiled; }
-  static int get_sum_nmethod_size() {             return _sum_nmethod_size;}
-  static int get_sum_nmethod_code_size() {        return _sum_nmethod_code_size; }
-  static long get_peak_compilation_time() {       return _peak_compilation_time; }
-  static long get_total_compilation_time() {      return _t_total_compilation.milliseconds(); }
+  static int get_total_compile_count() {            return _total_compile_count; }
+  static int get_total_bailout_count() {            return _total_bailout_count; }
+  static int get_total_invalidated_count() {        return _total_invalidated_count; }
+  static int get_total_native_compile_count() {     return _total_native_compile_count; }
+  static int get_total_osr_compile_count() {        return _total_osr_compile_count; }
+  static int get_total_standard_compile_count() {   return _total_standard_compile_count; }
+  static int get_total_compiler_stopped_count() {   return _total_compiler_stopped_count; }
+  static int get_total_compiler_restarted_count() { return _total_compiler_restarted_count; }
+  static int get_sum_osr_bytes_compiled() {         return _sum_osr_bytes_compiled; }
+  static int get_sum_standard_bytes_compiled() {    return _sum_standard_bytes_compiled; }
+  static int get_sum_nmethod_size() {               return _sum_nmethod_size;}
+  static int get_sum_nmethod_code_size() {          return _sum_nmethod_code_size; }
+  static long get_peak_compilation_time() {         return _peak_compilation_time; }
+  static long get_total_compilation_time() {        return _t_total_compilation.milliseconds(); }
 
   // Log that compilation profiling is skipped because metaspace is full.
   static void log_metaspace_failure();
--- a/src/hotspot/share/compiler/compilerDirectives.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -66,7 +66,8 @@
     cflags(VectorizeDebug,          uintx, 0, VectorizeDebug) \
     cflags(CloneMapDebug,           bool, false, CloneMapDebug) \
     cflags(IGVPrintLevel,           intx, PrintIdealGraphLevel, IGVPrintLevel) \
-    cflags(MaxNodeLimit,            intx, MaxNodeLimit, MaxNodeLimit)
+    cflags(MaxNodeLimit,            intx, MaxNodeLimit, MaxNodeLimit) \
+ZGC_ONLY(cflags(ZOptimizeLoadBarriers, bool, ZOptimizeLoadBarriers, ZOptimizeLoadBarriers))
 #else
   #define compilerdirectives_c2_flags(cflags)
 #endif
--- a/src/hotspot/share/compiler/disassembler.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/compiler/disassembler.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -155,6 +155,7 @@
   CodeStrings   _strings;
   outputStream* _output;
   address       _start, _end;
+  ptrdiff_t     _offset;
 
   char          _option_buf[512];
   char          _print_raw;
@@ -191,7 +192,8 @@
   void print_address(address value);
 
  public:
-  decode_env(CodeBlob* code, outputStream* output, CodeStrings c = CodeStrings());
+  decode_env(CodeBlob* code, outputStream* output,
+             CodeStrings c = CodeStrings(), ptrdiff_t offset = 0);
 
   address decode_instructions(address start, address end);
 
@@ -221,13 +223,15 @@
   const char* options() { return _option_buf; }
 };
 
-decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c) {
+decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c,
+                       ptrdiff_t offset) {
   memset(this, 0, sizeof(*this)); // Beware, this zeroes bits of fields.
   _output = output ? output : tty;
   _code = code;
   if (code != NULL && code->is_nmethod())
     _nm = (nmethod*) code;
   _strings.copy(c);
+  _offset = offset;
 
   // by default, output pc but not bytes:
   _print_pc       = true;
@@ -354,7 +358,7 @@
   if (cb != NULL) {
     cb->print_block_comment(st, p);
   }
-  _strings.print_block_comment(st, (intptr_t)(p - _start));
+  _strings.print_block_comment(st, (intptr_t)(p - _start + _offset));
   if (_print_pc) {
     st->print("  " PTR_FORMAT ": ", p2i(p));
   }
@@ -507,10 +511,11 @@
   env.decode_instructions(cb->code_begin(), cb->code_end());
 }
 
-void Disassembler::decode(address start, address end, outputStream* st, CodeStrings c) {
+void Disassembler::decode(address start, address end, outputStream* st, CodeStrings c,
+                          ptrdiff_t offset) {
   ttyLocker ttyl;
   if (!load_library())  return;
-  decode_env env(CodeCache::find_blob_unsafe(start), st, c);
+  decode_env env(CodeCache::find_blob_unsafe(start), st, c, offset);
   env.decode_instructions(start, end);
 }
 
--- a/src/hotspot/share/compiler/disassembler.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/compiler/disassembler.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,8 @@
   }
   static void decode(CodeBlob *cb,               outputStream* st = NULL);
   static void decode(nmethod* nm,                outputStream* st = NULL);
-  static void decode(address begin, address end, outputStream* st = NULL, CodeStrings c = CodeStrings());
+  static void decode(address begin, address end, outputStream* st = NULL,
+                     CodeStrings c = CodeStrings(), ptrdiff_t offset = 0);
 };
 
 #endif // SHARE_VM_COMPILER_DISASSEMBLER_HPP
--- a/src/hotspot/share/compiler/oopMap.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/compiler/oopMap.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -380,8 +380,12 @@
           continue;
         }
 #ifdef ASSERT
-        if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
-            !Universe::heap()->is_in_or_null(*loc)) {
+        // We cannot verify the oop here if we are using ZGC; the oop
+        // may be bad if a safepoint occurred between a load and its
+        // load barrier.
+        if (!UseZGC &&
+            ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
+             !Universe::heap()->is_in_or_null(*loc))) {
           tty->print_cr("# Found non oop pointer.  Dumping state at failure");
           // try to dump out some helpful debugging information
           trace_codeblob_maps(fr, reg_map);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonArguments.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/epsilon/epsilonCollectorPolicy.hpp"
+#include "gc/shared/gcArguments.inline.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/vm_version.hpp"
+#include "utilities/macros.hpp"
+
+size_t EpsilonArguments::conservative_max_heap_alignment() {
+  return UseLargePages ? os::large_page_size() : os::vm_page_size();
+}
+
+void EpsilonArguments::initialize() {
+  GCArguments::initialize();
+
+  assert(UseEpsilonGC, "Sanity");
+
+  // Forcefully exit when OOME is detected. Nothing we can do at that point.
+  if (FLAG_IS_DEFAULT(ExitOnOutOfMemoryError)) {
+    FLAG_SET_DEFAULT(ExitOnOutOfMemoryError, true);
+  }
+
+  if (EpsilonMaxTLABSize < MinTLABSize) {
+    warning("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize);
+    EpsilonMaxTLABSize = MinTLABSize;
+  }
+
+  if (!EpsilonElasticTLAB && EpsilonElasticTLABDecay) {
+    warning("Disabling EpsilonElasticTLABDecay because EpsilonElasticTLAB is disabled");
+    FLAG_SET_DEFAULT(EpsilonElasticTLABDecay, false);
+  }
+
+#ifdef COMPILER2
+  // Enable loop strip mining: there are still non-GC safepoints, no need to make it worse
+  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+    }
+  }
+#endif
+}
+
+CollectedHeap* EpsilonArguments::create_heap() {
+  return create_heap_with_policy<EpsilonHeap, EpsilonCollectorPolicy>();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
+#define SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CollectedHeap;
+
+class EpsilonArguments : public GCArguments {
+public:
+  virtual void initialize();
+  virtual size_t conservative_max_heap_alignment();
+  virtual CollectedHeap* create_heap();
+};
+
+#endif // SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/thread.hpp"
+#include "gc/epsilon/epsilonBarrierSet.hpp"
+#include "gc/epsilon/epsilonThreadLocalData.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "gc/shared/c1/barrierSetC1.hpp"
+#endif
+#ifdef COMPILER2
+#include "gc/shared/c2/barrierSetC2.hpp"
+#endif
+
+EpsilonBarrierSet::EpsilonBarrierSet() : BarrierSet(
+          make_barrier_set_assembler<BarrierSetAssembler>(),
+          make_barrier_set_c1<BarrierSetC1>(),
+          make_barrier_set_c2<BarrierSetC2>(),
+          BarrierSet::FakeRtti(BarrierSet::EpsilonBarrierSet)) {};
+
+void EpsilonBarrierSet::on_thread_create(Thread *thread) {
+  EpsilonThreadLocalData::create(thread);
+}
+
+void EpsilonBarrierSet::on_thread_destroy(Thread *thread) {
+  EpsilonThreadLocalData::destroy(thread);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_BARRIERSET_HPP
+#define SHARE_VM_GC_EPSILON_BARRIERSET_HPP
+
+#include "gc/shared/barrierSetAssembler.hpp"
+#include "gc/shared/barrierSet.hpp"
+
+// No interaction with the application is required for Epsilon, and therefore
+// the barrier set is empty.
+class EpsilonBarrierSet: public BarrierSet {
+  friend class VMStructs;
+
+public:
+  EpsilonBarrierSet();
+
+  virtual void print_on(outputStream *st) const {}
+
+  virtual void on_thread_create(Thread* thread);
+  virtual void on_thread_destroy(Thread* thread);
+
+  template <DecoratorSet decorators, typename BarrierSetT = EpsilonBarrierSet>
+  class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {};
+};
+
+template<>
+struct BarrierSet::GetName<EpsilonBarrierSet> {
+  static const BarrierSet::Name value = BarrierSet::EpsilonBarrierSet;
+};
+
+template<>
+struct BarrierSet::GetType<BarrierSet::EpsilonBarrierSet> {
+  typedef ::EpsilonBarrierSet type;
+};
+
+#endif // SHARE_VM_GC_EPSILON_BARRIERSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonCollectorPolicy.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
+#define SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
+
+#include "gc/shared/collectorPolicy.hpp"
+
+class EpsilonCollectorPolicy: public CollectorPolicy {
+protected:
+  virtual void initialize_alignments() {
+    size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+    size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
+    _space_alignment = align;
+    _heap_alignment  = align;
+  }
+
+public:
+  EpsilonCollectorPolicy() : CollectorPolicy() {};
+};
+
+#endif // SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/epsilon/epsilonMemoryPool.hpp"
+#include "gc/epsilon/epsilonThreadLocalData.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+
+jint EpsilonHeap::initialize() {
+  size_t align = _policy->heap_alignment();
+  size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
+  size_t max_byte_size  = align_up(_policy->max_heap_byte_size(), align);
+
+  // Initialize backing storage
+  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
+  _virtual_space.initialize(heap_rs, init_byte_size);
+
+  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
+  MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
+
+  initialize_reserved_region(reserved_region.start(), reserved_region.end());
+
+  _space = new ContiguousSpace();
+  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
+
+  // Precompute hot fields
+  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), EpsilonMaxTLABSize / HeapWordSize);
+  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
+  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
+  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
+
+  // Enable monitoring
+  _monitoring_support = new EpsilonMonitoringSupport(this);
+  _last_counter_update = 0;
+  _last_heap_print = 0;
+
+  // Install barrier set
+  BarrierSet::set_barrier_set(new EpsilonBarrierSet());
+
+  // All done, print out the configuration
+  if (init_byte_size != max_byte_size) {
+    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
+                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
+  } else {
+    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
+  }
+
+  if (UseTLAB) {
+    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
+    if (EpsilonElasticTLAB) {
+      log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
+    }
+    if (EpsilonElasticTLABDecay) {
+      log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
+    }
+  } else {
+    log_info(gc)("Not using TLAB allocation");
+  }
+
+  return JNI_OK;
+}
+
+void EpsilonHeap::post_initialize() {
+  CollectedHeap::post_initialize();
+}
+
+void EpsilonHeap::initialize_serviceability() {
+  _pool = new EpsilonMemoryPool(this);
+  _memory_manager.add_pool(_pool);
+}
+
+GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
+  GrowableArray<GCMemoryManager*> memory_managers(1);
+  memory_managers.append(&_memory_manager);
+  return memory_managers;
+}
+
+GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
+  GrowableArray<MemoryPool*> memory_pools(1);
+  memory_pools.append(_pool);
+  return memory_pools;
+}
+
+size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
+  // Return the max allocatable TLAB size, and let the allocation path figure out
+  // the actual TLAB allocation size.
+  return _max_tlab_size;
+}
+
+EpsilonHeap* EpsilonHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
+  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
+  return (EpsilonHeap*)heap;
+}
+
+HeapWord* EpsilonHeap::allocate_work(size_t size) {
+  HeapWord* res = _space->par_allocate(size);
+
+  while (res == NULL) {
+    // Allocation failed, attempt expansion, and retry:
+    MutexLockerEx ml(Heap_lock);
+
+    size_t space_left = max_capacity() - capacity();
+    size_t want_space = MAX2(size, EpsilonMinHeapExpand);
+
+    if (want_space < space_left) {
+      // Enough space to expand in bulk:
+      bool expand = _virtual_space.expand_by(want_space);
+      assert(expand, "Should be able to expand");
+    } else if (size < space_left) {
+      // No space to expand in bulk, and this allocation is still possible,
+      // take all the remaining space:
+      bool expand = _virtual_space.expand_by(space_left);
+      assert(expand, "Should be able to expand");
+    } else {
+      // No space left:
+      return NULL;
+    }
+
+    _space->set_end((HeapWord *) _virtual_space.high());
+    res = _space->par_allocate(size);
+  }
+
+  size_t used = _space->used();
+
+  // Allocation successful, update counters
+  {
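+    // The cmpxchg guard below admits only the thread that advances
+    // _last_counter_update, so counters are published roughly once per
+    // _step_counter_update of new allocation, without taking a lock.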
+    size_t last = _last_counter_update;
+    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
+      _monitoring_support->update_counters();
+    }
+  }
+
+  // ...and print the occupancy line, if needed
+  {
+    size_t last = _last_heap_print;
+    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
+      log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M (%.2f%%) committed, " SIZE_FORMAT "M (%.2f%%) used",
+                   max_capacity() / M,
+                   capacity() / M,
+                   capacity() * 100.0 / max_capacity(),
+                   used / M,
+                   used * 100.0 / max_capacity());
+    }
+  }
+
+  return res;
+}
+
+HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
+                                         size_t requested_size,
+                                         size_t* actual_size) {
+  Thread* thread = Thread::current();
+
+  // Defaults in case elastic paths are not taken
+  bool fits = true;
+  size_t size = requested_size;
+  size_t ergo_tlab = requested_size;
+  int64_t time = 0;
+
+  if (EpsilonElasticTLAB) {
+    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);
+
+    if (EpsilonElasticTLABDecay) {
+      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
+      time = (int64_t) os::javaTimeNanos();
+
+      assert(last_time <= time, "time should be monotonic");
+
+      // If the thread had not allocated recently, retract the ergonomic size.
+      // This conserves memory when the thread had an initial burst of allocations,
+      // and then started allocating only sporadically.
+      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
+        ergo_tlab = 0;
+        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
+      }
+    }
+
+    // If we can fit the allocation under current TLAB size, do so.
+    // Otherwise, we want to elastically increase the TLAB size.
+    fits = (requested_size <= ergo_tlab);
+    if (!fits) {
+      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
+    }
+  }
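+
+  // Illustrative example (assumed values): with EpsilonTLABElasticity == 1.1
+  // and a 1M ergonomic size, a request that does not fit grows the next TLAB
+  // to about 1.1M, still clamped by min_size and _max_tlab_size below.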
+
+  // Always honor boundaries
+  size = MAX2(min_size, MIN2(_max_tlab_size, size));
+
+  if (log_is_enabled(Trace, gc)) {
+    ResourceMark rm;
+    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
+                          "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
+                  thread->name(),
+                  requested_size * HeapWordSize / K,
+                  min_size * HeapWordSize / K,
+                  _max_tlab_size * HeapWordSize / K,
+                  ergo_tlab * HeapWordSize / K,
+                  size * HeapWordSize / K);
+  }
+
+  // All prepared, let's do it!
+  HeapWord* res = allocate_work(size);
+
+  if (res != NULL) {
+    // Allocation successful
+    *actual_size = size;
+    if (EpsilonElasticTLABDecay) {
+      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
+    }
+    if (EpsilonElasticTLAB && !fits) {
+      // If we requested expansion, this is our new ergonomic TLAB size
+      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
+    }
+  } else {
+    // Allocation failed, reset ergonomics to try to fit smaller TLABs
+    if (EpsilonElasticTLAB) {
+      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
+    }
+  }
+
+  return res;
+}
+
+HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
+  *gc_overhead_limit_was_exceeded = false;
+  return allocate_work(size);
+}
+
+void EpsilonHeap::collect(GCCause::Cause cause) {
+  log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
+  _monitoring_support->update_counters();
+}
+
+void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
+  log_info(gc)("Full GC request for \"%s\" is ignored", GCCause::to_string(gc_cause()));
+  _monitoring_support->update_counters();
+}
+
+void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
+  _space->safe_object_iterate(cl);
+}
+
+void EpsilonHeap::print_on(outputStream *st) const {
+  st->print_cr("Epsilon Heap");
+
+  // Cast away constness:
+  ((VirtualSpace)_virtual_space).print_on(st);
+
+  st->print_cr("Allocation space:");
+  _space->print_on(st);
+}
+
+void EpsilonHeap::print_tracing_info() const {
+  Log(gc) log;
+  size_t allocated_kb = used() / K;
+  log.info("Total allocated: " SIZE_FORMAT " KB",
+           allocated_kb);
+  log.info("Average allocation rate: " SIZE_FORMAT " KB/sec",
+           (size_t)(allocated_kb * NANOSECS_PER_SEC / os::elapsed_counter()));
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
+#define SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/softRefPolicy.hpp"
+#include "gc/shared/space.hpp"
+#include "services/memoryManager.hpp"
+#include "gc/epsilon/epsilonCollectorPolicy.hpp"
+#include "gc/epsilon/epsilonMonitoringSupport.hpp"
+#include "gc/epsilon/epsilonBarrierSet.hpp"
+#include "gc/epsilon/epsilon_globals.hpp"
+
+class EpsilonHeap : public CollectedHeap {
+  friend class VMStructs;
+private:
+  EpsilonCollectorPolicy* _policy;
+  SoftRefPolicy _soft_ref_policy;
+  EpsilonMonitoringSupport* _monitoring_support;
+  MemoryPool* _pool;
+  GCMemoryManager _memory_manager;
+  ContiguousSpace* _space;
+  VirtualSpace _virtual_space;
+  size_t _max_tlab_size;
+  size_t _step_counter_update;
+  size_t _step_heap_print;
+  int64_t _decay_time_ns;
+  volatile size_t _last_counter_update;
+  volatile size_t _last_heap_print;
+
+public:
+  static EpsilonHeap* heap();
+
+  EpsilonHeap(EpsilonCollectorPolicy* p) :
+          _policy(p),
+          _memory_manager("Epsilon Heap", "") {};
+
+  virtual Name kind() const {
+    return CollectedHeap::Epsilon;
+  }
+
+  virtual const char* name() const {
+    return "Epsilon";
+  }
+
+  virtual CollectorPolicy* collector_policy() const {
+    return _policy;
+  }
+
+  virtual SoftRefPolicy* soft_ref_policy() {
+    return &_soft_ref_policy;
+  }
+
+  virtual jint initialize();
+  virtual void post_initialize();
+  virtual void initialize_serviceability();
+
+  virtual GrowableArray<GCMemoryManager*> memory_managers();
+  virtual GrowableArray<MemoryPool*> memory_pools();
+
+  virtual size_t max_capacity() const { return _virtual_space.reserved_size();  }
+  virtual size_t capacity()     const { return _virtual_space.committed_size(); }
+  virtual size_t used()         const { return _space->used(); }
+
+  virtual bool is_in(const void* p) const {
+    return _space->is_in(p);
+  }
+
+  virtual bool is_scavengable(oop obj) {
+    // No GC is going to happen, therefore no objects ever move.
+    return false;
+  }
+
+  virtual bool is_maximal_no_gc() const {
+    // No GC is going to happen. Return "we are at max" when we are about to fail.
+    return used() == capacity();
+  }
+
+  // Allocation
+  HeapWord* allocate_work(size_t size);
+  virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
+  virtual HeapWord* allocate_new_tlab(size_t min_size,
+                                      size_t requested_size,
+                                      size_t* actual_size);
+
+  // TLAB allocation
+  virtual bool supports_tlab_allocation()           const { return true;           }
+  virtual size_t tlab_capacity(Thread* thr)         const { return capacity();     }
+  virtual size_t tlab_used(Thread* thr)             const { return used();         }
+  virtual size_t max_tlab_size()                    const { return _max_tlab_size; }
+  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+
+  virtual void collect(GCCause::Cause cause);
+  virtual void do_full_collection(bool clear_all_soft_refs);
+
+  // Heap walking support
+  virtual void safe_object_iterate(ObjectClosure* cl);
+  virtual void object_iterate(ObjectClosure* cl) {
+    safe_object_iterate(cl);
+  }
+
+  // No support for block parsing.
+  virtual HeapWord* block_start(const void* addr) const { return NULL;  }
+  virtual size_t block_size(const HeapWord* addr) const { return 0;     }
+  virtual bool block_is_obj(const HeapWord* addr) const { return false; }
+
+  // No GC threads
+  virtual void print_gc_threads_on(outputStream* st) const {}
+  virtual void gc_threads_do(ThreadClosure* tc) const {}
+
+  // No heap verification
+  virtual void prepare_for_verify() {}
+  virtual void verify(VerifyOption option) {}
+
+  virtual jlong millis_since_last_gc() {
+    // Report time since the VM start
+    return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
+  }
+
+  virtual void print_on(outputStream* st) const;
+  virtual void print_tracing_info() const;
+
+};
+
+#endif // SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/epsilon/epsilonMemoryPool.hpp"
+
+EpsilonMemoryPool::EpsilonMemoryPool(EpsilonHeap* heap) :
+        _heap(heap),
+        CollectedMemoryPool("Epsilon Heap",
+                            heap->capacity(),
+                            heap->max_capacity(),
+                            false) {
+  assert(UseEpsilonGC, "sanity");
+}
+
+MemoryUsage EpsilonMemoryPool::get_memory_usage() {
+  size_t initial_sz = initial_size();
+  size_t max_sz     = max_size();
+  size_t used       = used_in_bytes();
+  size_t committed  = committed_in_bytes();
+
+  return MemoryUsage(initial_sz, used, committed, max_sz);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
+#define SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
+
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "services/memoryPool.hpp"
+#include "services/memoryUsage.hpp"
+#include "utilities/macros.hpp"
+
+class EpsilonMemoryPool : public CollectedMemoryPool {
+private:
+  EpsilonHeap* _heap;
+
+public:
+  EpsilonMemoryPool(EpsilonHeap* heap);
+  size_t committed_in_bytes() { return _heap->capacity();     }
+  size_t used_in_bytes()      { return _heap->used();         }
+  size_t max_size()     const { return _heap->max_capacity(); }
+  MemoryUsage get_memory_usage();
+};
+
+#endif // SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonMonitoringSupport.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/shared/generationCounters.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspaceCounters.hpp"
+#include "memory/resourceArea.hpp"
+#include "services/memoryService.hpp"
+
+class EpsilonSpaceCounters: public CHeapObj<mtGC> {
+  friend class VMStructs;
+
+private:
+  PerfVariable* _capacity;
+  PerfVariable* _used;
+  char*         _name_space;
+
+public:
+  EpsilonSpaceCounters(const char* name,
+                 int ordinal,
+                 size_t max_size,
+                 size_t initial_capacity,
+                 GenerationCounters* gc) {
+    if (UsePerfData) {
+      EXCEPTION_MARK;
+      ResourceMark rm;
+
+      const char* cns = PerfDataManager::name_space(gc->name_space(), "space", ordinal);
+
+      _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
+      strcpy(_name_space, cns);
+
+      const char* cname = PerfDataManager::counter_name(_name_space, "name");
+      PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
+
+      cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
+      PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, (jlong)max_size, CHECK);
+
+      cname = PerfDataManager::counter_name(_name_space, "capacity");
+      _capacity = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, initial_capacity, CHECK);
+
+      cname = PerfDataManager::counter_name(_name_space, "used");
+      _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, (jlong) 0, CHECK);
+
+      cname = PerfDataManager::counter_name(_name_space, "initCapacity");
+      PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, initial_capacity, CHECK);
+    }
+  }
+
+  ~EpsilonSpaceCounters() {
+    if (_name_space != NULL) {
+      FREE_C_HEAP_ARRAY(char, _name_space);
+    }
+  }
+
+  inline void update_all(size_t capacity, size_t used) {
+    _capacity->set_value(capacity);
+    _used->set_value(used);
+  }
+};
+
+class EpsilonGenerationCounters : public GenerationCounters {
+private:
+  EpsilonHeap* _heap;
+public:
+  EpsilonGenerationCounters(EpsilonHeap* heap) :
+          GenerationCounters("Heap", 1, 1, 0, heap->max_capacity(), heap->capacity()),
+          _heap(heap)
+  {};
+
+  virtual void update_all() {
+    _current_size->set_value(_heap->capacity());
+  }
+};
+
+EpsilonMonitoringSupport::EpsilonMonitoringSupport(EpsilonHeap* heap) {
+  _heap_counters  = new EpsilonGenerationCounters(heap);
+  _space_counters = new EpsilonSpaceCounters("Heap", 0, heap->max_capacity(), 0, _heap_counters);
+}
+
+void EpsilonMonitoringSupport::update_counters() {
+  MemoryService::track_memory_usage();
+
+  if (UsePerfData) {
+    EpsilonHeap* heap = EpsilonHeap::heap();
+    size_t used = heap->used();
+    size_t capacity = heap->capacity();
+    _heap_counters->update_all();
+    _space_counters->update_all(capacity, used);
+    MetaspaceCounters::update_performance_counters();
+    CompressedClassSpaceCounters::update_performance_counters();
+  }
+}
+
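update_counters() is meant to be driven from the allocation path, but calling it on every allocation would be costly; the EpsilonUpdateCountersStep flag defined later in this changeset exists to throttle it. A hedged sketch of such a call site, with _monitoring_support and _last_counter_update as purely illustrative names:

  // Illustrative throttling on the allocation path (names are hypothetical):
  if (used() - _last_counter_update >= EpsilonUpdateCountersStep) {
    _last_counter_update = used();
    _monitoring_support->update_counters();
  }

Once published under the sun.gc.* namespace, the counters are visible to PerfData consumers such as jstat, while MemoryService::track_memory_usage() keeps the java.lang.management peak-usage view current.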
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
+#define SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
+
+#include "memory/allocation.hpp"
+
+class GenerationCounters;
+class EpsilonSpaceCounters;
+class EpsilonHeap;
+
+class EpsilonMonitoringSupport : public CHeapObj<mtGC> {
+private:
+  GenerationCounters*   _heap_counters;
+  EpsilonSpaceCounters* _space_counters;
+
+public:
+  EpsilonMonitoringSupport(EpsilonHeap* heap);
+  void update_counters();
+};
+
+#endif // SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonThreadLocalData.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
+#define SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
+
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+
+class EpsilonThreadLocalData {
+private:
+  size_t _ergo_tlab_size;
+  int64_t _last_tlab_time;
+
+  EpsilonThreadLocalData() :
+          _ergo_tlab_size(0),
+          _last_tlab_time(0) {}
+
+  static EpsilonThreadLocalData* data(Thread* thread) {
+    assert(UseEpsilonGC, "Sanity");
+    return thread->gc_data<EpsilonThreadLocalData>();
+  }
+
+public:
+  static void create(Thread* thread) {
+    new (data(thread)) EpsilonThreadLocalData();
+  }
+
+  static void destroy(Thread* thread) {
+    data(thread)->~EpsilonThreadLocalData();
+  }
+
+  static size_t ergo_tlab_size(Thread *thread) {
+    return data(thread)->_ergo_tlab_size;
+  }
+
+  static int64_t last_tlab_time(Thread *thread) {
+    return data(thread)->_last_tlab_time;
+  }
+
+  static void set_ergo_tlab_size(Thread *thread, size_t val) {
+    data(thread)->_ergo_tlab_size = val;
+  }
+
+  static void set_last_tlab_time(Thread *thread, int64_t time) {
+    data(thread)->_last_tlab_time = time;
+  }
+};
+
+#endif // SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
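These two fields back the elastic TLAB policy controlled by the EpsilonElasticTLAB* flags defined below: a thread's next TLAB grows geometrically while it keeps allocating and decays back after a quiet period. A rough sketch of that policy (an illustrative helper, not the exact code from this changeset):

  size_t next_tlab_size(Thread* t, size_t requested, int64_t now_ms) {
    size_t ergo = EpsilonThreadLocalData::ergo_tlab_size(t);
    int64_t last = EpsilonThreadLocalData::last_tlab_time(t);
    if (EpsilonElasticTLABDecay && last != 0 &&
        (now_ms - last > (int64_t) EpsilonTLABDecayTime)) {
      ergo = 0;  // thread went quiet; restart from the smallest TLAB
    }
    ergo = MAX2(requested, (size_t)(ergo * EpsilonTLABElasticity));
    ergo = MIN2(ergo, EpsilonMaxTLABSize);     // cap per-thread waste
    EpsilonThreadLocalData::set_ergo_tlab_size(t, ergo);
    EpsilonThreadLocalData::set_last_tlab_time(t, now_ms);
    return ergo;
  }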
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilon_globals.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_GLOBALS_HPP
+#define SHARE_VM_GC_EPSILON_GLOBALS_HPP
+
+#include "runtime/globals.hpp"
+//
+// Defines all global flags used by the Epsilon GC.
+//
+
+#define GC_EPSILON_FLAGS(develop,                                           \
+                    develop_pd,                                             \
+                    product,                                                \
+                    product_pd,                                             \
+                    diagnostic,                                             \
+                    diagnostic_pd,                                          \
+                    experimental,                                           \
+                    notproduct,                                             \
+                    manageable,                                             \
+                    product_rw,                                             \
+                    lp64_product,                                           \
+                    range,                                                  \
+                    constraint,                                             \
+                    writeable)                                              \
+                                                                            \
+  experimental(size_t, EpsilonPrintHeapSteps, 20,                           \
+          "Print heap occupancy stats with this number of steps. "          \
+          "0 turns the printing off.")                                      \
+          range(0, max_intx)                                                \
+                                                                            \
+  experimental(size_t, EpsilonUpdateCountersStep, 1 * M,                    \
+          "Update heap occupancy counters after allocating this much "      \
+          "memory. Higher values would make allocations faster at "         \
+          "the expense of lower resolution in heap counters.")              \
+          range(1, max_intx)                                                \
+                                                                            \
+  experimental(size_t, EpsilonMaxTLABSize, 4 * M,                           \
+          "Max TLAB size to use with Epsilon GC. Larger value improves "    \
+          "performance at the expense of per-thread memory waste. This "    \
+          "asks TLAB machinery to cap TLAB sizes at this value.")           \
+          range(1, max_intx)                                                \
+                                                                            \
+  experimental(bool, EpsilonElasticTLAB, true,                              \
+          "Use elastic policy to manage TLAB sizes. This conserves memory " \
+          "for non-actively allocating threads, even when they request "    \
+          "large TLABs for themselves. Active threads would experience "    \
+          "smaller TLABs until policy catches up.")                         \
+                                                                            \
+  experimental(bool, EpsilonElasticTLABDecay, true,                         \
+          "Use timed decays to shrik TLAB sizes. This conserves memory "    \
+          "for the threads that allocate in bursts of different sizes, "    \
+          "for example the small/rare allocations coming after the initial "\
+          "large burst.")                                                   \
+                                                                            \
+  experimental(double, EpsilonTLABElasticity, 1.1,                          \
+          "Multiplier to use when deciding on next TLAB size. Larger value "\
+          "improves performance at the expense of per-thread memory waste. "\
+          "Lower value improves memory footprint, but penalizes actively "  \
+          "allocating threads.")                                            \
+          range(1, max_intx)                                                \
+                                                                            \
+  experimental(size_t, EpsilonTLABDecayTime, 1000,                          \
+          "TLAB sizing policy decays to initial size after thread had not " \
+          "allocated for this long. Time is in milliseconds. Lower value "  \
+          "improves memory footprint, but penalizes actively allocating "   \
+          "threads.")                                                       \
+          range(1, max_intx)                                                \
+                                                                            \
+  experimental(size_t, EpsilonMinHeapExpand, 128 * M,                       \
+          "Min expansion step for heap. Larger value improves performance " \
+          "at the potential expense of memory waste.")                      \
+          range(1, max_intx)
+
+#endif // SHARE_VM_GC_EPSILON_GLOBALS_HPP
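All of these flags, like UseEpsilonGC itself, are experimental and therefore locked by default; they must be unlocked on the command line, e.g.:

  java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -XX:EpsilonMaxTLABSize=1m ...

The range(...) clauses are enforced during argument processing, so out-of-range values are rejected before the heap is initialized.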
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/vmStructs_epsilon.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_EPSILON_VMSTRUCTS_HPP
+#define SHARE_GC_EPSILON_VMSTRUCTS_HPP
+
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/shared/space.hpp"
+#include "memory/virtualspace.hpp"
+
+#define VM_STRUCTS_EPSILONGC(nonstatic_field,                       \
+                            volatile_nonstatic_field,               \
+                            static_field)                           \
+  nonstatic_field(EpsilonHeap, _virtual_space, VirtualSpace)        \
+  nonstatic_field(EpsilonHeap, _space, ContiguousSpace*)
+
+#define VM_TYPES_EPSILONGC(declare_type,                            \
+                          declare_toplevel_type,                    \
+                          declare_integer_type)                     \
+  declare_type(EpsilonHeap, CollectedHeap)
+
+#define VM_INT_CONSTANTS_EPSILONGC(declare_constant,                \
+                                  declare_constant_with_value)
+
+#endif // SHARE_GC_EPSILON_VMSTRUCTS_HPP
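These macros feed the Serviceability Agent's VMStructs tables: each nonstatic_field row records a field's type and offset so out-of-process tools can walk the heap layout. Roughly what one row expands to (simplified; the real expansion and entry layout live in vmStructs.cpp):

  // { typeName, fieldName, typeString, isStatic, offset, address }
  { "EpsilonHeap", "_space", "ContiguousSpace*",
    0 /* non-static */, offset_of(EpsilonHeap, _space), NULL }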
--- a/src/hotspot/share/gc/parallel/pcTasks.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/parallel/pcTasks.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -149,19 +149,16 @@
 void RefProcTaskExecutor::execute(ProcessTask& task)
 {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  uint parallel_gc_threads = heap->gc_task_manager()->workers();
   uint active_gc_threads = heap->gc_task_manager()->active_workers();
   OopTaskQueueSet* qset = ParCompactionManager::stack_array();
   ParallelTaskTerminator terminator(active_gc_threads, qset);
   GCTaskQueue* q = GCTaskQueue::create();
-  for(uint i=0; i<parallel_gc_threads; i++) {
+  for(uint i=0; i<active_gc_threads; i++) {
     q->enqueue(new RefProcTaskProxy(task, i));
   }
-  if (task.marks_oops_alive()) {
-    if (parallel_gc_threads>1) {
-      for (uint j=0; j<active_gc_threads; j++) {
-        q->enqueue(new StealMarkingTask(&terminator));
-      }
+  if (task.marks_oops_alive() && (active_gc_threads>1)) {
+    for (uint j=0; j<active_gc_threads; j++) {
+      q->enqueue(new StealMarkingTask(&terminator));
     }
   }
   PSParallelCompact::gc_task_manager()->execute_and_wait(q);
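These two hunks tie the reference-processing task count to the workers that will actually run it: the RefProcTaskProxy tasks are now enqueued per active_gc_threads rather than per configured worker, and psParallelCompact.cpp (next hunk) tells the reference processor the same degree via set_active_mt_degree(active_gc_threads) before handing it the task executor, keeping the MT degree and the task queue consistent.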
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -2112,7 +2112,10 @@
 
     ReferenceProcessorStats stats;
     ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
+
     if (ref_processor()->processing_is_mt()) {
+      ref_processor()->set_active_mt_degree(active_gc_threads);
+
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -103,17 +103,17 @@
   ~BarrierSet() { }
 
   template <class BarrierSetAssemblerT>
-  BarrierSetAssembler* make_barrier_set_assembler() {
+  static BarrierSetAssembler* make_barrier_set_assembler() {
     return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
   }
 
   template <class BarrierSetC1T>
-  BarrierSetC1* make_barrier_set_c1() {
+  static BarrierSetC1* make_barrier_set_c1() {
     return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
   }
 
   template <class BarrierSetC2T>
-  BarrierSetC2* make_barrier_set_c2() {
+  static BarrierSetC2* make_barrier_set_c2() {
     return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
   }
 
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -30,7 +30,9 @@
 // Do something for each concrete barrier set part of the build.
 #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f)          \
   f(CardTableBarrierSet)                             \
-  G1GC_ONLY(f(G1BarrierSet))
+  EPSILONGC_ONLY(f(EpsilonBarrierSet))               \
+  G1GC_ONLY(f(G1BarrierSet))                         \
+  ZGC_ONLY(f(ZBarrierSet))
 
 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
   f(ModRef)
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -30,8 +30,14 @@
 #include "gc/shared/modRefBarrierSet.inline.hpp"
 #include "gc/shared/cardTableBarrierSet.inline.hpp"
 
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/epsilonBarrierSet.hpp"
+#endif
 #if INCLUDE_G1GC
-#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
+#include "gc/g1/g1BarrierSet.inline.hpp"
+#endif
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSet.inline.hpp"
 #endif
 
 #endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
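Every guarded include in these hunks leans on the same utilities/macros.hpp idiom, one block per collector; simplified:

  #if INCLUDE_EPSILONGC
  #define EPSILONGC_ONLY(x)     x
  #define EPSILONGC_ONLY_ARG(x) x,
  #define NOT_EPSILONGC(x)
  #else
  #define EPSILONGC_ONLY(x)
  #define EPSILONGC_ONLY_ARG(x)
  #define NOT_EPSILONGC(x)      x
  #endif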
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -89,6 +89,7 @@
 //     CMSHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   ZCollectedHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
@@ -206,7 +207,9 @@
     Serial,
     Parallel,
     CMS,
-    G1
+    G1,
+    Epsilon,
+    Z
   };
 
   static inline size_t filler_array_max_size() {
--- a/src/hotspot/share/gc/shared/gcCause.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcCause.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -105,6 +105,21 @@
     case _dcmd_gc_run:
       return "Diagnostic Command";
 
+    case _z_timer:
+      return "Timer";
+
+    case _z_warmup:
+      return "Warmup";
+
+    case _z_allocation_rate:
+      return "Allocation Rate";
+
+    case _z_allocation_stall:
+      return "Allocation Stall";
+
+    case _z_proactive:
+      return "Proactive";
+
     case _last_gc_cause:
       return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";
 
--- a/src/hotspot/share/gc/shared/gcCause.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcCause.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -78,6 +78,12 @@
 
     _dcmd_gc_run,
 
+    _z_timer,
+    _z_warmup,
+    _z_allocation_rate,
+    _z_allocation_stall,
+    _z_proactive,
+
     _last_gc_cause
   };
 
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -31,6 +31,9 @@
 #if INCLUDE_CMSGC
 #include "gc/cms/cmsArguments.hpp"
 #endif
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/epsilonArguments.hpp"
+#endif
 #if INCLUDE_G1GC
 #include "gc/g1/g1Arguments.hpp"
 #endif
@@ -40,6 +43,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serialArguments.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zArguments.hpp"
+#endif
 
 struct SupportedGC {
   bool&               _flag;
@@ -52,18 +58,22 @@
 };
 
      CMSGC_ONLY(static CMSArguments      cmsArguments;)
+ EPSILONGC_ONLY(static EpsilonArguments  epsilonArguments;)
       G1GC_ONLY(static G1Arguments       g1Arguments;)
 PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
   SERIALGC_ONLY(static SerialArguments   serialArguments;)
+       ZGC_ONLY(static ZArguments        zArguments;)
 
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
 static const SupportedGC SupportedGCs[] = {
        CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments,      "concurrent mark sweep gc"))
+   EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,  epsilonArguments,  "epsilon gc"))
         G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments,       "g1 gc"))
   PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"))
   PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"))
     SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"))
+         ZGC_ONLY_ARG(SupportedGC(UseZGC,             CollectedHeap::Z,        zArguments,        "z gc"))
 };
 
 #define FOR_EACH_SUPPORTED_GC(var) \
@@ -88,10 +98,12 @@
   }
 
   NOT_CMSGC(     UNSUPPORTED_OPTION(UseConcMarkSweepGC));
+  NOT_EPSILONGC( UNSUPPORTED_OPTION(UseEpsilonGC);)
   NOT_G1GC(      UNSUPPORTED_OPTION(UseG1GC);)
   NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
   NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
   NOT_SERIALGC(  UNSUPPORTED_OPTION(UseSerialGC);)
+  NOT_ZGC(       UNSUPPORTED_OPTION(UseZGC);)
 }
 
 bool GCConfig::is_no_gc_selected() {
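The SupportedGCs table is the single registration point for a collector: flag, CollectedHeap::Name, GCArguments instance, and human-readable name travel together, and selection logic simply walks the table. A sketch of the consumption pattern, assuming the struct members elided from this hunk are named _arguments and _flag as shown:

  // Illustrative: find the arguments object for whichever Use*GC flag is set.
  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
    if (SupportedGCs[i]._flag) {
      return &SupportedGCs[i]._arguments;
    }
  }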
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -43,6 +43,10 @@
     return ParNew;
   }
 
+  if (UseZGC) {
+    return NA;
+  }
+
   return DefNew;
 }
 
@@ -59,6 +63,10 @@
     return ParallelOld;
   }
 
+  if (UseZGC) {
+    return Z;
+  }
+
   return SerialOld;
 }
 
--- a/src/hotspot/share/gc/shared/gcName.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -38,6 +38,8 @@
   ConcurrentMarkSweep,
   G1Old,
   G1Full,
+  Z,
+  NA,
   GCNameEndSentinel
 };
 
@@ -55,6 +57,8 @@
       case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
       case G1Old: return "G1Old";
       case G1Full: return "G1Full";
+      case Z: return "Z";
+      case NA: return "N/A";
       default: ShouldNotReachHere(); return NULL;
     }
   }
--- a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -40,6 +40,6 @@
 // should consider placing frequently accessed fields first in
 // T, so that field offsets relative to Thread are small, which
 // often allows for a more compact instruction encoding.
-typedef uint64_t GCThreadLocalData[14]; // 112 bytes
+typedef uint64_t GCThreadLocalData[18]; // 144 bytes
 
 #endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP
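Each collector reinterprets this opaque per-thread storage for its own data, as EpsilonThreadLocalData does above via Thread::gc_data&lt;T&gt;(); the array grows from 14 to 18 words (112 to 144 bytes) to make room for ZGC's per-thread state. The usual compile-time guard takes this form (illustrative):

  STATIC_ASSERT(sizeof(ZThreadLocalData) <= sizeof(GCThreadLocalData));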
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -29,6 +29,9 @@
 #if INCLUDE_CMSGC
 #include "gc/cms/cms_globals.hpp"
 #endif
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/epsilon_globals.hpp"
+#endif
 #if INCLUDE_G1GC
 #include "gc/g1/g1_globals.hpp"
 #endif
@@ -38,6 +41,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_globals.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/z_globals.hpp"
+#endif
 
 #define GC_FLAGS(develop,                                                   \
                  develop_pd,                                                \
@@ -70,6 +76,22 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
+  EPSILONGC_ONLY(GC_EPSILON_FLAGS(                                          \
+    develop,                                                                \
+    develop_pd,                                                             \
+    product,                                                                \
+    product_pd,                                                             \
+    diagnostic,                                                             \
+    diagnostic_pd,                                                          \
+    experimental,                                                           \
+    notproduct,                                                             \
+    manageable,                                                             \
+    product_rw,                                                             \
+    lp64_product,                                                           \
+    range,                                                                  \
+    constraint,                                                             \
+    writeable))                                                             \
+                                                                            \
   G1GC_ONLY(GC_G1_FLAGS(                                                    \
     develop,                                                                \
     develop_pd,                                                             \
@@ -118,6 +140,22 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
+  ZGC_ONLY(GC_Z_FLAGS(                                                      \
+    develop,                                                                \
+    develop_pd,                                                             \
+    product,                                                                \
+    product_pd,                                                             \
+    diagnostic,                                                             \
+    diagnostic_pd,                                                          \
+    experimental,                                                           \
+    notproduct,                                                             \
+    manageable,                                                             \
+    product_rw,                                                             \
+    lp64_product,                                                           \
+    range,                                                                  \
+    constraint,                                                             \
+    writeable))                                                             \
+                                                                            \
   /* gc */                                                                  \
                                                                             \
   product(bool, UseConcMarkSweepGC, false,                                  \
@@ -135,6 +173,12 @@
   product(bool, UseParallelOldGC, false,                                    \
           "Use the Parallel Old garbage collector")                         \
                                                                             \
+  experimental(bool, UseEpsilonGC, false,                                   \
+          "Use the Epsilon (no-op) garbage collector")                      \
+                                                                            \
+  experimental(bool, UseZGC, false,                                         \
+          "Use the Z garbage collector")                                    \
+                                                                            \
   product(uint, ParallelGCThreads, 0,                                       \
           "Number of parallel threads parallel gc will use")                \
           constraint(ParallelGCThreadsConstraintFunc,AfterErgo)             \
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -52,9 +52,7 @@
   assert(_next == NULL, "deleting attached block");
 }
 
-OopStorage::AllocateList::AllocateList(const AllocateEntry& (*get_entry)(const Block& block)) :
-  _head(NULL), _tail(NULL), _get_entry(get_entry)
-{}
+OopStorage::AllocateList::AllocateList() : _head(NULL), _tail(NULL) {}
 
 OopStorage::AllocateList::~AllocateList() {
   // ~OopStorage() empties its lists before destroying them.
@@ -68,8 +66,8 @@
     assert(_tail == NULL, "invariant");
     _head = _tail = &block;
   } else {
-    _get_entry(block)._next = old;
-    _get_entry(*old)._prev = &block;
+    block.allocate_entry()._next = old;
+    old->allocate_entry()._prev = &block;
     _head = &block;
   }
 }
@@ -80,14 +78,14 @@
     assert(_head == NULL, "invariant");
     _head = _tail = &block;
   } else {
-    _get_entry(*old)._next = &block;
-    _get_entry(block)._prev = old;
+    old->allocate_entry()._next = &block;
+    block.allocate_entry()._prev = old;
     _tail = &block;
   }
 }
 
 void OopStorage::AllocateList::unlink(const Block& block) {
-  const AllocateEntry& block_entry = _get_entry(block);
+  const AllocateEntry& block_entry = block.allocate_entry();
   const Block* prev_blk = block_entry._prev;
   const Block* next_blk = block_entry._next;
   block_entry._prev = NULL;
@@ -98,15 +96,15 @@
     _head = _tail = NULL;
   } else if (prev_blk == NULL) {
     assert(_head == &block, "invariant");
-    _get_entry(*next_blk)._prev = NULL;
+    next_blk->allocate_entry()._prev = NULL;
     _head = next_blk;
   } else if (next_blk == NULL) {
     assert(_tail == &block, "invariant");
-    _get_entry(*prev_blk)._next = NULL;
+    prev_blk->allocate_entry()._next = NULL;
     _tail = prev_blk;
   } else {
-    _get_entry(*next_blk)._prev = prev_blk;
-    _get_entry(*prev_blk)._next = next_blk;
+    next_blk->allocate_entry()._prev = prev_blk;
+    prev_blk->allocate_entry()._next = next_blk;
   }
 }
 
@@ -232,10 +230,6 @@
   const_cast<OopStorage* volatile&>(_owner) = NULL;
 }
 
-const OopStorage::AllocateEntry& OopStorage::Block::get_allocate_entry(const Block& block) {
-  return block._allocate_entry;
-}
-
 size_t OopStorage::Block::allocation_size() {
   // _data must be first member, so aligning Block aligns _data.
   STATIC_ASSERT(_data_pos == 0);
@@ -769,7 +763,7 @@
                        Mutex* active_mutex) :
   _name(dup_name(name)),
   _active_array(ActiveArray::create(initial_active_array_size)),
-  _allocate_list(&Block::get_allocate_entry),
+  _allocate_list(),
   _deferred_updates(NULL),
   _allocate_mutex(allocate_mutex),
   _active_mutex(active_mutex),
--- a/src/hotspot/share/gc/shared/oopStorage.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -178,14 +178,13 @@
   class AllocateList {
     const Block* _head;
     const Block* _tail;
-    const AllocateEntry& (*_get_entry)(const Block& block);
 
     // Noncopyable.
     AllocateList(const AllocateList&);
     AllocateList& operator=(const AllocateList&);
 
   public:
-    AllocateList(const AllocateEntry& (*get_entry)(const Block& block));
+    AllocateList();
     ~AllocateList();
 
     Block* head();
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -158,7 +158,7 @@
   Block& operator=(const Block&);
 
 public:
-  static const AllocateEntry& get_allocate_entry(const Block& block);
+  const AllocateEntry& allocate_entry() const;
 
   static size_t allocation_size();
   static size_t allocation_alignment_shift();
@@ -214,19 +214,19 @@
 }
 
 inline OopStorage::Block* OopStorage::AllocateList::prev(Block& block) {
-  return const_cast<Block*>(_get_entry(block)._prev);
+  return const_cast<Block*>(block.allocate_entry()._prev);
 }
 
 inline OopStorage::Block* OopStorage::AllocateList::next(Block& block) {
-  return const_cast<Block*>(_get_entry(block)._next);
+  return const_cast<Block*>(block.allocate_entry()._next);
 }
 
 inline const OopStorage::Block* OopStorage::AllocateList::prev(const Block& block) const {
-  return _get_entry(block)._prev;
+  return block.allocate_entry()._prev;
 }
 
 inline const OopStorage::Block* OopStorage::AllocateList::next(const Block& block) const {
-  return _get_entry(block)._next;
+  return block.allocate_entry()._next;
 }
 
 template<typename Closure>
@@ -296,7 +296,11 @@
   return SkipNullFn<F>(f);
 }
 
-// Inline Block accesses for use in iteration inner loop.
+// Inline Block accesses for use in iteration loops.
+
+inline const OopStorage::AllocateEntry& OopStorage::Block::allocate_entry() const {
+  return _allocate_entry;
+}
 
 inline void OopStorage::Block::check_index(unsigned index) const {
   assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
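The oopStorage refactoring works because AllocateList is intrusive and the entry's _prev/_next links are writable through a const reference (unlink() above assigns through a const AllocateEntry&, so they are evidently declared mutable): Blocks are handed around as const Block&, yet the list can still be rethreaded via the new allocate_entry() accessor. Dropping the stored _get_entry function pointer also shrinks AllocateList by a word and lets the accessor calls inline. Reduced to essentials (illustrative, not the HotSpot code):

  struct Node;

  struct Entry {
    mutable const Node* _prev;
    mutable const Node* _next;
  };

  struct Node {
    Entry _entry;
    const Entry& entry() const { return _entry; }  // plain accessor, no function pointer
  };

  void link(const Node& a, const Node& b) {
    a.entry()._next = &b;  // legal: the links are mutable
    b.entry()._prev = &a;
  }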
--- a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -35,6 +35,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_specialized_oop_closures.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/z_specialized_oop_closures.hpp"
+#endif
 
 // The following OopClosure types get specialized versions of
 // "oop_oop_iterate" that invoke the closures' do_oop methods
@@ -67,7 +70,8 @@
   SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f))       \
      CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f))      \
       G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f))       \
-      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
+      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))   \
+       ZGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(f))
 
 // We separate these out, because sometime the general one has
 // a different definition from the specialized ones, and sometimes it
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Tue Jun 12 14:53:57 2018 -0700
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -37,6 +37,9 @@
 #if INCLUDE_CMSGC
 #include "gc/cms/vmStructs_cms.hpp"
 #endif
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/vmStructs_epsilon.hpp"
+#endif
 #if INCLUDE_G1GC
 #include "gc/g1/vmStructs_g1.hpp"
 #endif
@@ -47,6 +50,9 @@
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/vmStructs_serial.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/vmStructs_z.hpp"
+#endif
 
 #define VM_STRUCTS_GC(nonstatic_field,                                                                                               \
                       volatile_nonstatic_field,                                                                                      \
@@ -55,6 +61,9 @@
   CMSGC_ONLY(VM_STRUCTS_CMSGC(nonstatic_field,                                                                                       \
                               volatile_nonstatic_field,                                                                              \
                               static_field))                                                                                         \
+  EPSILONGC_ONLY(VM_STRUCTS_EPSILONGC(nonstatic_field,                                                                               \
+                                      volatile_nonstatic_field,                                                                      \
+                                      static_field))                                                                                 \
   G1GC_ONLY(VM_STRUCTS_G1GC(nonstatic_field,                                                                                         \
                             volatile_nonstatic_field,                                                                                \
                             static_field))                                                                                           \
@@ -64,6 +73,10 @@
   SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                 \
                                     volatile_nonstatic_field,                                                                        \
                                     static_field))                                                                                   \
+  ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field,                                                                                           \
+                          volatile_nonstatic_field,                                                                                  \
+                          static_field))                                                                                             \
+                                                                                                                                     \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
@@ -153,6 +166,9 @@
   CMSGC_ONLY(VM_TYPES_CMSGC(declare_type,                                 \
                             declare_toplevel_type,                        \
                             declare_integer_type))                        \
+  EPSILONGC_ONLY(VM_TYPES_EPSILONGC(declare_type,                         \
+                                    declare_toplevel_type,                \
+                                    declare_integer_type))                \
   G1GC_ONLY(VM_TYPES_G1GC(declare_type,                                   \
                           declare_toplevel_type,                          \
                           declare_integer_type))                          \
@@ -162,6 +178,10 @@
   SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type,                           \
                                   declare_toplevel_type,                  \
                                   declare_integer_type))                  \
+  ZGC_ONLY(VM_TYPES_ZGC(declare_type,                                     \
+                        declare_toplevel_type,                            \
+                        declare_integer_type))                            \
+                                                                          \
   /******************************************/                            \
   /* Generation and space hierarchies       */                            \
   /* (needed for run-time type information) */                            \
@@ -225,12 +245,16 @@
                             declare_constant_with_value)                    \
   CMSGC_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant,                       \
                                     declare_constant_with_value))           \
+  EPSILONGC_ONLY(VM_INT_CONSTANTS_EPSILONGC(declare_constant,               \
+                                            declare_constant_with_value))   \
   G1GC_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant,                         \
                                   declare_constant_with_value))             \
   PARALLELGC_ONLY(VM_INT_CONSTANTS_PARALLELGC(declare_constant,             \
                                               declare_constant_with_value)) \
   SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant,                 \
                                           declare_constant_with_value))     \
+  ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant,                           \
+                                declare_constant_with_value))               \
                                                                             \
   /********************************************/                            \
   /* Generation and Space Hierarchy Constants */                            \
@@ -274,5 +298,7 @@
   declare_constant(Generation::LogOfGenGrain)                               \
   declare_constant(Generation::GenGrain)                                    \
 
+#define VM_LONG_CONSTANTS_GC(declare_constant)                              \
+  ZGC_ONLY(VM_LONG_CONSTANTS_ZGC(declare_constant))
 
 #endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_LIR.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "c1/c1_CodeStubs.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "utilities/macros.hpp"
+
+ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
+    _decorators(access.decorators()),
+    _ref_addr(access.resolved_addr()),
+    _ref(ref),
+    _tmp(LIR_OprFact::illegalOpr),
+    _patch_info(access.patch_emit_info()),
+    _runtime_stub(runtime_stub) {
+
+  // Allocate tmp register if needed
+  if (!_ref_addr->is_register()) {
+    assert(_ref_addr->is_address(), "Must be an address");
+    if (_ref_addr->as_address_ptr()->index()->is_valid() ||
+        _ref_addr->as_address_ptr()->disp() != 0) {
+      // Has index or displacement, need tmp register to load address into
+      _tmp = access.gen()->new_pointer_register();
+    } else {
+      // No index or displacement, address available in base register
+      _ref_addr = _ref_addr->as_address_ptr()->base();
+    }
+  }
+
+  assert(_ref->is_register(), "Must be a register");
+  assert(_ref_addr->is_register() != _tmp->is_register(), "Only one should be a register");
+}
+
+DecoratorSet ZLoadBarrierStubC1::decorators() const {
+  return _decorators;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref() const {
+  return _ref;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref_addr() const {
+  return _ref_addr;
+}
+
+LIR_Opr ZLoadBarrierStubC1::tmp() const {
+  return _tmp;
+}
+
+LIR_PatchCode ZLoadBarrierStubC1::patch_code() const {
+  return (_decorators & C1_NEEDS_PATCHING) != 0 ? lir_patch_normal : lir_patch_none;
+}
+
+CodeEmitInfo*& ZLoadBarrierStubC1::patch_info() {
+  return _patch_info;
+}
+
+address ZLoadBarrierStubC1::runtime_stub() const {
+  return _runtime_stub;
+}
+
+void ZLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
+  if (_patch_info != NULL) {
+    visitor->do_slow_case(_patch_info);
+  } else {
+    visitor->do_slow_case();
+  }
+
+  visitor->do_input(_ref_addr);
+  visitor->do_output(_ref);
+
+  if (_tmp->is_valid()) {
+    visitor->do_temp(_tmp);
+  }
+}
+
+void ZLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
+  ZBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
+}
+
+#ifndef PRODUCT
+void ZLoadBarrierStubC1::print_name(outputStream* out) const {
+  out->print("ZLoadBarrierStubC1");
+}
+#endif // PRODUCT
+
+class LIR_OpZLoadBarrierTest : public LIR_Op {
+private:
+  LIR_Opr _opr;
+
+public:
+  LIR_OpZLoadBarrierTest(LIR_Opr opr) :
+      LIR_Op(),
+      _opr(opr) {}
+
+  virtual void visit(LIR_OpVisitState* state) {
+    state->do_input(_opr);
+  }
+
+  virtual void emit_code(LIR_Assembler* ce) {
+    ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
+  }
+
+  virtual void print_instr(outputStream* out) const {
+    _opr->print(out);
+    out->print(" ");
+  }
+
+#ifndef PRODUCT
+  virtual const char* name() const {
+    return "lir_z_load_barrier_test";
+  }
+#endif // PRODUCT
+};
+
+static bool barrier_needed(LIRAccess& access) {
+  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+ZBarrierSetC1::ZBarrierSetC1() :
+    _load_barrier_on_oop_field_preloaded_runtime_stub(NULL),
+    _load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {}
+
+address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
+  assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
+  //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
+
+  if ((decorators & ON_WEAK_OOP_REF) != 0) {
+    return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+  } else {
+    return _load_barrier_on_oop_field_preloaded_runtime_stub;
+  }
+}
+
+#ifdef ASSERT
+#define __ access.gen()->lir(__FILE__, __LINE__)->
+#else
+#define __ access.gen()->lir()->
+#endif
+
+void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
+  // Fast path
+  __ append(new LIR_OpZLoadBarrierTest(result));
+
+  // Slow path
+  const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
+  CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub);
+  __ branch(lir_cond_notEqual, T_ADDRESS, stub);
+  __ branch_destination(stub->continuation());
+}
+
+#undef __
+
+void ZBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+  BarrierSetC1::load_at_resolved(access, result);
+
+  if (barrier_needed(access)) {
+    load_barrier(access, result);
+  }
+}
+
+static void pre_load_barrier(LIRAccess& access) {
+  DecoratorSet decorators = access.decorators();
+
+  // Downgrade access to MO_UNORDERED
+  decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
+
+  // Remove C1_WRITE_ACCESS
+  decorators = (decorators & ~C1_WRITE_ACCESS);
+
+  // Generate synthetic load at
+  access.gen()->access_load_at(decorators,
+                               access.type(),
+                               access.base().item(),
+                               access.offset().opr(),
+                               access.gen()->new_register(access.type()),
+                               NULL /* patch_emit_info */,
+                               NULL /* load_emit_info */);
+}
+
+LIR_Opr ZBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+  if (barrier_needed(access)) {
+    pre_load_barrier(access);
+  }
+
+  return BarrierSetC1::atomic_xchg_at_resolved(access, value);
+}
+
+LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+  if (barrier_needed(access)) {
+    pre_load_barrier(access);
+  }
+
+  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+}
+
+class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
+private:
+  const DecoratorSet _decorators;
+
+public:
+  ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
+      _decorators(decorators) {}
+
+  virtual OopMapSet* generate_code(StubAssembler* sasm) {
+    ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
+    return NULL;
+  }
+};
+
+static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
+  ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map */, &cl);
+  return code_blob->code_begin();
+}
+
+void ZBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
+  _load_barrier_on_oop_field_preloaded_runtime_stub =
+    generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
+  _load_barrier_on_weak_oop_field_preloaded_runtime_stub =
+    generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
+}
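load_barrier() above shows the two-part shape of every Z load barrier in C1: LIR_OpZLoadBarrierTest emits the inline fast path, and the lir_cond_notEqual branch falls into the out-of-line ZLoadBarrierStubC1, which invokes one of the two runtime stubs generated at the bottom of the file (strong vs. weak references). On x86 the inline test emitted by the platform ZBarrierSetAssembler amounts to roughly the following (a sketch; the real emission lives in the CPU-specific generate_c1_load_barrier_test):

  // test  ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())
  // jnz   stub->entry()   ; slow path only when the reference carries a bad color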
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+#define SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+
+#include "c1/c1_CodeStubs.hpp"
+#include "c1/c1_IR.hpp"
+#include "c1/c1_LIR.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
+#include "oops/accessDecorators.hpp"
+
+class ZLoadBarrierStubC1 : public CodeStub {
+private:
+  DecoratorSet  _decorators;
+  LIR_Opr       _ref_addr;
+  LIR_Opr       _ref;
+  LIR_Opr       _tmp;
+  CodeEmitInfo* _patch_info;
+  address       _runtime_stub;
+
+public:
+  ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
+
+  DecoratorSet decorators() const;
+  LIR_Opr ref() const;
+  LIR_Opr ref_addr() const;
+  LIR_Opr tmp() const;
+  LIR_PatchCode patch_code() const;
+  CodeEmitInfo*& patch_info();
+  address runtime_stub() const;
+
+  virtual void emit_code(LIR_Assembler* ce);
+  virtual void visit(LIR_OpVisitState* visitor);
+
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const;
+#endif // PRODUCT
+};
+
+class ZBarrierSetC1 : public BarrierSetC1 {
+private:
+  address _load_barrier_on_oop_field_preloaded_runtime_stub;
+  address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+
+  address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
+  void load_barrier(LIRAccess& access, LIR_Opr result) const;
+
+protected:
+  virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
+  virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
+  virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+public:
+  ZBarrierSetC1();
+
+  virtual void generate_c1_runtime_stubs(BufferBlob* blob);
+};
+
+#endif // SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,1480 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "opto/compile.hpp"
+#include "opto/castnode.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/idealKit.hpp"
+#include "opto/loopnode.hpp"
+#include "opto/macro.hpp"
+#include "opto/node.hpp"
+#include "opto/type.hpp"
+#include "utilities/macros.hpp"
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+
+ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
+  : _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8,  0, NULL)) {}
+
+int ZBarrierSetC2State::load_barrier_count() const {
+  return _load_barrier_nodes->length();
+}
+
+void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
+  assert(!_load_barrier_nodes->contains(n), "duplicate entry in expand list");
+  _load_barrier_nodes->append(n);
+}
+
+void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
+  // This function may be called twice for a node, so check
+  // that the node is in the array before attempting to remove it.
+  if (_load_barrier_nodes->contains(n)) {
+    _load_barrier_nodes->remove(n);
+  }
+}
+
+LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
+  return _load_barrier_nodes->at(idx);
+}
+
+void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
+  return new(comp_arena) ZBarrierSetC2State(comp_arena);
+}
+
+ZBarrierSetC2State* ZBarrierSetC2::state() const {
+  return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
+}
+
+bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
+  return node->is_LoadBarrier();
+}
+
+void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
+  if (node->is_LoadBarrier()) {
+    state()->add_load_barrier_node(node->as_LoadBarrier());
+  }
+}
+
+void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
+  if (node->is_LoadBarrier()) {
+    state()->remove_load_barrier_node(node->as_LoadBarrier());
+  }
+}
+
+void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
+  // Remove useless LoadBarrier nodes
+  ZBarrierSetC2State* s = state();
+  for (int i = s->load_barrier_count()-1; i >= 0; i--) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+    if (!useful.member(n)) {
+      unregister_potential_barrier_node(n);
+    }
+  }
+}
+
+void ZBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
+  if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
+    worklist.push(node);
+  }
+}
+
+void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
+  // Look for dominating barriers on the same address only once all
+  // other loop opts are over: loop opts may cause a safepoint to be
+  // inserted between a barrier and its dominating barrier.
+  Compile* C = Compile::current();
+  ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
+  ZBarrierSetC2State* s = bs->state();
+  if (s->load_barrier_count() >= 2) {
+    Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
+    PhaseIdealLoop ideal_loop(igvn, true, false, true);
+    if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
+  }
+}
+
+void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
+  // Permanent temporary workaround
+  // Load barriers may have non-obvious dead uses keeping them alive during parsing. The use is
+  // removed by RemoveUseless (after parsing, before optimize) but the barriers won't be added to
+  // the worklist. Unless we add them explicitly they are not guaranteed to end up there.
+  ZBarrierSetC2State* s = state();
+
+  for (int i = 0; i < s->load_barrier_count(); i++) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+    worklist->push(n);
+  }
+}
+
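+// The runtime leaf call signature for the load barrier slow path: it takes
+// the loaded oop and the address it was loaded from, and returns the healed oop.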
+const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
+  const Type** fields;
+
+  // Create input types (domain)
+  fields = TypeTuple::fields(2);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
+  fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+
+  // Create result type (range)
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
+// == LoadBarrierNode ==
+
+LoadBarrierNode::LoadBarrierNode(Compile* C,
+                                 Node* c,
+                                 Node* mem,
+                                 Node* val,
+                                 Node* adr,
+                                 bool weak,
+                                 bool writeback,
+                                 bool oop_reload_allowed) :
+    MultiNode(Number_of_Inputs),
+    _weak(weak),
+    _writeback(writeback),
+    _oop_reload_allowed(oop_reload_allowed) {
+  init_req(Control, c);
+  init_req(Memory, mem);
+  init_req(Oop, val);
+  init_req(Address, adr);
+  init_req(Similar, C->top());
+
+  init_class_id(Class_LoadBarrier);
+  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+  bs->register_potential_barrier_node(this);
+}
+
+const Type *LoadBarrierNode::bottom_type() const {
+  const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+  Node* in_oop = in(Oop);
+  floadbarrier[Control] = Type::CONTROL;
+  floadbarrier[Memory] = Type::MEMORY;
+  floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
+  return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
+
+const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
+  const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+  const Type* val_t = phase->type(in(Oop));
+  floadbarrier[Control] = Type::CONTROL;
+  floadbarrier[Memory] = Type::MEMORY;
+  floadbarrier[Oop] = val_t;
+  return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
+
+bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
+  if (phase != NULL) {
+    return phase->is_dominator(d, n);
+  }
+
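+  // No dominator information available - fall back to a cheap, bounded
+  // linear walk up the control flow.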
+  for (int i = 0; i < 10 && n != NULL; i++) {
+    n = IfNode::up_one_dom(n, linear_only);
+    if (n == d) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
+  Node* val = in(LoadBarrierNode::Oop);
+  if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
+    LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
+    assert(lb->in(Address) == in(Address), "");
+    // The load barrier on the Similar edge dominates, so if it now has the same Oop input it can replace this barrier.
+    if (lb->in(Oop) == in(Oop)) {
+      return lb;
+    }
+    // Follow the chain of load barriers through the Similar edges
+    while (!lb->in(Similar)->is_top()) {
+      lb = lb->in(Similar)->in(0)->as_LoadBarrier();
+      assert(lb->in(Address) == in(Address), "");
+    }
+    if (lb != in(Similar)->in(0)) {
+      return lb;
+    }
+  }
+  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+    Node* u = val->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
+        return u->as_LoadBarrier();
+      }
+    }
+  }
+
+  if (ZVerifyLoadBarriers || can_be_eliminated()) {
+    return NULL;
+  }
+
+  if (!look_for_similar) {
+    return NULL;
+  }
+
+  Node* addr = in(LoadBarrierNode::Address);
+  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+    Node* u = addr->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
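+        // Walk the control flow between the two barriers and verify that no
+        // path contains a safepoint - a safepoint could change the bad mask
+        // and invalidate the dominating barrier.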
+        ResourceMark rm;
+        Unique_Node_List wq;
+        wq.push(in(LoadBarrierNode::Control));
+        bool ok = true;
+        bool dom_found = false;
+        for (uint next = 0; next < wq.size(); ++next) {
+          Node *n = wq.at(next);
+          if (n->is_top()) {
+            return NULL;
+          }
+          assert(n->is_CFG(), "");
+          if (n->is_SafePoint()) {
+            ok = false;
+            break;
+          }
+          if (n == u) {
+            dom_found = true;
+            continue;
+          }
+          if (n->is_Region()) {
+            for (uint i = 1; i < n->req(); i++) {
+              Node* m = n->in(i);
+              if (m != NULL) {
+                wq.push(m);
+              }
+            }
+          } else {
+            Node* m = n->in(0);
+            if (m != NULL) {
+              wq.push(m);
+            }
+          }
+        }
+        if (ok) {
+          assert(dom_found, "");
+          return u->as_LoadBarrier();
+        }
+        break;
+      }
+    }
+  }
+
+  return NULL;
+}
+
+void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
+  // A change to this barrier may affect dominated barriers, so re-push those
+  Node* val = in(LoadBarrierNode::Oop);
+
+  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+    Node* u = val->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
+      Node* this_ctrl = in(Control);
+      Node* other_ctrl = u->in(Control);
+      if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+        igvn->_worklist.push(u);
+      }
+    }
+  }
+
+  Node* addr = in(LoadBarrierNode::Address);
+  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+    Node* u = addr->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
+      Node* this_ctrl = in(Control);
+      Node* other_ctrl = u->in(Control);
+      if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+        igvn->_worklist.push(u);
+      }
+    }
+  }
+}
+
+Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
+  if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
+    return this;
+  }
+
+  LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
+  if (dominating_barrier != NULL) {
+    assert(dominating_barrier->in(Oop) == in(Oop), "");
+    return dominating_barrier;
+  }
+
+  return this;
+}
+
+Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (remove_dead_region(phase, can_reshape)) {
+    return this;
+  }
+
+  Node* val = in(Oop);
+  Node* mem = in(Memory);
+  Node* ctrl = in(Control);
+  Node* adr = in(Address);
+  assert(val->Opcode() != Op_LoadN, "");
+
+  if (mem->is_MergeMem()) {
+    Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+    set_req(Memory, new_mem);
+    if (mem->outcnt() == 0 && can_reshape) {
+      phase->is_IterGVN()->_worklist.push(mem);
+    }
+
+    return this;
+  }
+
+  bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
+  LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
+  if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
+    assert(in(Address) == dominating_barrier->in(Address), "");
+    set_req(Similar, dominating_barrier->proj_out(Oop));
+    return this;
+  }
+
+  bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
+                   (can_reshape && (dominating_barrier != NULL || !has_true_uses()));
+
+  if (eliminate) {
+    if (can_reshape) {
+      PhaseIterGVN* igvn = phase->is_IterGVN();
+      Node* out_ctrl = proj_out_or_null(Control);
+      Node* out_res = proj_out_or_null(Oop);
+
+      if (out_ctrl != NULL) {
+        igvn->replace_node(out_ctrl, ctrl);
+      }
+
+      // That transformation may cause the Similar edge on the load barrier to be invalid
+      fix_similar_in_uses(igvn);
+      if (out_res != NULL) {
+        if (dominating_barrier != NULL) {
+          igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
+        } else {
+          igvn->replace_node(out_res, val);
+        }
+      }
+    }
+
+    return new ConINode(TypeInt::ZERO);
+  }
+
+  // If the Similar edge is no longer a load barrier, clear it
+  Node* similar = in(Similar);
+  if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
+    set_req(Similar, phase->C->top());
+    return this;
+  }
+
+  if (can_reshape) {
+    // If this barrier is linked through the Similar edge by a
+    // dominated barrier and both barriers have the same Oop field,
+    // the dominated barrier can go away, so push it for reprocessing.
+    // We also want to avoid having a barrier depend, through its Similar
+    // edge, on another dominating barrier that itself depends on yet
+    // another barrier through its Similar edge; in that case the first
+    // barrier should depend directly on the third.
+    PhaseIterGVN* igvn = phase->is_IterGVN();
+    Node* out_res = proj_out(Oop);
+    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+      Node* u = out_res->fast_out(i);
+      if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
+          (u->in(Oop) == val || !u->in(Similar)->is_top())) {
+        igvn->_worklist.push(u);
+      }
+    }
+
+    push_dominated_barriers(igvn);
+  }
+
+  return NULL;
+}
+
+void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
+  Node* out_res = proj_out_or_null(Oop);
+  if (out_res == NULL) {
+    return;
+  }
+
+  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+    Node* u = out_res->fast_out(i);
+    if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
+      igvn->replace_input_of(u, Similar, igvn->C->top());
+      --i;
+      --imax;
+    }
+  }
+}
+
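+// A barrier has true uses if its Oop projection is consumed by anything
+// other than the Similar edge of another barrier.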
+bool LoadBarrierNode::has_true_uses() const {
+  Node* out_res = proj_out_or_null(Oop);
+  if (out_res == NULL) {
+    return false;
+  }
+
+  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+    Node* u = out_res->fast_out(i);
+    if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// == Accesses ==
+
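+// Expand a CAS on an oop field into a fast path and a healing slow path:
+// if the CAS fails, reload the oop, heal it through a load barrier and,
+// if the healed oop still matches the expected value, retry the CAS once.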
+Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
+  assert(!UseCompressedOops, "Not allowed");
+  CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
+  PhaseGVN& gvn = access.kit()->gvn();
+  Compile* C = Compile::current();
+  GraphKit* kit = access.kit();
+
+  Node* in_ctrl     = cas->in(MemNode::Control);
+  Node* in_mem      = cas->in(MemNode::Memory);
+  Node* in_adr      = cas->in(MemNode::Address);
+  Node* in_val      = cas->in(MemNode::ValueIn);
+  Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);
+
+  float likely                   = PROB_LIKELY(0.999);
+
+  const TypePtr *adr_type        = gvn.type(in_adr)->isa_ptr();
+  Compile::AliasType* alias_type = C->alias_type(adr_type);
+  int alias_idx                  = C->get_alias_index(adr_type);
+
+  // Outer check - true: continue, false: load and check
+  Node* region   = new RegionNode(3);
+  Node* phi      = new PhiNode(region, TypeInt::BOOL);
+  Node* phi_mem  = new PhiNode(region, Type::MEMORY, adr_type);
+
+  // Inner check - is the healed ref equal to the expected
+  Node* region2  = new RegionNode(3);
+  Node* phi2     = new PhiNode(region2, TypeInt::BOOL);
+  Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);
+
+  // CAS node returns 0 or 1
+  Node* cmp     = gvn.transform(new CmpINode(cas, kit->intcon(0)));
+  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+  Node* then    = gvn.transform(new IfTrueNode(iff));
+  Node* elsen   = gvn.transform(new IfFalseNode(iff));
+
+  Node* scmemproj1   = gvn.transform(new SCMemProjNode(cas));
+
+  kit->set_memory(scmemproj1, alias_idx);
+  phi_mem->init_req(1, scmemproj1);
+  phi_mem2->init_req(2, scmemproj1);
+
+  // CAS fail - reload and heal oop
+  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+  // Check load
+  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
+  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
+  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+  Node* then2   = gvn.transform(new IfTrueNode(iff2));
+  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
+
+  // redo CAS
+  Node* cas2       = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
+  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
+  kit->set_control(elsen2);
+  kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if the healed oop was equal to the expected value.
+  region2->set_req(1, kit->control());
+  region2->set_req(2, then2);
+  phi2->set_req(1, cas2);
+  phi2->set_req(2, kit->intcon(0));
+  phi_mem2->init_req(1, scmemproj2);
+  kit->set_memory(phi_mem2, alias_idx);
+
+  // Merge outer flow - then check if the first CAS succeeded
+  region->set_req(1, then);
+  region->set_req(2, region2);
+  phi->set_req(1, kit->intcon(1));
+  phi->set_req(2, phi2);
+  phi_mem->init_req(2, phi_mem2);
+  kit->set_memory(phi_mem, alias_idx);
+
+  gvn.transform(region2);
+  gvn.transform(phi2);
+  gvn.transform(phi_mem2);
+  gvn.transform(region);
+  gvn.transform(phi);
+  gvn.transform(phi_mem);
+
+  kit->set_control(region);
+  kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+  return phi;
+}
+
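+// Same expansion as make_cas_loadbarrier, but for compare-and-exchange:
+// the merged result is the oop value rather than a success flag.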
+Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
+  CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
+  GraphKit* kit = access.kit();
+  PhaseGVN& gvn = kit->gvn();
+  Compile* C = Compile::current();
+
+  Node* in_ctrl     = cmpx->in(MemNode::Control);
+  Node* in_mem      = cmpx->in(MemNode::Memory);
+  Node* in_adr      = cmpx->in(MemNode::Address);
+  Node* in_val      = cmpx->in(MemNode::ValueIn);
+  Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);
+
+  float likely                   = PROB_LIKELY(0.999);
+
+  const TypePtr *adr_type        = cmpx->get_ptr_type();
+  Compile::AliasType* alias_type = C->alias_type(adr_type);
+  int alias_idx                  = C->get_alias_index(adr_type);
+
+  // Outer check - true: continue, false: load and check
+  Node* region  = new RegionNode(3);
+  Node* phi     = new PhiNode(region, adr_type);
+
+  // Inner check - is the healed ref equal to the expected
+  Node* region2 = new RegionNode(3);
+  Node* phi2    = new PhiNode(region2, adr_type);
+
+  // Check if the cmpx succeeded
+  Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
+  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
+  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+  Node* then    = gvn.transform(new IfTrueNode(iff));
+  Node* elsen   = gvn.transform(new IfFalseNode(iff));
+
+  Node* scmemproj1  = gvn.transform(new SCMemProjNode(cmpx));
+  kit->set_memory(scmemproj1, alias_idx);
+
+  // CAS fail - reload and heal oop
+  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+  // Check load
+  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
+  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
+  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+  Node* then2   = gvn.transform(new IfTrueNode(iff2));
+  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
+
+  // Redo CAS
+  Node* cmpx2      = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
+  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
+  kit->set_control(elsen2);
+  kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if the healed oop was equal to the expected value.
+  region2->set_req(1, kit->control());
+  region2->set_req(2, then2);
+  phi2->set_req(1, cmpx2);
+  phi2->set_req(2, barrierdata);
+
+  // Merge outer flow - then check if the first CAS succeeded
+  region->set_req(1, then);
+  region->set_req(2, region2);
+  phi->set_req(1, cmpx);
+  phi->set_req(2, phi2);
+
+  gvn.transform(region2);
+  gvn.transform(phi2);
+  gvn.transform(region);
+  gvn.transform(phi);
+
+  kit->set_control(region);
+  kit->set_memory(in_mem, alias_idx);
+  kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+  return phi;
+}
+
+Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
+  PhaseGVN& gvn = kit->gvn();
+  Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
+  Node* transformed_barrier = gvn.transform(barrier);
+
+  if (transformed_barrier->is_LoadBarrier()) {
+    if (barrier == transformed_barrier) {
+      kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
+    }
+    return gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
+  } else {
+    return val;
+  }
+}
+
+static bool barrier_needed(C2Access access) {
+  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
+  Node* p = BarrierSetC2::load_at_resolved(access, val_type);
+  if (!barrier_needed(access)) {
+    return p;
+  }
+
+  bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
+
+  GraphKit* kit = access.kit();
+  PhaseGVN& gvn = kit->gvn();
+  Node* adr = access.addr().node();
+  Node* heap_base_oop = access.base();
+  bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
+  if (unsafe) {
+    if (!ZVerifyLoadBarriers) {
+      p = load_barrier(kit, p, adr);
+    } else {
+      if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
+        p = load_barrier(kit, p, adr);
+      } else {
+        IdealKit ideal(kit);
+        IdealVariable res(ideal);
+#define __ ideal.
+        __ declarations_done();
+        __ set(res, p);
+        __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
+          kit->sync_kit(ideal);
+          p = load_barrier(kit, p, adr);
+          __ set(res, p);
+          __ sync_kit(kit);
+        } __ end_if();
+        kit->final_sync(ideal);
+        p = __ value(res);
+#undef __
+      }
+    }
+    return p;
+  } else {
+    return load_barrier(access.kit(), p, access.addr().node(), weak, true, true);
+  }
+}
+
+Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                                    Node* new_val, const Type* val_type) const {
+  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  access.set_needs_pinning(false);
+  return make_cmpx_loadbarrier(access);
+}
+
+Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                                     Node* new_val, const Type* value_type) const {
+  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  Node* load_store = access.raw_access();
+  bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);
+
+  if (!expected_is_null) {
+    // The weak and strong CAS variants are expanded the same way here
+    access.set_needs_pinning(false);
+    load_store = make_cas_loadbarrier(access);
+  }
+
+  return load_store;
+}
+
+Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
+  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  Node* load_store = access.raw_access();
+  Node* adr = access.addr().node();
+
+  return load_barrier(access.kit(), load_store, adr, false, false, false);
+}
+
+// == Macro Expansion ==
+
+void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr  = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
+
+  PhaseIterGVN &igvn = phase->igvn();
+
+  if (ZVerifyLoadBarriers) {
+    igvn.replace_node(out_res, in_val);
+    igvn.replace_node(out_ctrl, in_ctrl);
+    return;
+  }
+
+  if (barrier->can_be_eliminated()) {
+    // Clone and pin the load for this barrier below the dominating
+    // barrier: the load cannot be allowed to float above the
+    // dominating barrier
+    Node* load = in_val;
+
+    if (load->is_Load()) {
+      Node* new_load = load->clone();
+      Node* addp = new_load->in(MemNode::Address);
+      assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
+      Node* cast = new CastPPNode(addp, igvn.type(addp), true);
+      Node* ctrl = NULL;
+      Node* similar = barrier->in(LoadBarrierNode::Similar);
+      if (similar->is_Phi()) {
+        // already expanded
+        ctrl = similar->in(0);
+      } else {
+        assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
+        ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
+      }
+      assert(ctrl != NULL, "bad control");
+      cast->set_req(0, ctrl);
+      igvn.transform(cast);
+      new_load->set_req(MemNode::Address, cast);
+      igvn.transform(new_load);
+
+      igvn.replace_node(out_res, new_load);
+      igvn.replace_node(out_ctrl, in_ctrl);
+      return;
+    }
+    // cannot eliminate
+  }
+
+  // There are two cases that require the basic loadbarrier
+  // 1) When the writeback of a healed oop must be avoided (swap)
+  // 2) When we must guarantee that no reload of the oop is done (swap, cas, cmpx)
+  if (!barrier->is_writeback()) {
+    assert(!barrier->oop_reload_allowed(), "writeback barriers should be marked as requires oop");
+  }
+
+  if (!barrier->oop_reload_allowed()) {
+    expand_loadbarrier_basic(phase, barrier);
+  } else {
+    expand_loadbarrier_optimized(phase, barrier);
+  }
+}
+
+// Basic loadbarrier using conventional arg passing
+void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+  PhaseIterGVN &igvn = phase->igvn();
+
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr  = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
+
+  float unlikely  = PROB_UNLIKELY(0.999);
+  const Type* in_val_maybe_null_t = igvn.type(in_val);
+
+  Node* jthread = igvn.transform(new ThreadLocalNode());
+  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
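+  // Fast-path test: (oop & bad_mask) != 0 means the loaded oop is bad and
+  // must be healed by the slow-path call below.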
+  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+  Node* then = igvn.transform(new IfTrueNode(iff));
+  Node* elsen = igvn.transform(new IfFalseNode(iff));
+
+  Node* result_region = new RegionNode(3);
+  Node* result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);
+
+  result_region->set_req(1, elsen);
+  Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
+  res->init_req(0, elsen);
+  result_val->set_req(1, res);
+
+  const TypeFunc *tf = load_barrier_Type();
+  Node* call;
+  if (barrier->is_weak()) {
+    call = new CallLeafNode(tf,
+                            ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
+                            "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
+                            TypeRawPtr::BOTTOM);
+  } else {
+    call = new CallLeafNode(tf,
+                            ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
+                            "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
+                            TypeRawPtr::BOTTOM);
+  }
+
+  call->init_req(TypeFunc::Control, then);
+  call->init_req(TypeFunc::I_O    , phase->top());
+  call->init_req(TypeFunc::Memory , in_mem);
+  call->init_req(TypeFunc::FramePtr, phase->top());
+  call->init_req(TypeFunc::ReturnAdr, phase->top());
+  call->init_req(TypeFunc::Parms+0, in_val);
+  if (barrier->is_writeback()) {
+    call->init_req(TypeFunc::Parms+1, in_adr);
+  } else {
+    // when slow path is called with a null adr, the healed oop will not be written back
+    call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
+  }
+  call = igvn.transform(call);
+
+  Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
+  res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
+  res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));
+
+  result_region->set_req(2, ctrl);
+  result_val->set_req(2, res);
+
+  result_region = igvn.transform(result_region);
+  result_val = igvn.transform(result_val);
+
+  if (out_ctrl != NULL) {
+    igvn.replace_node(out_ctrl, result_region);
+  }
+  igvn.replace_node(out_res, result_val);
+}
+
+// Optimized, low spill, loadbarrier variant using stub specialized on register used
+void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+  PhaseIterGVN &igvn = phase->igvn();
+#ifdef PRINT_NODE_TRAVERSALS
+  Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
+#endif
+
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
+
+  assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");
+
+#ifdef PRINT_NODE_TRAVERSALS
+  tty->print("\n\n\nBefore barrier optimization:\n");
+  traverse(barrier, out_ctrl, out_res, -1);
+
+  tty->print("\nBefore barrier optimization:  preceding_barrier_node\n");
+  traverse(preceding_barrier_node, out_ctrl, out_res, -1);
+#endif
+
+  float unlikely  = PROB_UNLIKELY(0.999);
+
+  Node* jthread = igvn.transform(new ThreadLocalNode());
+  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
+                                                 TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
+                                                 MemNode::unordered));
+  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+  Node* then = igvn.transform(new IfTrueNode(iff));
+  Node* elsen = igvn.transform(new IfFalseNode(iff));
+
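+  // The slow path is modelled as a special load node that is later matched
+  // to a stub call specialized on the register used, keeping spills low.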
+  Node* slow_path_surrogate;
+  if (!barrier->is_weak()) {
+    slow_path_surrogate = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+                                                                    (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+  } else {
+    slow_path_surrogate = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+                                                                        (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+  }
+
+  Node* new_loadp = slow_path_surrogate;
+
+  // Create the final region/phi pair to converge control/data paths to downstream code
+  Node* result_region = igvn.transform(new RegionNode(3));
+  result_region->set_req(1, then);
+  result_region->set_req(2, elsen);
+
+  Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
+  result_phi->set_req(1, new_loadp);
+  result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
+
+  // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
+  if (out_ctrl != NULL) {
+    igvn.replace_node(out_ctrl, result_region);
+  }
+  igvn.replace_node(out_res, result_phi);
+
+  assert(barrier->outcnt() == 0, "LoadBarrier macro node has non-null outputs after expansion!");
+
+#ifdef PRINT_NODE_TRAVERSALS
+  tty->print("\nAfter barrier optimization:  old out_ctrl\n");
+  traverse(out_ctrl, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  old out_res\n");
+  traverse(out_res, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  old barrier\n");
+  traverse(barrier, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  preceding_barrier_node\n");
+  traverse(preceding_barrier_node, result_region, result_phi, -1);
+#endif
+}
+
+bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
+  Compile* C = Compile::current();
+  PhaseIterGVN &igvn = macro->igvn();
+  ZBarrierSetC2State* s = state();
+  if (s->load_barrier_count() > 0) {
+#ifdef ASSERT
+    verify_gc_barriers(false);
+#endif
+    igvn.set_delay_transform(true);
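+    // Expand in two passes: first the barriers that can be eliminated
+    // (they piggyback on a dominating barrier), then the remaining ones.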
+    int skipped = 0;
+    while (s->load_barrier_count() > skipped) {
+      int load_barrier_count = s->load_barrier_count();
+      LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
+      if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
+        // node is unreachable, so don't try to expand it
+        s->remove_load_barrier_node(n);
+        continue;
+      }
+      if (!n->can_be_eliminated()) {
+        skipped++;
+        continue;
+      }
+      expand_loadbarrier_node(macro, n);
+      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+      if (C->failing())  return true;
+    }
+    while (s->load_barrier_count() > 0) {
+      int load_barrier_count = s->load_barrier_count();
+      LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
+      assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
+      assert(!n->can_be_eliminated(), "should have been processed already");
+      expand_loadbarrier_node(macro, n);
+      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+      if (C->failing())  return true;
+    }
+    igvn.set_delay_transform(false);
+    igvn.optimize();
+    if (C->failing())  return true;
+  }
+  return false;
+}
+
+// == Loop optimization ==
+
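+// If an equivalent barrier dominates this one, either link the two through
+// the Similar edge (same address, different oop) or remove this barrier
+// entirely (same oop).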
+static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+
+  LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
+  if (lb2 != NULL) {
+    if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
+      assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
+      igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
+      C->set_major_progress();
+    } else {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      Node* val = lb->proj_out(LoadBarrierNode::Oop);
+      assert(lb2->has_true_uses(), "");
+      assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");
+
+      phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+      phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+      igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
+
+      return true;
+    }
+  }
+  return false;
+}
+
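+// Walk up the raw memory graph from mem until reaching a memory state that
+// is valid at dom (for a Region, i selects which incoming path to follow).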
+static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
+  assert(dom->is_Region() || i == -1, "");
+  Node* m = mem;
+  while (phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
+    if (m->is_Mem()) {
+      assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
+      m = m->in(MemNode::Memory);
+    } else if (m->is_MergeMem()) {
+      m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+    } else if (m->is_Phi()) {
+      if (m->in(0) == dom && i != -1) {
+        m = m->in(i);
+        break;
+      } else {
+        m = m->in(LoopNode::EntryControl);
+      }
+    } else if (m->is_Proj()) {
+      m = m->in(0);
+    } else if (m->is_SafePoint() || m->is_MemBar()) {
+      m = m->in(TypeFunc::Memory);
+    } else {
+#ifdef ASSERT
+      m->dump();
+#endif
+      ShouldNotReachHere();
+    }
+  }
+  return m;
+}
+
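+// Clone lb with new control, memory and (optionally) oop inputs, and
+// register the clone and its control projection with the loop optimizer.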
+static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+  Node* the_clone = lb->clone();
+  the_clone->set_req(LoadBarrierNode::Control, ctl);
+  the_clone->set_req(LoadBarrierNode::Memory, mem);
+  if (oop_in != NULL) {
+    the_clone->set_req(LoadBarrierNode::Oop, oop_in);
+  }
+
+  LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
+  igvn.register_new_node_with_optimizer(new_lb);
+  IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
+  phase->set_ctrl(new_lb, new_lb->in(0));
+  phase->set_loop(new_lb, loop);
+  phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
+  if (!loop->_child) {
+    loop->_body.push(new_lb);
+  }
+
+  Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
+  igvn.register_new_node_with_optimizer(proj_ctl);
+  phase->set_ctrl(proj_ctl, proj_ctl->in(0));
+  phase->set_loop(proj_ctl, loop);
+  phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
+  if (!loop->_child) {
+    loop->_body.push(proj_ctl);
+  }
+
+  Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
+  phase->register_new_node(proj_oop, new_lb);
+
+  if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
+    LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
+    if (!phase->is_dominator(similar, ctl)) {
+      igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
+    }
+  }
+
+  return new_lb;
+}
+
+static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Node* val = lb->proj_out(LoadBarrierNode::Oop);
+  igvn.replace_node(val, new_val);
+  phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+  phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+}
+
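+// If the barrier's oop input is a phi, split the barrier through the phi:
+// clone a barrier above each predecessor of the phi's region and merge the
+// healed oops with a new phi.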
+static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+
+  if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
+    Node* oop_phi = lb->in(LoadBarrierNode::Oop);
+
+    if (oop_phi->in(2) == oop_phi) {
+      // Ignore phis with only one input
+      return false;
+    }
+
+    if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
+                            oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      RegionNode* region = oop_phi->in(0)->as_Region();
+
+      int backedge = LoopNode::LoopBackControl;
+      if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
+        Node* c = region->in(backedge)->in(0)->in(0);
+        assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
+        Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
+        Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
+        if (!phase->is_dominator(oop_c, c)) {
+          return false;
+        }
+      }
+
+      // If the node on the backedge above the phi is the node itself - we have a self loop.
+      // Don't clone - this will be folded later.
+      if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
+        return false;
+      }
+
+      bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
+      Node *phi = oop_phi->clone();
+
+      for (uint i = 1; i < region->req(); i++) {
+        Node* ctrl = region->in(i);
+        if (ctrl != C->top()) {
+          assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");
+
+          Node* mem = lb->in(LoadBarrierNode::Memory);
+          Node* m = find_dominating_memory(phase, mem, region, i);
+
+          if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
+            ctrl = ctrl->in(0)->in(0);
+          } else if (region->is_Loop() && is_strip_mined) {
+            // If this is a strip mined loop, control must move above OuterStripMinedLoop
+            assert(i == LoopNode::EntryControl, "check");
+            assert(ctrl->is_OuterStripMinedLoop(), "sanity");
+            ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
+          }
+
+          LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
+          Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);
+
+          if (is_strip_mined && (i == LoopNode::EntryControl)) {
+            assert(region->in(i)->is_OuterStripMinedLoop(), "");
+            igvn.replace_input_of(region->in(i), i, out_ctrl);
+          } else if (ctrl == region->in(i)) {
+            igvn.replace_input_of(region, i, out_ctrl);
+          } else {
+            Node* iff = region->in(i)->in(0);
+            igvn.replace_input_of(iff, 0, out_ctrl);
+            phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
+          }
+          phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
+        }
+      }
+      phase->register_new_node(phi, region);
+      replace_barrier(phase, lb, phi);
+
+      if (region->is_Loop()) {
+        // Load barrier moved to the back edge of the Loop may now
+        // have a safepoint on the path to the barrier on the Similar
+        // edge
+        igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
+        Node* head = region->in(LoopNode::EntryControl);
+        phase->set_idom(region, head, phase->dom_depth(head)+1);
+        phase->recompute_dom_depth();
+        if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
+          head->as_CountedLoop()->set_normal_loop();
+        }
+      }
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
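+// Hoist a barrier whose oop and address are loop invariant out of the loop
+// by cloning it at the loop entry.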
+static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
+  if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
+    Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
+    IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
+    IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
+    if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      Node* head = lb_loop->_head;
+      assert(head->is_Loop(), "");
+
+      if (phase->is_dominator(head, oop_ctrl)) {
+        assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
+        assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
+        return false;
+      }
+
+      if (head->is_CountedLoop()) {
+        CountedLoopNode* cloop = head->as_CountedLoop();
+        if (cloop->is_main_loop()) {
+          cloop->set_normal_loop();
+        }
+        // When we are moving barrier out of a counted loop,
+        // make sure we move it all the way out of the strip mined outer loop.
+        if (cloop->is_strip_mined()) {
+          head = cloop->outer_loop();
+        }
+      }
+
+      Node* mem = lb->in(LoadBarrierNode::Memory);
+      Node* m = find_dominating_memory(phase, mem, head, -1);
+
+      LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);
+
+      assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
+      Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+      igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
+      phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);
+
+      replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));
+
+      phase->recompute_dom_depth();
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
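+// If this barrier and another barrier on the same oop sit on the two arms
+// of the same If, hoist a single barrier above the split point and let
+// both former uses share its healed oop.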
+static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Node* in_val = lb->in(LoadBarrierNode::Oop);
+  for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
+    Node* u = in_val->fast_out(i);
+    if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = lb->in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+
+      Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
+      bool ok = true;
+
+      Node* proj1 = NULL;
+      Node* proj2 = NULL;
+
+      while (this_ctrl != lca && ok) {
+        if (this_ctrl->in(0) != NULL &&
+            this_ctrl->in(0)->is_MultiBranch()) {
+          if (this_ctrl->in(0)->in(0) == lca) {
+            assert(proj1 == NULL, "");
+            assert(this_ctrl->is_Proj(), "");
+            proj1 = this_ctrl;
+          } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+            ok = false;
+          }
+        }
+        this_ctrl = phase->idom(this_ctrl);
+      }
+      while (other_ctrl != lca && ok) {
+        if (other_ctrl->in(0) != NULL &&
+            other_ctrl->in(0)->is_MultiBranch()) {
+          if (other_ctrl->in(0)->in(0) == lca) {
+            assert(other_ctrl->is_Proj(), "");
+            assert(proj2 == NULL, "");
+            proj2 = other_ctrl;
+          } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+            ok = false;
+          }
+        }
+        other_ctrl = phase->idom(other_ctrl);
+      }
+      assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
+      if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
+        // That transformation may cause the Similar edge on dominated load barriers to be invalid
+        lb->fix_similar_in_uses(&igvn);
+        u->as_LoadBarrier()->fix_similar_in_uses(&igvn);
+
+        Node* split = lca->unique_ctrl_out();
+        assert(split->in(0) == lca, "");
+
+        Node* mem = lb->in(LoadBarrierNode::Memory);
+        Node* m = find_dominating_memory(phase, mem, split, -1);
+        LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);
+
+        Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+        igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control));
+        phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);
+
+        Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
+        replace_barrier(phase, lb, proj_oop);
+        replace_barrier(phase, u->as_LoadBarrier(), proj_oop);
+
+        phase->recompute_dom_depth();
+
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
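+// Try the barrier optimizations in order of preference; each either
+// rewrites the graph and returns, or leaves the barrier for the next one.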
+static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+  Compile* C = Compile::current();
+
+  if (!C->directive()->ZOptimizeLoadBarriersOption) {
+    return;
+  }
+
+  if (lb->has_true_uses()) {
+    if (replace_with_dominating_barrier(phase, lb, last_round)) {
+      return;
+    }
+
+    if (split_barrier_thru_phi(phase, lb)) {
+      return;
+    }
+
+    if (move_out_of_loop(phase, lb)) {
+      return;
+    }
+
+    if (common_barriers(phase, lb)) {
+      return;
+    }
+  }
+}
+
+void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
+  if (node->is_LoadBarrier()) {
+    optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
+  }
+}
+
+// == Verification ==
+
+#ifdef ASSERT
+
+static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
+  if (visited.test_set(n->_idx)) {
+    return true;
+  }
+
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node* u = n->fast_out(i);
+    if (u->is_LoadBarrier()) {
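+      // A load barrier guards this use - nothing more to check on this path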
+    } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
+      if (!look_for_barrier(u, post_parse, visited)) {
+        return false;
+      }
+    } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
+      if (!look_for_barrier(u, post_parse, visited)) {
+        return false;
+      }
+    } else if (u->Opcode() != Op_SCMemProj) {
+      tty->print("bad use"); u->dump();
+      return false;
+    }
+  }
+
+  return true;
+}
+
+void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
+  ZBarrierSetC2State* s = state();
+  Compile* C = Compile::current();
+  ResourceMark rm;
+  VectorSet visited(Thread::current()->resource_area());
+  for (int i = 0; i < s->load_barrier_count(); i++) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+
+    // The dominating barrier on the same address, if one exists, and
+    // this barrier must not be applied to the value from the same
+    // load; otherwise the value is not reloaded before it is used a
+    // second time.
+    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
+           "broken similar edge");
+
+    assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
+           "found unneeded load barrier");
+
+    // Several load barrier nodes chained through their Similar edges
+    // break the code that removes the barriers in final graph reshape.
+    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
+           "chain of Similar load barriers");
+
+    if (!n->in(LoadBarrierNode::Similar)->is_top()) {
+      ResourceMark rm;
+      Unique_Node_List wq;
+      Node* other = n->in(LoadBarrierNode::Similar)->in(0);
+      wq.push(n);
+      for (uint next = 0; next < wq.size(); ++next) {
+        Node *n = wq.at(next);
+        assert(n->is_CFG(), "");
+        assert(!n->is_SafePoint(), "");
+
+        if (n == other) {
+          continue;
+        }
+
+        if (n->is_Region()) {
+          for (uint i = 1; i < n->req(); i++) {
+            Node* m = n->in(i);
+            if (m != NULL) {
+              wq.push(m);
+            }
+          }
+        } else {
+          Node* m = n->in(0);
+          if (m != NULL) {
+            wq.push(m);
+          }
+        }
+      }
+    }
+
+    if (ZVerifyLoadBarriers) {
+      if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
+        visited.Clear();
+        bool found = look_for_barrier(n, post_parse, visited);
+        if (!found) {
+          n->dump(1);
+          n->dump(-3);
+          stringStream ss;
+          C->method()->print_short_name(&ss);
+          tty->print_cr("-%s-", ss.as_string());
+          assert(found, "");
+        }
+      }
+    }
+  }
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+#define SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+
+#include "gc/shared/c2/barrierSetC2.hpp"
+#include "memory/allocation.hpp"
+#include "opto/node.hpp"
+#include "utilities/growableArray.hpp"
+
+class LoadBarrierNode : public MultiNode {
+private:
+  bool _weak;
+  bool _writeback;          // Controls whether the barrier writes the healed oop back to memory.
+                            // A swap on a memory location must never write back the healed oop.
+  bool _oop_reload_allowed; // Controls whether the barrier is allowed to reload the oop from memory
+                            // before healing; otherwise both the oop and the address must be passed
+                            // to the barrier.
+
+  static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
+  void push_dominated_barriers(PhaseIterGVN* igvn) const;
+
+public:
+  enum {
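+    // Control, Memory and Oop double as both inputs and outputs;
+    // Address and Similar are inputs only.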
+    Control,
+    Memory,
+    Oop,
+    Address,
+    Number_of_Outputs = Address,
+    Similar,
+    Number_of_Inputs
+  };
+
+  LoadBarrierNode(Compile* C,
+                  Node* c,
+                  Node* mem,
+                  Node* val,
+                  Node* adr,
+                  bool weak,
+                  bool writeback,
+                  bool oop_reload_allowed);
+
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const;
+  virtual const Type *Value(PhaseGVN *phase) const;
+  virtual Node *Identity(PhaseGVN *phase);
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+
+  LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
+                                          bool linear_only,
+                                          bool look_for_similar);
+
+  void fix_similar_in_uses(PhaseIterGVN* igvn);
+
+  bool has_true_uses() const;
+
+  bool can_be_eliminated() const {
+    return !in(Similar)->is_top();
+  }
+
+  bool is_weak() const {
+    return _weak;
+  }
+
+  bool is_writeback() const {
+    return _writeback;
+  }
+
+  bool oop_reload_allowed() const {
+    return _oop_reload_allowed;
+  }
+};
+
+class LoadBarrierSlowRegNode : public LoadPNode {
+public:
+  LoadBarrierSlowRegNode(Node *c,
+                         Node *mem,
+                         Node *adr,
+                         const TypePtr *at,
+                         const TypePtr* t,
+                         MemOrd mo,
+                         ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+  virtual const char * name() {
+    return "LoadBarrierSlowRegNode";
+  }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class LoadBarrierWeakSlowRegNode : public LoadPNode {
+public:
+  LoadBarrierWeakSlowRegNode(Node *c,
+                             Node *mem,
+                             Node *adr,
+                             const TypePtr *at,
+                             const TypePtr* t,
+                             MemOrd mo,
+                             ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+  virtual const char * name() {
+    return "LoadBarrierWeakSlowRegNode";
+  }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class ZBarrierSetC2State : public ResourceObj {
+private:
+  // List of load barrier nodes which need to be expanded before matching
+  GrowableArray<LoadBarrierNode*>* _load_barrier_nodes;
+
+public:
+  ZBarrierSetC2State(Arena* comp_arena);
+  int load_barrier_count() const;
+  void add_load_barrier_node(LoadBarrierNode* n);
+  void remove_load_barrier_node(LoadBarrierNode* n);
+  LoadBarrierNode* load_barrier_node(int idx) const;
+};
+
+class ZBarrierSetC2 : public BarrierSetC2 {
+private:
+  ZBarrierSetC2State* state() const;
+  Node* make_cas_loadbarrier(C2AtomicAccess& access) const;
+  Node* make_cmpx_loadbarrier(C2AtomicAccess& access) const;
+  void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
+  void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
+  void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
+  const TypeFunc* load_barrier_Type() const;
+
+protected:
+  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
+                                               Node* expected_val,
+                                               Node* new_val,
+                                               const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access,
+                                                Node* expected_val,
+                                                Node* new_val,
+                                                const Type* value_type) const;
+  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access,
+                                        Node* new_val,
+                                        const Type* val_type) const;
+
+public:
+  Node* load_barrier(GraphKit* kit,
+                     Node* val,
+                     Node* adr,
+                     bool weak = false,
+                     bool writeback = true,
+                     bool oop_reload_allowed = true) const;
+
+  virtual void* create_barrier_state(Arena* comp_arena) const;
+  virtual bool is_gc_barrier_node(Node* node) const;
+  virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
+  virtual void eliminate_useless_gc_barriers(Unique_Node_List& useful) const;
+  virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
+  virtual void enqueue_useful_gc_barrier(Unique_Node_List& worklist, Node* node) const;
+  virtual void register_potential_barrier_node(Node* node) const;
+  virtual void unregister_potential_barrier_node(Node* node) const;
+  virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
+  virtual Node* step_over_gc_barrier(Node* c) const { return c; }
+  // If the BarrierSetC2 state has kept macro nodes to be expanded later,
+  // then now is the time to do so.
+  virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
+
+  static void find_dominating_barriers(PhaseIterGVN& igvn);
+  static void loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round);
+
+#ifdef ASSERT
+  virtual void verify_gc_barriers(bool post_parse) const;
+#endif
+};
+
+#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/vmStructs_z.hpp"
+
+ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
+    _ZGlobalPhase(&ZGlobalPhase),
+    _ZAddressGoodMask(&ZAddressGoodMask),
+    _ZAddressBadMask(&ZAddressBadMask),
+    _ZAddressWeakBadMask(&ZAddressWeakBadMask),
+    _ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift),
+    _ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {
+}
+
+ZGlobalsForVMStructs ZGlobalsForVMStructs::_instance;
+ZGlobalsForVMStructs* ZGlobalsForVMStructs::_instance_p = &ZGlobalsForVMStructs::_instance;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_Z_VMSTRUCTS_Z_HPP
+#define SHARE_GC_Z_VMSTRUCTS_Z_HPP
+
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zPhysicalMemory.hpp"
+#include "utilities/macros.hpp"
+
+// Expose some ZGC globals to the SA agent.
+class ZGlobalsForVMStructs {
+  static ZGlobalsForVMStructs _instance;
+
+public:
+  static ZGlobalsForVMStructs* _instance_p;
+
+  ZGlobalsForVMStructs();
+
+  uint32_t* _ZGlobalPhase;
+
+  uintptr_t* _ZAddressGoodMask;
+  uintptr_t* _ZAddressBadMask;
+  uintptr_t* _ZAddressWeakBadMask;
+
+  const int* _ZObjectAlignmentSmallShift;
+  const int* _ZObjectAlignmentSmall;
+};
+
+typedef ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> ZAddressRangeMapForPageTable;
+
+#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field)                      \
+  static_field(ZGlobalsForVMStructs,            _instance_p,          ZGlobalsForVMStructs*)         \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZGlobalPhase,        uint32_t*)                     \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressGoodMask,    uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressBadMask,     uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressWeakBadMask, uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZObjectAlignmentSmallShift, const int*)             \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZObjectAlignmentSmall, const int*)                  \
+                                                                                                     \
+  nonstatic_field(ZCollectedHeap,               _heap,                ZHeap)                         \
+                                                                                                     \
+  nonstatic_field(ZHeap,                        _page_allocator,      ZPageAllocator)                \
+  nonstatic_field(ZHeap,                        _pagetable,           ZPageTable)                    \
+                                                                                                     \
+  nonstatic_field(ZPage,                        _type,                const uint8_t)                 \
+  nonstatic_field(ZPage,                        _virtual,             const ZVirtualMemory)          \
+  nonstatic_field(ZPage,                        _forwarding,          ZForwardingTable)              \
+                                                                                                     \
+  nonstatic_field(ZPageAllocator,               _physical,            ZPhysicalMemoryManager)        \
+  nonstatic_field(ZPageAllocator,               _used,                size_t)                        \
+                                                                                                     \
+  nonstatic_field(ZPageTable,                   _map,                 ZAddressRangeMapForPageTable)  \
+                                                                                                     \
+  nonstatic_field(ZAddressRangeMapForPageTable, _map,                 ZPageTableEntry* const)        \
+                                                                                                     \
+  nonstatic_field(ZVirtualMemory,               _start,               uintptr_t)                     \
+  nonstatic_field(ZVirtualMemory,               _end,                 uintptr_t)                     \
+                                                                                                     \
+  nonstatic_field(ZForwardingTable,             _table,               ZForwardingTableEntry*)        \
+  nonstatic_field(ZForwardingTable,             _size,                size_t)                        \
+                                                                                                     \
+  nonstatic_field(ZPhysicalMemoryManager,       _max_capacity,        const size_t)                  \
+  nonstatic_field(ZPhysicalMemoryManager,       _capacity,            size_t)
+
+#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value)                          \
+  declare_constant(ZPhaseRelocate)                                                                   \
+  declare_constant(ZPageTypeSmall)                                                                   \
+  declare_constant(ZPageTypeMedium)                                                                  \
+  declare_constant(ZPageTypeLarge)                                                                   \
+  declare_constant(ZObjectAlignmentMediumShift)                                                      \
+  declare_constant(ZObjectAlignmentLargeShift)
+
+#define VM_LONG_CONSTANTS_ZGC(declare_constant)                                                      \
+  declare_constant(ZPageSizeSmallShift)                                                              \
+  declare_constant(ZPageSizeMediumShift)                                                             \
+  declare_constant(ZPageSizeMinShift)                                                                \
+  declare_constant(ZAddressOffsetShift)                                                              \
+  declare_constant(ZAddressOffsetBits)                                                               \
+  declare_constant(ZAddressOffsetMask)                                                               \
+  declare_constant(ZAddressSpaceStart)
+
+#define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type)                      \
+  declare_toplevel_type(ZGlobalsForVMStructs)                                                        \
+  declare_type(ZCollectedHeap, CollectedHeap)                                                        \
+  declare_toplevel_type(ZHeap)                                                                       \
+  declare_toplevel_type(ZPage)                                                                       \
+  declare_toplevel_type(ZPageAllocator)                                                              \
+  declare_toplevel_type(ZPageTable)                                                                  \
+  declare_toplevel_type(ZPageTableEntry)                                                             \
+  declare_toplevel_type(ZAddressRangeMapForPageTable)                                                \
+  declare_toplevel_type(ZVirtualMemory)                                                              \
+  declare_toplevel_type(ZForwardingTable)                                                            \
+  declare_toplevel_type(ZForwardingTableEntry)                                                       \
+  declare_toplevel_type(ZPhysicalMemoryManager)
+
+#endif // SHARE_GC_Z_VMSTRUCTS_Z_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "runtime/thread.hpp"
+
+void ZAddressMasks::set_good_mask(uintptr_t mask) {
+  ZAddressGoodMask = mask;
+  ZAddressBadMask = ZAddressGoodMask ^ ZAddressMetadataMask;
+  ZAddressWeakBadMask = (ZAddressGoodMask | ZAddressMetadataRemapped | ZAddressMetadataFinalizable) ^ ZAddressMetadataMask;
+}
+
+void ZAddressMasks::initialize() {
+  ZAddressMetadataMarked = ZAddressMetadataMarked0;
+  set_good_mask(ZAddressMetadataRemapped);
+}
+
+void ZAddressMasks::flip_to_marked() {
+  ZAddressMetadataMarked ^= (ZAddressMetadataMarked0 | ZAddressMetadataMarked1);
+  set_good_mask(ZAddressMetadataMarked);
+}
+
+void ZAddressMasks::flip_to_remapped() {
+  set_good_mask(ZAddressMetadataRemapped);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_HPP
+#define SHARE_GC_Z_ZADDRESS_HPP
+
+#include "memory/allocation.hpp"
+
+class ZAddress : public AllStatic {
+public:
+  static bool is_null(uintptr_t value);
+  static bool is_bad(uintptr_t value);
+  static bool is_good(uintptr_t value);
+  static bool is_good_or_null(uintptr_t value);
+  static bool is_weak_bad(uintptr_t value);
+  static bool is_weak_good(uintptr_t value);
+  static bool is_weak_good_or_null(uintptr_t value);
+  static bool is_marked(uintptr_t value);
+  static bool is_finalizable(uintptr_t value);
+  static bool is_remapped(uintptr_t value);
+
+  static uintptr_t address(uintptr_t value);
+  static uintptr_t offset(uintptr_t value);
+  static uintptr_t good(uintptr_t value);
+  static uintptr_t good_or_null(uintptr_t value);
+  static uintptr_t finalizable_good(uintptr_t value);
+  static uintptr_t marked(uintptr_t value);
+  static uintptr_t marked0(uintptr_t value);
+  static uintptr_t marked1(uintptr_t value);
+  static uintptr_t remapped(uintptr_t value);
+  static uintptr_t remapped_or_null(uintptr_t value);
+};
+
+class ZAddressMasks : public AllStatic {
+  friend class ZAddressTest;
+
+private:
+  static void set_good_mask(uintptr_t mask);
+
+public:
+  static void initialize();
+  static void flip_to_marked();
+  static void flip_to_remapped();
+};
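+
+// Illustrative phase sequence (the call sites are assumptions for the
+// example; the actual calls are made by the GC cycle):
+//
+//   ZAddressMasks::initialize();        // good mask = remapped
+//   ZAddressMasks::flip_to_marked();    // at mark start: good mask = marked0/marked1
+//   ZAddressMasks::flip_to_remapped();  // at relocate start: good mask = remapped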
+
+#endif // SHARE_GC_Z_ZADDRESS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.inline.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESS_INLINE_HPP
+
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "utilities/macros.hpp"
+#include OS_CPU_HEADER_INLINE(gc/z/zAddress)
+
+inline bool ZAddress::is_null(uintptr_t value) {
+  return value == 0;
+}
+
+inline bool ZAddress::is_bad(uintptr_t value) {
+  return value & ZAddressBadMask;
+}
+
+inline bool ZAddress::is_good(uintptr_t value) {
+  return !is_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_good_or_null(uintptr_t value) {
+  // Checking if an address is "not bad" is an optimized version of
+  // checking if it's "good or null", which eliminates an explicit
+  // null check. However, the implicit null check only checks that
+  // the mask bits are zero, not that the entire address is zero.
+  // This means that an address without mask bits would pass through
+  // the barrier as if it were null. This should be harmless, as such
+  // addresses should never be passed through the barrier.
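+  //
+  // For example, with an illustrative 4-bit metadata mask 0b1111 and a
+  // good mask of 0b0100, the bad mask is 0b1011 (good ^ metadata): a
+  // pointer whose metadata bits are exactly 0b0100 is good, null passes
+  // the check trivially, and a pointer with no metadata bits set would
+  // (harmlessly) be treated as null.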
+  const bool result = !is_bad(value);
+  assert((is_good(value) || is_null(value)) == result, "Bad address");
+  return result;
+}
+
+inline bool ZAddress::is_weak_bad(uintptr_t value) {
+  return value & ZAddressWeakBadMask;
+}
+
+inline bool ZAddress::is_weak_good(uintptr_t value) {
+  return !is_weak_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_weak_good_or_null(uintptr_t value) {
+  return !is_weak_bad(value);
+}
+
+inline bool ZAddress::is_marked(uintptr_t value) {
+  return value & ZAddressMetadataMarked;
+}
+
+inline bool ZAddress::is_finalizable(uintptr_t value) {
+  return value & ZAddressMetadataFinalizable;
+}
+
+inline bool ZAddress::is_remapped(uintptr_t value) {
+  return value & ZAddressMetadataRemapped;
+}
+
+inline uintptr_t ZAddress::offset(uintptr_t value) {
+  return value & ZAddressOffsetMask;
+}
+
+inline uintptr_t ZAddress::good(uintptr_t value) {
+  return address(offset(value) | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
+  return is_null(value) ? 0 : good(value);
+}
+
+inline uintptr_t ZAddress::finalizable_good(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::marked(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked);
+}
+
+inline uintptr_t ZAddress::marked0(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked0);
+}
+
+inline uintptr_t ZAddress::marked1(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked1);
+}
+
+inline uintptr_t ZAddress::remapped(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataRemapped);
+}
+
+inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) {
+  return is_null(value) ? 0 : remapped(value);
+}
+
+#endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+
+#include "memory/allocation.hpp"
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator;
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMap {
+  friend class VMStructs;
+  friend class ZAddressRangeMapIterator<T, AddressRangeShift>;
+
+private:
+  T* const _map;
+
+  size_t index_for_addr(uintptr_t addr) const;
+  size_t size() const;
+
+public:
+  ZAddressRangeMap();
+  ~ZAddressRangeMap();
+
+  T get(uintptr_t addr) const;
+  void put(uintptr_t addr, T value);
+};
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator : public StackObj {
+private:
+  const ZAddressRangeMap<T, AddressRangeShift>* const _map;
+  size_t                                              _next;
+
+public:
+  ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map);
+
+  bool next(T* value);
+};
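+
+// Illustrative usage, matching the page table instantiation in
+// vmStructs_z.hpp (the map instance, addr and entry are assumptions
+// for the example):
+//
+//   ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> map;
+//   map.put(addr, entry);               // keyed by the address offset bits
+//   ZPageTableEntry e = map.get(addr);
+//
+//   ZAddressRangeMapIterator<ZPageTableEntry, ZPageSizeMinShift> iter(&map);
+//   for (ZPageTableEntry value; iter.next(&value);) {
+//     // Visits the entry for each address range, in address order
+//   }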
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "memory/allocation.inline.hpp"
+
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::ZAddressRangeMap() :
+    _map(MmapArrayAllocator<T>::allocate(size(), mtGC)) {}
+
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::~ZAddressRangeMap() {
+  MmapArrayAllocator<T>::free(_map, size());
+}
+
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::index_for_addr(uintptr_t addr) const {
+  assert(!ZAddress::is_null(addr), "Invalid address");
+
+  const size_t index = ZAddress::offset(addr) >> AddressRangeShift;
+  assert(index < size(), "Invalid index");
+
+  return index;
+}
+
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::size() const {
+  return ZAddressOffsetMax >> AddressRangeShift;
+}
+
+template <typename T, size_t AddressRangeShift>
+T ZAddressRangeMap<T, AddressRangeShift>::get(uintptr_t addr) const {
+  const size_t index = index_for_addr(addr);
+  return _map[index];
+}
+
+template <typename T, size_t AddressRangeShift>
+void ZAddressRangeMap<T, AddressRangeShift>::put(uintptr_t addr, T value) {
+  const size_t index = index_for_addr(addr);
+  _map[index] = value;
+}
+
+template <typename T, size_t AddressRangeShift>
+inline ZAddressRangeMapIterator<T, AddressRangeShift>::ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map) :
+    _map(map),
+    _next(0) {}
+
+template <typename T, size_t AddressRangeShift>
+inline bool ZAddressRangeMapIterator<T, AddressRangeShift>::next(T* value) {
+  if (_next < _map->size()) {
+    *value = _map->_map[_next++];
+    return true;
+  }
+
+  // End of map
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+#define SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+
+//
+// Allocation flags layout
+// -----------------------
+//
+//   7   4 3 2 1 0
+//  +---+-+-+-+-+-+
+//  |000|1|1|1|1|1|
+//  +---+-+-+-+-+-+
+//  |   | | | | |
+//  |   | | | | * 0-0 Java Thread Flag (1-bit)
+//  |   | | | |
+//  |   | | | * 1-1 Worker Thread Flag (1-bit)
+//  |   | | |
+//  |   | | * 2-2 Non-Blocking Flag (1-bit)
+//  |   | |
+//  |   | * 3-3 Relocation Flag (1-bit)
+//  |   |
+//  |   * 4-4 No Reserve Flag (1-bit)
+//  |
+//  * 7-5 Unused (3-bits)
+//
+
+class ZAllocationFlags {
+private:
+  typedef ZBitField<uint8_t, bool, 0, 1> field_java_thread;
+  typedef ZBitField<uint8_t, bool, 1, 1> field_worker_thread;
+  typedef ZBitField<uint8_t, bool, 2, 1> field_non_blocking;
+  typedef ZBitField<uint8_t, bool, 3, 1> field_relocation;
+  typedef ZBitField<uint8_t, bool, 4, 1> field_no_reserve;
+
+  uint8_t _flags;
+
+public:
+  ZAllocationFlags() :
+      _flags(0) {}
+
+  void set_java_thread() {
+    _flags |= field_java_thread::encode(true);
+  }
+
+  void set_worker_thread() {
+    _flags |= field_worker_thread::encode(true);
+  }
+
+  void set_non_blocking() {
+    _flags |= field_non_blocking::encode(true);
+  }
+
+  void set_relocation() {
+    _flags |= field_relocation::encode(true);
+  }
+
+  void set_no_reserve() {
+    _flags |= field_no_reserve::encode(true);
+  }
+
+  bool java_thread() const {
+    return field_java_thread::decode(_flags);
+  }
+
+  bool worker_thread() const {
+    return field_worker_thread::decode(_flags);
+  }
+
+  bool non_blocking() const {
+    return field_non_blocking::decode(_flags);
+  }
+
+  bool relocation() const {
+    return field_relocation::decode(_flags);
+  }
+
+  bool no_reserve() const {
+    return field_no_reserve::decode(_flags);
+  }
+};
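+
+// Illustrative usage (the call site is an assumption for the example;
+// flags are normally populated by the allocation paths):
+//
+//   ZAllocationFlags flags;
+//   flags.set_java_thread();
+//   flags.set_non_blocking();
+//   assert(flags.java_thread(), "Bit 0 should decode to true");
+//   assert(!flags.relocation(), "Unset bits should decode to false");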
+
+#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArguments.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArguments.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "gc/shared/gcArguments.inline.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+
+size_t ZArguments::conservative_max_heap_alignment() {
+  return 0;
+}
+
+void ZArguments::initialize() {
+  GCArguments::initialize();
+
+  // Enable NUMA by default
+  if (FLAG_IS_DEFAULT(UseNUMA)) {
+    FLAG_SET_DEFAULT(UseNUMA, true);
+  }
+
+  // Disable biased locking by default
+  if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
+    FLAG_SET_DEFAULT(UseBiasedLocking, false);
+  }
+
+  // Select number of parallel threads
+  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+    FLAG_SET_DEFAULT(ParallelGCThreads, ZWorkers::calculate_nparallel());
+  }
+
+  if (ParallelGCThreads == 0) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
+  }
+
+  // Select number of concurrent threads
+  if (FLAG_IS_DEFAULT(ConcGCThreads)) {
+    FLAG_SET_DEFAULT(ConcGCThreads, ZWorkers::calculate_nconcurrent());
+  }
+
+  if (ConcGCThreads == 0) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
+  }
+
+#ifdef COMPILER2
+  // Enable loop strip mining by default
+  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+    }
+  }
+#endif
+
+  // To avoid asserts in set_active_workers()
+  FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, true);
+
+  // CompressedOops/UseCompressedClassPointers not supported
+  FLAG_SET_DEFAULT(UseCompressedOops, false);
+  FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+
+  // ClassUnloading not (yet) supported
+  FLAG_SET_DEFAULT(ClassUnloading, false);
+  FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+
+  // Verification before startup and after exit not (yet) supported
+  FLAG_SET_DEFAULT(VerifyDuringStartup, false);
+  FLAG_SET_DEFAULT(VerifyBeforeExit, false);
+
+  // Verification of stacks not (yet) supported, for the same reason
+  // we need fixup_partial_loads
+  DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
+
+  // JVMCI not (yet) supported
+  if (EnableJVMCI) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:+EnableJVMCI");
+  }
+}
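+
+// For example, the constraints above allow tuning the thread counts as
+// long as both stay non-zero (the values here are illustrative):
+//
+//   java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ParallelGCThreads=8 -XX:ConcGCThreads=2 ...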
+
+CollectedHeap* ZArguments::create_heap() {
+  return create_heap_with_policy<ZCollectedHeap, ZCollectorPolicy>();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArguments.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARGUMENTS_HPP
+#define SHARE_GC_Z_ZARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CollectedHeap;
+
+class ZArguments : public GCArguments {
+public:
+  virtual void initialize();
+  virtual size_t conservative_max_heap_alignment();
+  virtual CollectedHeap* create_heap();
+};
+
+#endif // SHARE_GC_Z_ZARGUMENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArray.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_HPP
+#define SHARE_GC_Z_ZARRAY_HPP
+
+#include "memory/allocation.hpp"
+
+template <typename T>
+class ZArray {
+private:
+  static const size_t initial_capacity = 32;
+
+  T*     _array;
+  size_t _size;
+  size_t _capacity;
+
+  // Copy and assignment are not allowed
+  ZArray(const ZArray<T>& array);
+  ZArray<T>& operator=(const ZArray<T>& array);
+
+  void expand(size_t new_capacity);
+
+public:
+  ZArray();
+  ~ZArray();
+
+  size_t size() const;
+  bool is_empty() const;
+
+  T at(size_t index) const;
+
+  void add(T value);
+  void clear();
+};
+
+template <typename T, bool parallel>
+class ZArrayIteratorImpl : public StackObj {
+private:
+  ZArray<T>* const _array;
+  size_t           _next;
+
+public:
+  ZArrayIteratorImpl(ZArray<T>* array);
+
+  bool next(T* elem);
+};
+
+// Iterator types
+#define ZARRAY_SERIAL      false
+#define ZARRAY_PARALLEL    true
+
+template <typename T>
+class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
+public:
+  ZArrayIterator(ZArray<T>* array) :
+      ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
+};
+
+template <typename T>
+class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
+public:
+  ZArrayParallelIterator(ZArray<T>* array) :
+      ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
+};
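+
+// Illustrative usage (the element type and variables are assumptions for
+// the example):
+//
+//   ZArray<ZPage*> pages;
+//   pages.add(page);
+//
+//   // Serial traversal
+//   ZArrayIterator<ZPage*> iter(&pages);
+//   for (ZPage* p; iter.next(&p);) {
+//     // Visit p
+//   }
+//
+//   // Parallel traversal; a single iterator instance can be shared by
+//   // multiple GC worker threads, since next() claims slots atomically
+//   ZArrayParallelIterator<ZPage*> piter(&pages);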
+
+#endif // SHARE_GC_Z_ZARRAY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_INLINE_HPP
+#define SHARE_GC_Z_ZARRAY_INLINE_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
+
+template <typename T>
+inline ZArray<T>::ZArray() :
+    _array(NULL),
+    _size(0),
+    _capacity(0) {}
+
+template <typename T>
+inline ZArray<T>::~ZArray() {
+  if (_array != NULL) {
+    FREE_C_HEAP_ARRAY(T, _array);
+  }
+}
+
+template <typename T>
+inline size_t ZArray<T>::size() const {
+  return _size;
+}
+
+template <typename T>
+inline bool ZArray<T>::is_empty() const {
+  return size() == 0;
+}
+
+template <typename T>
+inline T ZArray<T>::at(size_t index) const {
+  assert(index < _size, "Index out of bounds");
+  return _array[index];
+}
+
+template <typename T>
+inline void ZArray<T>::expand(size_t new_capacity) {
+  T* new_array = NEW_C_HEAP_ARRAY(T, new_capacity, mtGC);
+  if (_array != NULL) {
+    memcpy(new_array, _array, sizeof(T) * _capacity);
+    FREE_C_HEAP_ARRAY(T, _array);
+  }
+
+  _array = new_array;
+  _capacity = new_capacity;
+}
+
+template <typename T>
+inline void ZArray<T>::add(T value) {
+  if (_size == _capacity) {
+    const size_t new_capacity = (_capacity > 0) ? _capacity * 2 : initial_capacity;
+    expand(new_capacity);
+  }
+
+  _array[_size++] = value;
+}
+
+template <typename T>
+inline void ZArray<T>::clear() {
+  _size = 0;
+}
+
+template <typename T, bool parallel>
+inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
+    _array(array),
+    _next(0) {}
+
+template <typename T, bool parallel>
+inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
+  if (parallel) {
+    const size_t next = Atomic::add(1u, &_next) - 1u;
+    if (next < _array->size()) {
+      *elem = _array->at(next);
+      return true;
+    }
+  } else {
+    if (_next < _array->size()) {
+      *elem = _array->at(_next++);
+      return true;
+    }
+  }
+
+  // No more elements
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZARRAY_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.cpp	Tue Jun 12 15:14:22 2018 -0700
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/debug.hpp"
+
+bool ZBarrier::during_mark() {
+  return ZGlobalPhase == ZPhaseMark;
+}
+
+bool ZBarrier::during_relocate() {
+  return ZGlobalPhase == ZPhaseRelocate;
+}
+
+template <bool finalizable>
+bool ZBarrier::should_mark_through(uintptr_t addr) {
+  // Finalizable marked oops can still exist on the heap after marking
+  // has completed, in which case we just want to convert this into a
+  // good oop and not push it on the mark stack.
+  if (!during_mark()) {
+    assert(ZAddress::is_marked(addr), "Should be marked");
+    assert(ZAddress::is_finalizable(addr), "Should be finalizable");
+    return false;
+  }
+
+  // During marking, we mark through already marked oops to avoid having
+  // some large part of the object graph hidden behind a pushed, but not
+  // yet flushed, entry on a mutator mark stack. Always marking through
+  // allows the GC workers to proceed through the object graph even if a
+  // mutator touched an oop first, which in turn will reduce the risk of
+  // having to flush mark stacks multiple times to terminate marking.
+  //
+  // However, when doing finalizable marking we don't always want to mark
+  // through. First, marking through an already strongly marked oop would
+  // be wasteful, since we will then proceed to do finalizable marking on
+  // an object which is, or will be, marked strongly. Second, marking
+  // through an already finalizable marked oop would also be wasteful,
+  // since such oops can never end up on a mutator mark stack and therefore
+  // cannot hide any part of the object graph from the GC workers.
+  if (finalizable) {
+    return !ZAddress::is_marked(addr);
+  }
+
+  // Mark through
+  return true;
+}
+
+template <bool finalizable, bool publish>
+uintptr_t ZBarrier::mark(uintptr_t addr) {
+  uintptr_t good_addr;
+
+  if (ZAddress::is_marked(addr)) {
+    // Already marked, but try to mark through anyway
+    good_addr = ZAddress::good(addr);
+  } else if (ZAddress::is_remapped(addr)) {
+    // Already remapped, but also needs to be marked
+    good_addr = ZAddress::good(addr);
+  } else {
+    // Needs to be both remapped and marked
+    good_addr = remap(addr);
+  }
+
+  // Mark
+  if (should_mark_through<finalizable>(addr)) {
+    ZHeap::heap()->mark_object<finalizable, publish>(good_addr);
+  }
+
+  return good_addr;
+}
+
+uintptr_t ZBarrier::remap(uintptr_t addr) {
+  assert(!ZAddress::is_good(addr), "Should not be good");
+  assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
+
+  if (ZHeap::heap()->is_relocating(addr)) {
+    // Forward
+    return ZHeap::heap()->forward_object(addr);
+  }
+
+  // Remap
+  return ZAddress::good(addr);
+}
+
+uintptr_t ZBarrier::relocate(uintptr_t addr) {
+  assert(!ZAddress::is_good(addr), "Should not be good");
+  assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
+
+  if (ZHeap::heap()->is_relocating(addr)) {
+    // Relocate
+    return ZHeap::heap()->relocate_object(addr);
+  }
+
+  // Remap
+  return ZAddress::good(addr);
+}
+
+uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) {
+  return during_relocate() ? relocate(addr) : mark<Strong, Publish>(addr);
+}
+
+uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
+  return during_relocate() ? relocate(addr) : remap(addr);
+}
+
+//
+// Load barrier
+//
+uintptr_t ZBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
+  return relocate_or_mark(addr);
+}
+
+void ZBarrier::load_barrier_on_oop_fields(oop o) {
+  assert(ZOop::is_good(o), "Should be good");
+  ZLoadBarrierOopClosure cl;
+  o->oop_iterate(&cl);
+}
+
+//
+// Weak load barrier
+//
+uintptr_t ZBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) {
+  return ZAddress::is_weak_good(addr) ? ZAddress::good(addr) : relocate_or_remap(addr);
+}
+
+uintptr_t ZBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  if (ZHeap::heap()->is_object_strongly_live(good_addr)) {
+    return good_addr;
+  }
+
+  // Not strongly live
+  return 0;
+}
+
+uintptr_t ZBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  if (ZHeap::heap()->is_object_live(good_addr)) {
+    return good_addr;
+  }
+
+  // Not live
+  return 0;
+}
+
+//
+// Keep alive barrier
+//
+uintptr_t ZBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  assert(ZHeap::heap()->is_object_strongly_live(good_addr), "Should be live");
+  return good_addr;
+}
+
+uintptr_t ZBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  assert(ZHeap::heap()->is_object_live(good_addr), "Should be live");
+  return good_addr;
+}
+
+//
+// Mark barrier
+//
+uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
+  return mark<Strong, Overflow>(addr);
+}