changeset 50873:60bcfec25032

ZGC: The Z Garbage Collector v92
author duke
date Fri, 08 Jun 2018 18:24:45 +0200
parents 22751e393c31
children 3f7d18a79e8e
files make/autoconf/hotspot.m4 make/conf/jib-profiles.js make/hotspot/lib/JvmFeatures.gmk src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp src/hotspot/cpu/x86/stubGenerator_x86_64.cpp src/hotspot/cpu/x86/x86.ad src/hotspot/cpu/x86/x86_64.ad src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp src/hotspot/share/adlc/formssel.cpp src/hotspot/share/classfile/vmSymbols.cpp src/hotspot/share/compiler/compilerDirectives.hpp src/hotspot/share/compiler/oopMap.cpp src/hotspot/share/gc/shared/barrierSetConfig.hpp src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp src/hotspot/share/gc/shared/collectedHeap.hpp src/hotspot/share/gc/shared/gcCause.cpp src/hotspot/share/gc/shared/gcCause.hpp src/hotspot/share/gc/shared/gcConfig.cpp src/hotspot/share/gc/shared/gcConfiguration.cpp src/hotspot/share/gc/shared/gcName.hpp src/hotspot/share/gc/shared/gcThreadLocalData.hpp src/hotspot/share/gc/shared/gc_globals.hpp src/hotspot/share/gc/shared/specialized_oop_closures.hpp src/hotspot/share/gc/shared/vmStructs_gc.hpp src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp src/hotspot/share/gc/z/vmStructs_z.cpp src/hotspot/share/gc/z/vmStructs_z.hpp src/hotspot/share/gc/z/zAddress.cpp src/hotspot/share/gc/z/zAddress.hpp src/hotspot/share/gc/z/zAddress.inline.hpp src/hotspot/share/gc/z/zAddressRangeMap.hpp src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp src/hotspot/share/gc/z/zAllocationFlags.hpp src/hotspot/share/gc/z/zArguments.cpp src/hotspot/share/gc/z/zArguments.hpp src/hotspot/share/gc/z/zArray.hpp src/hotspot/share/gc/z/zArray.inline.hpp src/hotspot/share/gc/z/zBarrier.cpp src/hotspot/share/gc/z/zBarrier.hpp src/hotspot/share/gc/z/zBarrier.inline.hpp src/hotspot/share/gc/z/zBarrierSet.cpp src/hotspot/share/gc/z/zBarrierSet.hpp src/hotspot/share/gc/z/zBarrierSet.inline.hpp src/hotspot/share/gc/z/zBarrierSetAssembler.cpp src/hotspot/share/gc/z/zBarrierSetAssembler.hpp src/hotspot/share/gc/z/zBarrierSetRuntime.cpp src/hotspot/share/gc/z/zBarrierSetRuntime.hpp src/hotspot/share/gc/z/zBitField.hpp src/hotspot/share/gc/z/zBitMap.hpp src/hotspot/share/gc/z/zBitMap.inline.hpp src/hotspot/share/gc/z/zCPU.cpp src/hotspot/share/gc/z/zCPU.hpp src/hotspot/share/gc/z/zCollectedHeap.cpp src/hotspot/share/gc/z/zCollectedHeap.hpp src/hotspot/share/gc/z/zCollectorPolicy.cpp src/hotspot/share/gc/z/zCollectorPolicy.hpp src/hotspot/share/gc/z/zDebug.gdb src/hotspot/share/gc/z/zDirector.cpp src/hotspot/share/gc/z/zDirector.hpp src/hotspot/share/gc/z/zDriver.cpp src/hotspot/share/gc/z/zDriver.hpp src/hotspot/share/gc/z/zErrno.cpp src/hotspot/share/gc/z/zErrno.hpp src/hotspot/share/gc/z/zForwardingTable.cpp 
src/hotspot/share/gc/z/zForwardingTable.hpp src/hotspot/share/gc/z/zForwardingTable.inline.hpp src/hotspot/share/gc/z/zForwardingTableEntry.hpp src/hotspot/share/gc/z/zFuture.hpp src/hotspot/share/gc/z/zFuture.inline.hpp src/hotspot/share/gc/z/zGlobals.cpp src/hotspot/share/gc/z/zGlobals.hpp src/hotspot/share/gc/z/zHash.hpp src/hotspot/share/gc/z/zHash.inline.hpp src/hotspot/share/gc/z/zHeap.cpp src/hotspot/share/gc/z/zHeap.hpp src/hotspot/share/gc/z/zHeap.inline.hpp src/hotspot/share/gc/z/zHeapIterator.cpp src/hotspot/share/gc/z/zHeapIterator.hpp src/hotspot/share/gc/z/zInitialize.cpp src/hotspot/share/gc/z/zInitialize.hpp src/hotspot/share/gc/z/zLargePages.cpp src/hotspot/share/gc/z/zLargePages.hpp src/hotspot/share/gc/z/zLargePages.inline.hpp src/hotspot/share/gc/z/zList.hpp src/hotspot/share/gc/z/zList.inline.hpp src/hotspot/share/gc/z/zLiveMap.cpp src/hotspot/share/gc/z/zLiveMap.hpp src/hotspot/share/gc/z/zLiveMap.inline.hpp src/hotspot/share/gc/z/zLock.hpp src/hotspot/share/gc/z/zLock.inline.hpp src/hotspot/share/gc/z/zMark.cpp src/hotspot/share/gc/z/zMark.hpp src/hotspot/share/gc/z/zMark.inline.hpp src/hotspot/share/gc/z/zMarkCache.cpp src/hotspot/share/gc/z/zMarkCache.hpp src/hotspot/share/gc/z/zMarkCache.inline.hpp src/hotspot/share/gc/z/zMarkStack.cpp src/hotspot/share/gc/z/zMarkStack.hpp src/hotspot/share/gc/z/zMarkStack.inline.hpp src/hotspot/share/gc/z/zMarkStackEntry.hpp src/hotspot/share/gc/z/zMarkTerminate.hpp src/hotspot/share/gc/z/zMarkTerminate.inline.hpp src/hotspot/share/gc/z/zMemory.cpp src/hotspot/share/gc/z/zMemory.hpp src/hotspot/share/gc/z/zMemory.inline.hpp src/hotspot/share/gc/z/zMessagePort.hpp src/hotspot/share/gc/z/zMessagePort.inline.hpp src/hotspot/share/gc/z/zMetronome.cpp src/hotspot/share/gc/z/zMetronome.hpp src/hotspot/share/gc/z/zNMethodTable.cpp src/hotspot/share/gc/z/zNMethodTable.hpp src/hotspot/share/gc/z/zNMethodTableEntry.hpp src/hotspot/share/gc/z/zNUMA.cpp src/hotspot/share/gc/z/zNUMA.hpp src/hotspot/share/gc/z/zObjectAllocator.cpp src/hotspot/share/gc/z/zObjectAllocator.hpp src/hotspot/share/gc/z/zOop.hpp src/hotspot/share/gc/z/zOop.inline.hpp src/hotspot/share/gc/z/zOopClosures.cpp src/hotspot/share/gc/z/zOopClosures.hpp src/hotspot/share/gc/z/zOopClosures.inline.hpp src/hotspot/share/gc/z/zPage.cpp src/hotspot/share/gc/z/zPage.hpp src/hotspot/share/gc/z/zPage.inline.hpp src/hotspot/share/gc/z/zPageAllocator.cpp src/hotspot/share/gc/z/zPageAllocator.hpp src/hotspot/share/gc/z/zPageCache.cpp src/hotspot/share/gc/z/zPageCache.hpp src/hotspot/share/gc/z/zPageCache.inline.hpp src/hotspot/share/gc/z/zPageTable.cpp src/hotspot/share/gc/z/zPageTable.hpp src/hotspot/share/gc/z/zPageTable.inline.hpp src/hotspot/share/gc/z/zPageTableEntry.hpp src/hotspot/share/gc/z/zPhysicalMemory.cpp src/hotspot/share/gc/z/zPhysicalMemory.hpp src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp src/hotspot/share/gc/z/zPreMappedMemory.cpp src/hotspot/share/gc/z/zPreMappedMemory.hpp src/hotspot/share/gc/z/zPreMappedMemory.inline.hpp src/hotspot/share/gc/z/zReferenceProcessor.cpp src/hotspot/share/gc/z/zReferenceProcessor.hpp src/hotspot/share/gc/z/zRelocate.cpp src/hotspot/share/gc/z/zRelocate.hpp src/hotspot/share/gc/z/zRelocationSet.cpp src/hotspot/share/gc/z/zRelocationSet.hpp src/hotspot/share/gc/z/zRelocationSet.inline.hpp src/hotspot/share/gc/z/zRelocationSetSelector.cpp src/hotspot/share/gc/z/zRelocationSetSelector.hpp src/hotspot/share/gc/z/zResurrection.cpp src/hotspot/share/gc/z/zResurrection.hpp src/hotspot/share/gc/z/zResurrection.inline.hpp 
src/hotspot/share/gc/z/zRootsIterator.cpp src/hotspot/share/gc/z/zRootsIterator.hpp src/hotspot/share/gc/z/zRuntimeWorkers.cpp src/hotspot/share/gc/z/zRuntimeWorkers.hpp src/hotspot/share/gc/z/zServiceability.cpp src/hotspot/share/gc/z/zServiceability.hpp src/hotspot/share/gc/z/zStat.cpp src/hotspot/share/gc/z/zStat.hpp src/hotspot/share/gc/z/zTask.cpp src/hotspot/share/gc/z/zTask.hpp src/hotspot/share/gc/z/zThread.cpp src/hotspot/share/gc/z/zThread.hpp src/hotspot/share/gc/z/zThreadLocalData.hpp src/hotspot/share/gc/z/zTracer.cpp src/hotspot/share/gc/z/zTracer.hpp src/hotspot/share/gc/z/zTracer.inline.hpp src/hotspot/share/gc/z/zUtils.cpp src/hotspot/share/gc/z/zUtils.hpp src/hotspot/share/gc/z/zUtils.inline.hpp src/hotspot/share/gc/z/zValue.hpp src/hotspot/share/gc/z/zVirtualMemory.cpp src/hotspot/share/gc/z/zVirtualMemory.hpp src/hotspot/share/gc/z/zVirtualMemory.inline.hpp src/hotspot/share/gc/z/zWeakRootsProcessor.cpp src/hotspot/share/gc/z/zWeakRootsProcessor.hpp src/hotspot/share/gc/z/zWorkers.cpp src/hotspot/share/gc/z/zWorkers.hpp src/hotspot/share/gc/z/zWorkers.inline.hpp src/hotspot/share/gc/z/z_globals.hpp src/hotspot/share/gc/z/z_specialized_oop_closures.hpp src/hotspot/share/jfr/metadata/metadata.xml src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp src/hotspot/share/logging/logPrefix.hpp src/hotspot/share/logging/logTag.hpp src/hotspot/share/memory/metaspace.hpp src/hotspot/share/opto/classes.cpp src/hotspot/share/opto/classes.hpp src/hotspot/share/opto/compile.cpp src/hotspot/share/opto/compile.hpp src/hotspot/share/opto/escape.cpp src/hotspot/share/opto/idealKit.cpp src/hotspot/share/opto/idealKit.hpp src/hotspot/share/opto/lcm.cpp src/hotspot/share/opto/loopnode.cpp src/hotspot/share/opto/loopnode.hpp src/hotspot/share/opto/loopopts.cpp src/hotspot/share/opto/macro.cpp src/hotspot/share/opto/matcher.cpp src/hotspot/share/opto/memnode.cpp src/hotspot/share/opto/node.cpp src/hotspot/share/opto/node.hpp src/hotspot/share/opto/opcodes.cpp src/hotspot/share/opto/opcodes.hpp src/hotspot/share/opto/phasetype.hpp src/hotspot/share/opto/vectornode.cpp src/hotspot/share/prims/jvmtiTagMap.cpp src/hotspot/share/prims/whitebox.cpp src/hotspot/share/runtime/jniHandles.cpp src/hotspot/share/runtime/stackValue.cpp src/hotspot/share/runtime/vmStructs.cpp src/hotspot/share/runtime/vm_operations.hpp src/hotspot/share/utilities/macros.hpp src/java.base/share/legal/c-libutl.md src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddress.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddressRangeMapForPageTable.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZBarrier.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZCollectedHeap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTable.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTableCursor.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTableEntry.java 
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobalsForVMStructs.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZHash.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZHeap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZOop.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPage.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageAllocator.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageTable.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageTableEntry.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPhysicalMemoryManager.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZVirtualMemory.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/OopField.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java src/jdk.jfr/share/conf/jfr/default.jfc src/jdk.jfr/share/conf/jfr/profile.jfc test/hotspot/gtest/gc/z/test_zAddress.cpp test/hotspot/gtest/gc/z/test_zArray.cpp test/hotspot/gtest/gc/z/test_zBitField.cpp test/hotspot/gtest/gc/z/test_zBitMap.cpp test/hotspot/gtest/gc/z/test_zForwardingTable.cpp test/hotspot/gtest/gc/z/test_zList.cpp test/hotspot/gtest/gc/z/test_zLiveMap.cpp test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp test/hotspot/gtest/gc/z/test_zUtils.cpp test/hotspot/gtest/gc/z/test_zVirtualMemory.cpp
diffstat 272 files changed, 26582 insertions(+), 71 deletions(-)
--- a/make/autoconf/hotspot.m4	Fri Jun 08 18:24:45 2018 +0200
+++ b/make/autoconf/hotspot.m4	Fri Jun 08 18:24:45 2018 +0200
@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc nmt cds \
+    graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
@@ -328,6 +328,19 @@
     fi
   fi
 
+  # Only enable ZGC on Linux x86_64
+  AC_MSG_CHECKING([if zgc should be built])
+  if HOTSPOT_CHECK_JVM_FEATURE(zgc); then
+    if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
+      AC_MSG_RESULT([yes])
+    else
+      DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
+      AC_MSG_RESULT([no, platform not supported])
+    fi
+  else
+    AC_MSG_RESULT([no])
+  fi
+
   # Turn on additional features based on other parts of configure
   if test "x$INCLUDE_DTRACE" = "xtrue"; then
     JVM_FEATURES="$JVM_FEATURES dtrace"
--- a/make/conf/jib-profiles.js	Fri Jun 08 18:24:45 2018 +0200
+++ b/make/conf/jib-profiles.js	Fri Jun 08 18:24:45 2018 +0200
@@ -715,6 +715,14 @@
                        profiles[openName].artifacts["jdk"].remote));
     });
 
+    // Enable ZGC in linux-x64-open builds
+    [ "linux-x64-open" ].forEach(function (name) {
+        var configureArgs = { configure_args: [ "--with-jvm-features=zgc" ] };
+        var debugName = name + common.debug_suffix;
+        profiles[name] = concatObjects(profiles[name], configureArgs);
+        profiles[debugName] = concatObjects(profiles[debugName], configureArgs);
+    });
+
     // Profiles used to run tests. Used in JPRT and Mach 5.
     var testOnlyProfiles = {
         "run-test-jprt": {
--- a/make/hotspot/lib/JvmFeatures.gmk	Fri Jun 08 18:24:45 2018 +0200
+++ b/make/hotspot/lib/JvmFeatures.gmk	Fri Jun 08 18:24:45 2018 +0200
@@ -155,6 +155,11 @@
   JVM_EXCLUDE_FILES += psMarkSweep.cpp psMarkSweepDecorator.cpp
 endif
 
+ifneq ($(call check-jvm-feature, zgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
+  JVM_EXCLUDE_PATTERNS += gc/z
+endif
+
 ifneq ($(call check-jvm-feature, jfr), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
   JVM_EXCLUDE_PATTERNS += jfr
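
Taken together, the configure check and this makefile stanza give the C++ sources a single compile-time switch: when the zgc feature is disabled, INCLUDE_ZGC is defined to 0 and everything under gc/z is excluded from the build. A minimal sketch of the resulting guard pattern, as used throughout the rest of this changeset (platform_setup is a placeholder function, not HotSpot code):

    #include "utilities/macros.hpp"        // supplies INCLUDE_ZGC (see the macros.hpp change in this changeset)

    #if INCLUDE_ZGC
    #include "gc/z/zThreadLocalData.hpp"   // ZGC-only headers are safe to include here
    #endif

    void platform_setup() {
    #if INCLUDE_ZGC
      if (UseZGC) {
        // ZGC-only logic; compiled out entirely when the feature is off
      }
    #endif
    }
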
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -1346,7 +1346,11 @@
       __ decode_heap_oop(dest->as_register());
     }
 #endif
-    __ verify_oop(dest->as_register());
+
+    // Load barrier has not yet been applied, so ZGC can't verify the oop here
+    if (!UseZGC) {
+      __ verify_oop(dest->as_register());
+    }
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
     if (UseCompressedClassPointers) {
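
The exception exists because, with ZGC, a freshly loaded reference may still carry metadata ("color") bits that the current GC phase considers bad; it only becomes a valid oop once the load barrier has healed it. Conceptually, the test that decides between the fast and slow path is the following (an illustrative sketch, not HotSpot code):

    #include <cstdint>

    // True if the loaded reference still needs the slow-path barrier, i.e. its
    // metadata bits overlap the thread-local "address bad mask". This is the
    // testptr + jcc sequence that ZBarrierSetAssembler::load_at emits below.
    inline bool needs_healing(uintptr_t loaded_ref, uintptr_t address_bad_mask) {
      return (loaded_ref & address_bad_mask) != 0;
    }
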
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#endif // COMPILER1
+
+#undef __
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+static void call_vm(MacroAssembler* masm,
+                    address entry_point,
+                    Register arg0,
+                    Register arg1) {
+  // Setup arguments
+  if (arg1 == c_rarg0) {
+    if (arg0 == c_rarg1) {
+      __ xchgptr(c_rarg1, c_rarg0);
+    } else {
+      __ movptr(c_rarg1, arg1);
+      __ movptr(c_rarg0, arg0);
+    }
+  } else {
+    if (arg0 != c_rarg0) {
+      __ movptr(c_rarg0, arg0);
+    }
+    if (arg1 != c_rarg1) {
+      __ movptr(c_rarg1, arg1);
+    }
+  }
+
+  // Call VM
+  __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
+}
+
+void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
+                                   DecoratorSet decorators,
+                                   BasicType type,
+                                   Register dst,
+                                   Address src,
+                                   Register tmp1,
+                                   Register tmp_thread) {
+  if (!ZBarrierSet::barrier_needed(decorators, type)) {
+    // Barrier not needed
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+    return;
+  }
+
+  BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");
+
+  // Allocate scratch register
+  Register scratch = tmp1;
+  if (tmp1 == noreg) {
+    scratch = r12;
+    __ push(scratch);
+  }
+
+  assert_different_registers(dst, scratch);
+
+  Label done;
+
+  //
+  // Fast Path
+  //
+
+  // Load address
+  __ lea(scratch, src);
+
+  // Load oop at address
+  __ movptr(dst, Address(scratch, 0));
+
+  // Test address bad mask
+  __ testptr(dst, address_bad_mask_from_thread(r15_thread));
+  __ jcc(Assembler::zero, done);
+
+  //
+  // Slow path
+  //
+
+  // Save registers
+  __ push(rax);
+  __ push(rcx);
+  __ push(rdx);
+  __ push(rdi);
+  __ push(rsi);
+  __ push(r8);
+  __ push(r9);
+  __ push(r10);
+  __ push(r11);
+
+  // We may end up here from generate_native_wrapper, in which case the method
+  // may have floats as arguments, and we must spill them before calling the VM
+  // runtime leaf. From the interpreter all floats are passed on the stack.
+  assert(Argument::n_float_register_parameters_j == 8, "Assumption");
+  const int xmm_size = wordSize * 2;
+  const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
+  __ subptr(rsp, xmm_spill_size);
+  __ movdqu(Address(rsp, xmm_size * 7), xmm7);
+  __ movdqu(Address(rsp, xmm_size * 6), xmm6);
+  __ movdqu(Address(rsp, xmm_size * 5), xmm5);
+  __ movdqu(Address(rsp, xmm_size * 4), xmm4);
+  __ movdqu(Address(rsp, xmm_size * 3), xmm3);
+  __ movdqu(Address(rsp, xmm_size * 2), xmm2);
+  __ movdqu(Address(rsp, xmm_size * 1), xmm1);
+  __ movdqu(Address(rsp, xmm_size * 0), xmm0);
+
+  // Call VM
+  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
+
+  // Restore registers
+  __ movdqu(xmm0, Address(rsp, xmm_size * 0));
+  __ movdqu(xmm1, Address(rsp, xmm_size * 1));
+  __ movdqu(xmm2, Address(rsp, xmm_size * 2));
+  __ movdqu(xmm3, Address(rsp, xmm_size * 3));
+  __ movdqu(xmm4, Address(rsp, xmm_size * 4));
+  __ movdqu(xmm5, Address(rsp, xmm_size * 5));
+  __ movdqu(xmm6, Address(rsp, xmm_size * 6));
+  __ movdqu(xmm7, Address(rsp, xmm_size * 7));
+  __ addptr(rsp, xmm_spill_size);
+
+  __ pop(r11);
+  __ pop(r10);
+  __ pop(r9);
+  __ pop(r8);
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+
+  if (dst == rax) {
+    __ addptr(rsp, wordSize);
+  } else {
+    __ movptr(dst, rax);
+    __ pop(rax);
+  }
+
+  __ bind(done);
+
+  // Restore scratch register
+  if (tmp1 == noreg) {
+    __ pop(scratch);
+  }
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
+}
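
Putting the pieces together, the emitted fast/slow path corresponds to roughly the following sketch (assuming, per the call above, a runtime leaf equivalent to ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded that takes the stale reference plus the field address, heals the field, and returns the good reference; zoop is a placeholder type):

    #include <cstdint>

    typedef uintptr_t zoop;  // illustrative stand-in for an oop

    zoop load_barrier_on_oop_field_preloaded(zoop o, zoop* field);  // VM runtime leaf

    zoop load_at(zoop* field, uintptr_t address_bad_mask) {
      zoop o = *field;                     // fast path: a plain load
      if ((o & address_bad_mask) == 0) {
        return o;                          // reference is already good
      }
      // Slow path: the generated code spills the caller-saved GP registers and
      // xmm0-xmm7 (the Java float argument registers) around this call.
      return load_barrier_on_oop_field_preloaded(o, field);
    }
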
+
+#ifdef ASSERT
+
+void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
+                                    DecoratorSet decorators,
+                                    BasicType type,
+                                    Address dst,
+                                    Register src,
+                                    Register tmp1,
+                                    Register tmp2) {
+  BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");
+
+  // Verify oop store
+  if (type == T_OBJECT || type == T_ARRAY) {
+    // Note that src could be noreg, which means we
+    // are storing null and can skip verification.
+    if (src != noreg) {
+      Label done;
+      __ testptr(src, address_bad_mask_from_thread(r15_thread));
+      __ jcc(Assembler::zero, done);
+      __ stop("Verify oop store failed");
+      __ should_not_reach_here();
+      __ bind(done);
+    }
+  }
+
+  // Store value
+  BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
+}
+
+#endif // ASSERT
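
Note that this store_at override exists only in ASSERT builds: the ZGC introduced here is a load-barrier collector and needs no store barrier for correctness, so the debug version merely verifies that the value being stored has already been healed. In sketch form (illustrative, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    inline void verify_oop_store(uintptr_t value, uintptr_t address_bad_mask) {
      // value == 0 corresponds to src == noreg above: storing null needs no check
      assert(value == 0 || (value & address_bad_mask) == 0,
             "Verify oop store failed");
    }
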
+
+void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
+                                              DecoratorSet decorators,
+                                              BasicType type,
+                                              Register src,
+                                              Register dst,
+                                              Register count) {
+  if (!ZBarrierSet::barrier_needed(decorators, type)) {
+    // Barrier not needed
+    return;
+  }
+
+  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");
+
+  // Save registers
+  __ pusha();
+
+  // Call VM
+  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
+
+  // Restore registers
+  __ popa();
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
+}
+
+void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
+                                                         Register jni_env,
+                                                         Register obj,
+                                                         Register tmp,
+                                                         Label& slowpath) {
+  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
+
+  // Resolve jobject
+  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
+
+  // Test address bad mask
+  __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
+  __ jcc(Assembler::notZero, slowpath);
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
+}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
+                                                         LIR_Opr ref) const {
+  __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
+}
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
+                                                         ZLoadBarrierStubC1* stub) const {
+  // Stub entry
+  __ bind(*stub->entry());
+
+  Register ref = stub->ref()->as_register();
+  Register ref_addr = noreg;
+
+  if (stub->ref_addr()->is_register()) {
+    // Address already in register
+    ref_addr = stub->ref_addr()->as_pointer_register();
+  } else {
+    // Load address into tmp register
+    ce->leal(stub->ref_addr(), stub->tmp(), stub->patch_code(), stub->patch_info());
+    ref_addr = stub->tmp()->as_pointer_register();
+  }
+
+  assert_different_registers(ref, ref_addr, noreg);
+
+  // Save rax unless it is the result register
+  if (ref != rax) {
+    __ push(rax);
+  }
+
+  // Setup arguments and call runtime stub
+  __ subptr(rsp, 2 * BytesPerWord);
+  ce->store_parameter(ref_addr, 1);
+  ce->store_parameter(ref, 0);
+  __ call(RuntimeAddress(stub->runtime_stub()));
+  __ addptr(rsp, 2 * BytesPerWord);
+
+  // Verify result
+  __ verify_oop(rax, "Bad oop");
+
+  // Restore rax unless it is the result register
+  if (ref != rax) {
+    __ movptr(ref, rax);
+    __ pop(rax);
+  }
+
+  // Stub exit
+  __ jmp(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+                                                                 DecoratorSet decorators) const {
+  // Enter and save registers
+  __ enter();
+  __ save_live_registers_no_oop_map(true /* save_fpu_registers */);
+
+  // Setup arguments
+  __ load_parameter(1, c_rarg1);
+  __ load_parameter(0, c_rarg0);
+
+  // Call VM
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+  // Restore registers and return
+  __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
+  __ leave();
+  __ ret(0);
+}
+
+#endif // COMPILER1
+
+#undef __
+#define __ cgen->assembler()->
+
+// Generates a register-specific stub for calling
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+//
+// The raddr register serves as both input and output for this stub. When the stub is
+// called the raddr register contains the object field address (oop*) where the bad oop
+// was loaded from, which caused the slow path to be taken. On return from the stub the
+// raddr register contains the good/healed oop returned from
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
+  // Don't generate stub for invalid registers
+  if (raddr == rsp || raddr == r12 || raddr == r15) {
+    return NULL;
+  }
+
+  // Create stub name
+  char name[64];
+  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  os::snprintf(name, sizeof(name), "load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+
+  __ align(CodeEntryAlignment);
+  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
+  address start = __ pc();
+
+  // Save live registers
+  if (raddr != rax) {
+    __ push(rax);
+  }
+  if (raddr != rcx) {
+    __ push(rcx);
+  }
+  if (raddr != rdx) {
+    __ push(rdx);
+  }
+  if (raddr != rsi) {
+    __ push(rsi);
+  }
+  if (raddr != rdi) {
+    __ push(rdi);
+  }
+  if (raddr != r8) {
+    __ push(r8);
+  }
+  if (raddr != r9) {
+    __ push(r9);
+  }
+  if (raddr != r10) {
+    __ push(r10);
+  }
+  if (raddr != r11) {
+    __ push(r11);
+  }
+
+  // Setup arguments
+  if (c_rarg1 != raddr) {
+    __ movq(c_rarg1, raddr);
+  }
+  __ movq(c_rarg0, Address(raddr, 0));
+
+  // Call barrier function
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+  // Move result returned in rax to raddr, if needed
+  if (raddr != rax) {
+    __ movq(raddr, rax);
+  }
+
+  // Restore saved registers
+  if (raddr != r11) {
+    __ pop(r11);
+  }
+  if (raddr != r10) {
+    __ pop(r10);
+  }
+  if (raddr != r9) {
+    __ pop(r9);
+  }
+  if (raddr != r8) {
+    __ pop(r8);
+  }
+  if (raddr != rdi) {
+    __ pop(rdi);
+  }
+  if (raddr != rsi) {
+    __ pop(rsi);
+  }
+  if (raddr != rdx) {
+    __ pop(rdx);
+  }
+  if (raddr != rcx) {
+    __ pop(rcx);
+  }
+  if (raddr != rax) {
+    __ pop(rax);
+  }
+
+  __ ret(0);
+
+  return start;
+}
+
+#undef __
+
+void ZBarrierSetAssembler::barrier_stubs_init() {
+  // Load barrier stubs
+  int stub_code_size = 256 * 16; // Rough estimate of code size
+
+  ResourceMark rm;
+  BufferBlob* bb = BufferBlob::create("zgc_load_barrier_stubs", stub_code_size);
+  CodeBuffer buf(bb);
+  StubCodeGenerator cgen(&buf);
+
+  Register rr = as_Register(0);
+  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
+    _load_barrier_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_STRONG_OOP_REF);
+    _load_barrier_weak_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_WEAK_OOP_REF);
+    rr = rr->successor();
+  }
+}
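
The per-register design exists because compiled code reaches a slow-path stub with the field address in an arbitrary general-purpose register, and each stub hard-codes which register that is and which others must be saved. rsp, r12 and r15 are skipped because they are reserved in HotSpot's x86_64 register usage (stack pointer, heap base, current thread). A self-contained sketch of the lookup that callers such as the x86_64.ad encodings below perform (all names here are stand-ins, not HotSpot types):

    #include <cstdint>

    typedef unsigned char* address;

    struct BarrierStubs {                  // stand-in for ZBarrierSetAssembler's tables
      address strong[16];                  // _load_barrier_slow_stub
      address weak[16];                    // _load_barrier_weak_slow_stub

      address stub_for(int reg_encoding, bool is_weak) const {
        return is_weak ? weak[reg_encoding] : strong[reg_encoding];
      }
    };
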
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+
+#ifdef COMPILER1
+class LIR_Assembler;
+class LIR_OprDesc;
+typedef LIR_OprDesc* LIR_Opr;
+class StubAssembler;
+class ZLoadBarrierStubC1;
+#endif // COMPILER1
+
+class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
+  address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
+  address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
+
+public:
+  ZBarrierSetAssembler() :
+    _load_barrier_slow_stub(),
+    _load_barrier_weak_slow_stub() {}
+
+  address load_barrier_slow_stub(Register reg) { return _load_barrier_slow_stub[reg->encoding()]; }
+  address load_barrier_weak_slow_stub(Register reg) { return _load_barrier_weak_slow_stub[reg->encoding()]; }
+
+  virtual void load_at(MacroAssembler* masm,
+                       DecoratorSet decorators,
+                       BasicType type,
+                       Register dst,
+                       Address src,
+                       Register tmp1,
+                       Register tmp_thread);
+
+#ifdef ASSERT
+  virtual void store_at(MacroAssembler* masm,
+                        DecoratorSet decorators,
+                        BasicType type,
+                        Address dst,
+                        Register src,
+                        Register tmp1,
+                        Register tmp2);
+#endif // ASSERT
+
+  virtual void arraycopy_prologue(MacroAssembler* masm,
+                                  DecoratorSet decorators,
+                                  BasicType type,
+                                  Register src,
+                                  Register dst,
+                                  Register count);
+
+  virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
+                                             Register jni_env,
+                                             Register obj,
+                                             Register tmp,
+                                             Label& slowpath);
+
+#ifdef COMPILER1
+  void generate_c1_load_barrier_test(LIR_Assembler* ce,
+                                     LIR_Opr ref) const;
+
+  void generate_c1_load_barrier_stub(LIR_Assembler* ce,
+                                     ZLoadBarrierStubC1* stub) const;
+
+  void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+                                             DecoratorSet decorators) const;
+#endif // COMPILER1
+
+  virtual void barrier_stubs_init();
+};
+
+#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -44,6 +44,9 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zThreadLocalData.hpp"
+#endif
 
 // Declaration and definition of StubGenerator (no .hpp file).
 // For a more detailed description of the stub routine structure
@@ -1026,6 +1029,15 @@
     // make sure object is 'reasonable'
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
+
+#if INCLUDE_ZGC
+    if (UseZGC) {
+      // Check if metadata bits indicate a bad oop
+      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+      __ jcc(Assembler::notZero, error);
+    }
+#endif
+
     // Check if the oop is in the right area of memory
     __ movptr(c_rarg2, rax);
     __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
--- a/src/hotspot/cpu/x86/x86.ad	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/cpu/x86/x86.ad	Fri Jun 08 18:24:45 2018 +0200
@@ -1067,6 +1067,138 @@
 #endif
                       );
 
+reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
+reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
+reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
+
+reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
+reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
+reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
+
+reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
+reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
+reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
+
+reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
+reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
+reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
+
+reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
+reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
+reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
+
+reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
+reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
+reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
+
+reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
+reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
+reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
+
+reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
+reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
+reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
+
+#ifdef _LP64
+
+reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
+reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
+reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
+
+reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
+reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
+reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
+
+reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
+reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
+reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
+
+reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
+reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
+reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
+
+reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
+reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
+reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
+
+reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
+reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
+reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
+
+reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
+reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
+reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
+
+reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
+reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
+reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
+
+reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
+reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
+reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
+
+reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
+reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
+reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
+
+reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
+reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
+reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
+
+reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
+reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
+reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
+
+reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
+reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
+reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
+
+reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
+reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
+reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
+
+reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
+reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
+reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
+
+reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
+reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
+reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
+
+reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
+reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
+reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
+
+reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
+reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
+reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
+
+reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
+reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
+reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
+
+reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
+reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
+reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
+
+reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
+reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
+reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
+
+reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
+reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
+reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
+
+reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
+reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
+reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
+
+reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
+reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
+reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
+
+#endif
+
 %}
 
 
--- a/src/hotspot/cpu/x86/x86_64.ad	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/cpu/x86/x86_64.ad	Fri Jun 08 18:24:45 2018 +0200
@@ -538,6 +538,12 @@
 
 %}
 
+source_hpp %{
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSetAssembler.hpp"
+#endif
+%}
+
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
@@ -4221,6 +4227,135 @@
   %}
 %}
 
+// Operands for bound floating-point register arguments
+operand rxmm0() %{
+  constraint(ALLOC_IN_RC(xmm0_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm1() %{
+  constraint(ALLOC_IN_RC(xmm1_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm2() %{
+  constraint(ALLOC_IN_RC(xmm2_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm3() %{
+  constraint(ALLOC_IN_RC(xmm3_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm4() %{
+  constraint(ALLOC_IN_RC(xmm4_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm5() %{
+  constraint(ALLOC_IN_RC(xmm5_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm6() %{
+  constraint(ALLOC_IN_RC(xmm6_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm7() %{
+  constraint(ALLOC_IN_RC(xmm7_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm8() %{
+  constraint(ALLOC_IN_RC(xmm8_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm9() %{
+  constraint(ALLOC_IN_RC(xmm9_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm10() %{
+  constraint(ALLOC_IN_RC(xmm10_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm11() %{
+  constraint(ALLOC_IN_RC(xmm11_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm12() %{
+  constraint(ALLOC_IN_RC(xmm12_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm13() %{
+  constraint(ALLOC_IN_RC(xmm13_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm14() %{
+  constraint(ALLOC_IN_RC(xmm14_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm15() %{
+  constraint(ALLOC_IN_RC(xmm15_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm16() %{
+  constraint(ALLOC_IN_RC(xmm16_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm17() %{
+  constraint(ALLOC_IN_RC(xmm17_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm18() %{
+  constraint(ALLOC_IN_RC(xmm18_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm19() %{
+  constraint(ALLOC_IN_RC(xmm19_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm20() %{
+  constraint(ALLOC_IN_RC(xmm20_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm21() %{
+  constraint(ALLOC_IN_RC(xmm21_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm22() %{
+  constraint(ALLOC_IN_RC(xmm22_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm23() %{
+  constraint(ALLOC_IN_RC(xmm23_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm24() %{
+  constraint(ALLOC_IN_RC(xmm24_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm25() %{
+  constraint(ALLOC_IN_RC(xmm25_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm26() %{
+  constraint(ALLOC_IN_RC(xmm26_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm27() %{
+  constraint(ALLOC_IN_RC(xmm27_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm28() %{
+  constraint(ALLOC_IN_RC(xmm28_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm29() %{
+  constraint(ALLOC_IN_RC(xmm29_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm30() %{
+  constraint(ALLOC_IN_RC(xmm30_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm31() %{
+  constraint(ALLOC_IN_RC(xmm31_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
 
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used as to simplify
@@ -11547,6 +11682,16 @@
   ins_pipe(ialu_cr_reg_mem);
 %}
 
+instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero)
+%{
+  match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero));
+
+  format %{ "testq   $src, $mem" %}
+  opcode(0x85);
+  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
+  ins_pipe(ialu_cr_reg_mem);
+%}
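
This new match rule is what lets C2 emit ZGC's barrier check as a single instruction: the ideal subtree (CmpL (AndL (CastP2X src) (LoadL mem)) zero) folds the pointer-to-integer cast, the AND with the thread-local bad mask loaded from memory, and the comparison against zero into one testq. The matched computation, as plain C++ (sketch):

    #include <cstdint>

    // Emitted as: testq src, mem  -- flags are set from (src & *bad_mask_addr)
    inline bool barrier_check(uintptr_t oop_bits, const uintptr_t* bad_mask_addr) {
      return (oop_bits & *bad_mask_addr) != 0;
    }
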
+
 // Manifest a CmpL result in an integer register.  Very painful.
 // This is the test to avoid.
 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
@@ -12320,6 +12465,223 @@
   ins_pipe(pipe_jmp);
 %}
 
+//
+// Execute ZGC load barrier (strong) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                     rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                               rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                               rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                               rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                               rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                               rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                               rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                               rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+//
+// Execute ZGC load barrier (weak) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                         rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                         rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                         rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                         rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                                   rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                   rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                   rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                                   rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                                   rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                                   rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                                   rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
 
 // ============================================================================
 // This name is KNOWN by the ADLC and cannot be changed.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+#define OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+
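+// On Linux/x86 the heap is multi-mapped, i.e. each address view is backed
+// by a real mapping, so a colored pointer can be dereferenced as-is.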
+inline uintptr_t ZAddress::address(uintptr_t value) {
+  return value;
+}
+
+#endif // OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Filesystem names
+#define ZFILESYSTEM_TMPFS                "tmpfs"
+#define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"
+
+// Sysfs file for transparent huge page on tmpfs
+#define ZFILENAME_SHMEM_ENABLED          "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
+
+// Default mount points
+#define ZMOUNTPOINT_TMPFS                "/dev/shm"
+#define ZMOUNTPOINT_HUGETLBFS            "/hugepages"
+
+// Java heap filename
+#define ZFILENAME_HEAP                   "java_heap"
+
+// Support for building on older Linux systems
+#ifndef __NR_memfd_create
+#define __NR_memfd_create                319
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC                      0x0001U
+#endif
+#ifndef MFD_HUGETLB
+#define MFD_HUGETLB                      0x0004U
+#endif
+#ifndef O_CLOEXEC
+#define O_CLOEXEC                        02000000
+#endif
+#ifndef O_TMPFILE
+#define O_TMPFILE                        (020000000 | O_DIRECTORY)
+#endif
+
+// Filesystem types, see statfs(2)
+#ifndef TMPFS_MAGIC
+#define TMPFS_MAGIC                      0x01021994
+#endif
+#ifndef HUGETLBFS_MAGIC
+#define HUGETLBFS_MAGIC                  0x958458f6
+#endif
+
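+// glibc added a memfd_create() wrapper only in version 2.27, so we invoke
+// the syscall directly.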
+static int z_memfd_create(const char *name, unsigned int flags) {
+  return syscall(__NR_memfd_create, name, flags);
+}
+
+ZBackingFile::ZBackingFile() :
+    _fd(-1),
+    _filesystem(0),
+    _initialized(false) {
+
+  // Create backing file
+  _fd = create_fd(ZFILENAME_HEAP);
+  if (_fd == -1) {
+    return;
+  }
+
+  // Get filesystem type
+  struct statfs statfs_buf;
+  if (fstatfs(_fd, &statfs_buf) == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
+    return;
+  }
+  _filesystem = statfs_buf.f_type;
+
+  // Make sure we're on a supported filesystem
+  if (!is_tmpfs() && !is_hugetlbfs()) {
+    log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  // Make sure the filesystem type matches requested large page type
+  if (ZLargePages::is_transparent() && !is_tmpfs()) {
+    log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
+    log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
+    log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
+    log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  // Successfully initialized
+  _initialized = true;
+}
+
+int ZBackingFile::create_mem_fd(const char* name) const {
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
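+  // The large page suffix is informational only; the chosen name shows up
+  // in /proc/<pid>/maps as /memfd:<name>.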
+
+  // Create file
+  const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
+  const int fd = z_memfd_create(filename, MFD_CLOEXEC | extra_flags);
+  if (fd == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create memfd file (%s)",
+                        ((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
+    return -1;
+  }
+
+  log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
+
+  return fd;
+}
+
+int ZBackingFile::create_file_fd(const char* name) const {
+  const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
+  const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
+
+  // Find mountpoint
+  ZBackingPath path(filesystem, mountpoint);
+  if (path.get() == NULL) {
+    log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
+    return -1;
+  }
+
+  // Try to create an anonymous file using the O_TMPFILE flag. Note that this
+  // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
+  const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd_anon == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create anonymouns file in %s (%s)", path.get(),
+                        (err == EINVAL ? "Not supported" : err.to_string()));
+  } else {
+    // Get inode number for anonymous file
+    struct stat stat_buf;
+    if (fstat(fd_anon, &stat_buf) == -1) {
+      ZErrno err;
+      log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+      return -1;
+    }
+
+    log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
+
+    return fd_anon;
+  }
+
+  log_debug(gc, init)("Falling back to open/unlink");
+
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
+
+  // Create file
+  const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
+    return -1;
+  }
+
+  // Unlink file
+  if (unlink(filename) == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
+    return -1;
+  }
+
+  log_debug(gc, init)("Heap backed by file %s", filename);
+
+  return fd;
+}
+
+int ZBackingFile::create_fd(const char* name) const {
+  if (ZPath == NULL) {
+    // If the path is not explicitly specified, then we first try to create a memfd file
+    // instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
+    // not be supported at all (requires kernel >= 3.17), or it might not support large
+    // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
+    // file on an accessible tmpfs or hugetlbfs mount point.
+    const int fd = create_mem_fd(name);
+    if (fd != -1) {
+      return fd;
+    }
+
+    log_debug(gc, init)("Falling back to searching for an accessible moint point");
+  }
+
+  return create_file_fd(name);
+}
+
+bool ZBackingFile::is_initialized() const {
+  return _initialized;
+}
+
+int ZBackingFile::fd() const {
+  return _fd;
+}
+
+bool ZBackingFile::is_tmpfs() const {
+  return _filesystem == TMPFS_MAGIC;
+}
+
+bool ZBackingFile::is_hugetlbfs() const {
+  return _filesystem == HUGETLBFS_MAGIC;
+}
+
+bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
+  // If the shmem_enabled file exists and is readable then we
+  // know the kernel supports transparent huge pages for tmpfs.
+  return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
+}
+
+bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+  // Try to expand the first half
+  const size_t offset0 = offset;
+  const size_t length0 = align_up(length / 2, alignment);
+  if (!try_expand_tmpfs(offset0, length0, alignment)) {
+    return false;
+  }
+
+  // Try to expand the second half
+  const size_t offset1 = offset0 + length0;
+  const size_t length1 = length - length0;
+  if (!try_expand_tmpfs(offset1, length1, alignment)) {
+    return false;
+  }
+
+  return true;
+}
+
+bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+  assert(length > 0, "Invalid length");
+  assert(is_aligned(length, alignment), "Invalid length");
+
+  ZErrno err = posix_fallocate(_fd, offset, length);
+
+  if (err == EINTR && length > alignment) {
+    // Calling posix_fallocate() with a large length can take a long
+    // time to complete. When running profilers, such as VTune, this
+    // syscall will be constantly interrupted by signals. Expanding
+    // the file in smaller steps avoids this problem.
+    return try_split_and_expand_tmpfs(offset, length, alignment);
+  }
+
+  if (err) {
+    log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
+    return false;
+  }
+
+  return true;
+}
+
+bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
+  assert(is_tmpfs(), "Wrong filesystem");
+  return try_expand_tmpfs(offset, length, os::vm_page_size());
+}
+
+bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
+  assert(is_hugetlbfs(), "Wrong filesystem");
+
+  // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
+  // Instead of posix_fallocate() we use a well-known workaround, which
+  // involves truncating the file to the requested size and then trying
+  // to map it, to verify that there are enough huge pages available to
+  // back it.
+  while (ftruncate(_fd, offset + length) == -1) {
+    ZErrno err;
+    if (err != EINTR) {
+      log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
+      return false;
+    }
+  }
+
+  // If we fail mapping during initialization, i.e. when we are pre-mapping
+  // the heap, then we wait and retry a few times before giving up. Otherwise
+  // there is a risk that running JVMs back-to-back will fail, since there
+  // is a delay between process termination and the huge pages owned by that
+  // process being returned to the huge page pool and made available for new
+  // allocations.
+  void* addr = MAP_FAILED;
+  const int max_attempts = 3;
+  for (int attempt = 1; attempt <= max_attempts; attempt++) {
+    addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+    if (addr != MAP_FAILED || is_init_completed()) {
+      // Mapping was successful or initialization phase has completed
+      break;
+    }
+
+    ZErrno err;
+    log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
+                  err.to_string(), attempt, max_attempts);
+
+    // Wait one second and then retry, in the hope that huge
+    // pages will have become available by then.
+    sleep(1);
+  }
+
+  if (addr == MAP_FAILED) {
+    // Not enough huge pages left
+    ZErrno err;
+    log_error(gc)("Failed to map backing file (%s)", err.to_string());
+    return false;
+  }
+
+  // Successful mapping, unmap again. From now on the pages we mapped
+  // will be reserved for this file.
+  if (munmap(addr, length) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
+    return false;
+  }
+
+  return true;
+}
+
+bool ZBackingFile::expand(size_t offset, size_t length) const {
+  return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+
+#include "memory/allocation.hpp"
+
+class ZBackingFile {
+private:
+  int      _fd;
+  uint64_t _filesystem;
+  bool     _initialized;
+
+  int create_mem_fd(const char* name) const;
+  int create_file_fd(const char* name) const;
+  int create_fd(const char* name) const;
+
+  bool is_tmpfs() const;
+  bool is_hugetlbfs() const;
+  bool tmpfs_supports_transparent_huge_pages() const;
+
+  bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+  bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+  bool expand_tmpfs(size_t offset, size_t length) const;
+
+  bool expand_hugetlbfs(size_t offset, size_t length) const;
+
+public:
+  ZBackingFile();
+
+  bool is_initialized() const;
+
+  int fd() const;
+  bool expand(size_t offset, size_t length) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "logging/log.hpp"
+
+#include <stdio.h>
+#include <unistd.h>
+
+// Mount information, see proc(5) for more details.
+#define PROC_SELF_MOUNTINFO        "/proc/self/mountinfo"
+
+ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
+  if (ZPath != NULL) {
+    // Use specified path
+    _path = strdup(ZPath);
+  } else {
+    // Find suitable path
+    _path = find_mountpoint(filesystem, preferred_path);
+  }
+}
+
+ZBackingPath::~ZBackingPath() {
+  free(_path);
+  _path = NULL;
+}
+
+char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
+  char* line_mountpoint = NULL;
+  char* line_filesystem = NULL;
+
+  // Parse line and return a newly allocated string containing the mountpoint if
+  // the line contains a matching filesystem and the mountpoint is accessible by
+  // the current user.
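+  // The %ms conversions make sscanf() allocate the matched strings
+  // (POSIX.1-2008), which is why both are passed to free() below.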
+  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
+      strcmp(line_filesystem, filesystem) != 0 ||
+      access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
+    // Not a matching or accessible filesystem
+    free(line_mountpoint);
+    line_mountpoint = NULL;
+  }
+
+  free(line_filesystem);
+
+  return line_mountpoint;
+}
+
+void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
+  FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
+  if (fd == NULL) {
+    ZErrno err;
+    log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+    return;
+  }
+
+  char* line = NULL;
+  size_t length = 0;
+
+  while (getline(&line, &length, fd) != -1) {
+    char* const mountpoint = get_mountpoint(line, filesystem);
+    if (mountpoint != NULL) {
+      mountpoints->add(mountpoint);
+    }
+  }
+
+  free(line);
+  fclose(fd);
+}
+
+void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
+  ZArrayIterator<char*> iter(mountpoints);
+  for (char* mountpoint; iter.next(&mountpoint);) {
+    free(mountpoint);
+  }
+  mountpoints->clear();
+}
+
+char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
+  char* path = NULL;
+  ZArray<char*> mountpoints;
+
+  get_mountpoints(&mountpoints, filesystem);
+
+  if (mountpoints.size() == 0) {
+    // No filesystem found
+    log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
+  } else if (mountpoints.size() == 1) {
+    // One filesystem found
+    path = strdup(mountpoints.at(0));
+  } else if (mountpoints.size() > 1) {
+    // More than one filesystem found
+    ZArrayIterator<char*> iter(&mountpoints);
+    for (char* mountpoint; iter.next(&mountpoint);) {
+      if (!strcmp(mountpoint, preferred_mountpoint)) {
+        // Preferred mount point found
+        path = strdup(mountpoint);
+        break;
+      }
+    }
+
+    if (path == NULL) {
+      // Preferred mount point not found
+      log_error(gc, init)("More than one %s filesystem found:", filesystem);
+      ZArrayIterator<char*> iter2(&mountpoints);
+      for (char* mountpoint; iter2.next(&mountpoint);) {
+        log_error(gc, init)("  %s", mountpoint);
+      }
+    }
+  }
+
+  free_mountpoints(&mountpoints);
+
+  return path;
+}
+
+const char* ZBackingPath::get() const {
+  return _path;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.hpp"
+
+class ZBackingPath : public StackObj {
+private:
+  char* _path;
+
+  char* get_mountpoint(const char* line, const char* filesystem) const;
+  void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
+  void free_mountpoints(ZArray<char*>* mountpoints) const;
+  char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
+
+public:
+  ZBackingPath(const char* filesystem, const char* preferred_path);
+  ~ZBackingPath();
+
+  const char* get() const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
+
+uintptr_t ZAddressReservedStart() {
+  return ZAddressMetadataMarked0;
+}
+
+uintptr_t ZAddressReservedEnd() {
+  return ZAddressMetadataRemapped + ZAddressOffsetMax;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+
+//
+// Page Allocation Tiers
+// ---------------------
+//
+//  Page Type     Page Size     Object Size Limit     Object Alignment
+//  ------------------------------------------------------------------
+//  Small         2M            <= 256K               <MinObjAlignmentInBytes>
+//  Medium        32M           <= 4M                 4K
+//  Large         X*M           > 4M                  2M
+//  ------------------------------------------------------------------
+//
+//
+// Address Space & Pointer Layout
+// ------------------------------
+//
+//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+//  .                                .
+//  .                                .
+//  .                                .
+//  +--------------------------------+ 0x0000140000000000 (20TB)
+//  |         Remapped View          |
+//  +--------------------------------+ 0x0000100000000000 (16TB)
+//  |     (Reserved, but unused)     |
+//  +--------------------------------+ 0x00000c0000000000 (12TB)
+//  |         Marked1 View           |
+//  +--------------------------------+ 0x0000080000000000 (8TB)
+//  |         Marked0 View           |
+//  +--------------------------------+ 0x0000040000000000 (4TB)
+//  .                                .
+//  +--------------------------------+ 0x0000000000000000
+//
+//
+//   6                 4 4 4  4 4                                             0
+//   3                 7 6 5  2 1                                             0
+//  +-------------------+-+----+-----------------------------------------------+
+//  |00000000 00000000 0|0|1111|11 11111111 11111111 11111111 11111111 11111111|
+//  +-------------------+-+----+-----------------------------------------------+
+//  |                   | |    |
+//  |                   | |    * 41-0 Object Offset (42-bits, 4TB address space)
+//  |                   | |
+//  |                   | * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
+//  |                   |                                 0010 = Marked1      (Address view 8-12TB)
+//  |                   |                                 0100 = Remapped     (Address view 16-20TB)
+//  |                   |                                 1000 = Finalizable  (Address view N/A)
+//  |                   |
+//  |                   * 46-46 Unused (1-bit, always zero)
+//  |
+//  * 63-47 Fixed (17-bits, always zero)
+//
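+//  For example, heap offset 0x0000000012345678 appears as
+//  0x0000040012345678 in the Marked0 view, 0x0000080012345678 in the
+//  Marked1 view, and 0x0000100012345678 in the Remapped view.
+//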
+
+const size_t    ZPlatformPageSizeSmallShift   = 21; // 2M
+
+const size_t    ZPlatformAddressOffsetBits    = 42; // 4TB
+
+const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
+
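+// Start at 4TB and reserve 16TB, i.e. the 4TB-20TB range holding the
+// Marked0, Marked1, (reserved) and Remapped views shown above.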
+const uintptr_t ZPlatformAddressSpaceStart    = (uintptr_t)1 << ZPlatformAddressOffsetBits;
+const uintptr_t ZPlatformAddressSpaceSize     = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
+
+const size_t    ZPlatformCacheLineSize        = 64;
+
+#endif // OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "runtime/globals.hpp"
+
+void ZLargePages::initialize_platform() {
+  if (UseLargePages) {
+    if (UseTransparentHugePages) {
+      _state = Transparent;
+    } else {
+      _state = Explicit;
+    }
+  } else {
+    _state = Disabled;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zCPU.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+
+#include <unistd.h>
+#include <sys/syscall.h>
+
+#ifndef MPOL_F_NODE
+#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
+#endif
+
+#ifndef MPOL_F_ADDR
+#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
+#endif
+
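+// glibc provides no get_mempolicy() wrapper (it lives in libnuma), so we
+// invoke the syscall directly.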
+static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
+  return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
+}
+
+void ZNUMA::initialize_platform() {
+  _enabled = UseNUMA;
+}
+
+uint32_t ZNUMA::count() {
+  if (!_enabled) {
+    // NUMA support not enabled
+    return 1;
+  }
+
+  return os::Linux::numa_max_node() + 1;
+}
+
+uint32_t ZNUMA::id() {
+  if (!_enabled) {
+    // NUMA support not enabled
+    return 0;
+  }
+
+  return os::Linux::get_node_by_cpu(ZCPU::id());
+}
+
+uint32_t ZNUMA::memory_id(uintptr_t addr) {
+  if (!_enabled) {
+    // NUMA support not enabled, assume everything belongs to node zero
+    return 0;
+  }
+
+  uint32_t id = (uint32_t)-1;
+
+  if (z_get_mempolicy(&id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
+    ZErrno err;
+    fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
+  }
+
+  assert(id < count(), "Invalid NUMA id");
+
+  return id;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zMemory.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// Support for building on older Linux systems
+#ifndef MADV_HUGEPAGE
+#define MADV_HUGEPAGE                        14
+#endif
+
+// Proc file entry for max map count
+#define ZFILENAME_PROC_MAX_MAP_COUNT         "/proc/sys/vm/max_map_count"
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
+    _manager(),
+    _file(),
+    _granule_size(granule_size) {
+
+  // Check and warn if max map count seems too low
+  check_max_map_count(max_capacity, granule_size);
+}
+
+void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
+  const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
+  FILE* const file = fopen(filename, "r");
+  if (file == NULL) {
+    // Failed to open file, skip check
+    log_debug(gc)("Failed to open %s", filename);
+    return;
+  }
+
+  size_t actual_max_map_count = 0;
+  const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
+  fclose(file);
+  if (result != 1) {
+    // Failed to read file, skip check
+    log_debug(gc)("Failed to read %s", filename);
+    return;
+  }
+
+  // The required max map count is impossible to calculate exactly since subsystems
+  // other than ZGC are also creating memory mappings, and we have no control over that.
+  // However, ZGC tends to create the most mappings and dominate the total count.
+  // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
+  // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
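+  // For example, a 16G max capacity with 2M granules gives
+  // (16G / 2M) * 3 * 1.2 = 29491 required mappings, while a common
+  // kernel default for vm.max_map_count is 65530.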
+  const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
+  if (actual_max_map_count < required_max_map_count) {
+    log_warning(gc)("The system limit on number of memory mappings "
+                    "per process might be too low for the given");
+    log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
+                    "adjust %s to allow for at least", max_capacity / M, filename);
+    log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
+                    "Continuing execution with the current limit could",
+                    required_max_map_count, actual_max_map_count);
+    log_warning(gc)("lead to a fatal error down the line, due to failed "
+                    "attempts to map memory.");
+  }
+}
+
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return _file.is_initialized();
+}
+
+bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
+  const size_t size = to - from;
+
+  // Expand
+  if (!_file.expand(from, size)) {
+    return false;
+  }
+
+  // Add expanded space to free list
+  _manager.free(from, size);
+
+  return true;
+}
+
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+  assert(is_aligned(size, _granule_size), "Invalid size");
+
+  ZPhysicalMemory pmem;
+
+  // Allocate segments
+  for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
+    const uintptr_t start = _manager.alloc_from_front(_granule_size);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+    pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
+  }
+
+  return pmem;
+}
+
+void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
+  const size_t nsegments = pmem.nsegments();
+
+  // Free segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment segment = pmem.segment(i);
+    _manager.free(segment.start(), segment.size());
+  }
+}
+
+void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
+  if (err == ENOMEM) {
+    fatal("Failed to map memory. Please check the system limit on number of "
+          "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
+  } else {
+    fatal("Failed to map memory (%s)", err.to_string());
+  }
+}
+
+void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
+  if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+  }
+}
+
+void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
+  const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
+  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
+  const size_t nsegments = pmem.nsegments();
+
+  // Map segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment segment = pmem.segment(i);
+    const size_t size = segment.size();
+    const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
+    if (res == MAP_FAILED) {
+      ZErrno err;
+      map_failed(err);
+    }
+
+    // Advise on use of transparent huge pages before touching it
+    if (ZLargePages::is_transparent()) {
+      advise_view(addr, size);
+    }
+
+    // NUMA interleave memory before touching it
+    ZNUMA::memory_interleave(addr, size);
+
+    if (pretouch) {
+      pretouch_view(addr, size);
+    }
+
+    addr += size;
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
+  // Note that we must keep the address space reservation intact and just detach
+  // the backing memory. For this reason we map a new anonymous, non-accessible
+  // and non-reserved page over the mapping instead of actually unmapping.
+  const size_t size = pmem.size();
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    map_failed(err);
+  }
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // From an NMT point of view we treat the first heap mapping (marked0) as committed
+  return ZAddress::marked0(offset);
+}
+
+void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
+  if (ZUnmapBadViews) {
+    // Only map the good view, for debugging only
+    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+  } else {
+    // Map all views
+    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+  if (ZUnmapBadViews) {
+    // Only unmap the good view, for debugging only
+    unmap_view(pmem, ZAddress::good(offset));
+  } else {
+    // Unmap all views
+    unmap_view(pmem, ZAddress::marked0(offset));
+    unmap_view(pmem, ZAddress::marked1(offset));
+    unmap_view(pmem, ZAddress::remapped(offset));
+  }
+}
+
+void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
+  assert(ZUnmapBadViews, "Should be enabled");
+  const uintptr_t addr_good = ZAddress::good(offset);
+  const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
+  // Map/Unmap views
+  map_view(pmem, addr_good, false /* pretouch */);
+  unmap_view(pmem, addr_bad);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zMemory.hpp"
+
+class ZErrno;
+class ZPhysicalMemory;
+
+class ZPhysicalMemoryBacking {
+private:
+  ZMemoryManager _manager;
+  ZBackingFile   _file;
+  const size_t   _granule_size;
+
+  void check_max_map_count(size_t max_capacity, size_t granule_size) const;
+  void map_failed(ZErrno err) const;
+
+  void advise_view(uintptr_t addr, size_t size) const;
+  void pretouch_view(uintptr_t addr, size_t size) const;
+  void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
+  void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
+
+public:
+  ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size);
+
+  bool is_initialized() const;
+
+  bool expand(size_t from, size_t to);
+  ZPhysicalMemory alloc(size_t size);
+  void free(ZPhysicalMemory pmem);
+
+  uintptr_t nmt_address(uintptr_t offset) const;
+
+  void map(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "logging/log.hpp"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+bool ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) {
+  // Reserve address space
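+  // PROT_NONE and MAP_NORESERVE give an inaccessible reservation that
+  // consumes no physical memory or swap. Backing memory is later mapped
+  // on top of the reservation using MAP_FIXED.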
+  const uintptr_t actual_start = (uintptr_t)mmap((void*)start, size, PROT_NONE,
+                                                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (actual_start != start) {
+    // Unmap anything the kernel placed elsewhere, to avoid leaking it
+    if (actual_start != (uintptr_t)MAP_FAILED) {
+      munmap((void*)actual_start, size);
+    }
+    log_error(gc)("Failed to reserve address space for Java heap");
+    return false;
+  }
+
+  return true;
+}
--- a/src/hotspot/share/adlc/formssel.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/adlc/formssel.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -2282,6 +2282,9 @@
   if (strcmp(name, "RegD") == 0) size = 2;
   if (strcmp(name, "RegL") == 0) size = 2;
   if (strcmp(name, "RegN") == 0) size = 1;
+  if (strcmp(name, "VecX") == 0) size = 4;
+  if (strcmp(name, "VecY") == 0) size = 8;
+  if (strcmp(name, "VecZ") == 0) size = 16;
   if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
   if (size == 0) {
     return false;
@@ -3509,6 +3512,7 @@
     "ClearArray",
     "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
     "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
+    "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
   };
   int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
   if( strcmp(_opType,"PrefetchAllocation")==0 )
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -756,6 +756,9 @@
 #endif // COMPILER1
 #ifdef COMPILER2
   case vmIntrinsics::_clone:
+#if INCLUDE_ZGC
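+    // The clone intrinsic is disabled when using ZGC, since it is not
+    // yet load barrier aware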
+    if (UseZGC) return true;
+#endif
   case vmIntrinsics::_copyOf:
   case vmIntrinsics::_copyOfRange:
     // These intrinsics use both the objectcopy and the arraycopy
--- a/src/hotspot/share/compiler/compilerDirectives.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -66,7 +66,8 @@
     cflags(VectorizeDebug,          uintx, 0, VectorizeDebug) \
     cflags(CloneMapDebug,           bool, false, CloneMapDebug) \
     cflags(IGVPrintLevel,           intx, PrintIdealGraphLevel, IGVPrintLevel) \
-    cflags(MaxNodeLimit,            intx, MaxNodeLimit, MaxNodeLimit)
+    cflags(MaxNodeLimit,            intx, MaxNodeLimit, MaxNodeLimit) \
+ZGC_ONLY(cflags(ZOptimizeLoadBarriers, bool, ZOptimizeLoadBarriers, ZOptimizeLoadBarriers))
 #else
   #define compilerdirectives_c2_flags(cflags)
 #endif
--- a/src/hotspot/share/compiler/oopMap.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/compiler/oopMap.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -380,8 +380,12 @@
           continue;
         }
 #ifdef ASSERT
-        if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
-            !Universe::heap()->is_in_or_null(*loc)) {
+        // We cannot verify the oop here if we are using ZGC; the oop
+        // will be bad if there was a safepoint between the load and
+        // the load barrier.
+        if (!UseZGC &&
+            ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
+             !Universe::heap()->is_in_or_null(*loc))) {
           tty->print_cr("# Found non oop pointer.  Dumping state at failure");
           // try to dump out some helpful debugging information
           trace_codeblob_maps(fr, reg_map);
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -30,7 +30,8 @@
 // Do something for each concrete barrier set part of the build.
 #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f)          \
   f(CardTableBarrierSet)                             \
-  G1GC_ONLY(f(G1BarrierSet))
+  G1GC_ONLY(f(G1BarrierSet))                         \
+  ZGC_ONLY(f(ZBarrierSet))
 
 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
   f(ModRef)
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -26,12 +26,13 @@
 #define SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
 
 #include "gc/shared/barrierSetConfig.hpp"
-
 #include "gc/shared/modRefBarrierSet.inline.hpp"
 #include "gc/shared/cardTableBarrierSet.inline.hpp"
-
 #if INCLUDE_G1GC
-#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
+#include "gc/g1/g1BarrierSet.inline.hpp"
+#endif
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSet.inline.hpp"
 #endif
 
 #endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -89,6 +89,7 @@
 //     CMSHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   ZCollectedHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
@@ -206,7 +207,8 @@
     Serial,
     Parallel,
     CMS,
-    G1
+    G1,
+    Z
   };
 
   static inline size_t filler_array_max_size() {
--- a/src/hotspot/share/gc/shared/gcCause.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcCause.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -105,6 +105,21 @@
     case _dcmd_gc_run:
       return "Diagnostic Command";
 
+    case _z_timer:
+      return "Timer";
+
+    case _z_warmup:
+      return "Warmup";
+
+    case _z_allocation_rate:
+      return "Allocation Rate";
+
+    case _z_allocation_stall:
+      return "Allocation Stall";
+
+    case _z_proactive:
+      return "Proactive";
+
     case _last_gc_cause:
       return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";
 
--- a/src/hotspot/share/gc/shared/gcCause.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcCause.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -78,6 +78,12 @@
 
     _dcmd_gc_run,
 
+    _z_timer,
+    _z_warmup,
+    _z_allocation_rate,
+    _z_allocation_stall,
+    _z_proactive,
+
     _last_gc_cause
   };
 
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -40,6 +40,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serialArguments.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zArguments.hpp"
+#endif
 
 struct SupportedGC {
   bool&               _flag;
@@ -55,6 +58,7 @@
       G1GC_ONLY(static G1Arguments       g1Arguments;)
 PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
   SERIALGC_ONLY(static SerialArguments   serialArguments;)
+       ZGC_ONLY(static ZArguments        zArguments;)
 
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
@@ -64,6 +68,7 @@
   PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"))
   PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"))
     SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"))
+         ZGC_ONLY_ARG(SupportedGC(UseZGC,             CollectedHeap::Z,        zArguments,        "z gc"))
 };
 
 #define FOR_EACH_SUPPORTED_GC(var) \
@@ -92,6 +97,7 @@
   NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
   NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
   NOT_SERIALGC(  UNSUPPORTED_OPTION(UseSerialGC);)
+  NOT_ZGC(       UNSUPPORTED_OPTION(UseZGC);)
 }
 
 bool GCConfig::is_no_gc_selected() {
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -43,6 +43,10 @@
     return ParNew;
   }
 
+  if (UseZGC) {
+    return NA;
+  }
+
   return DefNew;
 }
 
@@ -59,6 +63,10 @@
     return ParallelOld;
   }
 
+  if (UseZGC) {
+    return Z;
+  }
+
   return SerialOld;
 }
 
--- a/src/hotspot/share/gc/shared/gcName.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -38,6 +38,8 @@
   ConcurrentMarkSweep,
   G1Old,
   G1Full,
+  Z,
+  NA,
   GCNameEndSentinel
 };
 
@@ -55,6 +57,8 @@
       case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
       case G1Old: return "G1Old";
       case G1Full: return "G1Full";
+      case Z: return "Z";
+      case NA: return "N/A";
       default: ShouldNotReachHere(); return NULL;
     }
   }
--- a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -40,6 +40,6 @@
 // should consider placing frequently accessed fields first in
 // T, so that field offsets relative to Thread are small, which
 // often allows for a more compact instruction encoding.
-typedef uint64_t GCThreadLocalData[14]; // 112 bytes
+typedef uint64_t GCThreadLocalData[18]; // 144 bytes
 
 #endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -38,6 +38,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_globals.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/z_globals.hpp"
+#endif
 
 #define GC_FLAGS(develop,                                                   \
                  develop_pd,                                                \
@@ -118,6 +121,22 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
+  ZGC_ONLY(GC_Z_FLAGS(                                                      \
+    develop,                                                                \
+    develop_pd,                                                             \
+    product,                                                                \
+    product_pd,                                                             \
+    diagnostic,                                                             \
+    diagnostic_pd,                                                          \
+    experimental,                                                           \
+    notproduct,                                                             \
+    manageable,                                                             \
+    product_rw,                                                             \
+    lp64_product,                                                           \
+    range,                                                                  \
+    constraint,                                                             \
+    writeable))                                                             \
+                                                                            \
   /* gc */                                                                  \
                                                                             \
   product(bool, UseConcMarkSweepGC, false,                                  \
@@ -135,6 +154,9 @@
   product(bool, UseParallelOldGC, false,                                    \
           "Use the Parallel Old garbage collector")                         \
                                                                             \
+  experimental(bool, UseZGC, false,                                         \
+          "Use the Z garbage collector")                                    \
+                                                                            \
   product(uint, ParallelGCThreads, 0,                                       \
           "Number of parallel threads parallel gc will use")                \
           constraint(ParallelGCThreadsConstraintFunc,AfterErgo)             \
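
Since UseZGC is registered as an experimental flag rather than a product flag, it must be enabled together with -XX:+UnlockExperimentalVMOptions, for example:

  java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -version
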
--- a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -35,6 +35,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_specialized_oop_closures.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/z_specialized_oop_closures.hpp"
+#endif
 
 // The following OopClosure types get specialized versions of
 // "oop_oop_iterate" that invoke the closures' do_oop methods
@@ -67,7 +70,8 @@
   SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f))       \
      CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f))      \
       G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f))       \
-      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
+      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))   \
+       ZGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(f))
 
 // We separate these out, because sometime the general one has
 // a different definition from the specialized ones, and sometimes it
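
ZGC hooks into the same X-macro mechanism as the other collectors here: each collector contributes its specialized closure list through a *GC_ONLY macro, so devirtualized oop_oop_iterate variants are generated only for collectors compiled into the build.
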
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Fri Jun 08 18:24:45 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -47,6 +47,9 @@
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/vmStructs_serial.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/vmStructs_z.hpp"
+#endif
 
 #define VM_STRUCTS_GC(nonstatic_field,                                                                                               \
                       volatile_nonstatic_field,                                                                                      \
@@ -64,6 +67,10 @@
   SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                 \
                                     volatile_nonstatic_field,                                                                        \
                                     static_field))                                                                                   \
+  ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field,                                                                                           \
+                          volatile_nonstatic_field,                                                                                  \
+                          static_field))                                                                                             \
+                                                                                                                                     \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
@@ -162,6 +169,10 @@
   SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type,                           \
                                   declare_toplevel_type,                  \
                                   declare_integer_type))                  \
+  ZGC_ONLY(VM_TYPES_ZGC(declare_type,                                     \
+                        declare_toplevel_type,                            \
+                        declare_integer_type))                            \
+                                                                          \
   /******************************************/                            \
   /* Generation and space hierarchies       */                            \
   /* (needed for run-time type information) */                            \
@@ -231,6 +242,8 @@
                                               declare_constant_with_value)) \
   SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant,                 \
                                           declare_constant_with_value))     \
+  ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant,                           \
+                                declare_constant_with_value))               \
                                                                             \
   /********************************************/                            \
   /* Generation and Space Hierarchy Constants */                            \
@@ -274,5 +287,7 @@
   declare_constant(Generation::LogOfGenGrain)                               \
   declare_constant(Generation::GenGrain)                                    \
 
+#define VM_LONG_CONSTANTS_GC(declare_constant)                              \
+  ZGC_ONLY(VM_LONG_CONSTANTS_ZGC(declare_constant))
 
 #endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
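
The new VM_LONG_CONSTANTS_GC hook gives ZGC a way of exporting 64-bit constants to the Serviceability Agent, alongside the existing struct, type, and int-constant tables, which cannot represent them.
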
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_LIR.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "c1/c1_CodeStubs.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "utilities/macros.hpp"
+
+ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
+    _decorators(access.decorators()),
+    _ref_addr(access.resolved_addr()),
+    _ref(ref),
+    _tmp(LIR_OprFact::illegalOpr),
+    _patch_info(access.patch_emit_info()),
+    _runtime_stub(runtime_stub) {
+
+  // Allocate tmp register if needed
+  if (!_ref_addr->is_register()) {
+    assert(_ref_addr->is_address(), "Must be an address");
+    if (_ref_addr->as_address_ptr()->index()->is_valid() ||
+        _ref_addr->as_address_ptr()->disp() != 0) {
+      // Has index or displacement, need tmp register to load address into
+      _tmp = access.gen()->new_pointer_register();
+    } else {
+      // No index or displacement, address available in base register
+      _ref_addr = _ref_addr->as_address_ptr()->base();
+    }
+  }
+
+  assert(_ref->is_register(), "Must be a register");
+  assert(_ref_addr->is_register() != _tmp->is_register(), "Only one should be a register");
+}
+
+DecoratorSet ZLoadBarrierStubC1::decorators() const {
+  return _decorators;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref() const {
+  return _ref;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref_addr() const {
+  return _ref_addr;
+}
+
+LIR_Opr ZLoadBarrierStubC1::tmp() const {
+  return _tmp;
+}
+
+LIR_PatchCode ZLoadBarrierStubC1::patch_code() const {
+  return (_decorators & C1_NEEDS_PATCHING) != 0 ? lir_patch_normal : lir_patch_none;
+}
+
+CodeEmitInfo*& ZLoadBarrierStubC1::patch_info() {
+  return _patch_info;
+}
+
+address ZLoadBarrierStubC1::runtime_stub() const {
+  return _runtime_stub;
+}
+
+void ZLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
+  if (_patch_info != NULL) {
+    visitor->do_slow_case(_patch_info);
+  } else {
+    visitor->do_slow_case();
+  }
+
+  visitor->do_input(_ref_addr);
+  visitor->do_output(_ref);
+
+  if (_tmp->is_valid()) {
+    visitor->do_temp(_tmp);
+  }
+}
+
+void ZLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
+  ZBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
+}
+
+#ifndef PRODUCT
+void ZLoadBarrierStubC1::print_name(outputStream* out) const {
+  out->print("ZLoadBarrierStubC1");
+}
+#endif // PRODUCT
+
+class LIR_OpZLoadBarrierTest : public LIR_Op {
+private:
+  LIR_Opr _opr;
+
+public:
+  LIR_OpZLoadBarrierTest(LIR_Opr opr) :
+      LIR_Op(),
+      _opr(opr) {}
+
+  virtual void visit(LIR_OpVisitState* state) {
+    state->do_input(_opr);
+  }
+
+  virtual void emit_code(LIR_Assembler* ce) {
+    ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
+  }
+
+  virtual void print_instr(outputStream* out) const {
+    _opr->print(out);
+    out->print(" ");
+  }
+
+#ifndef PRODUCT
+  virtual const char* name() const {
+    return "lir_z_load_barrier_test";
+  }
+#endif // PRODUCT
+};
+
+static bool barrier_needed(LIRAccess& access) {
+  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+ZBarrierSetC1::ZBarrierSetC1() :
+    _load_barrier_on_oop_field_preloaded_runtime_stub(NULL),
+    _load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {}
+
+address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
+  assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
+  //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
+
+  if ((decorators & ON_WEAK_OOP_REF) != 0) {
+    return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+  } else {
+    return _load_barrier_on_oop_field_preloaded_runtime_stub;
+  }
+}
+
+#ifdef ASSERT
+#define __ access.gen()->lir(__FILE__, __LINE__)->
+#else
+#define __ access.gen()->lir()->
+#endif
+
+void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
+  // Fast path
+  __ append(new LIR_OpZLoadBarrierTest(result));
+
+  // Slow path
+  const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
+  CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub);
+  __ branch(lir_cond_notEqual, T_ADDRESS, stub);
+  __ branch_destination(stub->continuation());
+}
+
+#undef __
+
+void ZBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+  BarrierSetC1::load_at_resolved(access, result);
+
+  if (barrier_needed(access)) {
+    load_barrier(access, result);
+  }
+}
+
+static void pre_load_barrier(LIRAccess& access) {
+  DecoratorSet decorators = access.decorators();
+
+  // Downgrade access to MO_UNORDERED
+  decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
+
+  // Remove C1_WRITE_ACCESS
+  decorators = (decorators & ~C1_WRITE_ACCESS);
+
+  // Generate a synthetic load_at access
+  access.gen()->access_load_at(decorators,
+                               access.type(),
+                               access.base().item(),
+                               access.offset().opr(),
+                               access.gen()->new_register(access.type()),
+                               NULL /* patch_emit_info */,
+                               NULL /* load_emit_info */);
+}
+
+LIR_Opr ZBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+  if (barrier_needed(access)) {
+    pre_load_barrier(access);
+  }
+
+  return BarrierSetC1::atomic_xchg_at_resolved(access, value);
+}
+
+LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+  if (barrier_needed(access)) {
+    pre_load_barrier(access);
+  }
+
+  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+}
+
+class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
+private:
+  const DecoratorSet _decorators;
+
+public:
+  ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
+      _decorators(decorators) {}
+
+  virtual OopMapSet* generate_code(StubAssembler* sasm) {
+    ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
+    return NULL;
+  }
+};
+
+static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
+  ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  return code_blob->code_begin();
+}
+
+void ZBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
+  _load_barrier_on_oop_field_preloaded_runtime_stub =
+    generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
+  _load_barrier_on_weak_oop_field_preloaded_runtime_stub =
+    generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
+}
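
In plain C++ terms, the fast/slow-path split generated above behaves roughly like the following sketch (a sketch only; the runtime entry point is the one named in this patch, and the bad-mask test matches the barrier expansion in zBarrierSetC2.cpp below):

  oop z_load_barrier_sketch(oop* field, uintptr_t address_bad_mask) {
    oop ref = *field;                                   // preloaded oop
    if (((uintptr_t)ref & address_bad_mask) != 0) {     // fast-path test
      // Slow path: heal the reference and write it back through 'field'.
      ref = ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(ref, field);
    }
    return ref;
  }
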
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+#define SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+
+#include "c1/c1_CodeStubs.hpp"
+#include "c1/c1_IR.hpp"
+#include "c1/c1_LIR.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
+#include "oops/accessDecorators.hpp"
+
+class ZLoadBarrierStubC1 : public CodeStub {
+private:
+  DecoratorSet  _decorators;
+  LIR_Opr       _ref_addr;
+  LIR_Opr       _ref;
+  LIR_Opr       _tmp;
+  CodeEmitInfo* _patch_info;
+  address       _runtime_stub;
+
+public:
+  ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
+
+  DecoratorSet decorators() const;
+  LIR_Opr ref() const;
+  LIR_Opr ref_addr() const;
+  LIR_Opr tmp() const;
+  LIR_PatchCode patch_code() const;
+  CodeEmitInfo*& patch_info();
+  address runtime_stub() const;
+
+  virtual void emit_code(LIR_Assembler* ce);
+  virtual void visit(LIR_OpVisitState* visitor);
+
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const;
+#endif // PRODUCT
+};
+
+class ZBarrierSetC1 : public BarrierSetC1 {
+private:
+  address _load_barrier_on_oop_field_preloaded_runtime_stub;
+  address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+
+  address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
+  void load_barrier(LIRAccess& access, LIR_Opr result) const;
+
+protected:
+  virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
+  virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
+  virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+public:
+  ZBarrierSetC1();
+
+  virtual void generate_c1_runtime_stubs(BufferBlob* blob);
+};
+
+#endif // SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,1480 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "opto/compile.hpp"
+#include "opto/castnode.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/idealKit.hpp"
+#include "opto/loopnode.hpp"
+#include "opto/macro.hpp"
+#include "opto/node.hpp"
+#include "opto/type.hpp"
+#include "utilities/macros.hpp"
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+
+ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
+  : _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8,  0, NULL)) {}
+
+int ZBarrierSetC2State::load_barrier_count() const {
+  return _load_barrier_nodes->length();
+}
+
+void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
+  assert(!_load_barrier_nodes->contains(n), "duplicate entry in expand list");
+  _load_barrier_nodes->append(n);
+}
+
+void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
+  // This function may be called twice for a node, so check
+  // that the node is in the array before attempting to remove it
+  if (_load_barrier_nodes->contains(n)) {
+    _load_barrier_nodes->remove(n);
+  }
+}
+
+LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
+  return _load_barrier_nodes->at(idx);
+}
+
+void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
+  return new(comp_arena) ZBarrierSetC2State(comp_arena);
+}
+
+ZBarrierSetC2State* ZBarrierSetC2::state() const {
+  return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
+}
+
+bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
+  return node->is_LoadBarrier();
+}
+
+void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
+  if (node->is_LoadBarrier()) {
+    state()->add_load_barrier_node(node->as_LoadBarrier());
+  }
+}
+
+void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
+  if (node->is_LoadBarrier()) {
+    state()->remove_load_barrier_node(node->as_LoadBarrier());
+  }
+}
+
+void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
+  // Remove useless LoadBarrier nodes
+  ZBarrierSetC2State* s = state();
+  for (int i = s->load_barrier_count()-1; i >= 0; i--) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+    if (!useful.member(n)) {
+      unregister_potential_barrier_node(n);
+    }
+  }
+}
+
+void ZBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
+  if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
+    worklist.push(node);
+  }
+}
+
+void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
+  // Look for dominating barriers on the same address only once all
+  // other loop opts are over: loop opts may cause a safepoint to be
+  // inserted between a barrier and its dominating barrier.
+  Compile* C = Compile::current();
+  ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
+  ZBarrierSetC2State* s = bs->state();
+  if (s->load_barrier_count() >= 2) {
+    Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
+    PhaseIdealLoop ideal_loop(igvn, true, false, true);
+    if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
+  }
+}
+
+void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
+  // Permanent temporary workaround
+  // Load barriers may have non-obvious dead uses keeping them alive during parsing. Such uses
+  // are removed by RemoveUseless (after parsing, before optimize), but the barriers won't be
+  // added to the worklist. Unless we add them explicitly, they are not guaranteed to end up there.
+  ZBarrierSetC2State* s = state();
+
+  for (int i = 0; i < s->load_barrier_count(); i++) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+    worklist->push(n);
+  }
+}
+
+const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
+  const Type** fields;
+
+  // Create input types (domain)
+  fields = TypeTuple::fields(2);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
+  fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+
+  // Create result type (range)
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  return TypeFunc::make(domain, range);
+}
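+
+// The two Parms above model (preloaded oop, field address) and the single
+// result models the healed oop, matching the
+// ZBarrierSetRuntime::load_barrier_on_*_preloaded calls emitted by
+// expand_loadbarrier_basic() below.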
+
+// == LoadBarrierNode ==
+
+LoadBarrierNode::LoadBarrierNode(Compile* C,
+                                 Node* c,
+                                 Node* mem,
+                                 Node* val,
+                                 Node* adr,
+                                 bool weak,
+                                 bool writeback,
+                                 bool oop_reload_allowed) :
+    MultiNode(Number_of_Inputs),
+    _weak(weak),
+    _writeback(writeback),
+    _oop_reload_allowed(oop_reload_allowed) {
+  init_req(Control, c);
+  init_req(Memory, mem);
+  init_req(Oop, val);
+  init_req(Address, adr);
+  init_req(Similar, C->top());
+
+  init_class_id(Class_LoadBarrier);
+  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+  bs->register_potential_barrier_node(this);
+}
+
+const Type *LoadBarrierNode::bottom_type() const {
+  const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+  Node* in_oop = in(Oop);
+  floadbarrier[Control] = Type::CONTROL;
+  floadbarrier[Memory] = Type::MEMORY;
+  floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
+  return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
+
+const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
+  const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+  const Type* val_t = phase->type(in(Oop));
+  floadbarrier[Control] = Type::CONTROL;
+  floadbarrier[Memory] = Type::MEMORY;
+  floadbarrier[Oop] = val_t;
+  return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
+
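+// Conservative dominance test: with a PhaseIdealLoop available, query it
+// directly; otherwise walk at most 10 steps up the dominator chain via
+// IfNode::up_one_dom() and treat "not found" as "not dominating".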
+bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
+  if (phase != NULL) {
+    return phase->is_dominator(d, n);
+  }
+
+  for (int i = 0; i < 10 && n != NULL; i++) {
+    n = IfNode::up_one_dom(n, linear_only);
+    if (n == d) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
+  Node* val = in(LoadBarrierNode::Oop);
+  if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
+    LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
+    assert(lb->in(Address) == in(Address), "");
+    // Load barrier on Similar edge dominates so if it now has the Oop field it can replace this barrier.
+    if (lb->in(Oop) == in(Oop)) {
+      return lb;
+    }
+    // Follow chain of load barrier through Similar edges
+    while (!lb->in(Similar)->is_top()) {
+      lb = lb->in(Similar)->in(0)->as_LoadBarrier();
+      assert(lb->in(Address) == in(Address), "");
+    }
+    if (lb != in(Similar)->in(0)) {
+      return lb;
+    }
+  }
+  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+    Node* u = val->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
+        return u->as_LoadBarrier();
+      }
+    }
+  }
+
+  if (ZVerifyLoadBarriers || can_be_eliminated()) {
+    return NULL;
+  }
+
+  if (!look_for_similar) {
+    return NULL;
+  }
+
+  Node* addr = in(LoadBarrierNode::Address);
+  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+    Node* u = addr->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
+        ResourceMark rm;
+        Unique_Node_List wq;
+        wq.push(in(LoadBarrierNode::Control));
+        bool ok = true;
+        bool dom_found = false;
+        for (uint next = 0; next < wq.size(); ++next) {
+          Node *n = wq.at(next);
+          if (n->is_top()) {
+            return NULL;
+          }
+          assert(n->is_CFG(), "");
+          if (n->is_SafePoint()) {
+            ok = false;
+            break;
+          }
+          if (n == u) {
+            dom_found = true;
+            continue;
+          }
+          if (n->is_Region()) {
+            for (uint i = 1; i < n->req(); i++) {
+              Node* m = n->in(i);
+              if (m != NULL) {
+                wq.push(m);
+              }
+            }
+          } else {
+            Node* m = n->in(0);
+            if (m != NULL) {
+              wq.push(m);
+            }
+          }
+        }
+        if (ok) {
+          assert(dom_found, "");
+          return u->as_LoadBarrier();
+        }
+        break;
+      }
+    }
+  }
+
+  return NULL;
+}
+
+void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
+  // A change to this barrier may affect a dominated barrier, so re-push those
+  Node* val = in(LoadBarrierNode::Oop);
+
+  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+    Node* u = val->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
+      Node* this_ctrl = in(Control);
+      Node* other_ctrl = u->in(Control);
+      if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+        igvn->_worklist.push(u);
+      }
+    }
+
+    Node* addr = in(LoadBarrierNode::Address);
+    for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+      Node* u = addr->fast_out(i);
+      if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
+        Node* this_ctrl = in(Control);
+        Node* other_ctrl = u->in(Control);
+        if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+          igvn->_worklist.push(u);
+        }
+      }
+    }
+  }
+}
+
+Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
+  if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
+    return this;
+  }
+
+  LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
+  if (dominating_barrier != NULL) {
+    assert(dominating_barrier->in(Oop) == in(Oop), "");
+    return dominating_barrier;
+  }
+
+  return this;
+}
+
+Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (remove_dead_region(phase, can_reshape)) {
+    return this;
+  }
+
+  Node* val = in(Oop);
+  Node* mem = in(Memory);
+  Node* ctrl = in(Control);
+  Node* adr = in(Address);
+  assert(val->Opcode() != Op_LoadN, "");
+
+  if (mem->is_MergeMem()) {
+    Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+    set_req(Memory, new_mem);
+    if (mem->outcnt() == 0 && can_reshape) {
+      phase->is_IterGVN()->_worklist.push(mem);
+    }
+
+    return this;
+  }
+
+  bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
+  LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
+  if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
+    assert(in(Address) == dominating_barrier->in(Address), "");
+    set_req(Similar, dominating_barrier->proj_out(Oop));
+    return this;
+  }
+
+  bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
+                   (can_reshape && (dominating_barrier != NULL || !has_true_uses()));
+
+  if (eliminate) {
+    if (can_reshape) {
+      PhaseIterGVN* igvn = phase->is_IterGVN();
+      Node* out_ctrl = proj_out_or_null(Control);
+      Node* out_res = proj_out_or_null(Oop);
+
+      if (out_ctrl != NULL) {
+        igvn->replace_node(out_ctrl, ctrl);
+      }
+
+      // That transformation may cause the Similar edge on the load barrier to be invalid
+      fix_similar_in_uses(igvn);
+      if (out_res != NULL) {
+        if (dominating_barrier != NULL) {
+          igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
+        } else {
+          igvn->replace_node(out_res, val);
+        }
+      }
+    }
+
+    return new ConINode(TypeInt::ZERO);
+  }
+
+  // If the Similar edge is no longer a load barrier, clear it
+  Node* similar = in(Similar);
+  if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
+    set_req(Similar, phase->C->top());
+    return this;
+  }
+
+  if (can_reshape) {
+    // If this barrier is linked through the Similar edge by a
+    // dominated barrier and both barriers have the same Oop field,
+    // the dominated barrier can go away, so push it for reprocessing.
+    // We also want to avoid having a barrier depend on another dominating
+    // barrier through its Similar edge when that barrier itself depends on
+    // a third barrier through its own Similar edge; the first should rather
+    // depend directly on the third.
+    PhaseIterGVN* igvn = phase->is_IterGVN();
+    Node* out_res = proj_out(Oop);
+    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+      Node* u = out_res->fast_out(i);
+      if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
+          (u->in(Oop) == val || !u->in(Similar)->is_top())) {
+        igvn->_worklist.push(u);
+      }
+    }
+
+    push_dominated_barriers(igvn);
+  }
+
+  return NULL;
+}
+
+void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
+  Node* out_res = proj_out_or_null(Oop);
+  if (out_res == NULL) {
+    return;
+  }
+
+  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+    Node* u = out_res->fast_out(i);
+    if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
+      igvn->replace_input_of(u, Similar, igvn->C->top());
+      --i;
+      --imax;
+    }
+  }
+}
+
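+// A use of the Oop projection counts as "true" only if it is something
+// other than another load barrier's Similar edge; a barrier whose result
+// feeds nothing but Similar edges produces no value that real code consumes.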
+bool LoadBarrierNode::has_true_uses() const {
+  Node* out_res = proj_out_or_null(Oop);
+  if (out_res == NULL) {
+    return false;
+  }
+
+  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+    Node* u = out_res->fast_out(i);
+    if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// == Accesses ==
+
+Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
+  assert(!UseCompressedOops, "Not allowed");
+  CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
+  PhaseGVN& gvn = access.kit()->gvn();
+  Compile* C = Compile::current();
+  GraphKit* kit = access.kit();
+
+  Node* in_ctrl     = cas->in(MemNode::Control);
+  Node* in_mem      = cas->in(MemNode::Memory);
+  Node* in_adr      = cas->in(MemNode::Address);
+  Node* in_val      = cas->in(MemNode::ValueIn);
+  Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);
+
+  float likely                   = PROB_LIKELY(0.999);
+
+  const TypePtr *adr_type        = gvn.type(in_adr)->isa_ptr();
+  Compile::AliasType* alias_type = C->alias_type(adr_type);
+  int alias_idx                  = C->get_alias_index(adr_type);
+
+  // Outer check - true: continue, false: load and check
+  Node* region   = new RegionNode(3);
+  Node* phi      = new PhiNode(region, TypeInt::BOOL);
+  Node* phi_mem  = new PhiNode(region, Type::MEMORY, adr_type);
+
+  // Inner check - is the healed ref equal to the expected
+  Node* region2  = new RegionNode(3);
+  Node* phi2     = new PhiNode(region2, TypeInt::BOOL);
+  Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);
+
+  // CAS node returns 0 or 1
+  Node* cmp     = gvn.transform(new CmpINode(cas, kit->intcon(0)));
+  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+  Node* then    = gvn.transform(new IfTrueNode(iff));
+  Node* elsen   = gvn.transform(new IfFalseNode(iff));
+
+  Node* scmemproj1   = gvn.transform(new SCMemProjNode(cas));
+
+  kit->set_memory(scmemproj1, alias_idx);
+  phi_mem->init_req(1, scmemproj1);
+  phi_mem2->init_req(2, scmemproj1);
+
+  // CAS fail - reload and heal oop
+  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+  // Check load
+  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
+  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
+  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+  Node* then2   = gvn.transform(new IfTrueNode(iff2));
+  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
+
+  // redo CAS
+  Node* cas2       = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
+  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
+  kit->set_control(elsen2);
+  kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if the healed oop was equal to the expected value.
+  region2->set_req(1, kit->control());
+  region2->set_req(2, then2);
+  phi2->set_req(1, cas2);
+  phi2->set_req(2, kit->intcon(0));
+  phi_mem2->init_req(1, scmemproj2);
+  kit->set_memory(phi_mem2, alias_idx);
+
+  // Merge outer flow - then check if the first CAS succeeded
+  region->set_req(1, then);
+  region->set_req(2, region2);
+  phi->set_req(1, kit->intcon(1));
+  phi->set_req(2, phi2);
+  phi_mem->init_req(2, phi_mem2);
+  kit->set_memory(phi_mem, alias_idx);
+
+  gvn.transform(region2);
+  gvn.transform(phi2);
+  gvn.transform(phi_mem2);
+  gvn.transform(region);
+  gvn.transform(phi);
+  gvn.transform(phi_mem);
+
+  kit->set_control(region);
+  kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+  return phi;
+}
+
+Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
+  CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
+  GraphKit* kit = access.kit();
+  PhaseGVN& gvn = kit->gvn();
+  Compile* C = Compile::current();
+
+  Node* in_ctrl     = cmpx->in(MemNode::Control);
+  Node* in_mem      = cmpx->in(MemNode::Memory);
+  Node* in_adr      = cmpx->in(MemNode::Address);
+  Node* in_val      = cmpx->in(MemNode::ValueIn);
+  Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);
+
+  float likely                   = PROB_LIKELY(0.999);
+
+  const TypePtr *adr_type        = cmpx->get_ptr_type();
+  Compile::AliasType* alias_type = C->alias_type(adr_type);
+  int alias_idx                  = C->get_alias_index(adr_type);
+
+  // Outer check - true: continue, false: load and check
+  Node* region  = new RegionNode(3);
+  Node* phi     = new PhiNode(region, adr_type);
+
+  // Inner check - is the healed ref equal to the expected
+  Node* region2 = new RegionNode(3);
+  Node* phi2    = new PhiNode(region2, adr_type);
+
+  // Check if the cmpx succeeded
+  Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
+  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
+  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+  Node* then    = gvn.transform(new IfTrueNode(iff));
+  Node* elsen   = gvn.transform(new IfFalseNode(iff));
+
+  Node* scmemproj1  = gvn.transform(new SCMemProjNode(cmpx));
+  kit->set_memory(scmemproj1, alias_idx);
+
+  // CAS fail - reload and heal oop
+  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+  // Check load
+  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
+  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
+  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+  Node* then2   = gvn.transform(new IfTrueNode(iff2));
+  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
+
+  // Redo CAS
+  Node* cmpx2      = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
+  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
+  kit->set_control(elsen2);
+  kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if the healed oop was equal to the expected value.
+  region2->set_req(1, kit->control());
+  region2->set_req(2, then2);
+  phi2->set_req(1, cmpx2);
+  phi2->set_req(2, barrierdata);
+
+  // Merge outer flow - then check if the first cmpx succeeded
+  region->set_req(1, then);
+  region->set_req(2, region2);
+  phi->set_req(1, cmpx);
+  phi->set_req(2, phi2);
+
+  gvn.transform(region2);
+  gvn.transform(phi2);
+  gvn.transform(region);
+  gvn.transform(phi);
+
+  kit->set_control(region);
+  kit->set_memory(in_mem, alias_idx);
+  kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+  return phi;
+}
+
+Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
+  PhaseGVN& gvn = kit->gvn();
+  Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
+  Node* transformed_barrier = gvn.transform(barrier);
+
+  if (transformed_barrier->is_LoadBarrier()) {
+    if (barrier == transformed_barrier) {
+      kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
+    }
+    return gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
+  } else {
+    return val;
+  }
+}
+
+static bool barrier_needed(C2Access access) {
+  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
+  Node* p = BarrierSetC2::load_at_resolved(access, val_type);
+  if (!barrier_needed(access)) {
+    return p;
+  }
+
+  bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
+
+  GraphKit* kit = access.kit();
+  PhaseGVN& gvn = kit->gvn();
+  Node* adr = access.addr().node();
+  Node* heap_base_oop = access.base();
+  bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
+  if (unsafe) {
+    if (!ZVerifyLoadBarriers) {
+      p = load_barrier(kit, p, adr);
+    } else {
+      if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
+        p = load_barrier(kit, p, adr);
+      } else {
+        IdealKit ideal(kit);
+        IdealVariable res(ideal);
+#define __ ideal.
+        __ declarations_done();
+        __ set(res, p);
+        __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
+          kit->sync_kit(ideal);
+          p = load_barrier(kit, p, adr);
+          __ set(res, p);
+          __ sync_kit(kit);
+        } __ end_if();
+        kit->final_sync(ideal);
+        p = __ value(res);
+#undef __
+      }
+    }
+    return p;
+  } else {
+    return load_barrier(access.kit(), p, access.addr().node(), weak, true, true);
+  }
+}
+
+Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                                    Node* new_val, const Type* val_type) const {
+  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  access.set_needs_pinning(false);
+  return make_cmpx_loadbarrier(access);
+}
+
+Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                                     Node* new_val, const Type* value_type) const {
+  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  Node* load_store = access.raw_access();
+  bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);
+
+  if (!expected_is_null) {
+    // The same barrier is used for both weak and strong CAS
+    access.set_needs_pinning(false);
+    load_store = make_cas_loadbarrier(access);
+  }
+
+  return load_store;
+}
+
+Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
+  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  Node* load_store = access.raw_access();
+  Node* adr = access.addr().node();
+
+  return load_barrier(access.kit(), load_store, adr, false, false, false);
+}
+
+// == Macro Expansion ==
+
+void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr  = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
+
+  PhaseIterGVN &igvn = phase->igvn();
+
+  if (ZVerifyLoadBarriers) {
+    igvn.replace_node(out_res, in_val);
+    igvn.replace_node(out_ctrl, in_ctrl);
+    return;
+  }
+
+  if (barrier->can_be_eliminated()) {
+    // Clone and pin the load for this barrier below the dominating
+    // barrier: the load cannot be allowed to float above the
+    // dominating barrier
+    Node* load = in_val;
+
+    if (load->is_Load()) {
+      Node* new_load = load->clone();
+      Node* addp = new_load->in(MemNode::Address);
+      assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
+      Node* cast = new CastPPNode(addp, igvn.type(addp), true);
+      Node* ctrl = NULL;
+      Node* similar = barrier->in(LoadBarrierNode::Similar);
+      if (similar->is_Phi()) {
+        // already expanded
+        ctrl = similar->in(0);
+      } else {
+        assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
+        ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
+      }
+      assert(ctrl != NULL, "bad control");
+      cast->set_req(0, ctrl);
+      igvn.transform(cast);
+      new_load->set_req(MemNode::Address, cast);
+      igvn.transform(new_load);
+
+      igvn.replace_node(out_res, new_load);
+      igvn.replace_node(out_ctrl, in_ctrl);
+      return;
+    }
+    // cannot eliminate
+  }
+
+  // There are two cases that require the basic loadbarrier
+  // 1) When the writeback of a healed oop must be avoided (swap)
+  // 2) When we must guarantee that no reload of the oop is done (swap, cas, cmpx)
+  if (!barrier->is_writeback()) {
+    assert(!barrier->oop_reload_allowed(), "barriers without writeback must also disallow oop reload");
+  }
+
+  if (!barrier->oop_reload_allowed()) {
+    expand_loadbarrier_basic(phase, barrier);
+  } else {
+    expand_loadbarrier_optimized(phase, barrier);
+  }
+}
+
+// Basic loadbarrier using conventional arg passing
+void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+  PhaseIterGVN &igvn = phase->igvn();
+
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr  = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
+
+  float unlikely  = PROB_UNLIKELY(0.999);
+  const Type* in_val_maybe_null_t = igvn.type(in_val);
+
+  Node* jthread = igvn.transform(new ThreadLocalNode());
+  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
+  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+  Node* then = igvn.transform(new IfTrueNode(iff));
+  Node* elsen = igvn.transform(new IfFalseNode(iff));
+
+  Node* result_region;
+  Node* result_val;
+
+  result_region = new RegionNode(3);
+  result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);
+
+  result_region->set_req(1, elsen);
+  Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
+  res->init_req(0, elsen);
+  result_val->set_req(1, res);
+
+  const TypeFunc *tf = load_barrier_Type();
+  Node* call;
+  if (barrier->is_weak()) {
+    call = new CallLeafNode(tf,
+                            ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
+                            "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
+                            TypeRawPtr::BOTTOM);
+  } else {
+    call = new CallLeafNode(tf,
+                            ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
+                            "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
+                            TypeRawPtr::BOTTOM);
+  }
+
+  call->init_req(TypeFunc::Control, then);
+  call->init_req(TypeFunc::I_O    , phase->top());
+  call->init_req(TypeFunc::Memory , in_mem);
+  call->init_req(TypeFunc::FramePtr, phase->top());
+  call->init_req(TypeFunc::ReturnAdr, phase->top());
+  call->init_req(TypeFunc::Parms+0, in_val);
+  if (barrier->is_writeback()) {
+    call->init_req(TypeFunc::Parms+1, in_adr);
+  } else {
+    // When the slow path is called with a null adr, the healed oop will not be written back
+    call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
+  }
+  call = igvn.transform(call);
+
+  Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
+  res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
+  res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));
+
+  result_region->set_req(2, ctrl);
+  result_val->set_req(2, res);
+
+  result_region = igvn.transform(result_region);
+  result_val = igvn.transform(result_val);
+
+  if (out_ctrl != NULL) {
+    igvn.replace_node(out_ctrl, result_region);
+  }
+  igvn.replace_node(out_res, result_val);
+}
+
+// Optimized, low spill, loadbarrier variant using stub specialized on register used
+void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+  PhaseIterGVN &igvn = phase->igvn();
+#ifdef PRINT_NODE_TRAVERSALS
+  Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
+#endif
+
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
+
+  assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");
+
+#ifdef PRINT_NODE_TRAVERSALS
+  tty->print("\n\n\nBefore barrier optimization:\n");
+  traverse(barrier, out_ctrl, out_res, -1);
+
+  tty->print("\nBefore barrier optimization:  preceding_barrier_node\n");
+  traverse(preceding_barrier_node, out_ctrl, out_res, -1);
+#endif
+
+  float unlikely  = PROB_UNLIKELY(0.999);
+
+  Node* jthread = igvn.transform(new ThreadLocalNode());
+  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
+                                                 TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
+                                                 MemNode::unordered));
+  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+  Node* then = igvn.transform(new IfTrueNode(iff));
+  Node* elsen = igvn.transform(new IfFalseNode(iff));
+
+  Node* slow_path_surrogate;
+  if (!barrier->is_weak()) {
+    slow_path_surrogate = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+                                                                    (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+  } else {
+    slow_path_surrogate = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+                                                                        (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+  }
+
+  Node* new_loadp = slow_path_surrogate;
+
+  // Create the final region/phi pair to converge the control/data paths to downstream code
+  Node* result_region = igvn.transform(new RegionNode(3));
+  result_region->set_req(1, then);
+  result_region->set_req(2, elsen);
+
+  Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
+  result_phi->set_req(1, new_loadp);
+  result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
+
+  // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
+  if (out_ctrl != NULL) {
+    igvn.replace_node(out_ctrl, result_region);
+  }
+  igvn.replace_node(out_res, result_phi);
+
+  assert(barrier->outcnt() == 0, "LoadBarrier macro node has non-null outputs after expansion!");
+
+#ifdef PRINT_NODE_TRAVERSALS
+  tty->print("\nAfter barrier optimization:  old out_ctrl\n");
+  traverse(out_ctrl, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  old out_res\n");
+  traverse(out_res, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  old barrier\n");
+  traverse(barrier, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  preceding_barrier_node\n");
+  traverse(preceding_barrier_node, result_region, result_phi, -1);
+#endif
+
+  return;
+}
+
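+// Expansion runs in two passes: the first expands only barriers that
+// can_be_eliminated(), skipping the rest; the second expands whatever
+// remains. Unreachable barriers are simply dropped from the list.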
+bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
+  Compile* C = Compile::current();
+  PhaseIterGVN &igvn = macro->igvn();
+  ZBarrierSetC2State* s = state();
+  if (s->load_barrier_count() > 0) {
+#ifdef ASSERT
+    verify_gc_barriers(false);
+#endif
+    igvn.set_delay_transform(true);
+    int skipped = 0;
+    while (s->load_barrier_count() > skipped) {
+      int load_barrier_count = s->load_barrier_count();
+      LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
+      if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
+        // node is unreachable, so don't try to expand it
+        s->remove_load_barrier_node(n);
+        continue;
+      }
+      if (!n->can_be_eliminated()) {
+        skipped++;
+        continue;
+      }
+      expand_loadbarrier_node(macro, n);
+      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+      if (C->failing())  return true;
+    }
+    while (s->load_barrier_count() > 0) {
+      int load_barrier_count = s->load_barrier_count();
+      LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
+      assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
+      assert(!n->can_be_eliminated(), "should have been processed already");
+      expand_loadbarrier_node(macro, n);
+      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+      if (C->failing())  return true;
+    }
+    igvn.set_delay_transform(false);
+    igvn.optimize();
+    if (C->failing())  return true;
+  }
+  return false;
+}
+
+// == Loop optimization ==
+
+static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+
+  LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
+  if (lb2 != NULL) {
+    if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
+      assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
+      igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
+      C->set_major_progress();
+    } else  {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      Node* val = lb->proj_out(LoadBarrierNode::Oop);
+      assert(lb2->has_true_uses(), "");
+      assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");
+
+      phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+      phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+      igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
+
+      return true;
+    }
+  }
+  return false;
+}
+
+static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
+  assert(dom->is_Region() || i == -1, "");
+  Node* m = mem;
+  while (phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
+    if (m->is_Mem()) {
+      assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
+      m = m->in(MemNode::Memory);
+    } else if (m->is_MergeMem()) {
+      m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+    } else if (m->is_Phi()) {
+      if (m->in(0) == dom && i != -1) {
+        m = m->in(i);
+        break;
+      } else {
+        m = m->in(LoopNode::EntryControl);
+      }
+    } else if (m->is_Proj()) {
+      m = m->in(0);
+    } else if (m->is_SafePoint() || m->is_MemBar()) {
+      m = m->in(TypeFunc::Memory);
+    } else {
+#ifdef ASSERT
+      m->dump();
+#endif
+      ShouldNotReachHere();
+    }
+  }
+  return m;
+}
+
+static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+  Node* the_clone = lb->clone();
+  the_clone->set_req(LoadBarrierNode::Control, ctl);
+  the_clone->set_req(LoadBarrierNode::Memory, mem);
+  if (oop_in != NULL) {
+    the_clone->set_req(LoadBarrierNode::Oop, oop_in);
+  }
+
+  LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
+  igvn.register_new_node_with_optimizer(new_lb);
+  IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
+  phase->set_ctrl(new_lb, new_lb->in(0));
+  phase->set_loop(new_lb, loop);
+  phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
+  if (!loop->_child) {
+    loop->_body.push(new_lb);
+  }
+
+  Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
+  igvn.register_new_node_with_optimizer(proj_ctl);
+  phase->set_ctrl(proj_ctl, proj_ctl->in(0));
+  phase->set_loop(proj_ctl, loop);
+  phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
+  if (!loop->_child) {
+    loop->_body.push(proj_ctl);
+  }
+
+  Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
+  phase->register_new_node(proj_oop, new_lb);
+
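+  // The clone's Similar edge is only valid if the barrier it points to
+  // still dominates the new control; otherwise clear it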
+  if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
+    LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
+    if (!phase->is_dominator(similar, ctl)) {
+      igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
+    }
+  }
+
+  return new_lb;
+}
+
+static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Node* val = lb->proj_out(LoadBarrierNode::Oop);
+  igvn.replace_node(val, new_val);
+  phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+  phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+}
+
+static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+
+  if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
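+    // Split the barrier through the phi: clone one barrier into each
+    // predecessor of the phi's region and merge the healed oops with a
+    // new phi that replaces this barrier.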
+    Node* oop_phi = lb->in(LoadBarrierNode::Oop);
+
+    if (oop_phi->req() == 2) {
+      // Ignore phis with only one input
+      return false;
+    }
+
+    if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
+                            oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      RegionNode* region = oop_phi->in(0)->as_Region();
+
+      int backedge = LoopNode::LoopBackControl;
+      if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
+        Node* c = region->in(backedge)->in(0)->in(0);
+        assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
+        Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
+        Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
+        if (!phase->is_dominator(oop_c, c)) {
+          return false;
+        }
+      }
+
+      // If the phi's backedge input is this barrier's own oop projection,
+      // we have a self loop. Don't clone - it will be folded later.
+      if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
+        return false;
+      }
+
+      bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
+      Node *phi = oop_phi->clone();
+
+      for (uint i = 1; i < region->req(); i++) {
+        Node* ctrl = region->in(i);
+        if (ctrl != C->top()) {
+          assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");
+
+          Node* mem = lb->in(LoadBarrierNode::Memory);
+          Node* m = find_dominating_memory(phase, mem, region, i);
+
+          if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
+            ctrl = ctrl->in(0)->in(0);
+          } else if (region->is_Loop() && is_strip_mined) {
+            // If this is a strip mined loop, control must move above OuterStripMinedLoop
+            assert(i == LoopNode::EntryControl, "check");
+            assert(ctrl->is_OuterStripMinedLoop(), "sanity");
+            ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
+          }
+
+          LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
+          Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);
+
+          if (is_strip_mined && (i == LoopNode::EntryControl)) {
+            assert(region->in(i)->is_OuterStripMinedLoop(), "");
+            igvn.replace_input_of(region->in(i), i, out_ctrl);
+          } else if (ctrl == region->in(i)) {
+            igvn.replace_input_of(region, i, out_ctrl);
+          } else {
+            Node* iff = region->in(i)->in(0);
+            igvn.replace_input_of(iff, 0, out_ctrl);
+            phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
+          }
+          phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
+        }
+      }
+      phase->register_new_node(phi, region);
+      replace_barrier(phase, lb, phi);
+
+      if (region->is_Loop()) {
+        // A load barrier moved to the back edge of the loop may now
+        // have a safepoint on the path to the barrier on its Similar
+        // edge, so clear that edge
+        igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
+        Node* head = region->in(LoopNode::EntryControl);
+        phase->set_idom(region, head, phase->dom_depth(head)+1);
+        phase->recompute_dom_depth();
+        if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
+          head->as_CountedLoop()->set_normal_loop();
+        }
+      }
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
+static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
+  if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
+    Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
+    IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
+    IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
+    if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
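+      // Both the oop and the address are loop-invariant, so the barrier
+      // can be hoisted out of the loop.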
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      Node* head = lb_loop->_head;
+      assert(head->is_Loop(), "");
+
+      if (phase->is_dominator(head, oop_ctrl)) {
+        assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
+        assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
+        return false;
+      }
+
+      if (head->is_CountedLoop()) {
+        CountedLoopNode* cloop = head->as_CountedLoop();
+        if (cloop->is_main_loop()) {
+          cloop->set_normal_loop();
+        }
+        // When moving a barrier out of a counted loop, make sure it moves
+        // all the way out of the strip mined outer loop.
+        if (cloop->is_strip_mined()) {
+          head = cloop->outer_loop();
+        }
+      }
+
+      Node* mem = lb->in(LoadBarrierNode::Memory);
+      Node* m = find_dominating_memory(phase, mem, head, -1);
+
+      LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);
+
+      assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
+      Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+      igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
+      phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);
+
+      replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));
+
+      phase->recompute_dom_depth();
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
+static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Node* in_val = lb->in(LoadBarrierNode::Oop);
+  for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
+    Node* u = in_val->fast_out(i);
+    if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = lb->in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+
+      Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
+      bool ok = true;
+
+      Node* proj1 = NULL;
+      Node* proj2 = NULL;
+
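+      // Walk the idom chains of both barriers up to their LCA, recording
+      // the control projections immediately below it. The barriers can
+      // only be commoned if they sit on the two branches of a single If.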
+      while (this_ctrl != lca && ok) {
+        if (this_ctrl->in(0) != NULL &&
+            this_ctrl->in(0)->is_MultiBranch()) {
+          if (this_ctrl->in(0)->in(0) == lca) {
+            assert(proj1 == NULL, "");
+            assert(this_ctrl->is_Proj(), "");
+            proj1 = this_ctrl;
+          } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+            ok = false;
+          }
+        }
+        this_ctrl = phase->idom(this_ctrl);
+      }
+      while (other_ctrl != lca && ok) {
+        if (other_ctrl->in(0) != NULL &&
+            other_ctrl->in(0)->is_MultiBranch()) {
+          if (other_ctrl->in(0)->in(0) == lca) {
+            assert(other_ctrl->is_Proj(), "");
+            assert(proj2 == NULL, "");
+            proj2 = other_ctrl;
+          } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+            ok = false;
+          }
+        }
+        other_ctrl = phase->idom(other_ctrl);
+      }
+      assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
+      if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
+        // That transformation may cause the Similar edge on dominated load barriers to be invalid
+        lb->fix_similar_in_uses(&igvn);
+        u->as_LoadBarrier()->fix_similar_in_uses(&igvn);
+
+        Node* split = lca->unique_ctrl_out();
+        assert(split->in(0) == lca, "");
+
+        Node* mem = lb->in(LoadBarrierNode::Memory);
+        Node* m = find_dominating_memory(phase, mem, split, -1);
+        LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);
+
+        Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+        igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control));
+        phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);
+
+        Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
+        replace_barrier(phase, lb, proj_oop);
+        replace_barrier(phase, u->as_LoadBarrier(), proj_oop);
+
+        phase->recompute_dom_depth();
+
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+  Compile* C = Compile::current();
+
+  if (!C->directive()->ZOptimizeLoadBarriersOption) {
+    return;
+  }
+
+  if (lb->has_true_uses()) {
+    if (replace_with_dominating_barrier(phase, lb, last_round)) {
+      return;
+    }
+
+    if (split_barrier_thru_phi(phase, lb)) {
+      return;
+    }
+
+    if (move_out_of_loop(phase, lb)) {
+      return;
+    }
+
+    if (common_barriers(phase, lb)) {
+      return;
+    }
+  }
+}
+
+void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
+  if (node->is_LoadBarrier()) {
+    optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
+  }
+}
+
+// == Verification ==
+
+#ifdef ASSERT
+
+static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
+  if (visited.test_set(n->_idx)) {
+    return true;
+  }
+
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node* u = n->fast_out(i);
+    if (u->is_LoadBarrier()) {
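+      // Found a load barrier use - this is what we are looking for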
+    } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
+      if (!look_for_barrier(u, post_parse, visited)) {
+        return false;
+      }
+    } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
+      if (!look_for_barrier(u, post_parse, visited)) {
+        return false;
+      }
+    } else if (u->Opcode() != Op_SCMemProj) {
+      tty->print("bad use"); u->dump();
+      return false;
+    }
+  }
+
+  return true;
+}
+
+void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
+  ZBarrierSetC2State* s = state();
+  Compile* C = Compile::current();
+  ResourceMark rm;
+  VectorSet visited(Thread::current()->resource_area());
+  for (int i = 0; i < s->load_barrier_count(); i++) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+
+    // The dominating barrier on the same address, if one exists, and
+    // this barrier must not be applied to the value from the same
+    // load; otherwise the value is not reloaded before it's used a
+    // second time.
+    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
+           "broken similar edge");
+
+    assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
+           "found unneeded load barrier");
+
+    // Several load barrier nodes chained through their Similar edges
+    // break the code that removes the barriers in final graph reshape.
+    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
+           "chain of Similar load barriers");
+
+    if (!n->in(LoadBarrierNode::Similar)->is_top()) {
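+      // Walk the control flow upwards from this barrier and verify that
+      // no safepoint lies on a path to the barrier on the Similar edge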
+      ResourceMark rm;
+      Unique_Node_List wq;
+      Node* other = n->in(LoadBarrierNode::Similar)->in(0);
+      wq.push(n);
+      for (uint next = 0; next < wq.size(); ++next) {
+        Node* c = wq.at(next);
+        assert(c->is_CFG(), "");
+        assert(!c->is_SafePoint(), "");
+
+        if (c == other) {
+          continue;
+        }
+
+        if (c->is_Region()) {
+          for (uint i = 1; i < c->req(); i++) {
+            Node* m = c->in(i);
+            if (m != NULL) {
+              wq.push(m);
+            }
+          }
+        } else {
+          Node* m = c->in(0);
+          if (m != NULL) {
+            wq.push(m);
+          }
+        }
+      }
+    }
+
+    if (ZVerifyLoadBarriers) {
+      if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
+        visited.Clear();
+        bool found = look_for_barrier(n, post_parse, visited);
+        if (!found) {
+          n->dump(1);
+          n->dump(-3);
+          stringStream ss;
+          C->method()->print_short_name(&ss);
+          tty->print_cr("-%s-", ss.as_string());
+          assert(found, "");
+        }
+      }
+    }
+  }
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+#define SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+
+#include "gc/shared/c2/barrierSetC2.hpp"
+#include "memory/allocation.hpp"
+#include "opto/node.hpp"
+#include "utilities/growableArray.hpp"
+
+class LoadBarrierNode : public MultiNode {
+private:
+  bool _weak;
+  bool _writeback;          // Controls if the barrier writes the healed oop back to memory.
+                            // A swap on a memory location must never write back the healed oop.
+  bool _oop_reload_allowed; // Controls if the barrier is allowed to reload the oop from memory
+                            // before healing; otherwise both the oop and the address must be
+                            // passed to the barrier explicitly.
+
+  static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
+  void push_dominated_barriers(PhaseIterGVN* igvn) const;
+
+public:
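+  // Control, Memory and Oop have both input slots and output projections;
+  // Address and Similar are inputs only, hence Number_of_Outputs == Address.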
+  enum {
+    Control,
+    Memory,
+    Oop,
+    Address,
+    Number_of_Outputs = Address,
+    Similar,
+    Number_of_Inputs
+  };
+
+  LoadBarrierNode(Compile* C,
+                  Node* c,
+                  Node* mem,
+                  Node* val,
+                  Node* adr,
+                  bool weak,
+                  bool writeback,
+                  bool oop_reload_allowed);
+
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const;
+  virtual const Type *Value(PhaseGVN *phase) const;
+  virtual Node *Identity(PhaseGVN *phase);
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+
+  LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
+                                          bool linear_only,
+                                          bool look_for_similar);
+
+  void fix_similar_in_uses(PhaseIterGVN* igvn);
+
+  bool has_true_uses() const;
+
+  bool can_be_eliminated() const {
+    return !in(Similar)->is_top();
+  }
+
+  bool is_weak() const {
+    return _weak;
+  }
+
+  bool is_writeback() const {
+    return _writeback;
+  }
+
+  bool oop_reload_allowed() const {
+    return _oop_reload_allowed;
+  }
+};
+
+class LoadBarrierSlowRegNode : public LoadPNode {
+public:
+  LoadBarrierSlowRegNode(Node *c,
+                         Node *mem,
+                         Node *adr,
+                         const TypePtr *at,
+                         const TypePtr* t,
+                         MemOrd mo,
+                         ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+  virtual const char * name() {
+    return "LoadBarrierSlowRegNode";
+  }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class LoadBarrierWeakSlowRegNode : public LoadPNode {
+public:
+  LoadBarrierWeakSlowRegNode(Node *c,
+                             Node *mem,
+                             Node *adr,
+                             const TypePtr *at,
+                             const TypePtr* t,
+                             MemOrd mo,
+                             ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+  virtual const char * name() {
+    return "LoadBarrierWeakSlowRegNode";
+  }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class ZBarrierSetC2State : public ResourceObj {
+private:
+  // List of load barrier nodes which need to be expanded before matching
+  GrowableArray<LoadBarrierNode*>* _load_barrier_nodes;
+
+public:
+  ZBarrierSetC2State(Arena* comp_arena);
+  int load_barrier_count() const;
+  void add_load_barrier_node(LoadBarrierNode* n);
+  void remove_load_barrier_node(LoadBarrierNode* n);
+  LoadBarrierNode* load_barrier_node(int idx) const;
+};
+
+class ZBarrierSetC2 : public BarrierSetC2 {
+private:
+  ZBarrierSetC2State* state() const;
+  Node* make_cas_loadbarrier(C2AtomicAccess& access) const;
+  Node* make_cmpx_loadbarrier(C2AtomicAccess& access) const;
+  void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
+  void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
+  void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
+  const TypeFunc* load_barrier_Type() const;
+
+protected:
+  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
+                                               Node* expected_val,
+                                               Node* new_val,
+                                               const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access,
+                                                Node* expected_val,
+                                                Node* new_val,
+                                                const Type* value_type) const;
+  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access,
+                                        Node* new_val,
+                                        const Type* val_type) const;
+
+public:
+  Node* load_barrier(GraphKit* kit,
+                     Node* val,
+                     Node* adr,
+                     bool weak = false,
+                     bool writeback = true,
+                     bool oop_reload_allowed = true) const;
+
+  virtual void* create_barrier_state(Arena* comp_arena) const;
+  virtual bool is_gc_barrier_node(Node* node) const;
+  virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
+  virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const;
+  virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
+  virtual void enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const;
+  virtual void register_potential_barrier_node(Node* node) const;
+  virtual void unregister_potential_barrier_node(Node* node) const;
+  virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
+  virtual Node* step_over_gc_barrier(Node* c) const { return c; }
+  // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
+  // expanded later, then now is the time to do so.
+  virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
+
+  static void find_dominating_barriers(PhaseIterGVN& igvn);
+  static void loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round);
+
+#ifdef ASSERT
+  virtual void verify_gc_barriers(bool post_parse) const;
+#endif
+};
+
+#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/vmStructs_z.hpp"
+
+ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
+    _ZGlobalPhase(&ZGlobalPhase),
+    _ZAddressGoodMask(&ZAddressGoodMask),
+    _ZAddressBadMask(&ZAddressBadMask),
+    _ZAddressWeakBadMask(&ZAddressWeakBadMask),
+    _ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift),
+    _ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {
+}
+
+ZGlobalsForVMStructs ZGlobalsForVMStructs::_instance;
+ZGlobalsForVMStructs* ZGlobalsForVMStructs::_instance_p = &ZGlobalsForVMStructs::_instance;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
+#define SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
+
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zPhysicalMemory.hpp"
+#include "utilities/macros.hpp"
+
+// Expose some ZGC globals to the SA agent.
+class ZGlobalsForVMStructs {
+  static ZGlobalsForVMStructs _instance;
+
+public:
+  static ZGlobalsForVMStructs* _instance_p;
+
+  ZGlobalsForVMStructs();
+
+  uint32_t* _ZGlobalPhase;
+
+  uintptr_t* _ZAddressGoodMask;
+  uintptr_t* _ZAddressBadMask;
+  uintptr_t* _ZAddressWeakBadMask;
+
+  const int* _ZObjectAlignmentSmallShift;
+  const int* _ZObjectAlignmentSmall;
+};
+
+typedef ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> ZAddressRangeMapForPageTable;
+
+#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field)                      \
+  static_field(ZGlobalsForVMStructs,            _instance_p,          ZGlobalsForVMStructs*)         \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZGlobalPhase,        uint32_t*)                     \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressGoodMask,    uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressBadMask,     uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressWeakBadMask, uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZObjectAlignmentSmallShift, const int*)             \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZObjectAlignmentSmall, const int*)                  \
+                                                                                                     \
+  nonstatic_field(ZCollectedHeap,               _heap,                ZHeap)                         \
+                                                                                                     \
+  nonstatic_field(ZHeap,                        _page_allocator,      ZPageAllocator)                \
+  nonstatic_field(ZHeap,                        _pagetable,           ZPageTable)                    \
+                                                                                                     \
+  nonstatic_field(ZPage,                        _type,                const uint8_t)                 \
+  nonstatic_field(ZPage,                        _virtual,             const ZVirtualMemory)          \
+  nonstatic_field(ZPage,                        _forwarding,          ZForwardingTable)              \
+                                                                                                     \
+  nonstatic_field(ZPageAllocator,               _physical,            ZPhysicalMemoryManager)        \
+  nonstatic_field(ZPageAllocator,               _used,                size_t)                        \
+                                                                                                     \
+  nonstatic_field(ZPageTable,                   _map,                 ZAddressRangeMapForPageTable)  \
+                                                                                                     \
+  nonstatic_field(ZAddressRangeMapForPageTable, _map,                 ZPageTableEntry* const)        \
+                                                                                                     \
+  nonstatic_field(ZVirtualMemory,                _start,              uintptr_t)                     \
+  nonstatic_field(ZVirtualMemory,                _end,                uintptr_t)                     \
+                                                                                                     \
+  nonstatic_field(ZForwardingTable,              _table,              ZForwardingTableEntry*)        \
+  nonstatic_field(ZForwardingTable,              _size,               size_t)                        \
+                                                                                                     \
+  nonstatic_field(ZPhysicalMemoryManager,        _max_capacity,       const size_t)                  \
+  nonstatic_field(ZPhysicalMemoryManager,        _capacity,           size_t)
+
+#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value)                          \
+  declare_constant(ZPhaseRelocate)                                                                   \
+  declare_constant(ZPageTypeSmall)                                                                   \
+  declare_constant(ZPageTypeMedium)                                                                  \
+  declare_constant(ZPageTypeLarge)                                                                   \
+  declare_constant(ZObjectAlignmentMediumShift)                                                      \
+  declare_constant(ZObjectAlignmentLargeShift)
+
+#define VM_LONG_CONSTANTS_ZGC(declare_constant)                                                      \
+  declare_constant(ZPageSizeSmallShift)                                                              \
+  declare_constant(ZPageSizeMediumShift)                                                             \
+  declare_constant(ZPageSizeMinShift)                                                                \
+  declare_constant(ZAddressOffsetShift)                                                              \
+  declare_constant(ZAddressOffsetBits)                                                               \
+  declare_constant(ZAddressOffsetMask)                                                               \
+  declare_constant(ZAddressSpaceStart)
+
+#define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type)                      \
+  declare_toplevel_type(ZGlobalsForVMStructs)                                                        \
+  declare_type(ZCollectedHeap, CollectedHeap)                                                        \
+  declare_toplevel_type(ZHeap)                                                                       \
+  declare_toplevel_type(ZPage)                                                                       \
+  declare_toplevel_type(ZPageAllocator)                                                              \
+  declare_toplevel_type(ZPageTable)                                                                  \
+  declare_toplevel_type(ZPageTableEntry)                                                             \
+  declare_toplevel_type(ZAddressRangeMapForPageTable)                                                \
+  declare_toplevel_type(ZVirtualMemory)                                                              \
+  declare_toplevel_type(ZForwardingTable)                                                            \
+  declare_toplevel_type(ZForwardingTableEntry)                                                       \
+  declare_toplevel_type(ZPhysicalMemoryManager)
+
+#endif // SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "runtime/thread.hpp"
+
+void ZAddressMasks::set_good_mask(uintptr_t mask) {
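+  // The bad mask is the complement of the good mask within the metadata
+  // bits; the weak bad mask additionally tolerates the remapped and
+  // finalizable bits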
+  ZAddressGoodMask = mask;
+  ZAddressBadMask = ZAddressGoodMask ^ ZAddressMetadataMask;
+  ZAddressWeakBadMask = (ZAddressGoodMask | ZAddressMetadataRemapped | ZAddressMetadataFinalizable) ^ ZAddressMetadataMask;
+}
+
+void ZAddressMasks::initialize() {
+  ZAddressMetadataMarked = ZAddressMetadataMarked0;
+  set_good_mask(ZAddressMetadataRemapped);
+}
+
+void ZAddressMasks::flip_to_marked() {
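+  // Toggle the current marked bit between Marked0 and Marked1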
+  ZAddressMetadataMarked ^= (ZAddressMetadataMarked0 | ZAddressMetadataMarked1);
+  set_good_mask(ZAddressMetadataMarked);
+}
+
+void ZAddressMasks::flip_to_remapped() {
+  set_good_mask(ZAddressMetadataRemapped);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_HPP
+#define SHARE_GC_Z_ZADDRESS_HPP
+
+#include "memory/allocation.hpp"
+
+class ZAddress : public AllStatic {
+public:
+  static bool is_null(uintptr_t value);
+  static bool is_bad(uintptr_t value);
+  static bool is_good(uintptr_t value);
+  static bool is_good_or_null(uintptr_t value);
+  static bool is_weak_bad(uintptr_t value);
+  static bool is_weak_good(uintptr_t value);
+  static bool is_weak_good_or_null(uintptr_t value);
+  static bool is_marked(uintptr_t value);
+  static bool is_finalizable(uintptr_t value);
+  static bool is_remapped(uintptr_t value);
+
+  static uintptr_t address(uintptr_t value);
+  static uintptr_t offset(uintptr_t value);
+  static uintptr_t good(uintptr_t value);
+  static uintptr_t good_or_null(uintptr_t value);
+  static uintptr_t finalizable_good(uintptr_t value);
+  static uintptr_t marked(uintptr_t value);
+  static uintptr_t marked0(uintptr_t value);
+  static uintptr_t marked1(uintptr_t value);
+  static uintptr_t remapped(uintptr_t value);
+  static uintptr_t remapped_or_null(uintptr_t value);
+};
+
+class ZAddressMasks : public AllStatic {
+  friend class ZAddressTest;
+
+private:
+  static void set_good_mask(uintptr_t mask);
+
+public:
+  static void initialize();
+  static void flip_to_marked();
+  static void flip_to_remapped();
+};
+
+#endif // SHARE_GC_Z_ZADDRESS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESS_INLINE_HPP
+
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "utilities/macros.hpp"
+#include OS_CPU_HEADER_INLINE(gc/z/zAddress)
+
+inline bool ZAddress::is_null(uintptr_t value) {
+  return value == 0;
+}
+
+inline bool ZAddress::is_bad(uintptr_t value) {
+  return value & ZAddressBadMask;
+}
+
+inline bool ZAddress::is_good(uintptr_t value) {
+  return !is_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_good_or_null(uintptr_t value) {
+  // Checking if an address is "not bad" is an optimized version of
+  // checking if it's "good or null", which eliminates an explicit
+  // null check. However, the implicit null check only checks that
+  // the mask bits are zero, not that the entire address is zero.
+  // This means that an address without mask bits would pass through
+  // the barrier as if it was null. This should be harmless as such
+  // addresses should never be passed through the barrier.
+  const bool result = !is_bad(value);
+  assert((is_good(value) || is_null(value)) == result, "Bad address");
+  return result;
+}
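+
+// Worked example of the reasoning above, with hypothetical mask values
+// (not ZGC's real bit layout):
+//
+//   ZAddressBadMask = 0b10 << 42
+//   value == 0                         -> is_bad() false -> null, passes
+//   value == (0b01 << 42) | offset     -> is_bad() false -> good, passes
+//   value == (0b10 << 42) | offset     -> is_bad() true  -> barrier taken
+//   value == offset (no metadata bits) -> is_bad() false -> passes as if
+//                                         null; must never occur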
+
+inline bool ZAddress::is_weak_bad(uintptr_t value) {
+  return value & ZAddressWeakBadMask;
+}
+
+inline bool ZAddress::is_weak_good(uintptr_t value) {
+  return !is_weak_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_weak_good_or_null(uintptr_t value) {
+  return !is_weak_bad(value);
+}
+
+inline bool ZAddress::is_marked(uintptr_t value) {
+  return value & ZAddressMetadataMarked;
+}
+
+inline bool ZAddress::is_finalizable(uintptr_t value) {
+  return value & ZAddressMetadataFinalizable;
+}
+
+inline bool ZAddress::is_remapped(uintptr_t value) {
+  return value & ZAddressMetadataRemapped;
+}
+
+inline uintptr_t ZAddress::offset(uintptr_t value) {
+  return value & ZAddressOffsetMask;
+}
+
+inline uintptr_t ZAddress::good(uintptr_t value) {
+  return address(offset(value) | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
+  return is_null(value) ? 0 : good(value);
+}
+
+inline uintptr_t ZAddress::finalizable_good(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::marked(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked);
+}
+
+inline uintptr_t ZAddress::marked0(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked0);
+}
+
+inline uintptr_t ZAddress::marked1(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked1);
+}
+
+inline uintptr_t ZAddress::remapped(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataRemapped);
+}
+
+inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) {
+  return is_null(value) ? 0 : remapped(value);
+}
+
+#endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+
+#include "memory/allocation.hpp"
+
+template<typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator;
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMap {
+  friend class VMStructs;
+  friend class ZAddressRangeMapIterator<T, AddressRangeShift>;
+
+private:
+  T* const _map;
+
+  size_t index_for_addr(uintptr_t addr) const;
+  size_t size() const;
+
+public:
+  ZAddressRangeMap();
+  ~ZAddressRangeMap();
+
+  T get(uintptr_t addr) const;
+  void put(uintptr_t addr, T value);
+};
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator : public StackObj {
+private:
+  const ZAddressRangeMap<T, AddressRangeShift>* const _map;
+  size_t                                              _next;
+
+public:
+  ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map);
+
+  bool next(T* value);
+};
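+
+// Illustrative usage (a sketch; ZPageTableEntry and ZPageSizeMinShift as in
+// the ZAddressRangeMapForPageTable typedef in vmStructs_z.hpp):
+//
+//   ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> map;
+//   map.put(addr, entry);               // map the range covering addr
+//   ZPageTableEntry e = map.get(addr);  // constant-time lookup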
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "memory/allocation.inline.hpp"
+
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::ZAddressRangeMap() :
+    _map(MmapArrayAllocator<T>::allocate(size(), mtGC)) {}
+
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::~ZAddressRangeMap() {
+  MmapArrayAllocator<T>::free(_map, size());
+}
+
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::index_for_addr(uintptr_t addr) const {
+  assert(!ZAddress::is_null(addr), "Invalid address");
+
+  const size_t index = ZAddress::offset(addr) >> AddressRangeShift;
+  assert(index < size(), "Invalid index");
+
+  return index;
+}
+
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::size() const {
+  return ZAddressOffsetMax >> AddressRangeShift;
+}
+
+template <typename T, size_t AddressRangeShift>
+T ZAddressRangeMap<T, AddressRangeShift>::get(uintptr_t addr) const {
+  const uintptr_t index = index_for_addr(addr);
+  return _map[index];
+}
+
+template <typename T, size_t AddressRangeShift>
+void ZAddressRangeMap<T, AddressRangeShift>::put(uintptr_t addr, T value) {
+  const uintptr_t index = index_for_addr(addr);
+  _map[index] = value;
+}
+
+template <typename T, size_t AddressRangeShift>
+inline ZAddressRangeMapIterator<T, AddressRangeShift>::ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map) :
+    _map(map),
+    _next(0) {}
+
+template <typename T, size_t AddressRangeShift>
+inline bool ZAddressRangeMapIterator<T, AddressRangeShift>::next(T* value) {
+  if (_next < _map->size()) {
+    *value = _map->_map[_next++];
+    return true;
+  }
+
+  // End of map
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+#define SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+
+//
+// Allocation flags layout
+// -----------------------
+//
+//   7   4 3 2 1 0
+//  +---+-+-+-+-+-+
+//  |000|1|1|1|1|1|
+//  +---+-+-+-+-+-+
+//  |   | | | | |
+//  |   | | | | * 0-0 Java Thread Flag (1-bit)
+//  |   | | | |
+//  |   | | | * 1-1 Worker Thread Flag (1-bit)
+//  |   | | |
+//  |   | | * 2-2 Non-Blocking Flag (1-bit)
+//  |   | |
+//  |   | * 3-3 Relocation Flag (1-bit)
+//  |   |
+//  |   * 4-4 No Reserve Flag (1-bit)
+//  |
+//  * 7-5 Unused (3-bits)
+//
+
+class ZAllocationFlags {
+private:
+  typedef ZBitField<uint8_t, bool, 0, 1> field_java_thread;
+  typedef ZBitField<uint8_t, bool, 1, 1> field_worker_thread;
+  typedef ZBitField<uint8_t, bool, 2, 1> field_non_blocking;
+  typedef ZBitField<uint8_t, bool, 3, 1> field_relocation;
+  typedef ZBitField<uint8_t, bool, 4, 1> field_no_reserve;
+
+  uint8_t _flags;
+
+public:
+  ZAllocationFlags() :
+      _flags(0) {}
+
+  void set_java_thread() {
+    _flags |= field_java_thread::encode(true);
+  }
+
+  void set_worker_thread() {
+    _flags |= field_worker_thread::encode(true);
+  }
+
+  void set_non_blocking() {
+    _flags |= field_non_blocking::encode(true);
+  }
+
+  void set_relocation() {
+    _flags |= field_relocation::encode(true);
+  }
+
+  void set_no_reserve() {
+    _flags |= field_no_reserve::encode(true);
+  }
+
+  bool java_thread() const {
+    return field_java_thread::decode(_flags);
+  }
+
+  bool worker_thread() const {
+    return field_worker_thread::decode(_flags);
+  }
+
+  bool non_blocking() const {
+    return field_non_blocking::decode(_flags);
+  }
+
+  bool relocation() const {
+    return field_relocation::decode(_flags);
+  }
+
+  bool no_reserve() const {
+    return field_no_reserve::decode(_flags);
+  }
+};
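+
+// Illustrative usage (a sketch, not part of the allocation path):
+//
+//   ZAllocationFlags flags;
+//   flags.set_java_thread();
+//   flags.set_non_blocking();
+//   assert(flags.java_thread() && flags.non_blocking(), "flags encoded");
+//   assert(!flags.relocation(), "unset flags decode to false");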
+
+#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArguments.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArguments.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "gc/shared/gcArguments.inline.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+
+size_t ZArguments::conservative_max_heap_alignment() {
+  return 0;
+}
+
+void ZArguments::initialize() {
+  GCArguments::initialize();
+
+  // Enable NUMA by default
+  if (FLAG_IS_DEFAULT(UseNUMA)) {
+    FLAG_SET_DEFAULT(UseNUMA, true);
+  }
+
+  // Disable biased locking by default
+  if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
+    FLAG_SET_DEFAULT(UseBiasedLocking, false);
+  }
+
+  // Select number of parallel threads
+  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+    FLAG_SET_DEFAULT(ParallelGCThreads, ZWorkers::calculate_nparallel());
+  }
+
+  if (ParallelGCThreads == 0) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
+  }
+
+  // Select number of concurrent threads
+  if (FLAG_IS_DEFAULT(ConcGCThreads)) {
+    FLAG_SET_DEFAULT(ConcGCThreads, ZWorkers::calculate_nconcurrent());
+  }
+
+  if (ConcGCThreads == 0) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
+  }
+
+#ifdef COMPILER2
+  // Enable loop strip mining by default
+  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+    }
+  }
+#endif
+
+  // To avoid asserts in set_active_workers()
+  FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, true);
+
+  // CompressedOops/UseCompressedClassPointers not supported
+  FLAG_SET_DEFAULT(UseCompressedOops, false);
+  FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+
+  // ClassUnloading not (yet) supported
+  FLAG_SET_DEFAULT(ClassUnloading, false);
+  FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+
+  // Verification before startup and after exit not (yet) supported
+  FLAG_SET_DEFAULT(VerifyDuringStartup, false);
+  FLAG_SET_DEFAULT(VerifyBeforeExit, false);
+
+  // Verification of stacks is not (yet) supported, for the same reason
+  // that we need fixup_partial_loads
+  DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
+
+  // JVMCI not (yet) supported
+  if (EnableJVMCI) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:+EnableJVMCI");
+  }
+}
+
+CollectedHeap* ZArguments::create_heap() {
+  return create_heap_with_policy<ZCollectedHeap, ZCollectorPolicy>();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArguments.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARGUMENTS_HPP
+#define SHARE_GC_Z_ZARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CollectedHeap;
+
+class ZArguments : public GCArguments {
+public:
+  virtual void initialize();
+  virtual size_t conservative_max_heap_alignment();
+  virtual CollectedHeap* create_heap();
+};
+
+#endif // SHARE_GC_Z_ZARGUMENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArray.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_HPP
+#define SHARE_GC_Z_ZARRAY_HPP
+
+#include "memory/allocation.hpp"
+
+template <typename T>
+class ZArray {
+private:
+  static const size_t initial_capacity = 32;
+
+  T*     _array;
+  size_t _size;
+  size_t _capacity;
+
+  // Copy and assignment are not allowed
+  ZArray(const ZArray<T>& array);
+  ZArray<T>& operator=(const ZArray<T>& array);
+
+  void expand(size_t new_capacity);
+
+public:
+  ZArray();
+  ~ZArray();
+
+  size_t size() const;
+  bool is_empty() const;
+
+  T at(size_t index) const;
+
+  void add(T value);
+  void clear();
+};
+
+template <typename T, bool parallel>
+class ZArrayIteratorImpl : public StackObj {
+private:
+  ZArray<T>* const _array;
+  size_t           _next;
+
+public:
+  ZArrayIteratorImpl(ZArray<T>* array);
+
+  bool next(T* elem);
+};
+
+// Iterator types
+#define ZARRAY_SERIAL      false
+#define ZARRAY_PARALLEL    true
+
+template <typename T>
+class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
+public:
+  ZArrayIterator(ZArray<T>* array) :
+      ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
+};
+
+template <typename T>
+class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
+public:
+  ZArrayParallelIterator(ZArray<T>* array) :
+      ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
+};
+
+#endif // SHARE_GC_Z_ZARRAY_HPP
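
A hedged usage sketch of the ZArray/ZArrayIterator API declared above, assuming gc/z/zArray.inline.hpp (added below) is included; visit_all is a hypothetical caller, not part of the changeset.

// Append elements, then drain them through the declared next(T*) protocol.
void visit_all(ZArray<int>* array) {
  array->add(1);
  array->add(2);

  ZArrayIterator<int> iter(array);
  for (int elem; iter.next(&elem); ) {
    // Use elem. With ZArrayParallelIterator the same loop can be run by
    // several GC worker threads, each claiming a disjoint set of elements.
  }
}
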
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_INLINE_HPP
+#define SHARE_GC_Z_ZARRAY_INLINE_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
+
+template <typename T>
+inline ZArray<T>::ZArray() :
+    _array(NULL),
+    _size(0),
+    _capacity(0) {}
+
+template <typename T>
+inline ZArray<T>::~ZArray() {
+  if (_array != NULL) {
+    FREE_C_HEAP_ARRAY(T, _array);
+  }
+}
+
+template <typename T>
+inline size_t ZArray<T>::size() const {
+  return _size;
+}
+
+template <typename T>
+inline bool ZArray<T>::is_empty() const {
+  return size() == 0;
+}
+
+template <typename T>
+inline T ZArray<T>::at(size_t index) const {
+  assert(index < _size, "Index out of bounds");
+  return _array[index];
+}
+
+template <typename T>
+inline void ZArray<T>::expand(size_t new_capacity) {
+  T* new_array = NEW_C_HEAP_ARRAY(T, new_capacity, mtGC);
+  if (_array != NULL) {
+    memcpy(new_array, _array, sizeof(T) * _capacity);
+    FREE_C_HEAP_ARRAY(T, _array);
+  }
+
+  _array = new_array;
+  _capacity = new_capacity;
+}
+
+template <typename T>
+inline void ZArray<T>::add(T value) {
+  if (_size == _capacity) {
+    const size_t new_capacity = (_capacity > 0) ? _capacity * 2 : initial_capacity;
+    expand(new_capacity);
+  }
+
+  _array[_size++] = value;
+}
+
+template <typename T>
+inline void ZArray<T>::clear() {
+  _size = 0;
+}
+
+template <typename T, bool parallel>
+inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
+    _array(array),
+    _next(0) {}
+
+template <typename T, bool parallel>
+inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
+  if (parallel) {
+    const size_t next = Atomic::add(1u, &_next) - 1u;
+    if (next < _array->size()) {
+      *elem = _array->at(next);
+      return true;
+    }
+  } else {
+    if (_next < _array->size()) {
+      *elem = _array->at(_next++);
+      return true;
+    }
+  }
+
+  // No more elements
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZARRAY_INLINE_HPP
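
The parallel flavor of next() above claims indexes with a fetch-and-add. A standalone sketch of that claiming scheme, using std::atomic in place of HotSpot's Atomic class (names are illustrative):

#include <atomic>
#include <cstddef>

// Each thread atomically claims the next index, so every element is handed
// to exactly one thread; fetch_add(1) returns the pre-increment value, the
// same result as Atomic::add(1u, &_next) - 1u above.
template <typename T>
bool parallel_next(std::atomic<size_t>* next, const T* array, size_t size, T* elem) {
  const size_t index = next->fetch_add(1);
  if (index < size) {
    *elem = array[index];
    return true;
  }
  return false;  // Past the end; other threads claimed the remaining slots
}
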
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/debug.hpp"
+
+bool ZBarrier::during_mark() {
+  return ZGlobalPhase == ZPhaseMark;
+}
+
+bool ZBarrier::during_relocate() {
+  return ZGlobalPhase == ZPhaseRelocate;
+}
+
+template <bool finalizable>
+bool ZBarrier::should_mark_through(uintptr_t addr) {
+  // Finalizable marked oops can still exist on the heap after marking
+  // has completed, in which case we just want to convert this into a
+  // good oop and not push it on the mark stack.
+  if (!during_mark()) {
+    assert(ZAddress::is_marked(addr), "Should be marked");
+    assert(ZAddress::is_finalizable(addr), "Should be finalizable");
+    return false;
+  }
+
+  // During marking, we mark through already marked oops to avoid having
+  // some large part of the object graph hidden behind a pushed, but not
+  // yet flushed, entry on a mutator mark stack. Always marking through
+  // allows the GC workers to proceed through the object graph even if a
+  // mutator touched an oop first, which in turn will reduce the risk of
+  // having to flush mark stacks multiple times to terminate marking.
+  //
+  // However, when doing finalizable marking we don't always want to mark
+  // through. First, marking through an already strongly marked oop would
+  // be wasteful, since we will then proceed to do finalizable marking on
+  // an object which is, or will be, marked strongly. Second, marking
+  // through an already finalizable marked oop would also be wasteful,
+  // since such oops can never end up on a mutator mark stack and can
+  // therefore not hide some part of the object graph from GC workers.
+  if (finalizable) {
+    return !ZAddress::is_marked(addr);
+  }
+
+  // Mark through
+  return true;
+}
+
+template <bool finalizable, bool publish>
+uintptr_t ZBarrier::mark(uintptr_t addr) {
+  uintptr_t good_addr;
+
+  if (ZAddress::is_marked(addr)) {
+    // Already marked, but try to mark through anyway
+    good_addr = ZAddress::good(addr);
+  } else if (ZAddress::is_remapped(addr)) {
+    // Already remapped, but also needs to be marked
+    good_addr = ZAddress::good(addr);
+  } else {
+    // Needs to be both remapped and marked
+    good_addr = remap(addr);
+  }
+
+  // Mark
+  if (should_mark_through<finalizable>(addr)) {
+    ZHeap::heap()->mark_object<finalizable, publish>(good_addr);
+  }
+
+  return good_addr;
+}
+
+uintptr_t ZBarrier::remap(uintptr_t addr) {
+  assert(!ZAddress::is_good(addr), "Should not be good");
+  assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
+
+  if (ZHeap::heap()->is_relocating(addr)) {
+    // Forward
+    return ZHeap::heap()->forward_object(addr);
+  }
+
+  // Remap
+  return ZAddress::good(addr);
+}
+
+uintptr_t ZBarrier::relocate(uintptr_t addr) {
+  assert(!ZAddress::is_good(addr), "Should not be good");
+  assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
+
+  if (ZHeap::heap()->is_relocating(addr)) {
+    // Relocate
+    return ZHeap::heap()->relocate_object(addr);
+  }
+
+  // Remap
+  return ZAddress::good(addr);
+}
+
+uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) {
+  return during_relocate() ? relocate(addr) : mark<Strong, Publish>(addr);
+}
+
+uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
+  return during_relocate() ? relocate(addr) : remap(addr);
+}
+
+//
+// Load barrier
+//
+uintptr_t ZBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
+  return relocate_or_mark(addr);
+}
+
+void ZBarrier::load_barrier_on_oop_fields(oop o) {
+  assert(ZOop::is_good(o), "Should be good");
+  ZLoadBarrierOopClosure cl;
+  o->oop_iterate(&cl);
+}
+
+//
+// Weak load barrier
+//
+uintptr_t ZBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) {
+  return ZAddress::is_weak_good(addr) ? ZAddress::good(addr) : relocate_or_remap(addr);
+}
+
+uintptr_t ZBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  if (ZHeap::heap()->is_object_strongly_live(good_addr)) {
+    return good_addr;
+  }
+
+  // Not strongly live
+  return 0;
+}
+
+uintptr_t ZBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  if (ZHeap::heap()->is_object_live(good_addr)) {
+    return good_addr;
+  }
+
+  // Not live
+  return 0;
+}
+
+//
+// Keep alive barrier
+//
+uintptr_t ZBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  assert(ZHeap::heap()->is_object_strongly_live(good_addr), "Should be live");
+  return good_addr;
+}
+
+uintptr_t ZBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  assert(ZHeap::heap()->is_object_live(good_addr), "Should be live");
+  return good_addr;
+}
+
+//
+// Mark barrier
+//
+uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
+  return mark<Strong, Overflow>(addr);
+}
+
+uintptr_t ZBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = mark<Finalizable, Overflow>(addr);
+  if (ZAddress::is_good(addr)) {
+    // If the oop was already strongly marked/good, then we do
+    // not want to downgrade it to finalizable marked/good.
+    return good_addr;
+  }
+
+  // Make the oop finalizable marked/good, instead of normal marked/good.
+  // This is needed because an object might first become finalizable
+  // marked by the GC, and then loaded by a mutator thread. In this case,
+  // the mutator thread must be able to tell that the object needs to be
+  // strongly marked. The finalizable bit in the oop exists to make sure
+  // that a load of a finalizable marked oop will fall into the barrier
+  // slow path so that we can mark the object as strongly reachable.
+  return ZAddress::finalizable_good(good_addr);
+}
+
+uintptr_t ZBarrier::mark_barrier_on_root_oop_slow_path(uintptr_t addr) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  assert(during_mark(), "Invalid phase");
+
+  // Mark
+  return mark<Strong, Publish>(addr);
+}
+
+//
+// Relocate barrier
+//
+uintptr_t ZBarrier::relocate_barrier_on_root_oop_slow_path(uintptr_t addr) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  assert(during_relocate(), "Invalid phase");
+
+  // Relocate
+  return relocate(addr);
+}
+
+//
+// Narrow oop variants, never used.
+//
+oop ZBarrier::load_barrier_on_oop_field(volatile narrowOop* p) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+void ZBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
+  ShouldNotReachHere();
+}
+
+oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
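
The slow paths above all share one shape: decide whether a loaded address is already usable and, if not, produce the current good address. A minimal standalone sketch of that load-barrier dispatch; the mask value and bit layout are placeholders rather than ZGC's real colored-pointer encoding (see zAddress/zGlobals in this changeset):

#include <cstdint>

static uintptr_t bad_mask = 0x1;  // stand-in for ZAddressBadMask

// Stand-in for relocate_or_mark(): the real slow path marks, remaps or
// relocates the object and returns its current good address.
static uintptr_t slow_path(uintptr_t addr) {
  return addr & ~bad_mask;
}

inline uintptr_t load_barrier(uintptr_t addr) {
  if ((addr & bad_mask) == 0) {
    return addr;           // Fast path: address is good (or null)
  }
  return slow_path(addr);  // Slow path: heal the address
}
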
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIER_HPP
+#define SHARE_GC_Z_ZBARRIER_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+
+typedef bool (*ZBarrierFastPath)(uintptr_t);
+typedef uintptr_t (*ZBarrierSlowPath)(uintptr_t);
+
+class ZBarrier : public AllStatic {
+private:
+  static const bool Strong      = false;
+  static const bool Finalizable = true;
+
+  static const bool Publish     = true;
+  static const bool Overflow    = false;
+
+  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop barrier(volatile oop* p, oop o);
+  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop weak_barrier(volatile oop* p, oop o);
+  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static void root_barrier(oop* p, oop o);
+
+  static bool is_null_fast_path(uintptr_t addr);
+  static bool is_good_or_null_fast_path(uintptr_t addr);
+  static bool is_weak_good_or_null_fast_path(uintptr_t addr);
+
+  static bool is_resurrection_blocked(volatile oop* p, oop* o);
+
+  static bool during_mark();
+  static bool during_relocate();
+  template <bool finalizable> static bool should_mark_through(uintptr_t addr);
+  template <bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
+  static uintptr_t remap(uintptr_t addr);
+  static uintptr_t relocate(uintptr_t addr);
+  static uintptr_t relocate_or_mark(uintptr_t addr);
+  static uintptr_t relocate_or_remap(uintptr_t addr);
+
+  static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr);
+  static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr);
+  static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr);
+  static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
+  static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
+  static uintptr_t mark_barrier_on_root_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t relocate_barrier_on_root_oop_slow_path(uintptr_t addr);
+
+public:
+  // Load barrier
+  static  oop load_barrier_on_oop(oop o);
+  static  oop load_barrier_on_oop_field(volatile oop* p);
+  static  oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
+  static void load_barrier_on_oop_array(volatile oop* p, size_t length);
+  static void load_barrier_on_oop_fields(oop o);
+  static  oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
+  static  oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
+
+  // Weak load barrier
+  static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
+  static oop weak_load_barrier_on_weak_oop(oop o);
+  static oop weak_load_barrier_on_weak_oop_field(volatile oop* p);
+  static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
+  static oop weak_load_barrier_on_phantom_oop(oop o);
+  static oop weak_load_barrier_on_phantom_oop_field(volatile oop* p);
+  static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
+
+  // Is alive barrier
+  static bool is_alive_barrier_on_weak_oop(oop o);
+  static bool is_alive_barrier_on_phantom_oop(oop o);
+
+  // Keep alive barrier
+  static void keep_alive_barrier_on_weak_oop_field(volatile oop* p);
+  static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p);
+
+  // Mark barrier
+  static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
+  static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable);
+  static void mark_barrier_on_root_oop_field(oop* p);
+
+  // Relocate barrier
+  static void relocate_barrier_on_root_oop_field(oop* p);
+
+  // Narrow oop variants, never used.
+  static oop  load_barrier_on_oop_field(volatile narrowOop* p);
+  static oop  load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length);
+  static oop  load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
+};
+
+#endif // SHARE_GC_Z_ZBARRIER_HPP
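
The private barrier(), weak_barrier() and root_barrier() templates above take the fast and slow paths as function-pointer template parameters, so each public barrier variant is a distinct instantiation with its fast-path test inlined. A minimal sketch of that pattern, with illustrative names:

#include <cstdint>

typedef bool (*FastPath)(uintptr_t);
typedef uintptr_t (*SlowPath)(uintptr_t);

static bool is_even(uintptr_t addr) { return (addr & 1) == 0; }
static uintptr_t fix_up(uintptr_t addr) { return addr & ~uintptr_t(1); }

// One generic skeleton; each <fast, slow> pair compiles to its own barrier.
template <FastPath fast_path, SlowPath slow_path>
inline uintptr_t generic_barrier(uintptr_t addr) {
  if (fast_path(addr)) {
    return addr;
  }
  return slow_path(addr);
}

uintptr_t example(uintptr_t addr) {
  return generic_barrier<is_even, fix_up>(addr);  // cf. load_barrier_on_oop_field
}
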
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
+#define SHARE_GC_Z_ZBARRIER_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zBarrier.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zResurrection.inline.hpp"
+#include "runtime/atomic.hpp"
+
+template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
+inline oop ZBarrier::barrier(volatile oop* p, oop o) {
+  uintptr_t addr = ZOop::to_address(o);
+
+retry:
+  // Fast path
+  if (fast_path(addr)) {
+    return ZOop::to_oop(addr);
+  }
+
+  // Slow path
+  const uintptr_t good_addr = slow_path(addr);
+
+  // Self heal, but only if the address was actually updated by the slow path,
+  // which might not be the case, e.g. when marking through an already good oop.
+  if (p != NULL && good_addr != addr) {
+    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
+    if (prev_addr != addr) {
+      // Some other thread overwrote the oop. If this oop was updated by a
+      // weak barrier the new oop might not be good, in which case we need
+      // to re-apply this barrier.
+      addr = prev_addr;
+      goto retry;
+    }
+  }
+
+  return ZOop::to_oop(good_addr);
+}
+
+template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
+inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
+  const uintptr_t addr = ZOop::to_address(o);
+
+  // Fast path
+  if (fast_path(addr)) {
+    // Return the good address instead of the weak good address
+    // to ensure that the currently active heap view is used.
+    return ZOop::to_oop(ZAddress::good_or_null(addr));
+  }
+
+  // Slow path
+  uintptr_t good_addr = slow_path(addr);
+
+  // Self heal unless the address returned from the slow path is null,
+  // in which case resurrection was blocked and we must let the reference
+  // processor clear the oop. Mutators are not allowed to clear oops in
+  // these cases, since that would be similar to calling Reference.clear(),
+  // which would make the reference non-discoverable or cause it to be
+  // silently dropped by the reference processor.
+  if (p != NULL && good_addr != 0) {
+    // The slow path returns a good/marked address, but we never mark oops
+    // in a weak load barrier so we always self heal with the remapped address.
+    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
+    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
+    if (prev_addr != addr) {
+      // Some other thread overwrote the oop. The new
+      // oop is guaranteed to be weak good or null.
+      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");
+
+      // Return the good address instead of the weak good address
+      // to ensure that the currently active heap view is used.
+      good_addr = ZAddress::good_or_null(prev_addr);
+    }
+  }
+
+  return ZOop::to_oop(good_addr);
+}
+
+template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
+inline void ZBarrier::root_barrier(oop* p, oop o) {
+  const uintptr_t addr = ZOop::to_address(o);
+
+  // Fast path
+  if (fast_path(addr)) {
+    return;
+  }
+
+  // Slow path
+  const uintptr_t good_addr = slow_path(addr);
+
+  // Non-atomic healing helps speed up root scanning. This is safe to do
+  // since we are always healing roots in a safepoint, which means we are
+  // never racing with mutators modifying roots while we are healing them.
+  // It's also safe in case multiple GC threads try to heal the same root,
+  // since they would always heal the root in the same way and it does not
+  // matter in which order it happens.
+  *p = ZOop::to_oop(good_addr);
+}
+
+inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
+  return ZAddress::is_null(addr);
+}
+
+inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
+  return ZAddress::is_good_or_null(addr);
+}
+
+inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
+  return ZAddress::is_weak_good_or_null(addr);
+}
+
+inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
+  const bool is_blocked = ZResurrection::is_blocked();
+
+  // Reload oop after checking the resurrection blocked state. This is
+  // done to prevent a race where we first load an oop, which is logically
+  // null but not yet cleared, then this oop is cleared by the reference
+  // processor and resurrection is unblocked. At this point the mutator
+  // would see the unblocked state and pass this invalid oop through the
+  // normal barrier path, which would incorrectly try to mark this oop.
+  if (p != NULL) {
+    // First assign to reloaded_o to avoid compiler warning about
+    // implicit dereference of volatile oop.
+    const oop reloaded_o = *p;
+    *o = reloaded_o;
+  }
+
+  return is_blocked;
+}
+
+//
+// Load barrier
+//
+inline oop ZBarrier::load_barrier_on_oop(oop o) {
+  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
+}
+
+inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
+  const oop o = *p;
+  return load_barrier_on_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
+  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
+}
+
+inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
+  for (volatile const oop* const end = p + length; p < end; p++) {
+    load_barrier_on_oop_field(p);
+  }
+}
+
+inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
+  }
+
+  return load_barrier_on_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
+  }
+
+  return load_barrier_on_oop_field_preloaded(p, o);
+}
+
+//
+// Weak load barrier
+//
+inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
+  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
+  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
+  const oop o = *p;
+  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
+  }
+
+  return weak_load_barrier_on_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
+  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
+  const oop o = *p;
+  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
+  }
+
+  return weak_load_barrier_on_oop_field_preloaded(p, o);
+}
+
+//
+// Is alive barrier
+//
+inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
+  // Check if oop is logically non-null. This operation
+  // is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  return weak_load_barrier_on_weak_oop(o) != NULL;
+}
+
+inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
+  // Check if oop is logically non-null. This operation
+  // is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  return weak_load_barrier_on_phantom_oop(o) != NULL;
+}
+
+//
+// Keep alive barrier
+//
+inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
+  // This operation is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  const oop o = *p;
+  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
+}
+
+inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
+  // This operation is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  const oop o = *p;
+  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
+}
+
+//
+// Mark barrier
+//
+inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
+  // The fast path only checks for null since the GC worker
+  // threads doing marking want to mark through good oops.
+  const oop o = *p;
+
+  if (finalizable) {
+    barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
+  } else {
+    barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
+  }
+}
+
+inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
+  for (volatile const oop* const end = p + length; p < end; p++) {
+    mark_barrier_on_oop_field(p, finalizable);
+  }
+}
+
+inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
+}
+
+//
+// Relocate barrier
+//
+inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
+}
+
+#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP
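
The self-healing step in barrier() above publishes the good address with a compare-and-swap keyed on the address that was originally loaded. A standalone sketch with std::atomic standing in for Atomic::cmpxchg (names are illustrative):

#include <atomic>
#include <cstdint>

// One CAS attempt, mirroring Atomic::cmpxchg(good_addr, p, addr). On failure
// the real barrier loops back and re-applies itself to the value the other
// thread wrote, since a concurrent weak barrier may have installed an
// address that is not good.
inline bool try_self_heal(std::atomic<uintptr_t>* field,
                          uintptr_t seen_addr,
                          uintptr_t good_addr) {
  uintptr_t expected = seen_addr;
  return field->compare_exchange_strong(expected, good_addr);
}
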
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "runtime/thread.hpp"
+
+ZBarrierSet::ZBarrierSet() :
+    BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
+               make_barrier_set_c1<ZBarrierSetC1>(),
+               make_barrier_set_c2<ZBarrierSetC2>(),
+               BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
+
+ZBarrierSetAssembler* ZBarrierSet::assembler() {
+  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
+  return reinterpret_cast<ZBarrierSetAssembler*>(bsa);
+}
+
+bool ZBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) {
+  assert((decorators & AS_RAW) == 0, "Unexpected decorator");
+  assert((decorators & AS_NO_KEEPALIVE) == 0, "Unexpected decorator");
+  assert((decorators & IN_ARCHIVE_ROOT) == 0, "Unexpected decorator");
+  //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator");
+
+  if (type == T_OBJECT || type == T_ARRAY) {
+    if (((decorators & IN_HEAP) != 0) ||
+        ((decorators & IN_CONCURRENT_ROOT) != 0) ||
+        ((decorators & ON_PHANTOM_OOP_REF) != 0)) {
+      // Barrier needed
+      return true;
+    }
+  }
+
+  // Barrier not needed
+  return false;
+}
+
+void ZBarrierSet::on_thread_create(Thread* thread) {
+  // Create thread local data
+  ZThreadLocalData::create(thread);
+}
+
+void ZBarrierSet::on_thread_destroy(Thread* thread) {
+  // Destroy thread local data
+  ZThreadLocalData::destroy(thread);
+}
+
+void ZBarrierSet::on_thread_attach(JavaThread* thread) {
+  // Set thread local address bad mask
+  ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
+}
+
+void ZBarrierSet::on_thread_detach(JavaThread* thread) {
+  // Flush and free any remaining mark stacks
+  ZHeap::heap()->mark_flush_and_free(thread);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSET_HPP
+#define SHARE_GC_Z_ZBARRIERSET_HPP
+
+#include "gc/shared/barrierSet.hpp"
+
+class ZBarrierSetAssembler;
+
+class ZBarrierSet : public BarrierSet {
+public:
+  ZBarrierSet();
+
+  static ZBarrierSetAssembler* assembler();
+  static bool barrier_needed(DecoratorSet decorators, BasicType type);
+
+  virtual void on_thread_create(Thread* thread);
+  virtual void on_thread_destroy(Thread* thread);
+  virtual void on_thread_attach(JavaThread* thread);
+  virtual void on_thread_detach(JavaThread* thread);
+
+  virtual void print_on(outputStream* st) const {}
+
+  template <DecoratorSet decorators, typename BarrierSetT = ZBarrierSet>
+  class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
+  private:
+    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+
+    template <DecoratorSet expected>
+    static void verify_decorators_present();
+
+    template <DecoratorSet expected>
+    static void verify_decorators_absent();
+
+    static oop* field_addr(oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static oop load_barrier_on_oop_field_preloaded(T* addr, oop o);
+
+    template <typename T>
+    static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o);
+
+  public:
+    //
+    // In heap
+    //
+    template <typename T>
+    static oop oop_load_in_heap(T* addr);
+    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
+    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
+
+    template <typename T>
+    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                      size_t length);
+
+    static void clone_in_heap(oop src, oop dst, size_t size);
+
+    //
+    // Not in heap
+    //
+    template <typename T>
+    static oop oop_load_not_in_heap(T* addr);
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
+
+    template <typename T>
+    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+  };
+};
+
+template<> struct BarrierSet::GetName<ZBarrierSet> {
+  static const BarrierSet::Name value = BarrierSet::ZBarrierSet;
+};
+
+template<> struct BarrierSet::GetType<BarrierSet::ZBarrierSet> {
+  typedef ::ZBarrierSet type;
+};
+
+#endif // SHARE_GC_Z_ZBARRIERSET_HPP
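
The GetName/GetType specializations above plug into BarrierSet's FakeRtti scheme: a trait maps each concrete barrier-set type to an enum tag so code can downcast without C++ RTTI. A minimal standalone analogue, with illustrative names:

#include <cassert>

enum Name { AKind, BKind };

struct Base { Name name; explicit Base(Name n) : name(n) {} };
struct A : Base { A() : Base(AKind) {} };

template <typename T> struct GetName;
template <> struct GetName<A> { static const Name value = AKind; };

// Downcast checked against the tag recorded at construction time.
template <typename T>
T* checked_cast(Base* b) {
  assert(b->name == GetName<T>::value && "wrong dynamic type");
  return static_cast<T*>(b);
}
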
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
+#define SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
+
+#include "gc/shared/accessBarrierSupport.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "utilities/debug.hpp"
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <DecoratorSet expected>
+inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_present() {
+  if ((decorators & expected) == 0) {
+    fatal("Using unsupported access decorators");
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <DecoratorSet expected>
+inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_absent() {
+  if ((decorators & expected) != 0) {
+    fatal("Using unsupported access decorators");
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop* ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::field_addr(oop base, ptrdiff_t offset) {
+  assert(base != NULL, "Invalid base");
+  return reinterpret_cast<oop*>(reinterpret_cast<intptr_t>((void*)base) + offset);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_oop_field_preloaded(T* addr, oop o) {
+  verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
+
+  if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
+      return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
+      return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  } else {
+    if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
+      return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
+      return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) {
+  verify_decorators_present<ON_UNKNOWN_OOP_REF>();
+
+  const DecoratorSet decorators_known_strength =
+    AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset);
+
+  if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    if (decorators_known_strength & ON_STRONG_OOP_REF) {
+      return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (decorators_known_strength & ON_WEAK_OOP_REF) {
+      return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  } else {
+    if (decorators_known_strength & ON_STRONG_OOP_REF) {
+      return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (decorators_known_strength & ON_WEAK_OOP_REF) {
+      return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  }
+}
+
+//
+// In heap
+//
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
+  verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
+
+  const oop o = Raw::oop_load_in_heap(addr);
+  return load_barrier_on_oop_field_preloaded(addr, o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+  oop* const addr = field_addr(base, offset);
+  const oop o = Raw::oop_load_in_heap(addr);
+
+  if (HasDecorator<decorators, ON_UNKNOWN_OOP_REF>::value) {
+    return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o);
+  }
+
+  return load_barrier_on_oop_field_preloaded(addr, o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  ZBarrier::load_barrier_on_oop_field(addr);
+  return Raw::oop_atomic_cmpxchg_in_heap(new_value, addr, compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+  verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  // Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive
+  // calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF,
+  // with the motivation that if you're doing Unsafe operations on a Reference.referent
+  // field, then you're on your own anyway.
+  ZBarrier::load_barrier_on_oop_field(field_addr(base, offset));
+  return Raw::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  const oop o = Raw::oop_atomic_xchg_in_heap(new_value, addr);
+  return ZBarrier::load_barrier_on_oop(o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  const oop o = Raw::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+  return ZBarrier::load_barrier_on_oop(o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline bool ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                                                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                                                                       size_t length) {
+  T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
+  T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
+
+  if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
+    // No check cast, bulk barrier and bulk copy
+    ZBarrier::load_barrier_on_oop_array(src, length);
+    return Raw::oop_arraycopy_in_heap(NULL, 0, src, NULL, 0, dst, length);
+  }
+
+  // Check cast and copy each element
+  Klass* const dst_klass = objArrayOop(dst_obj)->element_klass();
+  for (const T* const end = src + length; src < end; src++, dst++) {
+    const oop elem = ZBarrier::load_barrier_on_oop_field(src);
+    if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) {
+      // Check cast failed
+      return false;
+    }
+
+    // Cast is safe, since we know it's never a narrowOop
+    *(oop*)dst = elem;
+  }
+
+  return true;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
+  ZBarrier::load_barrier_on_oop_fields(src);
+  Raw::clone_in_heap(src, dst, size);
+}
+
+//
+// Not in heap
+//
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
+  const oop o = Raw::oop_load_not_in_heap(addr);
+
+  if (HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value) {
+    return load_barrier_on_oop_field_preloaded(addr, o);
+  }
+
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  return o;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  return Raw::oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  return Raw::oop_atomic_xchg_not_in_heap(new_value, addr);
+}
+
+#endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "runtime/thread.hpp"
+
+Address ZBarrierSetAssemblerBase::address_bad_mask_from_thread(Register thread) {
+  return Address(thread, ZThreadLocalData::address_bad_mask_offset());
+}
+
+Address ZBarrierSetAssemblerBase::address_bad_mask_from_jni_env(Register env) {
+  return Address(env, ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
+#define SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+#include "oops/accessDecorators.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class ZBarrierSetAssemblerBase : public BarrierSetAssembler {
+public:
+  static Address address_bad_mask_from_thread(Register thread);
+  static Address address_bad_mask_from_jni_env(Register env);
+};
+
+#include CPU_HEADER(gc/z/zBarrierSetAssembler)
+
+#endif // SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetRuntime.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+
+JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p))
+  return ZBarrier::load_barrier_on_oop_field_preloaded(p, o);
+JRT_END
+
+JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p))
+  return ZBarrier::load_barrier_on_weak_oop_field_preloaded(p, o);
+JRT_END
+
+JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p))
+  return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o);
+JRT_END
+
+JRT_LEAF(void, ZBarrierSetRuntime::load_barrier_on_oop_array(oop* p, size_t length))
+  ZBarrier::load_barrier_on_oop_array(p, length);
+JRT_END
+
+address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators) {
+  if (decorators & ON_PHANTOM_OOP_REF) {
+    return load_barrier_on_phantom_oop_field_preloaded_addr();
+  } else if (decorators & ON_WEAK_OOP_REF) {
+    return load_barrier_on_weak_oop_field_preloaded_addr();
+  } else {
+    return load_barrier_on_oop_field_preloaded_addr();
+  }
+}
+
+address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() {
+  return reinterpret_cast<address>(load_barrier_on_oop_field_preloaded);
+}
+
+address ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr() {
+  return reinterpret_cast<address>(load_barrier_on_weak_oop_field_preloaded);
+}
+
+address ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr() {
+  return reinterpret_cast<address>(load_barrier_on_phantom_oop_field_preloaded);
+}
+
+address ZBarrierSetRuntime::load_barrier_on_oop_array_addr() {
+  return reinterpret_cast<address>(load_barrier_on_oop_array);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetRuntime.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
+#define SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/accessDecorators.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class oopDesc;
+
+class ZBarrierSetRuntime : public AllStatic {
+private:
+  static oopDesc* load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p);
+  static oopDesc* load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p);
+  static oopDesc* load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p);
+  static void load_barrier_on_oop_array(oop* p, size_t length);
+
+public:
+  static address load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators);
+  static address load_barrier_on_oop_field_preloaded_addr();
+  static address load_barrier_on_weak_oop_field_preloaded_addr();
+  static address load_barrier_on_phantom_oop_field_preloaded_addr();
+  static address load_barrier_on_oop_array_addr();
+};
+
+#endif // SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBitField.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBITFIELD_HPP
+#define SHARE_GC_Z_ZBITFIELD_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+//
+//  Example
+//  -------
+//
+//  typedef ZBitField<uint64_t, uint8_t,  0,  2, 3> field_word_aligned_size;
+//  typedef ZBitField<uint64_t, uint32_t, 2, 30>    field_length;
+//
+//
+//   6                                 3 3
+//   3                                 2 1                               2 10
+//  +-----------------------------------+---------------------------------+--+
+//  |11111111 11111111 11111111 11111111|11111111 11111111 11111111 111111|11|
+//  +-----------------------------------+---------------------------------+--+
+//  |                                   |                                 |
+//  |       31-2 field_length (30-bits) *                                 |
+//  |                                                                     |
+//  |                                1-0 field_word_aligned_size (2-bits) *
+//  |
+//  * 63-32 Unused (32-bits)
+//
+//
+//  field_word_aligned_size::encode(16) = 2
+//  field_length::encode(2342) = 9368
+//
+//  field_word_aligned_size::decode(9368 | 2) = 16
+//  field_length::decode(9368 | 2) = 2342
+//
+
+template <typename ContainerType, typename ValueType, int FieldShift, int FieldBits, int ValueShift = 0>
+class ZBitField : public AllStatic {
+private:
+  static const int ContainerBits = sizeof(ContainerType) * BitsPerByte;
+
+  STATIC_ASSERT(FieldBits < ContainerBits);
+  STATIC_ASSERT(FieldShift + FieldBits <= ContainerBits);
+  STATIC_ASSERT(ValueShift + FieldBits <= ContainerBits);
+
+  static const ContainerType FieldMask = (((ContainerType)1 << FieldBits) - 1);
+
+public:
+  static ValueType decode(ContainerType container) {
+    return (ValueType)(((container >> FieldShift) & FieldMask) << ValueShift);
+  }
+
+  static ContainerType encode(ValueType value) {
+    assert(((ContainerType)value & (FieldMask << ValueShift)) == (ContainerType)value, "Invalid value");
+    return ((ContainerType)value >> ValueShift) << FieldShift;
+  }
+};
+
+#endif // SHARE_GC_Z_ZBITFIELD_HPP
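
The worked example in the header comment can be verified with plain integer
arithmetic. The snippet below (no HotSpot headers; shifts and masks copied
from the two typedefs in the comment) checks both encode and decode:

    #include <cassert>
    #include <cstdint>

    int main() {
      // encode(value) = (value >> ValueShift) << FieldShift
      const uint64_t size_encoded   = (uint64_t(16)   >> 3) << 0;     // == 2
      const uint64_t length_encoded = (uint64_t(2342) >> 0) << 2;     // == 9368
      const uint64_t container      = size_encoded | length_encoded;  // == 9370

      // decode(container) = ((container >> FieldShift) & FieldMask) << ValueShift
      const uint64_t size_mask   = (uint64_t(1) << 2)  - 1;
      const uint64_t length_mask = (uint64_t(1) << 30) - 1;
      assert((((container >> 0) & size_mask)   << 3) == 16);
      assert((((container >> 2) & length_mask) << 0) == 2342);
      return 0;
    }
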
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBitMap.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBITMAP_HPP
+#define SHARE_GC_Z_ZBITMAP_HPP
+
+#include "utilities/bitMap.hpp"
+
+class ZBitMap : public CHeapBitMap {
+private:
+  static bm_word_t bit_mask_pair(idx_t bit);
+
+  bool par_set_bit_pair_finalizable(idx_t bit, bool& inc_live);
+  bool par_set_bit_pair_strong(idx_t bit, bool& inc_live);
+
+public:
+  ZBitMap(idx_t size_in_bits);
+
+  bool par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live);
+};
+
+#endif // SHARE_GC_Z_ZBITMAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBitMap.inline.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBITMAP_INLINE_HPP
+#define SHARE_GC_Z_ZBITMAP_INLINE_HPP
+
+#include "gc/z/zBitMap.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/debug.hpp"
+
+inline ZBitMap::ZBitMap(idx_t size_in_bits) :
+    CHeapBitMap(size_in_bits, mtGC, false /* clear */) {}
+
+inline BitMap::bm_word_t ZBitMap::bit_mask_pair(idx_t bit) {
+  assert(bit_in_word(bit) < BitsPerWord - 1, "Invalid bit index");
+  return (bm_word_t)3 << bit_in_word(bit);
+}
+
+inline bool ZBitMap::par_set_bit_pair_finalizable(idx_t bit, bool& inc_live) {
+  inc_live = par_set_bit(bit);
+  return inc_live;
+}
+
+inline bool ZBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) {
+  verify_index(bit);
+  volatile bm_word_t* const addr = word_addr(bit);
+  const bm_word_t pair_mask = bit_mask_pair(bit);
+  bm_word_t old_val = *addr;
+
+  do {
+    const bm_word_t new_val = old_val | pair_mask;
+    if (new_val == old_val) {
+      inc_live = false;
+      return false;     // Someone else beat us to it.
+    }
+    const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
+    if (cur_val == old_val) {
+      const bm_word_t marked_mask = bit_mask(bit);
+      inc_live = !(old_val & marked_mask);
+      return true;      // Success.
+    }
+    old_val = cur_val;  // The value changed, try again.
+  } while (true);
+}
+
+inline bool ZBitMap::par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live) {
+  if (finalizable) {
+    return par_set_bit_pair_finalizable(bit, inc_live);
+  } else {
+    return par_set_bit_pair_strong(bit, inc_live);
+  }
+}
+
+#endif // SHARE_GC_Z_ZBITMAP_INLINE_HPP
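
par_set_bit_pair_strong() above is a classic lock-free retry loop: build the
desired word, bail out early if both bits are already set, otherwise
compare-exchange and retry on contention. The same pattern as a free-standing
sketch on a single std::atomic word (names are illustrative, not HotSpot's):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    bool par_set_bit_pair(std::atomic<uint64_t>& word, unsigned bit, bool& inc_live) {
      const uint64_t pair_mask = uint64_t(3) << bit;  // marked + strong bits
      uint64_t old_val = word.load(std::memory_order_relaxed);
      for (;;) {
        const uint64_t new_val = old_val | pair_mask;
        if (new_val == old_val) {
          inc_live = false;
          return false;                                  // someone else beat us to it
        }
        // On failure, compare_exchange_weak reloads old_val for the retry.
        if (word.compare_exchange_weak(old_val, new_val)) {
          inc_live = !(old_val & (uint64_t(1) << bit));  // marked bit was clear
          return true;
        }
      }
    }

    int main() {
      std::atomic<uint64_t> word{0};
      bool inc_live = false;
      printf("%d %d\n", par_set_bit_pair(word, 4, inc_live), inc_live);  // 1 1
      printf("%d %d\n", par_set_bit_pair(word, 4, inc_live), inc_live);  // 0 0
    }
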
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCPU.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zCPU.hpp"
+#include "logging/log.hpp"
+#include "memory/padded.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/debug.hpp"
+
+#define ZCPU_UNKNOWN_AFFINITY ((Thread*)-1)
+#define ZCPU_UNKNOWN_SELF     ((Thread*)-2)
+
+PaddedEnd<ZCPU::ZCPUAffinity>* ZCPU::_affinity = NULL;
+__thread Thread*  ZCPU::_self                  = ZCPU_UNKNOWN_SELF;
+__thread uint32_t ZCPU::_cpu                   = 0;
+
+void ZCPU::initialize() {
+  assert(_affinity == NULL, "Already initialized");
+  const uint32_t ncpus = count();
+
+  _affinity = PaddedArray<ZCPUAffinity, mtGC>::create_unfreeable(ncpus);
+
+  for (uint32_t i = 0; i < ncpus; i++) {
+    _affinity[i]._thread = ZCPU_UNKNOWN_AFFINITY;
+  }
+
+  log_info(gc, init)("CPUs: %u total, %u available",
+                     os::processor_count(),
+                     os::initial_active_processor_count());
+}
+
+uint32_t ZCPU::count() {
+  return os::processor_count();
+}
+
+uint32_t ZCPU::id() {
+  assert(_affinity != NULL, "Not initialized");
+
+  // Fast path
+  if (_affinity[_cpu]._thread == _self) {
+    return _cpu;
+  }
+
+  // Slow path
+  _self = Thread::current();
+  _cpu = os::processor_id();
+
+  // Update affinity table
+  _affinity[_cpu]._thread = _self;
+
+  return _cpu;
+}
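
The fast path in id() avoids a processor-id syscall on every call by caching
the last known CPU per thread and validating that cache through the shared
affinity table. A stand-alone sketch of the same scheme, under stated
assumptions: Linux (sched_getcpu()), pthreads in place of HotSpot's Thread*,
and a fixed-size table instead of one sized from the CPU count:

    #include <cstdio>
    #include <pthread.h>
    #include <sched.h>

    struct Slot { pthread_t thread; bool valid; };

    static Slot affinity[1024];                   // one slot per CPU
    static thread_local unsigned cached_cpu = 0;
    static thread_local bool     self_known = false;

    static unsigned cpu_id() {
      // Fast path: the slot for our cached CPU still names this thread.
      if (self_known && affinity[cached_cpu].valid &&
          pthread_equal(affinity[cached_cpu].thread, pthread_self())) {
        return cached_cpu;
      }
      // Slow path: re-query the CPU and update the affinity table. Another
      // thread scheduled onto this CPU will overwrite the slot, which just
      // sends us down the slow path again next time.
      cached_cpu = (unsigned)sched_getcpu();
      affinity[cached_cpu] = { pthread_self(), true };
      self_known = true;
      return cached_cpu;
    }

    int main() {
      printf("running on cpu %u\n", cpu_id());
    }
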
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCPU.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZCPU_HPP
+#define SHARE_GC_Z_ZCPU_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/padded.hpp"
+
+class Thread;
+
+class ZCPU : public AllStatic {
+private:
+  struct ZCPUAffinity {
+    Thread* _thread;
+  };
+
+  static PaddedEnd<ZCPUAffinity>* _affinity;
+  static __thread Thread*         _self;
+  static __thread uint32_t        _cpu;
+
+public:
+  static void initialize();
+
+  static uint32_t count();
+  static uint32_t id();
+};
+
+#endif // SHARE_GC_Z_ZCPU_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcHeapSummary.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zServiceability.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+
+ZCollectedHeap* ZCollectedHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
+  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
+  return (ZCollectedHeap*)heap;
+}
+
+ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
+    _collector_policy(policy),
+    _soft_ref_policy(),
+    _barrier_set(),
+    _initialize(&_barrier_set),
+    _heap(),
+    _director(new ZDirector()),
+    _driver(new ZDriver()),
+    _stat(new ZStat()),
+    _runtime_workers() {}
+
+CollectedHeap::Name ZCollectedHeap::kind() const {
+  return CollectedHeap::Z;
+}
+
+const char* ZCollectedHeap::name() const {
+  return ZGCName;
+}
+
+jint ZCollectedHeap::initialize() {
+  if (!_heap.is_initialized()) {
+    return JNI_ENOMEM;
+  }
+
+  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
+                             (HeapWord*)ZAddressReservedEnd());
+
+  return JNI_OK;
+}
+
+void ZCollectedHeap::initialize_serviceability() {
+  _heap.serviceability_initialize();
+}
+
+void ZCollectedHeap::stop() {
+  _director->stop();
+  _driver->stop();
+  _stat->stop();
+}
+
+CollectorPolicy* ZCollectedHeap::collector_policy() const {
+  return _collector_policy;
+}
+
+SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
+  return &_soft_ref_policy;
+}
+
+size_t ZCollectedHeap::max_capacity() const {
+  return _heap.max_capacity();
+}
+
+size_t ZCollectedHeap::capacity() const {
+  return _heap.capacity();
+}
+
+size_t ZCollectedHeap::used() const {
+  return _heap.used();
+}
+
+bool ZCollectedHeap::is_maximal_no_gc() const {
+  // Not supported
+  ShouldNotReachHere();
+  return false;
+}
+
+bool ZCollectedHeap::is_scavengable(oop obj) {
+  return false;
+}
+
+bool ZCollectedHeap::is_in(const void* p) const {
+  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
+}
+
+bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
+  return is_in(p);
+}
+
+HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
+  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
+  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);
+
+  if (addr != 0) {
+    *actual_size = requested_size;
+  }
+
+  return (HeapWord*)addr;
+}
+
+HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
+  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
+  return (HeapWord*)_heap.alloc_object(size_in_bytes);
+}
+
+MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                             size_t size,
+                                                             Metaspace::MetadataType mdtype) {
+  MetaWord* result;
+
+  // Start asynchronous GC
+  collect(GCCause::_metadata_GC_threshold);
+
+  // Expand and retry allocation
+  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Start synchronous GC
+  collect(GCCause::_metadata_GC_clear_soft_refs);
+
+  // Retry allocation
+  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Expand and retry allocation
+  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Out of memory
+  return NULL;
+}
+
+void ZCollectedHeap::collect(GCCause::Cause cause) {
+  _driver->collect(cause);
+}
+
+void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
+  // These collection requests are ignored since ZGC can't run a synchronous
+  // GC cycle from within the VM thread. This is considered benign, since the
+  // only GC causes coming in here should be heap dumping and heap inspection.
+  // Neither of those actually needs a GC to happen; without one, the result
+  // of their heap iteration might just be less accurate, as it can include
+  // objects that would otherwise have been collected by a GC.
+  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
+  guarantee(cause == GCCause::_heap_dump ||
+            cause == GCCause::_heap_inspection, "Invalid cause");
+}
+
+void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
+  // Not supported
+  ShouldNotReachHere();
+}
+
+bool ZCollectedHeap::supports_tlab_allocation() const {
+  return true;
+}
+
+size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
+  return _heap.tlab_capacity();
+}
+
+size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
+  return _heap.tlab_used();
+}
+
+size_t ZCollectedHeap::max_tlab_size() const {
+  return _heap.max_tlab_size();
+}
+
+size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
+  return _heap.unsafe_max_tlab_alloc();
+}
+
+bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
+  return false;
+}
+
+bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
+  // Not supported
+  ShouldNotReachHere();
+  return true;
+}
+
+bool ZCollectedHeap::card_mark_must_follow_store() const {
+  // Not supported
+  ShouldNotReachHere();
+  return false;
+}
+
+GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
+  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
+}
+
+GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
+  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
+}
+
+void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
+  _heap.object_iterate(cl);
+}
+
+void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
+  _heap.object_iterate(cl);
+}
+
+HeapWord* ZCollectedHeap::block_start(const void* addr) const {
+  return (HeapWord*)_heap.block_start((uintptr_t)addr);
+}
+
+size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
+  size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
+  return ZUtils::bytes_to_words(size_in_bytes);
+}
+
+bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
+  return _heap.block_is_obj((uintptr_t)addr);
+}
+
+void ZCollectedHeap::register_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  ZNMethodTable::register_nmethod(nm);
+}
+
+void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  ZNMethodTable::unregister_nmethod(nm);
+}
+
+void ZCollectedHeap::verify_nmethod(nmethod* nm) {
+  // Does nothing
+}
+
+WorkGang* ZCollectedHeap::get_safepoint_workers() {
+  return _runtime_workers.workers();
+}
+
+jlong ZCollectedHeap::millis_since_last_gc() {
+  return ZStatCycle::time_since_last() / MILLIUNITS;
+}
+
+void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
+  tc->do_thread(_director);
+  tc->do_thread(_driver);
+  tc->do_thread(_stat);
+  _heap.worker_threads_do(tc);
+  _runtime_workers.threads_do(tc);
+}
+
+VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
+  const size_t capacity_in_words = capacity() / HeapWordSize;
+  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
+  return VirtualSpaceSummary(reserved_region().start(),
+                             reserved_region().start() + capacity_in_words,
+                             reserved_region().start() + max_capacity_in_words);
+}
+
+void ZCollectedHeap::prepare_for_verify() {
+  // Does nothing
+}
+
+void ZCollectedHeap::print_on(outputStream* st) const {
+  _heap.print_on(st);
+}
+
+void ZCollectedHeap::print_on_error(outputStream* st) const {
+  CollectedHeap::print_on_error(st);
+
+  st->print_cr("Address Space");
+  st->print_cr( "     Start:             " PTR_FORMAT, ZAddressSpaceStart);
+  st->print_cr( "     End:               " PTR_FORMAT, ZAddressSpaceEnd);
+  st->print_cr( "     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
+  st->print_cr( "Heap");
+  st->print_cr( "     GlobalPhase:       %u", ZGlobalPhase);
+  st->print_cr( "     GlobalSeqNum:      %u", ZGlobalSeqNum);
+  st->print_cr( "     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
+  st->print_cr( "     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
+  st->print_cr( "     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
+  st->print_cr( "Metadata Bits");
+  st->print_cr( "     Good:              " PTR_FORMAT, ZAddressGoodMask);
+  st->print_cr( "     Bad:               " PTR_FORMAT, ZAddressBadMask);
+  st->print_cr( "     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
+  st->print_cr( "     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
+  st->print_cr( "     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
+}
+
+void ZCollectedHeap::print_extended_on(outputStream* st) const {
+  _heap.print_extended_on(st);
+}
+
+void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
+  _director->print_on(st);
+  st->cr();
+  _driver->print_on(st);
+  st->cr();
+  _stat->print_on(st);
+  st->cr();
+  _heap.print_worker_threads_on(st);
+  _runtime_workers.print_threads_on(st);
+}
+
+void ZCollectedHeap::print_tracing_info() const {
+  // Does nothing
+}
+
+void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
+  _heap.verify();
+}
+
+bool ZCollectedHeap::is_oop(oop object) const {
+  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
+}
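
The ladder in satisfy_failed_metadata_allocation() is worth calling out: each
rung is a progressively more expensive remedy, returning as soon as any retry
succeeds. A generic sketch of the same escalation pattern (the callbacks are
stand-ins for the GC, expand, and allocate operations, not HotSpot APIs):

    #include <functional>

    void* allocate_with_escalation(std::function<void*()> try_alloc,
                                   std::function<void*()> expand_and_alloc,
                                   std::function<void(bool)> collect /* synchronous? */) {
      collect(false);                      // rung 1: asynchronous GC
      if (void* p = expand_and_alloc()) {  // rung 2: expand, then allocate
        return p;
      }
      collect(true);                       // rung 3: synchronous GC, clearing soft refs
      if (void* p = try_alloc()) {         // rung 4: plain retry
        return p;
      }
      if (void* p = expand_and_alloc()) {  // rung 5: final expand-and-retry
        return p;
      }
      return nullptr;                      // out of memory
    }
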
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
+#define SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/softRefPolicy.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zDriver.hpp"
+#include "gc/z/zInitialize.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zRuntimeWorkers.hpp"
+#include "gc/z/zStat.hpp"
+
+class ZCollectedHeap : public CollectedHeap {
+  friend class VMStructs;
+
+private:
+  ZCollectorPolicy* _collector_policy;
+  SoftRefPolicy     _soft_ref_policy;
+  ZBarrierSet       _barrier_set;
+  ZInitialize       _initialize;
+  ZHeap             _heap;
+  ZDirector*        _director;
+  ZDriver*          _driver;
+  ZStat*            _stat;
+  ZRuntimeWorkers   _runtime_workers;
+
+  virtual HeapWord* allocate_new_tlab(size_t min_size,
+                                      size_t requested_size,
+                                      size_t* actual_size);
+
+public:
+  static ZCollectedHeap* heap();
+
+  using CollectedHeap::ensure_parsability;
+  using CollectedHeap::accumulate_statistics_all_tlabs;
+  using CollectedHeap::resize_all_tlabs;
+
+  ZCollectedHeap(ZCollectorPolicy* policy);
+  virtual Name kind() const;
+  virtual const char* name() const;
+  virtual jint initialize();
+  virtual void initialize_serviceability();
+  virtual void stop();
+
+  virtual CollectorPolicy* collector_policy() const;
+  virtual SoftRefPolicy* soft_ref_policy();
+
+  virtual size_t max_capacity() const;
+  virtual size_t capacity() const;
+  virtual size_t used() const;
+
+  virtual bool is_maximal_no_gc() const;
+  virtual bool is_scavengable(oop obj);
+  virtual bool is_in(const void* p) const;
+  virtual bool is_in_closed_subset(const void* p) const;
+
+  virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
+  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                       size_t size,
+                                                       Metaspace::MetadataType mdtype);
+  virtual void collect(GCCause::Cause cause);
+  virtual void collect_as_vm_thread(GCCause::Cause cause);
+  virtual void do_full_collection(bool clear_all_soft_refs);
+
+  virtual bool supports_tlab_allocation() const;
+  virtual size_t tlab_capacity(Thread* thr) const;
+  virtual size_t tlab_used(Thread* thr) const;
+  virtual size_t max_tlab_size() const;
+  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+
+  virtual bool can_elide_tlab_store_barriers() const;
+  virtual bool can_elide_initializing_store_barrier(oop new_obj);
+  virtual bool card_mark_must_follow_store() const;
+
+  virtual GrowableArray<GCMemoryManager*> memory_managers();
+  virtual GrowableArray<MemoryPool*> memory_pools();
+
+  virtual void object_iterate(ObjectClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl);
+
+  virtual HeapWord* block_start(const void* addr) const;
+  virtual size_t block_size(const HeapWord* addr) const;
+  virtual bool block_is_obj(const HeapWord* addr) const;
+
+  virtual void register_nmethod(nmethod* nm);
+  virtual void unregister_nmethod(nmethod* nm);
+  virtual void verify_nmethod(nmethod* nmethod);
+
+  virtual WorkGang* get_safepoint_workers();
+
+  virtual jlong millis_since_last_gc();
+
+  virtual void gc_threads_do(ThreadClosure* tc) const;
+
+  virtual VirtualSpaceSummary create_heap_space_summary();
+
+  virtual void print_on(outputStream* st) const;
+  virtual void print_on_error(outputStream* st) const;
+  virtual void print_extended_on(outputStream* st) const;
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void print_tracing_info() const;
+
+  virtual void prepare_for_verify();
+  virtual void verify(VerifyOption option /* ignored */);
+  virtual bool is_oop(oop object) const;
+};
+
+#endif // SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectorPolicy.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zGlobals.hpp"
+
+void ZCollectorPolicy::initialize_alignments() {
+  _space_alignment = ZPageSizeMin;
+  _heap_alignment = _space_alignment;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectorPolicy.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
+#define SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
+
+#include "gc/shared/collectorPolicy.hpp"
+
+class ZCollectorPolicy : public CollectorPolicy {
+public:
+  virtual void initialize_alignments();
+};
+
+#endif // SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDebug.gdb	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,147 @@
+#
+# GDB functions for debugging the Z Garbage Collector
+#
+
+printf "Loading zDebug.gdb\n"
+
+# Print Klass*
+define zpk
+    printf "Klass: %s\n", (char*)((Klass*)($arg0))->_name->_body
+end
+
+# Print oop
+define zpo
+    set $obj = (oopDesc*)($arg0)
+
+    printf "Oop:   0x%016llx\tState: ", (uintptr_t)$obj
+    if ((uintptr_t)$obj & (uintptr_t)ZAddressGoodMask)
+        printf "Good "
+        if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped)
+            printf "(Remapped)"
+        else
+            if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked)
+                printf "(Marked)"
+            else
+                printf "(Unknown)"
+            end
+        end
+    else
+        printf "Bad "
+        if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataMarked)
+            # Should be marked
+            if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped)
+                printf "(Not Marked, Remapped)"
+            else
+                printf "(Not Marked, Not Remapped)"
+            end
+        else
+            if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataRemapped)
+                # Should be remapped
+                if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked)
+                    printf "(Marked, Not Remapped)"
+                else
+                    printf "(Not Marked, Not Remapped)"
+                end
+            else
+                # Unknown
+                printf "(Unknown)"
+            end
+        end
+    end
+    printf "\t Page: %llu\n", ((uintptr_t)$obj & ZAddressOffsetMask) >> ZPageSizeMinShift
+    x/16gx $obj
+    printf "Mark:  0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$obj->_metadata->_klass->_name->_body
+end
+
+# Print heap page by pagetable index
+define zpp
+    set $page = (ZPage*)((uintptr_t)ZHeap::_heap._pagetable._map._map[($arg0)] & ~1)
+    printf "Page %p\n", $page
+    print *$page
+end
+
+# Print pagetable
+define zpt
+    printf "Pagetable (first 128 slots)\n"
+    x/128gx ZHeap::_heap._pagetable._map._map
+end
+
+# Print live map
+define __zmarked
+    set $livemap     = $arg0
+    set $bit         = $arg1
+    set $size        = $livemap._bitmap._size
+    set $segment     = $bit / ($size / ZLiveMap::nsegments)
+    set $segment_bit = 1 << $segment
+
+    printf "Segment is "
+    if !($livemap._segment_live_bits & $segment_bit)
+        printf "NOT "
+    end
+    printf "live (segment %d)\n", $segment
+
+    if $bit >= $size
+        print "Error: Bit %z out of bounds (bitmap size %z)\n", $bit, $size
+    else
+        set $word_index = $bit / 64
+        set $bit_index  = $bit % 64
+        set $word       = $livemap._bitmap._map[$word_index]
+        set $live_bit   = $word & (1 << $bit_index)
+
+        printf "Object is "
+        if $live_bit == 0
+            printf "NOT "
+        end
+        printf "live (word index %d, bit index %d)\n", $word_index, $bit_index
+    end
+end
+
+define zmarked
+    set $addr          = $arg0
+    set $obj           = ((uintptr_t)$addr & ZAddressOffsetMask)
+    set $page_index    = $obj >> ZPageSizeMinShift
+    set $page_entry    = (uintptr_t)ZHeap::_heap._pagetable._map._map[$page_index]
+    set $page          = (ZPage*)($page_entry & ~1)
+    set $page_start    = (uintptr_t)$page._virtual._start
+    set $page_end      = (uintptr_t)$page._virtual._end
+    set $page_seqnum   = $page._livemap._seqnum
+    set $global_seqnum = ZGlobalSeqNum
+
+    if $obj < $page_start || $obj >= $page_end
+        printf "Error: %p not in page %p (start %p, end %p)\n", $obj, $page, $page_start, $page_end
+    else
+        printf "Page is "
+        if $page_seqnum != $global_seqnum
+            printf "NOT "
+        end
+        printf "live (page %p, page seqnum %d, global seqnum %d)\n", $page, $page_seqnum, $global_seqnum
+
+        #if $page_seqnum == $global_seqnum
+            set $offset = $obj - $page_start
+            set $bit = $offset / 8
+            __zmarked $page._livemap $bit
+        #end
+    end
+end
+
+# Print heap information
+define zph
+    printf "Address Space\n"
+    printf "     Start:             0x%llx\n", ZAddressSpaceStart
+    printf "     End:               0x%llx\n", ZAddressSpaceEnd
+    printf "     Size:              %-15llu (0x%llx)\n", ZAddressSpaceSize, ZAddressSpaceSize
+    printf "Heap\n"
+    printf "     GlobalPhase:       %u\n", ZGlobalPhase
+    printf "     GlobalSeqNum:      %u\n", ZGlobalSeqNum
+    printf "     Offset Max:        %-15llu (0x%llx)\n", ZAddressOffsetMax, ZAddressOffsetMax
+    printf "     Page Size Small:   %-15llu (0x%llx)\n", ZPageSizeSmall, ZPageSizeSmall
+    printf "     Page Size Medium:  %-15llu (0x%llx)\n", ZPageSizeMedium, ZPageSizeMedium
+    printf "Metadata Bits\n"
+    printf "     Good:              0x%016llx\n", ZAddressGoodMask
+    printf "     Bad:               0x%016llx\n", ZAddressBadMask
+    printf "     WeakBad:           0x%016llx\n", ZAddressWeakBadMask
+    printf "     Marked:            0x%016llx\n", ZAddressMetadataMarked
+    printf "     Remapped:          0x%016llx\n", ZAddressMetadataRemapped
+end
+
+# End of file
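
The helpers above are meant to be sourced into a gdb session attached to a
running ZGC JVM: zph dumps the address space and metadata bits, zpo inspects
a single oop's color, and zmarked checks its live bit. An illustrative
session (the path and address are made up):

    (gdb) source src/hotspot/share/gc/z/zDebug.gdb
    Loading zDebug.gdb
    (gdb) zph
    (gdb) zpo 0x00000000c0000000
    (gdb) zmarked 0x00000000c0000000
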
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDirector.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zUtils.hpp"
+#include "logging/log.hpp"
+
+const double ZDirector::one_in_1000 = 3.290527;
+
+ZDirector::ZDirector() :
+    _metronome(ZStatAllocRate::sample_hz) {
+  set_name("ZDirector");
+  create_and_start();
+}
+
+void ZDirector::sample_allocation_rate() const {
+  // Sample allocation rate. This is needed by rule_allocation_rate()
+  // below to estimate the time we have until we run out of memory.
+  const double bytes_per_second = ZStatAllocRate::sample_and_reset();
+
+  log_debug(gc, alloc)("Allocation Rate: %.3fMB/s, Avg: %.3f(+/-%.3f)MB/s",
+                       bytes_per_second / M,
+                       ZStatAllocRate::avg() / M,
+                       ZStatAllocRate::avg_sd() / M);
+}
+
+bool ZDirector::is_first() const {
+  return ZStatCycle::ncycles() == 0;
+}
+
+bool ZDirector::is_warm() const {
+  return ZStatCycle::ncycles() >= 3;
+}
+
+bool ZDirector::rule_timer() const {
+  if (ZCollectionInterval == 0) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if timer has expired.
+  const double time_since_last_gc = ZStatCycle::time_since_last();
+  const double time_until_gc = ZCollectionInterval - time_since_last_gc;
+
+  log_debug(gc, director)("Rule: Timer, Interval: %us, TimeUntilGC: %.3lfs",
+                          ZCollectionInterval, time_until_gc);
+
+  return time_until_gc <= 0;
+}
+
+bool ZDirector::rule_warmup() const {
+  if (is_warm()) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if heap usage passes 10/20/30% and no other GC has been
+  // performed yet. This allows us to get some early samples of the GC
+  // duration, which is needed by the other rules.
+  const size_t max_capacity = ZHeap::heap()->max_capacity();
+  const size_t used = ZHeap::heap()->used();
+  const double used_threshold_percent = (ZStatCycle::ncycles() + 1) * 0.1;
+  const size_t used_threshold = max_capacity * used_threshold_percent;
+
+  log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB",
+                          used_threshold_percent * 100, used / M, used_threshold / M);
+
+  return used >= used_threshold;
+}
+
+bool ZDirector::rule_allocation_rate() const {
+  if (is_first()) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if the estimated max allocation rate indicates that we
+  // will run out of memory. The estimated max allocation rate is based
+  // on the moving average of the sampled allocation rate plus a safety
+  // margin based on variations in the allocation rate and unforeseen
+  // allocation spikes.
+
+  // Calculate amount of free memory available to Java threads. Note that
+  // the heap reserve is not available to Java threads and is therefore not
+  // considered part of the free memory.
+  const size_t max_capacity = ZHeap::heap()->max_capacity();
+  const size_t max_reserve = ZHeap::heap()->max_reserve();
+  const size_t used = ZHeap::heap()->used();
+  const size_t free_with_reserve = max_capacity - used;
+  const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve);
+
+  // Calculate time until OOM given the max allocation rate and the amount
+  // of free memory. The allocation rate is a moving average and we multiply
+  // that with an allocation spike tolerance factor to guard against unforeseen
+  // phase changes in the allocation rate. We then add ~3.3 sigma to account for
+  // the allocation rate variance, which means the probability is 1 in 1000
+  // that a sample is outside of the confidence interval.
+  const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::avg_sd() * one_in_1000);
+  const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero
+
+  // Calculate max duration of a GC cycle. The duration of GC is a moving
+  // average, we add ~3.3 sigma to account for the GC duration variance.
+  const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
+  const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
+
+  // Calculate time until GC given the time until OOM and max duration of GC.
+  // We also deduct the sample interval, so that we don't overshoot the target
+  // time and end up starting the GC too late in the next interval.
+  const double sample_interval = 1.0 / ZStatAllocRate::sample_hz;
+  const double time_until_gc = time_until_oom - max_duration_of_gc - sample_interval;
+
+  log_debug(gc, director)("Rule: Allocation Rate, MaxAllocRate: %.3lfMB/s, Free: " SIZE_FORMAT "MB, MaxDurationOfGC: %.3lfs, TimeUntilGC: %.3lfs",
+                          max_alloc_rate / M, free / M, max_duration_of_gc, time_until_gc);
+
+  return time_until_gc <= 0;
+}
+
+bool ZDirector::rule_proactive() const {
+  if (!ZProactive || !is_warm()) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if the impact of doing so, in terms of application throughput
+  // reduction, is considered acceptable. This rule allows us to keep the heap
+  // size down and allow reference processing to happen even when we have a lot
+  // of free space on the heap.
+
+  // Only consider doing a proactive GC if the heap usage has grown by at least
+  // 10% of the max capacity since the previous GC, or more than 5 minutes has
+  // passed since the previous GC. This helps avoid superfluous GCs when running
+  // applications with very low allocation rate.
+  const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end();
+  const size_t used_increase_threshold = ZHeap::heap()->max_capacity() * 0.10; // 10%
+  const size_t used_threshold = used_after_last_gc + used_increase_threshold;
+  const size_t used = ZHeap::heap()->used();
+  const double time_since_last_gc = ZStatCycle::time_since_last();
+  const double time_since_last_gc_threshold = 5 * 60; // 5 minutes
+  if (used < used_threshold && time_since_last_gc < time_since_last_gc_threshold) {
+    // Don't even consider doing a proactive GC
+    log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3lfs",
+                            (used_threshold - used) / M,
+                            time_since_last_gc_threshold - time_since_last_gc);
+    return false;
+  }
+
+  const double assumed_throughput_drop_during_gc = 0.50; // 50%
+  const double acceptable_throughput_drop = 0.01;        // 1%
+  const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
+  const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
+  const double acceptable_gc_interval = max_duration_of_gc * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0);
+  const double time_until_gc = acceptable_gc_interval - time_since_last_gc;
+
+  log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3lfs, TimeSinceLastGC: %.3lfs, TimeUntilGC: %.3lfs",
+                          acceptable_gc_interval, time_since_last_gc, time_until_gc);
+
+  return time_until_gc <= 0;
+}
+
+GCCause::Cause ZDirector::make_gc_decision() const {
+  // Rule 0: Timer
+  if (rule_timer()) {
+    return GCCause::_z_timer;
+  }
+
+  // Rule 1: Warmup
+  if (rule_warmup()) {
+    return GCCause::_z_warmup;
+  }
+
+  // Rule 2: Allocation rate
+  if (rule_allocation_rate()) {
+    return GCCause::_z_allocation_rate;
+  }
+
+  // Rule 3: Proactive
+  if (rule_proactive()) {
+    return GCCause::_z_proactive;
+  }
+
+  // No GC
+  return GCCause::_no_gc;
+}
+
+void ZDirector::run_service() {
+  // Main loop
+  while (_metronome.wait_for_tick()) {
+    sample_allocation_rate();
+    const GCCause::Cause cause = make_gc_decision();
+    if (cause != GCCause::_no_gc) {
+      ZCollectedHeap::heap()->collect(cause);
+    }
+  }
+}
+
+void ZDirector::stop_service() {
+  _metronome.stop();
+}
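
To make the allocation-rate rule concrete, here is the same arithmetic with
made-up sample numbers; only the one_in_1000 constant (~3.29 sigma, the
1-in-1000 normal outlier bound) is taken from the code above:

    #include <cstdio>

    int main() {
      const double one_in_1000 = 3.290527;
      const double M = 1024.0 * 1024.0;

      const double avg_rate        = 100 * M;  // moving average, bytes/s (assumed)
      const double rate_sd         = 10 * M;   // rate standard deviation (assumed)
      const double spike_tolerance = 2.0;      // stands in for ZAllocationSpikeTolerance
      const double free_bytes      = 900 * M;  // free memory minus reserve (assumed)

      const double max_alloc_rate = (avg_rate * spike_tolerance) + (rate_sd * one_in_1000);
      const double time_until_oom = free_bytes / (max_alloc_rate + 1.0);

      const double gc_avg = 1.5, gc_sd = 0.1;  // normalized GC duration stats (assumed)
      const double max_duration_of_gc = gc_avg + (gc_sd * one_in_1000);
      const double sample_interval = 1.0;      // 1 / sample_hz

      const double time_until_gc = time_until_oom - max_duration_of_gc - sample_interval;

      // Prints roughly: time_until_oom=3.86s time_until_gc=1.03s. The rule
      // starts a GC cycle once time_until_gc drops to zero or below.
      printf("time_until_oom=%.2fs time_until_gc=%.2fs\n", time_until_oom, time_until_gc);
    }
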
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDirector.hpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZDIRECTOR_HPP
+#define SHARE_GC_Z_ZDIRECTOR_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/z/zMetronome.hpp"
+
+class ZDirector : public ConcurrentGCThread {
+private:
+  static const double one_in_1000;
+
+  ZMetronome _metronome;
+
+  void sample_allocation_rate() const;
+
+  bool is_first() const;
+  bool is_warm() const;
+
+  bool rule_timer() const;
+  bool rule_warmup() const;
+  bool rule_allocation_rate() const;
+  bool rule_proactive() const;
+  GCCause::Cause make_gc_decision() const;
+
+protected:
+  virtual void run_service();
+  virtual void stop_service();
+
+public:
+  ZDirector();
+};
+
+#endif // SHARE_GC_Z_ZDIRECTOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDriver.cpp	Fri Jun 08 18:24:45 2018 +0200
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/vmGCOperations.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDriver.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zMessagePort.inline.hpp"
+#include "gc/z/zServiceability.hpp"
+#include "gc/z/zStat.hpp"
+#include "logging/log.hpp"
+#include "runtime/vm_operations.hpp"
+#include "runtime/vmThread.hpp"
+
+static const ZStatPhaseCycle      ZPhaseCycle("Garbage Collection Cycle");
+static const ZStatPhasePause      ZPhasePauseMarkStart("Pause Mark Start");
+static const ZStatPhaseConcurrent ZPhaseConcurrentMark("Concurrent Mark");
+static const ZStatPhaseConcurrent ZPhaseConcurrentMarkContinue("Concurrent Mark Continue");
+static const ZStatPhasePause      ZPhasePauseMarkEnd("Pause Mark End");
+static const ZStatPhaseConcurrent ZPhaseConcurrentProcessNonStrongReferences("Concurrent Process Non-Strong References");
+static const ZStatPhaseConcurrent ZPhaseConcurrentResetRelocationSet("Concurrent Reset Relocation Set");
+static const ZStatPhaseConcurrent ZPhaseConcurrentDestroyDetachedPages("Concurrent Destroy Detached Pages");
+static const ZStatPhaseConcurrent ZPhaseConcurrentSelectRelocationSet("Concurrent Select Relocation Set");
+static const ZStatPhaseConcurrent ZPhaseConcurrentPrepareRelocationSet("Concurrent Prepare Relocation Set");
+static const ZStatPhasePause      ZPhasePauseRelocateStart("Pause Relocate Start");
+static const ZStatPhaseConcurrent ZPhaseConcurrentRelocated("Concurrent Relocate");
+static const ZStatCriticalPhase   ZCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */);
+static const ZStatSampler         ZSamplerJavaThreads("System", "Java Threads", ZStatUnitThreads);
+
+class ZOperationClosure : public StackObj {
+public:
+  virtual const char* name() const = 0;
+
+  virtual bool needs_inactive_gc_locker() const {
+    // An inactive GC locker is needed in operations where we change the good
+    // mask or move objects. Changing the good mask will invalidate all oops,
+    // which makes it conceptually the same thing as moving all objects.
+    return false;
+  }
+
+  virtual bool do_operation() = 0;
+};
+
+class VM_ZOperation : public VM_Operation {
+private:
+  ZOperationClosure* _cl;
+  uint               _gc_id;
+  bool               _gc_locked;
+  bool               _success;
+
+public:
+  VM_ZOperation(ZOperationClosure* cl) :
+      _cl(cl),
+      _gc_id(GCId::current()),
+      _gc_locked(false),
+      _success(false) {}
+
+  virtual VMOp_Type type() const {
+    return VMOp_ZOperation;
+  }
+
+  virtual const char* name() const {
+    return _cl->name();
+  }
+
+  virtual bool doit_prologue() {
+    Heap_lock->lock();
+    return true;
+  }
+
+  virtual void doit() {
+    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+    ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads());
+
+    // JVMTI support
+    SvcGCMarker sgcm(SvcGCMarker::OTHER);
+
+    // Setup GC id
+    GCIdMark gcid(_gc_id);
+
+    if (_cl->needs_inactive_gc_locker() && GCLocker::check_active_before_gc()) {
+      // GC locker is active, bail out
+      _gc_locked = true;
+    } else {
+      // Execute operation
+      IsGCActiveMark mark;
+      _success = _cl->do_operation();
+    }
+  }
+
+  virtual void doit_epilogue() {
+    Heap_lock->unlock();
+  }
+
+  bool gc_locked() {
+    return _gc_locked;
+  }
+
+  bool success() const {
+    return _success;
+  }
+};
+
+class ZMarkStartClosure : public ZOperationClosure {
+public:
+  virtual const char* name() const {
+    return "ZMarkStart";
+  }
+
+  virtual bool needs_inactive_gc_locker() const {
+    return true;
+  }
+
+  virtual bool do_operation() {
+    ZStatTimer timer(ZPhasePauseMarkStart);
+    ZServiceabilityMarkStartTracer tracer;
+
+    ZCollectedHeap::heap()->increment_total_collections(true /* full */);
+
+    ZHeap::heap()->mark_start();
+    return true;
+  }
+};
+
+class ZMarkEndClosure : public ZOperationClosure {
+public:
+  virtual const char* name() const {
+    return "ZMarkEnd";
+  }
+
+  virtual bool do_operation() {
+    ZStatTimer timer(ZPhasePauseMarkEnd);
+    ZServiceabilityMarkEndTracer tracer;
+
+    return ZHeap::heap()->mark_end();
+  }
+};
+
+class ZRelocateStartClosure : public ZOperationClosure {
+public:
+  virtual const char* name() const {
+    return "ZRelocateStart";
+  }
+
+  virtual bool needs_inactive_gc_locker() const {
+    return true;
+  }
+
+  virtual bool do_operation() {
+    ZStatTimer timer(ZPhasePauseRelocateStart);
+    ZServiceabilityRelocateStartTracer tracer;
+
+    ZHeap::heap()->relocate_start();
+    return true;
+  }
+};
+
+ZDriver::ZDriver() :
+    _gc_cycle_port(),
+    _gc_locker_port() {
+  set_name("ZDriver");
+  create_and_start();
+}
+
+bool ZDriver::vm_operation(ZOperationClosure* cl) {
+  for (;;) {
+    VM_ZOperation op(cl);
+    VMThread::execute(&op);
+    if (op.gc_locked()) {
+      // Wait for GC to become unlocked and restart the VM operation
+      ZStatTimer timer(ZCriticalPhaseGCLockerStall);
+      _gc_locker_port.wait();
+      continue;
+    }
+
+    // Notify VM operation completed
+    _gc_locker_port.ack();
+
+    return op.success();
+  }
+}
+
+void ZDriver::collect(GCCause::Cause cause) {
+  switch (cause) {
+  case GCCause::_wb_young_gc:
+  case GCCause::_wb_conc_mark:
+  case GCCause::_wb_full_gc:
+  case GCCause::_dcmd_gc_run:
+  case GCCause::_java_lang_system_gc:
+  case GCCause::_full_gc_alot:
+  case GCCause::_scavenge_alot:
+  case GCCause::_jvmti_force_gc:
+  case GCCause::_metadata_GC_clear_soft_refs:
+    // Start synchronous GC
+    _gc_cycle_port.send_sync(cause);
+    break;
+
+  case GCCause::_z_timer:
+  case GCCause::_z_warmup:
+  case GCCause::_z_allocation_rate:
+  case GCCause::_z_allocation_stall:
+  case GCCause::_z_proactive:
+  case GCCause::_metadata_GC_threshold:
+    // Start asynchronous GC
+    _gc_cycle_port.send_async(cause);
+    break;
+
+  case GCCause::_gc_locker:
+    // Restart VM operation previously blocked by the GC locker
+    _gc_locker_port.signal();
+    break;
+
+  default:
+    // Other causes not supported
+    fatal("Unsupported GC cause (%s)", GCCause::to_string(cause));
+    break;
+  }
+}
+
+GCCause::Cause ZDriver::start_gc_cycle() {
+  // Wait for GC request
+  return _gc_cycle_port.receive();
+}
+
+class ZSoftReferencePolicyScope : public StackObj {
+private:
+  bool should_clear_soft_reference(GCCause::Cause cause) const {
+    const bool clear = ZCollectedHeap::heap()->soft_ref_policy()->should_clear_all_soft_refs();
+
+    // Clear all soft references if the policy says so, or if
+    // the GC cause indicates that we're running low on memory.
+    return clear ||
+           cause == GCCause::_z_allocation_stall ||
+           cause == GCCause::_metadata_GC_clear_soft_refs;
+  }
+
+  void clear_should_clear_soft_reference() const {
+    ZCollectedHeap::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
+  }
+
+public:
+  ZSoftReferencePolicyScope(GCCause::Cause cause) {
+    const bool clear = should_clear_soft_reference(cause);
+    ZHeap::heap()->set_soft_reference_policy(clear);
+    clear_should_clear_so