changeset 58157:eee025d47c8a nestmates

Merge
author mchung
date Thu, 05 Dec 2019 18:06:35 -0800
parents 61ee2fdd77ea e4b6321c11a4
children b6402b9800be
files make/hotspot/symbols/symbols-unix src/hotspot/share/aot/aotCodeHeap.cpp src/hotspot/share/classfile/classFileParser.cpp src/hotspot/share/classfile/classFileParser.hpp src/hotspot/share/classfile/classLoaderData.cpp src/hotspot/share/classfile/classLoaderDataGraph.cpp src/hotspot/share/classfile/classLoaderDataGraph.hpp src/hotspot/share/classfile/javaClasses.cpp src/hotspot/share/classfile/javaClasses.hpp src/hotspot/share/classfile/systemDictionary.cpp src/hotspot/share/classfile/systemDictionary.hpp src/hotspot/share/classfile/verifier.cpp src/hotspot/share/classfile/vmSymbols.hpp src/hotspot/share/gc/g1/survRateGroup.cpp src/hotspot/share/gc/g1/survRateGroup.hpp src/hotspot/share/include/jvm.h src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp src/hotspot/share/jvmci/vmStructs_jvmci.cpp src/hotspot/share/logging/logTag.hpp src/hotspot/share/memory/heapInspection.hpp src/hotspot/share/memory/metaspace.cpp src/hotspot/share/memory/metaspaceShared.cpp src/hotspot/share/oops/constantPool.cpp src/hotspot/share/oops/cpCache.cpp src/hotspot/share/oops/instanceKlass.cpp src/hotspot/share/oops/instanceKlass.hpp src/hotspot/share/oops/klass.cpp src/hotspot/share/prims/jni.cpp src/hotspot/share/prims/jvm.cpp src/hotspot/share/prims/jvmti.xml src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp src/hotspot/share/prims/jvmtiClassFileReconstituter.hpp src/hotspot/share/prims/jvmtiExport.cpp src/hotspot/share/prims/jvmtiRedefineClasses.cpp src/hotspot/share/prims/unsafe.cpp src/java.base/share/classes/java/lang/Class.java src/java.base/share/classes/java/lang/invoke/DirectMethodHandle.java src/java.base/share/classes/java/lang/invoke/MethodHandleImpl.java src/java.base/share/classes/java/lang/invoke/MethodHandles.java src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java src/java.base/share/classes/java/time/overview.html src/java.base/share/native/libjava/Class.c src/jdk.compiler/share/classes/META-INF/services/com.sun.tools.javac.platform.PlatformProvider src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassWriter.java src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Gen.java src/jdk.compiler/share/classes/com/sun/tools/javac/services/javax.tools.JavaCompilerTool src/jdk.compiler/share/classes/com/sun/tools/javac/util/Names.java src/jdk.jdeps/share/classes/com/sun/tools/classfile/Attribute.java src/jdk.jdeps/share/classes/com/sun/tools/classfile/ClassWriter.java src/jdk.jdeps/share/classes/com/sun/tools/javap/AttributeWriter.java src/jdk.zipfs/share/classes/jdk/nio/zipfs/JarFileSystem.java src/jdk.zipfs/share/classes/jdk/nio/zipfs/JarFileSystemProvider.java test/hotspot/jtreg/ProblemList.txt test/jdk/ProblemList.txt test/jdk/java/lang/Runtime/Resources.java test/jdk/java/lang/Runtime/Version/Basic.java test/jdk/java/lang/Runtime/Version/VersionProps.java test/jdk/java/lang/Runtime/exec/ArgWithSpaceAndFinalBackslash.java test/jdk/java/lang/Runtime/exec/BadEnvp.java test/jdk/java/lang/Runtime/exec/ConcurrentRead.java test/jdk/java/lang/Runtime/exec/Duped.java test/jdk/java/lang/Runtime/exec/ExecCommand.java test/jdk/java/lang/Runtime/exec/ExecEmptyString.java test/jdk/java/lang/Runtime/exec/ExecWithDir.java test/jdk/java/lang/Runtime/exec/ExecWithInput.java test/jdk/java/lang/Runtime/exec/ExecWithLotsOfArgs.java 
test/jdk/java/lang/Runtime/exec/ExitValue.java test/jdk/java/lang/Runtime/exec/LotsOfDestroys.java test/jdk/java/lang/Runtime/exec/LotsOfOutput.java test/jdk/java/lang/Runtime/exec/SetCwd.java test/jdk/java/lang/Runtime/exec/SleepyCat.java test/jdk/java/lang/Runtime/exec/Space.java test/jdk/java/lang/Runtime/exec/Status.java test/jdk/java/lang/Runtime/exec/StreamsSurviveDestroy.java test/jdk/java/lang/Runtime/exec/UnixCommands.java test/jdk/java/lang/Runtime/exec/WinCommand.java test/jdk/java/lang/Runtime/exec/setcwd.sh test/jdk/java/lang/Runtime/loadLibrary/LoadLibraryTest.java test/jdk/java/lang/Runtime/loadLibrary/src/Target.java test/jdk/java/lang/Runtime/loadLibrary/src/Target2.java test/jdk/java/lang/Runtime/shutdown/Basic.java test/jdk/java/lang/Runtime/shutdown/ShutdownHooks.java test/jdk/java/lang/Runtime/shutdown/ShutdownHooks.sh test/jdk/java/lang/Runtime/shutdown/ShutdownInterruptedMain.java test/jdk/jdk/jfr/api/consumer/streaming/TestRepositoryMigration.java test/jdk/sun/security/tools/jarsigner/warnings/BadKeyUsageTest.java test/langtools/jdk/javadoc/doclet/testDocFiles/pkg/Test.java test/langtools/jdk/javadoc/doclet/testDocFiles/pkg/doc-files/test.txt test/langtools/lib/annotations/annotations/classfile/ClassfileInspector.java test/langtools/tools/javac/MethodParameters/AttributeVisitor.java test/langtools/tools/javac/lambda/methodReference/ProtectedInaccessibleMethodRefTest2.java
diffstat 1509 files changed, 55193 insertions(+), 24213 deletions(-)
--- a/.hgtags	Mon Nov 25 14:29:46 2019 +0000
+++ b/.hgtags	Thu Dec 05 18:06:35 2019 -0800
@@ -597,3 +597,5 @@
 83810b7d12e7ff761ad3dd91f323a22dad96f108 jdk-14+22
 15936b142f86731afa4b1a2c0fe4a01e806c4944 jdk-14+23
 438337c846fb071900ddb6922bddf8b3e895a514 jdk-14+24
+17d242844fc9e7d18b3eac97426490a9c246119e jdk-14+25
+288777cf0702914e5266bc1e5d380eed9032ca41 jdk-14+26
--- a/make/CompileInterimLangtools.gmk	Mon Nov 25 14:29:46 2019 +0000
+++ b/make/CompileInterimLangtools.gmk	Thu Dec 05 18:06:35 2019 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -80,6 +80,7 @@
       ADD_JAVAC_FLAGS := --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules \
           $$(INTERIM_LANGTOOLS_ADD_EXPORTS) \
           --patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim \
+          --add-exports java.base/jdk.internal=java.compiler.interim \
           --add-exports java.base/jdk.internal=jdk.compiler.interim \
           -Xlint:-module, \
   ))
--- a/make/autoconf/flags-cflags.m4	Mon Nov 25 14:29:46 2019 +0000
+++ b/make/autoconf/flags-cflags.m4	Thu Dec 05 18:06:35 2019 -0800
@@ -190,20 +190,7 @@
       WARNINGS_ENABLE_ALL_CXXFLAGS="$WARNINGS_ENABLE_ALL_CFLAGS $WARNINGS_ENABLE_ADDITIONAL_CXX"
 
       DISABLED_WARNINGS="unused-parameter unused"
-
-      # Repeate the check for the BUILD_CC and BUILD_CXX. Need to also reset
-      # CFLAGS since any target specific flags will likely not work with the
-      # build compiler
-      CC_OLD="$CC"
-      CXX_OLD="$CXX"
-      CC="$BUILD_CC"
-      CXX="$BUILD_CXX"
-      CFLAGS_OLD="$CFLAGS"
-      CFLAGS=""
       BUILD_CC_DISABLE_WARNING_PREFIX="-Wno-"
-      CC="$CC_OLD"
-      CXX="$CXX_OLD"
-      CFLAGS="$CFLAGS_OLD"
       ;;
 
     clang)
@@ -420,6 +407,17 @@
 
   FLAGS_SETUP_CFLAGS_CPU_DEP([TARGET])
 
+  # Repeat the check for the BUILD_CC and BUILD_CXX. Need to also reset CFLAGS
+  # since any target specific flags will likely not work with the build compiler.
+  CC_OLD="$CC"
+  CXX_OLD="$CXX"
+  CFLAGS_OLD="$CFLAGS"
+  CXXFLAGS_OLD="$CXXFLAGS"
+  CC="$BUILD_CC"
+  CXX="$BUILD_CXX"
+  CFLAGS=""
+  CXXFLAGS=""
+
   FLAGS_OS=$OPENJDK_BUILD_OS
   FLAGS_OS_TYPE=$OPENJDK_BUILD_OS_TYPE
   FLAGS_CPU=$OPENJDK_BUILD_CPU
@@ -430,6 +428,11 @@
   FLAGS_CPU_LEGACY_LIB=$OPENJDK_BUILD_CPU_LEGACY_LIB
 
   FLAGS_SETUP_CFLAGS_CPU_DEP([BUILD], [OPENJDK_BUILD_], [BUILD_])
+
+  CC="$CC_OLD"
+  CXX="$CXX_OLD"
+  CFLAGS="$CFLAGS_OLD"
+  CXXFLAGS="$CXXFLAGS_OLD"
 ])
 
 ################################################################################
@@ -529,6 +532,11 @@
   if test "x$TOOLCHAIN_TYPE" = xgcc; then
     TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fcheck-new -fstack-protector"
     TOOLCHAIN_CFLAGS_JDK="-pipe -fstack-protector"
+    # reduce lib size on s390x in the link step; this also needs special compile flags
+    if test "x$OPENJDK_TARGET_CPU" = xs390x; then
+      TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections"
+      TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -ffunction-sections -fdata-sections"
+    fi
     # technically NOT for CXX (but since this gives *worse* performance, use
     # no-strict-aliasing everywhere!)
     TOOLCHAIN_CFLAGS_JDK_CONLY="-fno-strict-aliasing"
--- a/make/autoconf/flags-ldflags.m4	Mon Nov 25 14:29:46 2019 +0000
+++ b/make/autoconf/flags-ldflags.m4	Thu Dec 05 18:06:35 2019 -0800
@@ -70,10 +70,14 @@
     fi
 
     # Add -z defs, to forbid undefined symbols in object files.
-    BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,-z,defs"
+    # add relro (mark relocations read only) for all libs
+    BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,-z,defs -Wl,-z,relro"
+    # s390x: remove unused code+data in the link step
+    if test "x$OPENJDK_TARGET_CPU" = xs390x; then
+      BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,--gc-sections -Wl,--print-gc-sections"
+    fi
 
-    BASIC_LDFLAGS_JVM_ONLY="-Wl,-O1 -Wl,-z,relro"
-
+    BASIC_LDFLAGS_JVM_ONLY="-Wl,-O1"
 
   elif test "x$TOOLCHAIN_TYPE" = xclang; then
     BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \
@@ -120,9 +124,6 @@
     if test "x$OPENJDK_TARGET_OS" = xlinux; then
       if test x$DEBUG_LEVEL = xrelease; then
         DEBUGLEVEL_LDFLAGS_JDK_ONLY="$DEBUGLEVEL_LDFLAGS_JDK_ONLY -Wl,-O1"
-      else
-        # mark relocations read only on (fast/slow) debug builds
-        DEBUGLEVEL_LDFLAGS_JDK_ONLY="-Wl,-z,relro"
       fi
       if test x$DEBUG_LEVEL = xslowdebug; then
         # do relocations at load
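
The two s390x changes above work as a pair: -ffunction-sections/-fdata-sections make the compiler emit every function and data item into its own section, and -Wl,--gc-sections lets the linker discard any section not reachable from an entry point (-Wl,--print-gc-sections logs what was dropped). A minimal sketch of the effect, using a hypothetical demo.cpp:

    // demo.cpp - unused() is never referenced, used() is.
    // Compile/link (flags as added above):
    //   g++ -ffunction-sections -fdata-sections -c demo.cpp
    //   g++ -Wl,--gc-sections -Wl,--print-gc-sections demo.o -o demo
    // The linker can now drop the .text section holding unused().
    int used()   { return 42; }
    int unused() { return 24; }
    int main()   { return used(); }
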
--- a/make/autoconf/hotspot.m4	Mon Nov 25 14:29:46 2019 +0000
+++ b/make/autoconf/hotspot.m4	Thu Dec 05 18:06:35 2019 -0800
@@ -347,7 +347,8 @@
   # Only enable ZGC on supported platforms
   AC_MSG_CHECKING([if zgc can be built])
   if (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
-     (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64") ||
+     (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64") || \
+     (test "x$OPENJDK_TARGET_OS" = "xwindows" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
      (test "x$OPENJDK_TARGET_OS" = "xmacosx" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"); then
     AC_MSG_RESULT([yes])
   else
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/cacerts/amazonrootca1	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,27 @@
+Owner: CN=Amazon Root CA 1, O=Amazon, C=US
+Issuer: CN=Amazon Root CA 1, O=Amazon, C=US
+Serial number: 66c9fcf99bf8c0a39e2f0788a43e696365bca
+Valid from: Tue May 26 00:00:00 GMT 2015 until: Sun Jan 17 00:00:00 GMT 2038
+Signature algorithm name: SHA256withRSA
+Subject Public Key Algorithm: 2048-bit RSA key
+Version: 3
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/cacerts/amazonrootca2	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,38 @@
+Owner: CN=Amazon Root CA 2, O=Amazon, C=US
+Issuer: CN=Amazon Root CA 2, O=Amazon, C=US
+Serial number: 66c9fd29635869f0a0fe58678f85b26bb8a37
+Valid from: Tue May 26 00:00:00 GMT 2015 until: Sat May 26 00:00:00 GMT 2040
+Signature algorithm name: SHA384withRSA
+Subject Public Key Algorithm: 4096-bit RSA key
+Version: 3
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/cacerts/amazonrootca3	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,19 @@
+Owner: CN=Amazon Root CA 3, O=Amazon, C=US
+Issuer: CN=Amazon Root CA 3, O=Amazon, C=US
+Serial number: 66c9fd5749736663f3b0b9ad9e89e7603f24a
+Valid from: Tue May 26 00:00:00 GMT 2015 until: Sat May 26 00:00:00 GMT 2040
+Signature algorithm name: SHA256withECDSA
+Subject Public Key Algorithm: 256-bit EC key
+Version: 3
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/cacerts/amazonrootca4	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,20 @@
+Owner: CN=Amazon Root CA 4, O=Amazon, C=US
+Issuer: CN=Amazon Root CA 4, O=Amazon, C=US
+Serial number: 66c9fd7c1bb104c2943e5717b7b2cc81ac10e
+Valid from: Tue May 26 00:00:00 GMT 2015 until: Sat May 26 00:00:00 GMT 2040
+Signature algorithm name: SHA384withECDSA
+Subject Public Key Algorithm: 384-bit EC key
+Version: 3
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
--- a/make/hotspot/symbols/symbols-unix	Mon Nov 25 14:29:46 2019 +0000
+++ b/make/hotspot/symbols/symbols-unix	Thu Dec 05 18:06:35 2019 -0800
@@ -122,6 +122,7 @@
 JVM_GetPrimitiveArrayElement
 JVM_GetProperties
 JVM_GetProtectionDomain
+JVM_GetRecordComponents
 JVM_GetSimpleBinaryName
 JVM_GetStackAccessControlContext
 JVM_GetSystemPackage
@@ -144,6 +145,7 @@
 JVM_IsHiddenClass
 JVM_IsInterface
 JVM_IsPrimitiveClass
+JVM_IsRecord
 JVM_IsSameClassPackage
 JVM_IsSupportedJNIVersion
 JVM_IsThreadAlive
--- a/make/langtools/tools/propertiesparser/resources/templates.properties	Mon Nov 25 14:29:46 2019 +0000
+++ b/make/langtools/tools/propertiesparser/resources/templates.properties	Thu Dec 05 18:06:35 2019 -0800
@@ -1,3 +1,28 @@
+#
+# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.  Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
 toplevel.decl=\
     package {0};\n\
     \n\
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu Dec 05 18:06:35 2019 -0800
@@ -5639,7 +5639,6 @@
 operand cmpOpEqNe()
 %{
   match(Bool);
-  match(CmpOp);
   op_cost(0);
   predicate(n->as_Bool()->_test._test == BoolTest::ne
             || n->as_Bool()->_test._test == BoolTest::eq);
@@ -5663,7 +5662,6 @@
 operand cmpOpLtGe()
 %{
   match(Bool);
-  match(CmpOp);
   op_cost(0);
 
   predicate(n->as_Bool()->_test._test == BoolTest::lt
@@ -5688,7 +5686,6 @@
 operand cmpOpUEqNeLtGe()
 %{
   match(Bool);
-  match(CmpOp);
   op_cost(0);
 
   predicate(n->as_Bool()->_test._test == BoolTest::eq
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -144,7 +144,7 @@
   const size_t max_address_offset_bits = 44; // 16TB
   const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
   const size_t address_offset_bits = log2_intptr(address_offset);
-  return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
+  return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
 }
 
 size_t ZPlatformAddressMetadataShift() {
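
Both ZGC globals files replace the nested MIN2(MAX2(...)) idiom with clamp, which states the bound order directly. A minimal standalone sketch of the equivalence (hypothetical; HotSpot's actual clamp lives in its utility headers):

    #include <cassert>
    #include <cstddef>

    // clamp(v, lo, hi) == MIN2(MAX2(v, lo), hi), assuming lo <= hi.
    template <typename T>
    T clamp(T value, T lo, T hi) {
      assert(lo <= hi);
      return value < lo ? lo : (value > hi ? hi : value);
    }

    // e.g. size_t bits = clamp(address_offset_bits,
    //                          min_address_offset_bits,
    //                          max_address_offset_bits);
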
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -30,6 +30,7 @@
 #include "nativeInst_aarch64.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/ostream.hpp"
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -33,6 +33,7 @@
 #include "interpreter/interp_masm.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
+#include "nativeInst_aarch64.hpp"
 #include "oops/compiledICHolder.hpp"
 #include "oops/klass.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -443,7 +443,6 @@
     Register obj = r0;
     Register mdp = r1;
     Register tmp = r2;
-    __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
     __ profile_return_type(mdp, obj, tmp);
   }
 
@@ -1633,13 +1632,8 @@
   __ mov(rscratch2, true);
   __ strb(rscratch2, do_not_unlock_if_synchronized);
 
-  Label no_mdp;
   Register mdp = r3;
-  __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
-  __ cbz(mdp, no_mdp);
-  __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
   __ profile_parameters_type(mdp, r1, r2);
-  __ bind(no_mdp);
 
   // increment invocation count & check for overflow
   Label invocation_counter_overflow;
--- a/src/hotspot/cpu/arm/arm.ad	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/arm/arm.ad	Thu Dec 05 18:06:35 2019 -0800
@@ -2204,6 +2204,30 @@
   interface(REG_INTER);
 %}
 
+operand R8RegP() %{
+  constraint(ALLOC_IN_RC(R8_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R9RegP() %{
+  constraint(ALLOC_IN_RC(R9_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R12RegP() %{
+  constraint(ALLOC_IN_RC(R12_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand R2RegP() %{
   constraint(ALLOC_IN_RC(R2_regP));
   match(iRegP);
@@ -2236,6 +2260,14 @@
   interface(REG_INTER);
 %}
 
+operand SPRegP() %{
+  constraint(ALLOC_IN_RC(SP_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand LRRegP() %{
   constraint(ALLOC_IN_RC(LR_regP));
   match(iRegP);
--- a/src/hotspot/cpu/arm/arm_32.ad	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/arm/arm_32.ad	Thu Dec 05 18:06:35 2019 -0800
@@ -232,11 +232,15 @@
 reg_class R1_regP(R_R1);
 reg_class R2_regP(R_R2);
 reg_class R4_regP(R_R4);
+reg_class R8_regP(R_R8);
+reg_class R9_regP(R_R9);
+reg_class R12_regP(R_R12);
 reg_class Rexception_regP(R_Rexception_obj);
 reg_class Ricklass_regP(R_Ricklass);
 reg_class Rmethod_regP(R_Rmethod);
 reg_class Rthread_regP(R_Rthread);
 reg_class IP_regP(R_R12);
+reg_class SP_regP(R_R13);
 reg_class LR_regP(R_R14);
 
 reg_class FP_regP(R_R11);
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -581,6 +581,7 @@
         base_reg = Rtemp;
         __ str(from_lo, Address(Rtemp));
         if (patch != NULL) {
+          __ nop(); // see comment before patching_epilog for 2nd str
           patching_epilog(patch, lir_patch_low, base_reg, info);
           patch = new PatchingStub(_masm, PatchingStub::access_field_id);
           patch_code = lir_patch_high;
@@ -589,6 +590,7 @@
       } else if (base_reg == from_lo) {
         __ str(from_hi, as_Address_hi(to_addr));
         if (patch != NULL) {
+          __ nop(); // see comment before patching_epilog for 2nd str
           patching_epilog(patch, lir_patch_high, base_reg, info);
           patch = new PatchingStub(_masm, PatchingStub::access_field_id);
           patch_code = lir_patch_low;
@@ -597,6 +599,7 @@
       } else {
         __ str(from_lo, as_Address_lo(to_addr));
         if (patch != NULL) {
+          __ nop(); // see comment before patching_epilog for 2nd str
           patching_epilog(patch, lir_patch_low, base_reg, info);
           patch = new PatchingStub(_masm, PatchingStub::access_field_id);
           patch_code = lir_patch_high;
@@ -640,7 +643,7 @@
   }
 
   if (patch != NULL) {
-    // Offset embeedded into LDR/STR instruction may appear not enough
+    // Offset embedded into LDR/STR instruction may appear not enough
     // to address a field. So, provide a space for one more instruction
     // that will deal with larger offsets.
     __ nop();
@@ -791,6 +794,7 @@
         base_reg = Rtemp;
         __ ldr(to_lo, Address(Rtemp));
         if (patch != NULL) {
+          __ nop(); // see comment before patching_epilog for 2nd ldr
           patching_epilog(patch, lir_patch_low, base_reg, info);
           patch = new PatchingStub(_masm, PatchingStub::access_field_id);
           patch_code = lir_patch_high;
@@ -799,6 +803,7 @@
       } else if (base_reg == to_lo) {
         __ ldr(to_hi, as_Address_hi(addr));
         if (patch != NULL) {
+          __ nop(); // see comment before patching_epilog for 2nd ldr
           patching_epilog(patch, lir_patch_high, base_reg, info);
           patch = new PatchingStub(_masm, PatchingStub::access_field_id);
           patch_code = lir_patch_low;
@@ -807,6 +812,7 @@
       } else {
         __ ldr(to_lo, as_Address_lo(addr));
         if (patch != NULL) {
+          __ nop(); // see comment before patching_epilog for 2nd ldr
           patching_epilog(patch, lir_patch_low, base_reg, info);
           patch = new PatchingStub(_masm, PatchingStub::access_field_id);
           patch_code = lir_patch_high;
@@ -846,7 +852,7 @@
   }
 
   if (patch != NULL) {
-    // Offset embeedded into LDR/STR instruction may appear not enough
+    // Offset embedded into LDR/STR instruction may appear not enough
     // to address a field. So, provide a space for one more instruction
     // that will deal with larger offsets.
     __ nop();
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -28,6 +28,7 @@
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
 #include "runtime/icache.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
 #include "register_arm.hpp"
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -29,7 +29,6 @@
 #include "nativeInst_arm.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -437,7 +437,8 @@
   // for which we do not support MP and so membars are not necessary. This ARMv5 code will
   // be removed in the future.
 
-  // Support for jint Atomic::add(jint add_value, volatile jint *dest)
+  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+  // used by Atomic::add(volatile jint* dest, jint add_value)
   //
   // Arguments :
   //
@@ -487,7 +488,8 @@
     return start;
   }
 
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
+  // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
   //
   // Arguments :
   //
@@ -535,7 +537,8 @@
     return start;
   }
 
-  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+  // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+  // used by Atomic::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
   //
   // Arguments :
   //
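
These comment updates track the Atomic API reordering: the public Atomic:: templates now take the destination pointer first, while the ARMv5 stubs keep the old (value, dest) order. A rough sketch of that wrapper relationship in plain C++ (hypothetical names; the real stub is generated assembly):

    typedef int jint;

    // Stand-in for the generated stub, which keeps the old argument order.
    static jint atomic_add_stub(jint add_value, volatile jint* dest) {
      return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
    }

    struct Atomic {
      // New public order: destination first.
      static jint add(volatile jint* dest, jint add_value) {
        return atomic_add_stub(add_value, dest);  // adapt to stub order
      }
    };
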
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -883,7 +883,7 @@
     //
     // markWord displaced_header = obj->mark().set_unlocked();
     // monitor->lock()->set_displaced_header(displaced_header);
-    // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+    // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
     //   // We stored the monitor address into the object's mark word.
     // } else if (THREAD->is_lock_owned((address)displaced_header))
     //   // Simple recursive case.
@@ -921,7 +921,7 @@
     std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
         BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-    // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+    // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
 
     // Store stack address of the BasicObjectLock (this is monitor) into object.
     addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -997,7 +997,7 @@
     // if ((displaced_header = monitor->displaced_header()) == NULL) {
     //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
     //   monitor->set_obj(NULL);
-    // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
     //   monitor->set_obj(NULL);
     // } else {
@@ -1030,7 +1030,7 @@
     cmpdi(CCR0, displaced_header, 0);
     beq(CCR0, free_slot); // recursive unlock
 
-    // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
     //   monitor->set_obj(NULL);
 
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -374,7 +374,7 @@
   // Finally patch out the jump.
   volatile juint *jump_addr = (volatile juint*)instr_addr;
   // Release not needed because caller uses invalidate_range after copying the remaining bytes.
-  //OrderAccess::release_store(jump_addr, *((juint*)code_buffer));
+  //Atomic::release_store(jump_addr, *((juint*)code_buffer));
   *jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction
   ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size);
 }
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -976,7 +976,7 @@
   //
   // markWord displaced_header = obj->mark().set_unlocked();
   // monitor->lock()->set_displaced_header(displaced_header);
-  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
   //   // We stored the monitor address into the object's mark word.
   // } else if (THREAD->is_lock_owned((address)displaced_header))
   //   // Simple recursive case.
@@ -1011,7 +1011,7 @@
   z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
                           BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
 
   // Store stack address of the BasicObjectLock (this is monitor) into object.
   add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@@ -1082,7 +1082,7 @@
   // if ((displaced_header = monitor->displaced_header()) == NULL) {
   //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
   //   monitor->set_obj(NULL);
-  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
   // } else {
@@ -1123,7 +1123,7 @@
                                                       BasicLock::displaced_header_offset_in_bytes()));
   z_bre(done); // displaced_header == 0 -> goto done
 
-  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
 
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -585,7 +585,8 @@
     return start;
   }
 
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
+  // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
   //
   // Arguments:
   //
@@ -622,7 +623,8 @@
   }
 
 
-  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+  // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+  // used by Atomic::cmpxchg(volatile jint* dest, jint compare_value, jint exchange_value)
   //
   // Arguments:
   //
@@ -646,7 +648,8 @@
     return start;
   }
 
-  // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+  // Implementation of jlong atomic_cmpxchg_long(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+  // used by Atomic::cmpxchg(volatile jlong *dest, jlong compare_value, jlong exchange_value)
   //
   // Arguments:
   //
@@ -679,7 +682,8 @@
   }
 
 
-  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
+  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+  // used by Atomic::add(volatile jint* dest, jint add_value)
   //
   // Arguments:
   //
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -327,24 +327,42 @@
 #endif
 }
 
+#ifdef _LP64
 void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
   if (bs_nm == NULL) {
     return;
   }
-#ifndef _LP64
-  ShouldNotReachHere();
-#else
   Label continuation;
-  Register thread = LP64_ONLY(r15_thread);
+  Register thread = r15_thread;
   Address disarmed_addr(thread, in_bytes(bs_nm->thread_disarmed_offset()));
   __ align(8);
   __ cmpl(disarmed_addr, 0);
   __ jcc(Assembler::equal, continuation);
   __ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
   __ bind(continuation);
+}
+#else
+void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
+  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
+  if (bs_nm == NULL) {
+    return;
+  }
+
+  Label continuation;
+
+  Register tmp = rdi;
+  __ push(tmp);
+  __ movptr(tmp, (intptr_t)bs_nm->disarmed_value_address());
+  Address disarmed_addr(tmp, 0);
+  __ align(4);
+  __ cmpl(disarmed_addr, 0);
+  __ pop(tmp);
+  __ jcc(Assembler::equal, continuation);
+  __ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
+  __ bind(continuation);
+}
 #endif
-}
 
 void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
   BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
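
For context, an nmethod entry barrier is a cheap armed/disarmed test emitted at compiled-method entry: the fast path compares a guard value (read off r15_thread on 64-bit, through a global cell loaded into rdi in the new 32-bit variant above) and only calls the method_entry_barrier stub when the GC has armed the barrier. A rough C++ sketch of the control flow (hypothetical names, not the emitted assembly):

    // GC "arms" compiled entries by making this comparison fail.
    static volatile int disarmed_guard = 0;

    static void method_entry_barrier_stub() {
      // slow path: fix up the nmethod, then disarm for this method
    }

    static void compiled_method_entry() {
      if (disarmed_guard != 0) {      // cmpl disarmed_addr, 0 / jcc equal
        method_entry_barrier_stub();  // taken only while armed
      }
      // ... compiled method body
    }
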
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
 
 class NativeNMethodCmpBarrier: public NativeInstruction {
 public:
+#ifdef _LP64
   enum Intel_specific_constants {
     instruction_code        = 0x81,
     instruction_size        = 8,
@@ -42,6 +43,14 @@
     instruction_rex_prefix  = Assembler::REX | Assembler::REX_B,
     instruction_modrm       = 0x7f  // [r15 + offset]
   };
+#else
+  enum Intel_specific_constants {
+    instruction_code        = 0x81,
+    instruction_size        = 7,
+    imm_offset              = 2,
+    instruction_modrm       = 0x3f  // [rdi]
+  };
+#endif
 
   address instruction_address() const { return addr_at(0); }
   address immediate_address() const { return addr_at(imm_offset); }
@@ -51,6 +60,7 @@
   void verify() const;
 };
 
+#ifdef _LP64
 void NativeNMethodCmpBarrier::verify() const {
   if (((uintptr_t) instruction_address()) & 0x7) {
     fatal("Not properly aligned");
@@ -77,6 +87,27 @@
     fatal("not a cmp barrier");
   }
 }
+#else
+void NativeNMethodCmpBarrier::verify() const {
+  if (((uintptr_t) instruction_address()) & 0x3) {
+    fatal("Not properly aligned");
+  }
+
+  int inst = ubyte_at(0);
+  if (inst != instruction_code) {
+    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
+        inst);
+    fatal("not a cmp barrier");
+  }
+
+  int modrm = ubyte_at(1);
+  if (modrm != instruction_modrm) {
+    tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
+        modrm);
+    fatal("not a cmp barrier");
+  }
+}
+#endif // _LP64
 
 void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
   /*
@@ -127,7 +158,7 @@
 // NativeNMethodCmpBarrier::verify() will immediately complain when it does
 // not find the expected native instruction at this offset, which needs updating.
 // Note that this offset is invariant of PreserveFramePointer.
-static const int entry_barrier_offset = -19;
+static const int entry_barrier_offset = LP64_ONLY(-19) NOT_LP64(-18);
 
 static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
   address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -395,6 +395,52 @@
   __ block_comment("load_reference_barrier_native { ");
 }
 
+#ifdef _LP64
+void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
+  // Use default version
+  BarrierSetAssembler::c2i_entry_barrier(masm);
+}
+#else
+void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
+  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
+  if (bs == NULL) {
+    return;
+  }
+
+  Label bad_call;
+  __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
+  __ jcc(Assembler::equal, bad_call);
+
+  Register tmp1 = rax;
+  Register tmp2 = rcx;
+
+  __ push(tmp1);
+  __ push(tmp2);
+
+  // Pointer chase to the method holder to find out if the method is concurrently unloading.
+  Label method_live;
+  __ load_method_holder_cld(tmp1, rbx);
+
+   // Is it a strong CLD?
+  __ cmpl(Address(tmp1, ClassLoaderData::keep_alive_offset()), 0);
+  __ jcc(Assembler::greater, method_live);
+
+   // Is it a weak but alive CLD?
+  __ movptr(tmp1, Address(tmp1, ClassLoaderData::holder_offset()));
+  __ resolve_weak_handle(tmp1, tmp2);
+  __ cmpptr(tmp1, 0);
+  __ jcc(Assembler::notEqual, method_live);
+  __ pop(tmp2);
+  __ pop(tmp1);
+
+  __ bind(bad_call);
+  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+  __ bind(method_live);
+  __ pop(tmp2);
+  __ pop(tmp1);
+}
+#endif
+
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
   if (ShenandoahStoreValEnqueueBarrier) {
     storeval_barrier_impl(masm, dst, tmp);
@@ -511,8 +557,12 @@
 
   // 3: apply keep-alive barrier if needed
   if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
-    const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
+    __ push_IU_state();
+    Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
     assert_different_registers(dst, tmp1, tmp_thread);
+    if (!thread->is_valid()) {
+      thread = rdx;
+    }
     NOT_LP64(__ get_thread(thread));
     // Generate the SATB pre-barrier code to log the value of
     // the referent field in an SATB buffer.
@@ -523,6 +573,7 @@
                                  tmp1 /* tmp */,
                                  true /* tosca_live */,
                                  true /* expand_call */);
+    __ pop_IU_state();
   }
 }
 
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -86,6 +86,7 @@
                         Address dst, Register val, Register tmp1, Register tmp2);
   virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                              Register obj, Register tmp, Label& slowpath);
+  virtual void c2i_entry_barrier(MacroAssembler* masm);
 
   virtual void barrier_stubs_init();
 
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -517,8 +517,11 @@
     // Sort by size, largest first
     _xmm_registers.sort(xmm_compare_register_size);
 
+    // On Windows, the caller reserves stack space for spilling register arguments
+    const int arg_spill_size = frame::arg_reg_save_area_bytes;
+
     // Stack pointer must be 16 bytes aligned for the call
-    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size, 16);
+    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + arg_spill_size, 16);
   }
 
 public:
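
The new arg_spill_size term accounts for the Windows x64 calling convention, where the caller reserves stack space (four register-argument slots, 32 bytes) that the callee may use to spill its register arguments; frame::arg_reg_save_area_bytes is zero on System V targets, so the computation is unchanged there. A small sketch of the adjusted computation, assuming a hypothetical align_up helper:

    // Hypothetical mirror of the spill-size computation above.
    static int align_up(int value, int alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    static int spill_size(int xmm_spill, int gp_spill,
                          int arg_reg_save_area_bytes /* 32 on Win64 */) {
      // The stack pointer must stay 16-byte aligned across the call.
      return align_up(xmm_spill + gp_spill + arg_reg_save_area_bytes, 16);
    }
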
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -144,7 +144,7 @@
   const size_t max_address_offset_bits = 44; // 16TB
   const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
   const size_t address_offset_bits = log2_intptr(address_offset);
-  return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
+  return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
 }
 
 size_t ZPlatformAddressMetadataShift() {
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -1356,7 +1356,7 @@
   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
 
   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vinserti32x4(dst, dst, src, imm8);
     } else if (UseAVX > 1) {
       // vinserti128 is available only in AVX2
@@ -1367,7 +1367,7 @@
   }
 
   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vinserti32x4(dst, dst, src, imm8);
     } else if (UseAVX > 1) {
       // vinserti128 is available only in AVX2
@@ -1378,7 +1378,7 @@
   }
 
   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vextracti32x4(dst, src, imm8);
     } else if (UseAVX > 1) {
       // vextracti128 is available only in AVX2
@@ -1389,7 +1389,7 @@
   }
 
   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vextracti32x4(dst, src, imm8);
     } else if (UseAVX > 1) {
       // vextracti128 is available only in AVX2
@@ -1414,7 +1414,7 @@
   }
 
   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vinsertf32x4(dst, dst, src, 1);
     } else {
       Assembler::vinsertf128(dst, dst, src, 1);
@@ -1422,7 +1422,7 @@
   }
 
   void vinsertf128_high(XMMRegister dst, Address src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vinsertf32x4(dst, dst, src, 1);
     } else {
       Assembler::vinsertf128(dst, dst, src, 1);
@@ -1430,7 +1430,7 @@
   }
 
   void vextractf128_high(XMMRegister dst, XMMRegister src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vextractf32x4(dst, src, 1);
     } else {
       Assembler::vextractf128(dst, src, 1);
@@ -1438,7 +1438,7 @@
   }
 
   void vextractf128_high(Address dst, XMMRegister src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vextractf32x4(dst, src, 1);
     } else {
       Assembler::vextractf128(dst, src, 1);
@@ -1480,7 +1480,7 @@
   }
 
   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vinsertf32x4(dst, dst, src, 0);
     } else {
       Assembler::vinsertf128(dst, dst, src, 0);
@@ -1488,7 +1488,7 @@
   }
 
   void vinsertf128_low(XMMRegister dst, Address src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vinsertf32x4(dst, dst, src, 0);
     } else {
       Assembler::vinsertf128(dst, dst, src, 0);
@@ -1496,7 +1496,7 @@
   }
 
   void vextractf128_low(XMMRegister dst, XMMRegister src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vextractf32x4(dst, src, 0);
     } else {
       Assembler::vextractf128(dst, src, 0);
@@ -1504,7 +1504,7 @@
   }
 
   void vextractf128_low(Address dst, XMMRegister src) {
-    if (UseAVX > 2) {
+    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
       Assembler::vextractf32x4(dst, src, 0);
     } else {
       Assembler::vextractf128(dst, src, 0);
--- a/src/hotspot/cpu/x86/rdtsc_x86.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/rdtsc_x86.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "rdtsc_x86.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/thread.inline.hpp"
 #include "vm_version_ext_x86.hpp"
 
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -975,6 +975,9 @@
 
   address c2i_entry = __ pc();
 
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->c2i_entry_barrier(masm);
+
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
@@ -1886,6 +1889,10 @@
   // -2 because return address is already present and so is saved rbp
   __ subptr(rsp, stack_size - 2*wordSize);
 
+
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->nmethod_entry_barrier(masm);
+
   // Frame is now completed as far as size and linkage.
   int frame_complete = ((intptr_t)__ pc()) - start;
 
@@ -1921,12 +1928,12 @@
   // if we load it once it is usable thru the entire wrapper
   const Register thread = rdi;
 
-  // We use rsi as the oop handle for the receiver/klass
-  // It is callee save so it survives the call to native
-
-  const Register oop_handle_reg = rsi;
-
-  __ get_thread(thread);
+   // We use rsi as the oop handle for the receiver/klass
+   // It is callee save so it survives the call to native
+
+   const Register oop_handle_reg = rsi;
+
+   __ get_thread(thread);
 
   if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
+#include "gc/shared/barrierSetNMethod.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/universe.hpp"
 #include "nativeInst_x86.hpp"
@@ -430,7 +431,8 @@
 
 
   //----------------------------------------------------------------------------------------------------
-  // Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
+  // Implementation of int32_t atomic_xchg(int32_t exchange_value, volatile int32_t* dest)
+  // used by Atomic::xchg(volatile int32_t* dest, int32_t exchange_value)
   //
   // xchg exists as far back as 8086, lock needed for MP only
   // Stack layout immediately after call:
@@ -3662,6 +3664,68 @@
     __ ret(0);
   }
 
+  address generate_method_entry_barrier() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
+
+    Label deoptimize_label;
+
+    address start = __ pc();
+
+    __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing
+
+    BLOCK_COMMENT("Entry:");
+    __ enter(); // save rbp
+
+    // save rbx, because we want to use that value.
+    // We could do without it but then we depend on the number of slots used by pusha
+    __ push(rbx);
+
+    __ lea(rbx, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for rbx - this should be the return address
+
+    __ pusha();
+
+    // xmm0 and xmm1 may be used for passing float/double arguments
+    const int xmm_size = wordSize * 2;
+    const int xmm_spill_size = xmm_size * 2;
+    __ subptr(rsp, xmm_spill_size);
+    __ movdqu(Address(rsp, xmm_size * 1), xmm1);
+    __ movdqu(Address(rsp, xmm_size * 0), xmm0);
+
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), rbx);
+
+    __ movdqu(xmm0, Address(rsp, xmm_size * 0));
+    __ movdqu(xmm1, Address(rsp, xmm_size * 1));
+    __ addptr(rsp, xmm_spill_size);
+
+    __ cmpl(rax, 1); // 1 means deoptimize
+    __ jcc(Assembler::equal, deoptimize_label);
+
+    __ popa();
+    __ pop(rbx);
+
+    __ leave();
+
+    __ addptr(rsp, 1 * wordSize); // cookie
+    __ ret(0);
+
+    __ BIND(deoptimize_label);
+
+    __ popa();
+    __ pop(rbx);
+
+    __ leave();
+
+    // this can be taken out, but is good for verification purposes. getting a SIGSEGV
+    // here while still having a correct stack is valuable
+    __ testptr(rsp, Address(rsp, 0));
+
+    __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
+    __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point
+
+    return start;
+  }
+
  public:
   // Information about frame layout at time of blocking runtime call.
   // Note that we only have to preserve callee-saved registers since
@@ -3958,6 +4022,11 @@
     StubRoutines::_safefetchN_entry           = StubRoutines::_safefetch32_entry;
     StubRoutines::_safefetchN_fault_pc        = StubRoutines::_safefetch32_fault_pc;
     StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
+
+    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
+    if (bs_nm != NULL) {
+      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
+    }
   }
 
 
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -552,7 +552,8 @@
     return start;
   }
 
-  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
+  // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -571,7 +572,8 @@
     return start;
   }
 
-  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
+  // Implementation of intptr_t atomic_xchg(jlong exchange_value, volatile jlong* dest)
+  // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -668,7 +670,8 @@
     return start;
   }
 
-  // Support for jint atomic::add(jint add_value, volatile jint* dest)
+  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+  // used by Atomic::add(volatile jint* dest, jint add_value)
   //
   // Arguments :
   //    c_rarg0: add_value
@@ -690,7 +693,8 @@
     return start;
   }
 
-  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+  // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
+  // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
   //
   // Arguments :
   //    c_rarg0: add_value
--- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -55,14 +55,8 @@
   static address _double_sign_mask;
   static address _double_sign_flip;
 
-  static address _method_entry_barrier;
-
  public:
 
-  static address method_entry_barrier() {
-    return _method_entry_barrier;
-  }
-
   static address get_previous_fp_entry() {
     return _get_previous_fp_entry;
   }
@@ -121,6 +115,8 @@
   //shuffle mask for big-endian 128-bit integers
   static address _counter_shuffle_mask_addr;
 
+  static address _method_entry_barrier;
+
   // masks and table for CRC32
   static uint64_t _crc_by128_masks[];
   static juint    _crc_table[];
@@ -221,6 +217,7 @@
   static address upper_word_mask_addr() { return _upper_word_mask_addr; }
   static address shuffle_byte_flip_mask_addr() { return _shuffle_byte_flip_mask_addr; }
   static address k256_addr()      { return _k256_adr; }
+  static address method_entry_barrier() { return _method_entry_barrier; }
 
   static address vector_short_to_byte_mask() {
     return _vector_short_to_byte_mask;
--- a/src/hotspot/cpu/x86/stubRoutines_x86_32.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/stubRoutines_x86_32.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,3 +32,5 @@
 // a description of how to extend it, see the stubRoutines.hpp file.
 
 address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
+address StubRoutines::x86::_method_entry_barrier = NULL;
+
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -367,26 +367,29 @@
     //
     intx saved_useavx = UseAVX;
     intx saved_usesse = UseSSE;
-    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
-    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
-    __ movl(rax, 0x10000);
-    __ andl(rax, Address(rsi, 4)); // xcr0 bits sse | ymm
-    __ cmpl(rax, 0x10000);
-    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
-    // check _cpuid_info.xem_xcr0_eax.bits.opmask
-    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
-    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
-    __ movl(rax, 0xE0);
-    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
-    __ cmpl(rax, 0xE0);
-    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
 
-    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
-    __ movl(rax, Address(rsi, 0));
-    __ cmpl(rax, 0x50654);              // If it is Skylake
-    __ jcc(Assembler::equal, legacy_setup);
     // If UseAVX is uninitialized or is set by the user to include EVEX
     if (use_evex) {
+      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
+      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
+      __ movl(rax, 0x10000);
+      __ andl(rax, Address(rsi, 4)); // xcr0 bits sse | ymm
+      __ cmpl(rax, 0x10000);
+      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
+      // check _cpuid_info.xem_xcr0_eax.bits.opmask
+      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
+      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
+      __ movl(rax, 0xE0);
+      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
+      __ cmpl(rax, 0xE0);
+      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
+
+      if (FLAG_IS_DEFAULT(UseAVX)) {
+        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+        __ movl(rax, Address(rsi, 0));
+        __ cmpl(rax, 0x50654);              // If it is Skylake
+        __ jcc(Assembler::equal, legacy_setup);
+      }
       // EVEX setup: run in lowest evex mode
       VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
       UseAVX = 3;
@@ -455,27 +458,28 @@
     VM_Version::set_cpuinfo_cont_addr(__ pc());
     // Returns here after signal. Save xmm0 to check it later.
 
-    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
-    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
-    __ movl(rax, 0x10000);
-    __ andl(rax, Address(rsi, 4));
-    __ cmpl(rax, 0x10000);
-    __ jcc(Assembler::notEqual, legacy_save_restore);
-    // check _cpuid_info.xem_xcr0_eax.bits.opmask
-    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
-    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
-    __ movl(rax, 0xE0);
-    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
-    __ cmpl(rax, 0xE0);
-    __ jcc(Assembler::notEqual, legacy_save_restore);
-
-    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
-    __ movl(rax, Address(rsi, 0));
-    __ cmpl(rax, 0x50654);              // If it is Skylake
-    __ jcc(Assembler::equal, legacy_save_restore);
-
     // If UseAVX is uninitialized or is set by the user to include EVEX
     if (use_evex) {
+      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
+      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
+      __ movl(rax, 0x10000);
+      __ andl(rax, Address(rsi, 4));
+      __ cmpl(rax, 0x10000);
+      __ jcc(Assembler::notEqual, legacy_save_restore);
+      // check _cpuid_info.xem_xcr0_eax.bits.opmask
+      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
+      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
+      __ movl(rax, 0xE0);
+      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
+      __ cmpl(rax, 0xE0);
+      __ jcc(Assembler::notEqual, legacy_save_restore);
+
+      if (FLAG_IS_DEFAULT(UseAVX)) {
+        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+        __ movl(rax, Address(rsi, 0));
+        __ cmpl(rax, 0x50654);              // If it is Skylake
+        __ jcc(Assembler::equal, legacy_save_restore);
+      }
       // EVEX check: run in lowest evex mode
       VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
       UseAVX = 3;
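
Restated in plain C++, the reshuffled gating logic above amounts to the following standalone sketch (constants copied from the assembly; the helper name and parameters are hypothetical). The AVX-512 capability checks now run only under the use_evex branch, and the Skylake (0x50654) fallback to legacy setup applies only when UseAVX was left at its default:

#include <cstdint>

static bool run_evex_setup(bool use_evex,          // UseAVX uninitialized or set to include EVEX
                           bool useavx_is_default, // FLAG_IS_DEFAULT(UseAVX)
                           uint32_t sef_cpuid7_ebx,
                           uint32_t xem_xcr0_eax,
                           uint32_t std_cpuid1_eax) {
  if (!use_evex) {
    return false;                              // legacy setup
  }
  if ((sef_cpuid7_ebx & 0x10000) != 0x10000) { // avx512f not reported
    return false;
  }
  if ((xem_xcr0_eax & 0xE0) != 0xE0) {         // opmask/zmm state not OS-enabled
    return false;
  }
  if (useavx_is_default && std_cpuid1_eax == 0x50654) {
    return false;                              // Skylake by default: stay on legacy setup
  }
  return true;                                 // run in lowest EVEX mode
}
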
--- a/src/hotspot/cpu/x86/x86_32.ad	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/x86_32.ad	Thu Dec 05 18:06:35 2019 -0800
@@ -3917,6 +3917,13 @@
   interface(REG_INTER);
 %}
 
+operand eDXRegP(eRegP reg) %{
+  constraint(ALLOC_IN_RC(edx_reg));
+  match(reg);
+  format %{ "EDX" %}
+  interface(REG_INTER);
+%}
+
 operand eSIRegP(eRegP reg) %{
   constraint(ALLOC_IN_RC(esi_reg));
   match(reg);
@@ -8977,7 +8984,7 @@
   %}
 
   ins_pipe(ialu_reg_reg);
-%} 
+%}
 
 //----------Long Instructions------------------------------------------------
 // Add Long Register with Register
--- a/src/hotspot/cpu/x86/x86_64.ad	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu Dec 05 18:06:35 2019 -0800
@@ -267,6 +267,9 @@
 // Singleton class for RSI pointer register
 reg_class ptr_rsi_reg(RSI, RSI_H);
 
+// Singleton class for RBP pointer register
+reg_class ptr_rbp_reg(RBP, RBP_H);
+
 // Singleton class for RDI pointer register
 reg_class ptr_rdi_reg(RDI, RDI_H);
 
@@ -3530,6 +3533,16 @@
   interface(REG_INTER);
 %}
 
+operand rbp_RegP()
+%{
+  constraint(ALLOC_IN_RC(ptr_rbp_reg));
+  match(RegP);
+  match(rRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Used in rep stosq
 operand rdi_RegP()
 %{
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -38,13 +38,11 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
--- a/src/hotspot/os/aix/os_aix.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/aix/os_aix.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -60,7 +60,6 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
@@ -1084,7 +1083,7 @@
     if (now <= prev) {
       return prev;   // same or retrograde time;
     }
-    jlong obsv = Atomic::cmpxchg(now, &max_real_time, prev);
+    jlong obsv = Atomic::cmpxchg(&max_real_time, prev, now);
     assert(obsv >= prev, "invariant");   // Monotonicity
     // If the CAS succeeded then we're done and return "now".
     // If the CAS failed and the observed value "obsv" is >= now then
@@ -1385,6 +1384,8 @@
   st->print_cr("AIX kernel version %u.%u.%u.%u",
                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
 
+  os::Posix::print_uptime_info(st);
+
   os::Posix::print_rlimit_info(st);
 
   os::Posix::print_load_average(st);
@@ -1794,7 +1795,7 @@
   for (;;) {
     for (int i = 0; i < NSIG + 1; i++) {
       jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
         return i;
       }
     }
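
The Atomic::cmpxchg call sites above now use the (dest, compare_value, exchange_value) argument order; cmpxchg still returns the value observed at *dest, so success means the observed value equals compare_value. A sketch of the pending-signal claim loop with std::atomic (names are illustrative):

#include <atomic>

static int check_pending(std::atomic<int> pending[], int nsig) {
  for (int i = 0; i < nsig; i++) {
    int n = pending[i].load();
    // Claim one pending signal by decrementing n to n - 1; on failure,
    // compare_exchange_strong refreshes n and we simply move on.
    if (n > 0 && pending[i].compare_exchange_strong(n, n - 1)) {
      return i;
    }
  }
  return -1; // the VM instead blocks on a semaphore and retries
}
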
--- a/src/hotspot/os/aix/os_perf_aix.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/aix/os_perf_aix.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -28,6 +28,7 @@
 #include "os_aix.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/os_perf.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 #include CPU_HEADER(vm_version_ext)
 
@@ -884,8 +885,7 @@
   friend class NetworkPerformanceInterface;
  private:
   NetworkPerformance();
-  NetworkPerformance(const NetworkPerformance& rhs); // no impl
-  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  NONCOPYABLE(NetworkPerformance);
   bool initialize();
   ~NetworkPerformance();
   int network_utilization(NetworkInterface** network_interfaces) const;
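
The NONCOPYABLE macro now pulled in from utilities/globalDefinitions.hpp replaces the hand-written undefined copy constructor and assignment operator. A sketch of what such a macro looks like with C++11 deleted functions (an illustration, not the exact HotSpot definition):

// Delete copy construction and copy assignment for class C.
#define NONCOPYABLE(C) C(C const&) = delete; C& operator=(C const&) = delete

class Example {
  NONCOPYABLE(Example);
 public:
  Example() {}
};

// Example a;
// Example b = a; // error: use of deleted copy constructor
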
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -148,7 +148,7 @@
   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 }
 
-void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
+void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
   const size_t nsegments = pmem.nsegments();
   size_t size = 0;
 
@@ -159,11 +159,6 @@
     _file.map(segment_addr, segment.size(), segment.start());
     size += segment.size();
   }
-
-  // Pre-touch memory
-  if (pretouch) {
-    pretouch_view(addr, size);
-  }
 }
 
 void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
@@ -175,15 +170,27 @@
   return ZAddress::marked0(offset);
 }
 
+void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
+  if (ZVerifyViews) {
+    // Pre-touch good view
+    pretouch_view(ZAddress::good(offset), size);
+  } else {
+    // Pre-touch all views
+    pretouch_view(ZAddress::marked0(offset), size);
+    pretouch_view(ZAddress::marked1(offset), size);
+    pretouch_view(ZAddress::remapped(offset), size);
+  }
+}
+
 void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   if (ZVerifyViews) {
     // Map good view
-    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::good(offset));
   } else {
     // Map all views
-    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
-    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
-    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::marked0(offset));
+    map_view(pmem, ZAddress::marked1(offset));
+    map_view(pmem, ZAddress::remapped(offset));
   }
 }
 
@@ -202,7 +209,7 @@
 void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   // Map good view
   assert(ZVerifyViews, "Should be enabled");
-  map_view(pmem, ZAddress::good(offset), false /* pretouch */);
+  map_view(pmem, ZAddress::good(offset));
 }
 
 void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
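
The refactoring above moves pre-touching out of map_view() and into a dedicated pretouch() entry point, so the caller decides when and which views to touch. In essence, pre-touching just walks the range one page at a time so the OS populates it eagerly; a minimal standalone sketch (the real implementation is os::pretouch_memory):

#include <cstddef>

static void pretouch(char* start, char* end, size_t page_size) {
  for (volatile char* p = start; p < end; p += page_size) {
    *p = *p; // a read-modify-write on one byte faults the whole page in
  }
}
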
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -36,7 +36,7 @@
   ZMemoryManager _uncommitted;
 
   void pretouch_view(uintptr_t addr, size_t size) const;
-  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
+  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
   void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
 
 public:
@@ -53,6 +53,8 @@
 
   uintptr_t nmt_address(uintptr_t offset) const;
 
+  void pretouch(uintptr_t offset, size_t size) const;
+
   void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
   void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
 
--- a/src/hotspot/os/bsd/os_bsd.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -51,7 +51,6 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/semaphore.hpp"
@@ -169,6 +168,22 @@
   return available;
 }
 
+// For more info see:
+// https://man.openbsd.org/sysctl.2
+void os::Bsd::print_uptime_info(outputStream* st) {
+  struct timeval boottime;
+  size_t len = sizeof(boottime);
+  int mib[2];
+  mib[0] = CTL_KERN;
+  mib[1] = KERN_BOOTTIME;
+
+  if (sysctl(mib, 2, &boottime, &len, NULL, 0) >= 0) {
+    time_t bootsec = boottime.tv_sec;
+    time_t currsec = time(NULL);
+    os::print_dhm(st, "OS uptime:", (long) difftime(currsec, bootsec));
+  }
+}
+
 julong os::physical_memory() {
   return Bsd::physical_memory();
 }
@@ -931,7 +946,7 @@
   if (now <= prev) {
     return prev;   // same or retrograde time;
   }
-  const uint64_t obsv = Atomic::cmpxchg(now, &Bsd::_max_abstime, prev);
+  const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
   assert(obsv >= prev, "invariant");   // Monotonicity
   // If the CAS succeeded then we're done and return "now".
   // If the CAS failed and the observed value "obsv" is >= now then
@@ -1570,6 +1585,8 @@
 
   os::Posix::print_uname_info(st);
 
+  os::Bsd::print_uptime_info(st);
+
   os::Posix::print_rlimit_info(st);
 
   os::Posix::print_load_average(st);
@@ -1834,7 +1851,7 @@
   for (;;) {
     for (int i = 0; i < NSIG + 1; i++) {
       jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
         return i;
       }
     }
@@ -1895,7 +1912,7 @@
   }
 
   char buf[PATH_MAX + 1];
-  int num = Atomic::add(1, &cnt);
+  int num = Atomic::add(&cnt, 1);
 
   snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
            os::get_temp_directory(), os::current_process_id(), num);
@@ -3209,7 +3226,7 @@
 static volatile int next_processor_id = 0;
 
 static inline volatile int* get_apic_to_processor_mapping() {
-  volatile int* mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
+  volatile int* mapping = Atomic::load_acquire(&apic_to_processor_mapping);
   if (mapping == NULL) {
     // Calculate possible number space for APIC ids. This space is not necessarily
     // in the range [0, number_of_processors).
@@ -3238,9 +3255,9 @@
       mapping[i] = -1;
     }
 
-    if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
+    if (!Atomic::replace_if_null(&apic_to_processor_mapping, mapping)) {
       FREE_C_HEAP_ARRAY(int, mapping);
-      mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
+      mapping = Atomic::load_acquire(&apic_to_processor_mapping);
     }
   }
 
@@ -3264,12 +3281,14 @@
   int processor_id = Atomic::load(&mapping[apic_id]);
 
   while (processor_id < 0) {
-    if (Atomic::cmpxchg(-2, &mapping[apic_id], -1)) {
-      Atomic::store(Atomic::add(1, &next_processor_id) - 1, &mapping[apic_id]);
+    if (Atomic::cmpxchg(&mapping[apic_id], -1, -2) == -1) {
+      Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
     }
     processor_id = Atomic::load(&mapping[apic_id]);
   }
 
+  assert(processor_id >= 0 && processor_id < os::processor_count(), "invalid processor id");
+
   return (uint)processor_id;
 }
 #endif
@@ -3762,11 +3781,30 @@
   }
 }
 
-// Get the default path to the core file
+// Get the kern.corefile setting, or otherwise the default path to the core file
 // Returns the length of the string
 int os::get_core_path(char* buffer, size_t bufferSize) {
-  int n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", current_process_id());
-
+  int n = 0;
+#ifdef __APPLE__
+  char coreinfo[MAX_PATH];
+  size_t sz = sizeof(coreinfo);
+  int ret = sysctlbyname("kern.corefile", coreinfo, &sz, NULL, 0);
+  if (ret == 0) {
+    char *pid_pos = strstr(coreinfo, "%P");
+    // skip over the "%P" to preserve any optional custom user pattern
+    const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";
+
+    if (pid_pos != NULL) {
+      *pid_pos = '\0';
+      n = jio_snprintf(buffer, bufferSize, "%s%d%s", coreinfo, os::current_process_id(), tail);
+    } else {
+      n = jio_snprintf(buffer, bufferSize, "%s", coreinfo);
+    }
+  } else
+#endif
+  {
+    n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", os::current_process_id());
+  }
   // Truncate if theoretical string was longer than bufferSize
   n = MIN2(n, (int)bufferSize);
 
--- a/src/hotspot/os/bsd/os_bsd.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/bsd/os_bsd.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -155,6 +155,8 @@
     }
   }
   static int get_node_by_cpu(int cpu_id);
+
+  static void print_uptime_info(outputStream* st);
 };
 
 #endif // OS_BSD_OS_BSD_HPP
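
The os_bsd.cpp changes above also query sysctl for kern.corefile and substitute the current pid for a "%P" token. A hedged standalone demo of that expansion on macOS, using only documented libc calls:

#include <sys/sysctl.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  char pattern[1024];
  size_t sz = sizeof(pattern);
  if (sysctlbyname("kern.corefile", pattern, &sz, NULL, 0) == 0) {
    char* pid_pos = std::strstr(pattern, "%P");
    if (pid_pos != NULL) {
      *pid_pos = '\0'; // split at "%P", keep any custom suffix
      std::printf("core path: %s%d%s\n", pattern, (int)getpid(), pid_pos + 2);
    } else {
      std::printf("core path: %s\n", pattern);
    }
  }
  return 0;
}
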
--- a/src/hotspot/os/bsd/os_perf_bsd.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/bsd/os_perf_bsd.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -26,6 +26,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/os.hpp"
 #include "runtime/os_perf.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include CPU_HEADER(vm_version_ext)
 
 #ifdef __APPLE__
@@ -72,8 +73,8 @@
   int cpu_load_total_process(double* cpu_load);
   int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad);
 
-  CPUPerformance(const CPUPerformance& rhs); // no impl
-  CPUPerformance& operator=(const CPUPerformance& rhs); // no impl
+  NONCOPYABLE(CPUPerformance);
+
  public:
   CPUPerformance();
   bool initialize();
@@ -264,8 +265,7 @@
  private:
   SystemProcesses();
   bool initialize();
-  SystemProcesses(const SystemProcesses& rhs); // no impl
-  SystemProcesses& operator=(const SystemProcesses& rhs); // no impl
+  NONCOPYABLE(SystemProcesses);
   ~SystemProcesses();
 
   //information about system processes
@@ -407,8 +407,7 @@
   friend class NetworkPerformanceInterface;
  private:
   NetworkPerformance();
-  NetworkPerformance(const NetworkPerformance& rhs); // no impl
-  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  NONCOPYABLE(NetworkPerformance);
   bool initialize();
   ~NetworkPerformance();
   int network_utilization(NetworkInterface** network_interfaces) const;
--- a/src/hotspot/os/bsd/semaphore_bsd.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/bsd/semaphore_bsd.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -25,6 +25,8 @@
 #ifndef OS_BSD_SEMAPHORE_BSD_HPP
 #define OS_BSD_SEMAPHORE_BSD_HPP
 
+#include "utilities/globalDefinitions.hpp"
+
 #ifndef __APPLE__
 // Use POSIX semaphores.
 # include "semaphore_posix.hpp"
@@ -37,9 +39,7 @@
 class OSXSemaphore : public CHeapObj<mtInternal>{
   semaphore_t _semaphore;
 
-  // Prevent copying and assignment.
-  OSXSemaphore(const OSXSemaphore&);
-  OSXSemaphore& operator=(const OSXSemaphore&);
+  NONCOPYABLE(OSXSemaphore);
 
  public:
   OSXSemaphore(uint value = 0);
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -249,7 +249,7 @@
   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 }
 
-void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
+void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
   const size_t nsegments = pmem.nsegments();
   size_t size = 0;
 
@@ -273,11 +273,6 @@
 
   // NUMA interleave memory before touching it
   ZNUMA::memory_interleave(addr, size);
-
-  // Pre-touch memory
-  if (pretouch) {
-    pretouch_view(addr, size);
-  }
 }
 
 void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
@@ -296,15 +291,27 @@
   return ZAddress::marked0(offset);
 }
 
+void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
+  if (ZVerifyViews) {
+    // Pre-touch good view
+    pretouch_view(ZAddress::good(offset), size);
+  } else {
+    // Pre-touch all views
+    pretouch_view(ZAddress::marked0(offset), size);
+    pretouch_view(ZAddress::marked1(offset), size);
+    pretouch_view(ZAddress::remapped(offset), size);
+  }
+}
+
 void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   if (ZVerifyViews) {
     // Map good view
-    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::good(offset));
   } else {
     // Map all views
-    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
-    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
-    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::marked0(offset));
+    map_view(pmem, ZAddress::marked1(offset));
+    map_view(pmem, ZAddress::remapped(offset));
   }
 }
 
@@ -323,7 +330,7 @@
 void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   // Map good view
   assert(ZVerifyViews, "Should be enabled");
-  map_view(pmem, ZAddress::good(offset), false /* pretouch */);
+  map_view(pmem, ZAddress::good(offset));
 }
 
 void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -43,7 +43,7 @@
 
   void advise_view(uintptr_t addr, size_t size, int advice) const;
   void pretouch_view(uintptr_t addr, size_t size) const;
-  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
+  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
   void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
 
 public:
@@ -60,6 +60,8 @@
 
   uintptr_t nmt_address(uintptr_t offset) const;
 
+  void pretouch(uintptr_t offset, size_t size) const;
+
   void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
   void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
 
--- a/src/hotspot/os/linux/os_linux.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -53,7 +53,6 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -2120,6 +2119,8 @@
 
   os::Posix::print_uname_info(st);
 
+  os::Linux::print_uptime_info(st);
+
   // Print warning if unsafe chroot environment detected
   if (unsafe_chroot_detected) {
     st->print("WARNING!! ");
@@ -2305,6 +2306,15 @@
   st->cr();
 }
 
+void os::Linux::print_uptime_info(outputStream* st) {
+  struct sysinfo sinfo;
+  int ret = sysinfo(&sinfo);
+  if (ret == 0) {
+    os::print_dhm(st, "OS uptime:", (long) sinfo.uptime);
+  }
+}
+
+
 void os::Linux::print_container_info(outputStream* st) {
   if (!OSContainer::is_containerized()) {
     return;
@@ -2752,7 +2762,7 @@
   for (;;) {
     for (int i = 0; i < NSIG + 1; i++) {
       jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
         return i;
       }
     }
@@ -2813,7 +2823,7 @@
   }
 
   char buf[PATH_MAX+1];
-  int num = Atomic::add(1, &cnt);
+  int num = Atomic::add(&cnt, 1);
 
   snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
            os::get_temp_directory(), os::current_process_id(), num);
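
The Linux uptime hook added above relies on sysinfo(2), which reports seconds since boot; os::print_dhm then renders that as days/hours/minutes. A standalone sketch with a local stand-in for the formatting helper:

#include <sys/sysinfo.h>
#include <cstdio>

int main() {
  struct sysinfo si;
  if (sysinfo(&si) == 0) {
    long up = si.uptime; // seconds since boot
    std::printf("OS uptime: %ld days %ld:%02ld hours\n",
                up / 86400, (up % 86400) / 3600, (up % 3600) / 60);
  }
  return 0;
}
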
--- a/src/hotspot/os/linux/os_linux.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/linux/os_linux.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -103,6 +103,7 @@
   static void print_libversion_info(outputStream* st);
   static void print_proc_sys_info(outputStream* st);
   static void print_ld_preload_file(outputStream* st);
+  static void print_uptime_info(outputStream* st);
 
  public:
   struct CPUPerfTicks {
--- a/src/hotspot/os/linux/os_perf_linux.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/linux/os_perf_linux.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -28,6 +28,7 @@
 #include "os_linux.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/os_perf.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 #include CPU_HEADER(vm_version_ext)
 
@@ -948,8 +949,7 @@
   friend class NetworkPerformanceInterface;
  private:
   NetworkPerformance();
-  NetworkPerformance(const NetworkPerformance& rhs); // no impl
-  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  NONCOPYABLE(NetworkPerformance);
   bool initialize();
   ~NetworkPerformance();
   int64_t read_counter(const char* iface, const char* counter) const;
--- a/src/hotspot/os/linux/waitBarrier_linux.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/linux/waitBarrier_linux.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -26,13 +26,12 @@
 #define OS_LINUX_WAITBARRIER_LINUX_HPP
 
 #include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 class LinuxWaitBarrier : public CHeapObj<mtInternal> {
   volatile int _futex_barrier;
 
-  // Prevent copying and assignment of LinuxWaitBarrier instances.
-  LinuxWaitBarrier(const LinuxWaitBarrier&);
-  LinuxWaitBarrier& operator=(const LinuxWaitBarrier&);
+  NONCOPYABLE(LinuxWaitBarrier);
 
  public:
   LinuxWaitBarrier() : _futex_barrier(0) {};
--- a/src/hotspot/os/posix/os_posix.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/posix/os_posix.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -30,6 +30,8 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "services/memTracker.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"
 #include "utilities/events.hpp"
 #include "utilities/formatBuffer.hpp"
@@ -47,6 +49,7 @@
 #include <sys/utsname.h>
 #include <time.h>
 #include <unistd.h>
+#include <utmpx.h>
 
 // Todo: provide a os::get_max_process_id() or similar. Number of processes
 // may have been configured, can be read more accurately from proc fs etc.
@@ -377,6 +380,27 @@
   st->cr();
 }
 
+// Boot/uptime information.
+// Unfortunately this does not work on macOS and Linux, because the utx chain
+// has no entry for reboot, at least on the machines tested.
+void os::Posix::print_uptime_info(outputStream* st) {
+  int bootsec = -1;
+  int currsec = time(NULL);
+  struct utmpx* ent;
+  setutxent();
+  while ((ent = getutxent())) {
+    if (!strcmp("system boot", ent->ut_line)) {
+      bootsec = ent->ut_tv.tv_sec;
+      break;
+    }
+  }
+
+  if (bootsec != -1) {
+    os::print_dhm(st, "OS uptime:", (long) (currsec-bootsec));
+  }
+}
+
+
 void os::Posix::print_rlimit_info(outputStream* st) {
   st->print("rlimit:");
   struct rlimit rlim;
@@ -1900,7 +1924,7 @@
   // atomically decrement _event
   for (;;) {
     v = _event;
-    if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
   }
   guarantee(v >= 0, "invariant");
 
@@ -1940,7 +1964,7 @@
   // atomically decrement _event
   for (;;) {
     v = _event;
-    if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
   }
   guarantee(v >= 0, "invariant");
 
@@ -1998,7 +2022,7 @@
   // but only in the correctly written condition checking loops of ObjectMonitor,
   // Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep
 
-  if (Atomic::xchg(1, &_event) >= 0) return;
+  if (Atomic::xchg(&_event, 1) >= 0) return;
 
   int status = pthread_mutex_lock(_mutex);
   assert_status(status == 0, status, "mutex_lock");
@@ -2046,7 +2070,7 @@
   // Return immediately if a permit is available.
   // We depend on Atomic::xchg() having full barrier semantics
   // since we are doing a lock-free update to _counter.
-  if (Atomic::xchg(0, &_counter) > 0) return;
+  if (Atomic::xchg(&_counter, 0) > 0) return;
 
   Thread* thread = Thread::current();
   assert(thread->is_Java_thread(), "Must be JavaThread");
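
The generic POSIX fallback above derives boot time by scanning the utmpx database for the "system boot" record, which, as its comment concedes, not every system maintains. A standalone sketch of the same walk (with an explicit endutxent(), which the VM code omits):

#include <utmpx.h>
#include <cstring>
#include <ctime>
#include <cstdio>

int main() {
  time_t bootsec = -1;
  struct utmpx* ent;
  setutxent();
  while ((ent = getutxent()) != NULL) {
    if (std::strcmp("system boot", ent->ut_line) == 0) {
      bootsec = ent->ut_tv.tv_sec; // timestamp of the boot record
      break;
    }
  }
  endutxent();
  if (bootsec != -1) {
    std::printf("OS uptime: %ld seconds\n",
                (long)std::difftime(std::time(NULL), bootsec));
  }
  return 0;
}
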
--- a/src/hotspot/os/posix/os_posix.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/posix/os_posix.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -41,6 +41,7 @@
   static void print_uname_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
   static void print_load_average(outputStream* st);
+  static void print_uptime_info(outputStream* st);
 
   // Minimum stack size a thread can be created with (allowing
   // the VM to completely create the thread and enter user code).
@@ -285,10 +286,8 @@
 
 #endif // PLATFORM_MONITOR_IMPL_INDIRECT
 
-private:
-  // Disable copying
-  PlatformMutex(const PlatformMutex&);
-  PlatformMutex& operator=(const PlatformMutex&);
+ private:
+  NONCOPYABLE(PlatformMutex);
 
  public:
   void lock();
@@ -329,9 +328,7 @@
 #endif // PLATFORM_MONITOR_IMPL_INDIRECT
 
  private:
-  // Disable copying
-  PlatformMonitor(const PlatformMonitor&);
-  PlatformMonitor& operator=(const PlatformMonitor&);
+  NONCOPYABLE(PlatformMonitor);
 
  public:
   int wait(jlong millis);
--- a/src/hotspot/os/posix/semaphore_posix.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/posix/semaphore_posix.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -26,15 +26,14 @@
 #define OS_POSIX_SEMAPHORE_POSIX_HPP
 
 #include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 #include <semaphore.h>
 
 class PosixSemaphore : public CHeapObj<mtInternal> {
   sem_t _semaphore;
 
-  // Prevent copying and assignment.
-  PosixSemaphore(const PosixSemaphore&);
-  PosixSemaphore& operator=(const PosixSemaphore&);
+  NONCOPYABLE(PosixSemaphore);
 
  public:
   PosixSemaphore(uint value = 0);
--- a/src/hotspot/os/solaris/os_perf_solaris.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/solaris/os_perf_solaris.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -28,6 +28,7 @@
 #include "runtime/os.hpp"
 #include "runtime/os_perf.hpp"
 #include "os_solaris.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 
 #include CPU_HEADER(vm_version_ext)
@@ -737,8 +738,7 @@
   friend class NetworkPerformanceInterface;
  private:
   NetworkPerformance();
-  NetworkPerformance(const NetworkPerformance& rhs); // no impl
-  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  NONCOPYABLE(NetworkPerformance);
   bool initialize();
   ~NetworkPerformance();
   int network_utilization(NetworkInterface** network_interfaces) const;
--- a/src/hotspot/os/solaris/os_solaris.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -1024,7 +1024,7 @@
   if (now <= prev) {
     return prev;   // same or retrograde time;
   }
-  const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
+  const hrtime_t obsv = Atomic::cmpxchg(&max_hrtime, prev, now);
   assert(obsv >= prev, "invariant");   // Monotonicity
   // If the CAS succeeded then we're done and return "now".
   // If the CAS failed and the observed value "obsv" is >= now then
@@ -1584,6 +1584,8 @@
 
   os::Posix::print_uname_info(st);
 
+  os::Posix::print_uptime_info(st);
+
   os::Solaris::print_libversion_info(st);
 
   os::Posix::print_rlimit_info(st);
@@ -1984,7 +1986,7 @@
   while (true) {
     for (int i = 0; i < Sigexit + 1; i++) {
       jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
         return i;
       }
     }
@@ -4710,7 +4712,7 @@
   int v;
   for (;;) {
     v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v == 0) {
@@ -4748,7 +4750,7 @@
   int v;
   for (;;) {
     v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v != 0) return OS_OK;
@@ -4797,7 +4799,7 @@
   // from the first park() call after an unpark() call which will help
   // shake out uses of park() and unpark() without condition variables.
 
-  if (Atomic::xchg(1, &_Event) >= 0) return;
+  if (Atomic::xchg(&_Event, 1) >= 0) return;
 
   // If the thread associated with the event was parked, wake it.
   // Wait for the thread assoc with the PlatformEvent to vacate.
@@ -4896,7 +4898,7 @@
   // Return immediately if a permit is available.
   // We depend on Atomic::xchg() having full barrier semantics
   // since we are doing a lock-free update to _counter.
-  if (Atomic::xchg(0, &_counter) > 0) return;
+  if (Atomic::xchg(&_counter, 0) > 0) return;
 
   // Optional fast-exit: Check interrupt before trying to wait
   Thread* thread = Thread::current();
--- a/src/hotspot/os/solaris/os_solaris.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/solaris/os_solaris.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -334,9 +334,7 @@
 // Platform specific implementations that underpin VM Mutex/Monitor classes
 
 class PlatformMutex : public CHeapObj<mtSynchronizer> {
-  // Disable copying
-  PlatformMutex(const PlatformMutex&);
-  PlatformMutex& operator=(const PlatformMutex&);
+  NONCOPYABLE(PlatformMutex);
 
  protected:
   mutex_t _mutex; // Native mutex for locking
@@ -352,9 +350,8 @@
 class PlatformMonitor : public PlatformMutex {
  private:
   cond_t  _cond;  // Native condition variable for blocking
-  // Disable copying
-  PlatformMonitor(const PlatformMonitor&);
-  PlatformMonitor& operator=(const PlatformMonitor&);
+
+  NONCOPYABLE(PlatformMonitor);
 
  public:
   PlatformMonitor();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zBackingFile_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBackingFile_windows.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zGranuleMap.inline.hpp"
+#include "gc/z/zMapper_windows.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
+
+// The backing file commits and uncommits physical memory, which can be
+// multi-mapped into the virtual address space. To support fine-grained
+// committing and uncommitting, each ZGranuleSize-sized chunk is mapped to
+// a separate paging file mapping.
+
+ZBackingFile::ZBackingFile() :
+    _handles(MaxHeapSize),
+    _size(0) {}
+
+size_t ZBackingFile::size() const {
+  return _size;
+}
+
+HANDLE ZBackingFile::get_handle(uintptr_t offset) const {
+  HANDLE const handle = _handles.get(offset);
+  assert(handle != 0, "Should be set");
+  return handle;
+}
+
+void ZBackingFile::put_handle(uintptr_t offset, HANDLE handle) {
+  assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
+  assert(_handles.get(offset) == 0, "Should be cleared");
+  _handles.put(offset, handle);
+}
+
+void ZBackingFile::clear_handle(uintptr_t offset) {
+  assert(_handles.get(offset) != 0, "Should be set");
+  _handles.put(offset, 0);
+}
+
+size_t ZBackingFile::commit_from_paging_file(size_t offset, size_t size) {
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
+    if (handle == 0) {
+      return i;
+    }
+
+    put_handle(offset + i, handle);
+  }
+
+  return size;
+}
+
+size_t ZBackingFile::uncommit_from_paging_file(size_t offset, size_t size) {
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    HANDLE const handle = get_handle(offset + i);
+    clear_handle(offset + i);
+    ZMapper::close_paging_file_mapping(handle);
+  }
+
+  return size;
+}
+
+size_t ZBackingFile::commit(size_t offset, size_t length) {
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const size_t committed = commit_from_paging_file(offset, length);
+
+  const size_t end = offset + committed;
+  if (end > _size) {
+    // Update size
+    _size = end;
+  }
+
+  return committed;
+}
+
+size_t ZBackingFile::uncommit(size_t offset, size_t length) {
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  return uncommit_from_paging_file(offset, length);
+}
+
+void ZBackingFile::map(uintptr_t addr, size_t size, size_t offset) const {
+  assert(is_aligned(offset, ZGranuleSize), "Misaligned");
+  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
+  assert(is_aligned(size, ZGranuleSize), "Misaligned");
+
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    HANDLE const handle = get_handle(offset + i);
+    ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
+  }
+}
+
+void ZBackingFile::unmap(uintptr_t addr, size_t size) const {
+  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
+  assert(is_aligned(size, ZGranuleSize), "Misaligned");
+
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
+  }
+}
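
ZBackingFile's bookkeeping above boils down to one paging-file-mapping handle per committed granule, keyed by heap offset, with partial success reported as the number of bytes actually committed. A hedged sketch of that idea, with std::map standing in for ZGranuleMap and int for HANDLE (all names illustrative):

#include <cstddef>
#include <cstdint>
#include <map>

typedef int Handle;                     // stand-in for HANDLE
static const size_t kGranuleSize = 2 * 1024 * 1024;

class GranuleHandles {
  std::map<uintptr_t, Handle> _handles; // offset -> mapping handle

 public:
  // Commit [offset, offset + size) one granule at a time; returns the number
  // of bytes committed, which may be less than size on failure.
  size_t commit(uintptr_t offset, size_t size, Handle (*create_mapping)(size_t)) {
    for (size_t i = 0; i < size; i += kGranuleSize) {
      Handle h = create_mapping(kGranuleSize);
      if (h == 0) {
        return i;                       // partial success
      }
      _handles[offset + i] = h;
    }
    return size;
  }
};
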
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zBackingFile_windows.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
+#define OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
+
+#include "gc/z/zGranuleMap.hpp"
+#include "memory/allocation.hpp"
+
+#include <Windows.h>
+
+class ZBackingFile {
+private:
+  ZGranuleMap<HANDLE> _handles;
+  size_t              _size;
+
+  HANDLE get_handle(uintptr_t offset) const;
+  void put_handle(uintptr_t offset, HANDLE handle);
+  void clear_handle(uintptr_t offset);
+
+  size_t commit_from_paging_file(size_t offset, size_t size);
+  size_t uncommit_from_paging_file(size_t offset, size_t size);
+
+public:
+  ZBackingFile();
+
+  size_t size() const;
+
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
+
+  void map(uintptr_t addr, size_t size, size_t offset) const;
+  void unmap(uintptr_t addr, size_t size) const;
+};
+
+#endif // OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zInitialize_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zInitialize.hpp"
+#include "gc/z/zSyscall_windows.hpp"
+
+void ZInitialize::initialize_os() {
+  ZSyscall::initialize();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zLargePages_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+
+void ZLargePages::initialize_platform() {
+  _state = Disabled;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zMapper_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zMapper_windows.hpp"
+#include "gc/z/zSyscall_windows.hpp"
+#include "logging/log.hpp"
+#include "utilities/debug.hpp"
+
+#include <Windows.h>
+
+// Memory reservation, commit, views, and placeholders.
+//
+// To be able to up-front reserve address space for the heap views, and later
+// multi-map the heap views to the same physical memory, without ever losing the
+// reservation of the reserved address space, we use "placeholders".
+//
+// These placeholders block out the address space from being used by other parts
+// of the process. To commit memory in this address space, the placeholder must
+// be replaced by anonymous memory, or replaced by mapping a view against a
+// paging file mapping. We use the latter to support multi-mapping.
+//
+// We want to be able to dynamically commit and uncommit the physical memory of
+// the heap (and also unmap ZPages), in granules of ZGranuleSize bytes. There is
+// no way to grow and shrink the committed memory of a paging file mapping.
+// Therefore, we create multiple granule-sized paging file mappings. The memory
+// is committed by creating a paging file mapping, mapping a view against it,
+// committing the memory, and unmapping the view. The memory stays committed
+// until all views are unmapped and the paging file mapping handle is closed.
+//
+// When replacing a placeholder address space reservation with a mapped view
+// against a paging file mapping, the virtual address space must exactly match
+// an existing placeholder's address and size. Therefore we only deal with
+// granule-sized placeholders at this layer. Higher layers that keep track of
+// reserved available address space can (and will) coalesce placeholders, but
+// they will be split before being used.
+
+#define fatal_error(msg, addr, size)                  \
+  fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \
+        (addr), (size) / M, GetLastError())
+
+uintptr_t ZMapper::reserve(uintptr_t addr, size_t size) {
+  void* const res = ZSyscall::VirtualAlloc2(
+    GetCurrentProcess(),                   // Process
+    (void*)addr,                           // BaseAddress
+    size,                                  // Size
+    MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType
+    PAGE_NOACCESS,                         // PageProtection
+    NULL,                                  // ExtendedParameters
+    0                                      // ParameterCount
+    );
+
+  // Caller responsible for error handling
+  return (uintptr_t)res;
+}
+
+void ZMapper::unreserve(uintptr_t addr, size_t size) {
+  const bool res = ZSyscall::VirtualFreeEx(
+    GetCurrentProcess(), // hProcess
+    (void*)addr,         // lpAddress
+    size,                // dwSize
+    MEM_RELEASE          // dwFreeType
+    );
+
+  if (!res) {
+    fatal_error("Failed to unreserve memory", addr, size);
+  }
+}
+
+HANDLE ZMapper::create_paging_file_mapping(size_t size) {
+  // Create mapping with SEC_RESERVE instead of SEC_COMMIT.
+  //
+  // We use MapViewOfFile3 for two different reasons:
+  //  1) When committing memory for the created paging file
+  //  2) When mapping a view of the memory created in (1)
+  //
+  // The non-platform code is only set up to deal with out-of-memory
+  // errors in (1). By using SEC_RESERVE, we prevent MapViewOfFile3
+  // from failing because of "commit limit" checks. To actually commit
+  // memory in (1), a call to VirtualAlloc2 is done.
+
+  HANDLE const res = ZSyscall::CreateFileMappingW(
+    INVALID_HANDLE_VALUE,         // hFile
+    NULL,                         // lpFileMappingAttribute
+    PAGE_READWRITE | SEC_RESERVE, // flProtect
+    size >> 32,                   // dwMaximumSizeHigh
+    size & 0xFFFFFFFF,            // dwMaximumSizeLow
+    NULL                          // lpName
+    );
+
+  // Caller responsible for error handling
+  return res;
+}
+
+bool ZMapper::commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size) {
+  const uintptr_t addr = map_view_no_placeholder(file_handle, file_offset, size);
+  if (addr == 0) {
+    log_error(gc)("Failed to map view of paging file mapping (%d)", GetLastError());
+    return false;
+  }
+
+  const uintptr_t res = commit(addr, size);
+  if (res != addr) {
+    log_error(gc)("Failed to commit memory (%d)", GetLastError());
+  }
+
+  unmap_view_no_placeholder(addr, size);
+
+  return res == addr;
+}
+
+uintptr_t ZMapper::map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size) {
+  void* const res = ZSyscall::MapViewOfFile3(
+    file_handle,         // FileMapping
+    GetCurrentProcess(), // ProcessHandle
+    NULL,                // BaseAddress
+    file_offset,         // Offset
+    size,                // ViewSize
+    0,                   // AllocationType
+    PAGE_NOACCESS,       // PageProtection
+    NULL,                // ExtendedParameters
+    0                    // ParameterCount
+    );
+
+  // Caller responsible for error handling
+  return (uintptr_t)res;
+}
+
+void ZMapper::unmap_view_no_placeholder(uintptr_t addr, size_t size) {
+  const bool res = ZSyscall::UnmapViewOfFile2(
+    GetCurrentProcess(), // ProcessHandle
+    (void*)addr,         // BaseAddress
+    0                    // UnmapFlags
+    );
+
+  if (!res) {
+    fatal_error("Failed to unmap memory", addr, size);
+  }
+}
+
+uintptr_t ZMapper::commit(uintptr_t addr, size_t size) {
+  void* const res = ZSyscall::VirtualAlloc2(
+    GetCurrentProcess(), // Process
+    (void*)addr,         // BaseAddress
+    size,                // Size
+    MEM_COMMIT,          // AllocationType
+    PAGE_NOACCESS,       // PageProtection
+    NULL,                // ExtendedParameters
+    0                    // ParameterCount
+    );
+
+  // Caller responsible for error handling
+  return (uintptr_t)res;
+}
+
+HANDLE ZMapper::create_and_commit_paging_file_mapping(size_t size) {
+  HANDLE const file_handle = create_paging_file_mapping(size);
+  if (file_handle == 0) {
+    log_error(gc)("Failed to create paging file mapping (%d)", GetLastError());
+    return 0;
+  }
+
+  const bool res = commit_paging_file_mapping(file_handle, 0 /* file_offset */, size);
+  if (!res) {
+    close_paging_file_mapping(file_handle);
+    return 0;
+  }
+
+  return file_handle;
+}
+
+void ZMapper::close_paging_file_mapping(HANDLE file_handle) {
+  const bool res = CloseHandle(
+    file_handle // hObject
+    );
+
+  if (!res) {
+    fatal("Failed to close paging file handle (%d)", GetLastError());
+  }
+}
+
+void ZMapper::split_placeholder(uintptr_t addr, size_t size) {
+  const bool res = VirtualFree(
+    (void*)addr,                           // lpAddress
+    size,                                  // dwSize
+    MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType
+    );
+
+  if (!res) {
+    fatal_error("Failed to split placeholder", addr, size);
+  }
+}
+
+void ZMapper::coalesce_placeholders(uintptr_t addr, size_t size) {
+  const bool res = VirtualFree(
+    (void*)addr,                            // lpAddress
+    size,                                   // dwSize
+    MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType
+    );
+
+  if (!res) {
+    fatal_error("Failed to coalesce placeholders", addr, size);
+  }
+}
+
+void ZMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) {
+  void* const res = ZSyscall::MapViewOfFile3(
+    file_handle,             // FileMapping
+    GetCurrentProcess(),     // ProcessHandle
+    (void*)addr,             // BaseAddress
+    file_offset,             // Offset
+    size,                    // ViewSize
+    MEM_REPLACE_PLACEHOLDER, // AllocationType
+    PAGE_READWRITE,          // PageProtection
+    NULL,                    // ExtendedParameters
+    0                        // ParameterCount
+    );
+
+  if (res == NULL) {
+    fatal_error("Failed to map memory", addr, size);
+  }
+}
+
+void ZMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) {
+  const bool res = ZSyscall::UnmapViewOfFile2(
+    GetCurrentProcess(),     // ProcessHandle
+    (void*)addr,             // BaseAddress
+    MEM_PRESERVE_PLACEHOLDER // UnmapFlags
+    );
+
+  if (!res) {
+    fatal_error("Failed to unmap memory", addr, size);
+  }
+}
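
The placeholder lifecycle described in the block comment above can be sketched as a direct sequence of the documented Windows 10+ calls. The real code resolves these symbols through ZSyscall at runtime and adds error handling; this is an illustration only:

#include <Windows.h>

static void placeholder_roundtrip(HANDLE mapping, SIZE_T size, SIZE_T granule) {
  // 1) Reserve address space as a placeholder.
  void* base = VirtualAlloc2(GetCurrentProcess(), NULL, size,
                             MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                             PAGE_NOACCESS, NULL, 0);

  // 2) Split one granule off the front of the placeholder.
  VirtualFree(base, granule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);

  // 3) Replace that granule's placeholder with a view of the paging file mapping.
  void* view = MapViewOfFile3(mapping, GetCurrentProcess(), base, 0, granule,
                              MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, NULL, 0);

  // 4) Unmap the view; the range reverts to a placeholder reservation.
  UnmapViewOfFile2(GetCurrentProcess(), view, MEM_PRESERVE_PLACEHOLDER);
}
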
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zMapper_windows.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP
+#define OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#include <Windows.h>
+
+class ZMapper : public AllStatic {
+private:
+  // Create paging file mapping
+  static HANDLE create_paging_file_mapping(size_t size);
+
+  // Commit paging file mapping
+  static bool commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size);
+
+  // Map a view anywhere without a placeholder
+  static uintptr_t map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size);
+
+  // Unmap a view without preserving a placeholder
+  static void unmap_view_no_placeholder(uintptr_t addr, size_t size);
+
+  // Commit memory covering the given virtual address range
+  static uintptr_t commit(uintptr_t addr, size_t size);
+
+public:
+  // Reserve memory with a placeholder
+  static uintptr_t reserve(uintptr_t addr, size_t size);
+
+  // Unreserve memory
+  static void unreserve(uintptr_t addr, size_t size);
+
+  // Create and commit paging file mapping
+  static HANDLE create_and_commit_paging_file_mapping(size_t size);
+
+  // Close paging file mapping
+  static void close_paging_file_mapping(HANDLE file_handle);
+
+  // Split a placeholder
+  //
+  // A view can only replace an entire placeholder, so placeholders need to be
+  // split and coalesced to be the exact size of the new views.
+  // [addr, addr + size) needs to be a proper sub-placeholder of an existing
+  // placeholder.
+  static void split_placeholder(uintptr_t addr, size_t size);
+
+  // Coalesce a placeholder
+  //
+  // [addr, addr + size) is the new placeholder. A sub-placeholder needs to
+  // exist within that range.
+  static void coalesce_placeholders(uintptr_t addr, size_t size);
+
+  // Map a view of the file handle and replace the placeholder covering the
+  // given virtual address range
+  static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size);
+
+  // Unmap the view and reinstate a placeholder covering the given virtual
+  // address range
+  static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size);
+};
+
+#endif // OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zNUMA.hpp"
+
+void ZNUMA::initialize_platform() {
+  _enabled = false;
+}
+
+uint32_t ZNUMA::count() {
+  return 1;
+}
+
+uint32_t ZNUMA::id() {
+  return 0;
+}
+
+uint32_t ZNUMA::memory_id(uintptr_t addr) {
+  // NUMA support not enabled, assume everything belongs to node zero
+  return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zMapper_windows.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_windows.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return true;
+}
+
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+  // Does nothing
+}
+
+bool ZPhysicalMemoryBacking::supports_uncommit() {
+  assert(!is_init_completed(), "Invalid state");
+  assert(_file.size() >= ZGranuleSize, "Invalid size");
+
+  // Test if uncommit is supported by uncommitting and then re-committing a granule
+  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
+}
+
+size_t ZPhysicalMemoryBacking::commit(size_t size) {
+  size_t committed = 0;
+
+  // Fill holes in the backing file
+  while (committed < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - committed;
+    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
+    if (start == UINTPTR_MAX) {
+      // No holes to commit
+      break;
+    }
+
+    // Try commit hole
+    const size_t filled = _file.commit(start, allocated);
+    if (filled > 0) {
+      // Successful or partially successful
+      _committed.free(start, filled);
+      committed += filled;
+    }
+    if (filled < allocated) {
+      // Failed or partially failed
+      _uncommitted.free(start + filled, allocated - filled);
+      return committed;
+    }
+  }
+
+  // Expand backing file
+  if (committed < size) {
+    const size_t remaining = size - committed;
+    const uintptr_t start = _file.size();
+    const size_t expanded = _file.commit(start, remaining);
+    if (expanded > 0) {
+      // Successful or partially successful
+      _committed.free(start, expanded);
+      committed += expanded;
+    }
+  }
+
+  return committed;
+}
+
+size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
+  size_t uncommitted = 0;
+
+  // Punch holes in the backing file
+  while (uncommitted < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - uncommitted;
+    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+
+    // Try punch hole
+    const size_t punched = _file.uncommit(start, allocated);
+    if (punched > 0) {
+      // Successful or partially successful
+      _uncommitted.free(start, punched);
+      uncommitted += punched;
+    }
+    if (punched < allocated) {
+      // Failed or partially failed
+      _committed.free(start + punched, allocated - punched);
+      return uncommitted;
+    }
+  }
+
+  return uncommitted;
+}
+
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Invalid size");
+
+  ZPhysicalMemory pmem;
+
+  // Allocate segments
+  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
+    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
+  }
+
+  return pmem;
+}
+
+void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
+  const size_t nsegments = pmem.nsegments();
+
+  // Free segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    _committed.free(segment.start(), segment.size());
+  }
+}
+
+void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
+  const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
+  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+  const size_t nsegments = pmem.nsegments();
+  size_t size = 0;
+
+  // Map segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    _file.map(addr + size, segment.size(), segment.start());
+    size += segment.size();
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+  _file.unmap(addr, pmem.size());
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // From an NMT point of view we treat the first heap view (marked0) as committed
+  return ZAddress::marked0(offset);
+}
+
+void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
+  if (ZVerifyViews) {
+    // Pre-touch good view
+    pretouch_view(ZAddress::good(offset), size);
+  } else {
+    // Pre-touch all views
+    pretouch_view(ZAddress::marked0(offset), size);
+    pretouch_view(ZAddress::marked1(offset), size);
+    pretouch_view(ZAddress::remapped(offset), size);
+  }
+}
+
+void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  if (ZVerifyViews) {
+    // Map good view
+    map_view(pmem, ZAddress::good(offset));
+  } else {
+    // Map all views
+    map_view(pmem, ZAddress::marked0(offset));
+    map_view(pmem, ZAddress::marked1(offset));
+    map_view(pmem, ZAddress::remapped(offset));
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  if (ZVerifyViews) {
+    // Unmap good view
+    unmap_view(pmem, ZAddress::good(offset));
+  } else {
+    // Unmap all views
+    unmap_view(pmem, ZAddress::marked0(offset));
+    unmap_view(pmem, ZAddress::marked1(offset));
+    unmap_view(pmem, ZAddress::remapped(offset));
+  }
+}
+
+void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  // Map good view
+  assert(ZVerifyViews, "Should be enabled");
+  map_view(pmem, ZAddress::good(offset));
+}
+
+void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  // Unmap good view
+  assert(ZVerifyViews, "Should be enabled");
+  unmap_view(pmem, ZAddress::good(offset));
+}
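The commit and uncommit loops above are driven by two interval managers: _uncommitted tracks holes in the backing file and _committed tracks backed ranges, with ownership of each byte range moving between the two. Below is a minimal stand-in for the alloc_from_front_at_most contract they rely on (hypothetical code, assuming ZMemoryManager behaves like an ordered free list; the real class lives in gc/z/zMemory.hpp).

    #include <stddef.h>
    #include <stdint.h>
    #include <map>

    // Toy free-range manager: keys are range starts, values are sizes.
    struct ToyMemoryManager {
      std::map<uintptr_t, size_t> _free;

      // Take up to max_size bytes from the lowest free range; returns its
      // start, or UINTPTR_MAX when no range is available.
      uintptr_t alloc_from_front_at_most(size_t max_size, size_t* allocated) {
        if (_free.empty()) {
          return UINTPTR_MAX;
        }
        const uintptr_t start = _free.begin()->first;
        const size_t range = _free.begin()->second;
        _free.erase(_free.begin());
        *allocated = (range < max_size) ? range : max_size;
        if (*allocated < range) {
          _free[start + *allocated] = range - *allocated;  // keep the tail free
        }
        return start;
      }

      void free(uintptr_t start, size_t size) {
        _free[start] = size;  // the real manager also coalesces neighbors
      }
    };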
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
+#define OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
+
+#include "gc/z/zBackingFile_windows.hpp"
+#include "gc/z/zMemory.hpp"
+
+class ZPhysicalMemory;
+
+class ZPhysicalMemoryBacking {
+private:
+  ZBackingFile   _file;
+  ZMemoryManager _committed;
+  ZMemoryManager _uncommitted;
+
+  void pretouch_view(uintptr_t addr, size_t size) const;
+  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+
+public:
+  bool is_initialized() const;
+
+  void warn_commit_limits(size_t max) const;
+  bool supports_uncommit();
+
+  size_t commit(size_t size);
+  size_t uncommit(size_t size);
+
+  ZPhysicalMemory alloc(size_t size);
+  void free(const ZPhysicalMemory& pmem);
+
+  uintptr_t nmt_address(uintptr_t offset) const;
+
+  void pretouch(uintptr_t offset, size_t size) const;
+
+  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+
+  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+};
+
+#endif // OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zSyscall_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zSyscall_windows.hpp"
+#include "logging/log.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+
+ZSyscall::CreateFileMappingWFn ZSyscall::CreateFileMappingW;
+ZSyscall::VirtualAlloc2Fn ZSyscall::VirtualAlloc2;
+ZSyscall::VirtualFreeExFn ZSyscall::VirtualFreeEx;
+ZSyscall::MapViewOfFile3Fn ZSyscall::MapViewOfFile3;
+ZSyscall::UnmapViewOfFile2Fn ZSyscall::UnmapViewOfFile2;
+
+template <typename Fn>
+static void lookup_symbol(Fn*& fn, const char* library, const char* symbol) {
+  char ebuf[1024];
+  void* const handle = os::dll_load(library, ebuf, sizeof(ebuf));
+  if (handle == NULL) {
+    log_error(gc)("Failed to load library: %s", library);
+    vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
+  }
+
+  fn = reinterpret_cast<Fn*>(os::dll_lookup(handle, symbol));
+  if (fn == NULL) {
+    log_error(gc)("Failed to lookup symbol: %s", symbol);
+    vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
+  }
+}
+
+void ZSyscall::initialize() {
+  lookup_symbol(CreateFileMappingW, "KernelBase", "CreateFileMappingW");
+  lookup_symbol(VirtualAlloc2,      "KernelBase", "VirtualAlloc2");
+  lookup_symbol(VirtualFreeEx,      "KernelBase", "VirtualFreeEx");
+  lookup_symbol(MapViewOfFile3,     "KernelBase", "MapViewOfFile3");
+  lookup_symbol(UnmapViewOfFile2,   "KernelBase", "UnmapViewOfFile2");
+}
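These APIs only exist on Windows 10 version 1803 and later, so they are resolved at runtime rather than linked statically; on older systems the VM can then fail with a clear message instead of refusing to load. Once initialize() has run, a call through the table looks just like a direct call. A hypothetical example (M is HotSpot's megabyte constant from globalDefinitions.hpp):

    // Reserve a 4 MB placeholder through the dynamically resolved entry point
    void* const addr = ZSyscall::VirtualAlloc2(
        GetCurrentProcess(), NULL, 4 * M,
        MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, NULL, 0);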
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zSyscall_windows.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_WINDOWS_GC_Z_ZSYSCALL_WINDOWS_HPP
+#define OS_WINDOWS_GC_Z_ZSYSCALL_WINDOWS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+#include <Windows.h>
+#include <Memoryapi.h>
+
+class ZSyscall {
+private:
+  typedef HANDLE (*CreateFileMappingWFn)(HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR);
+  typedef PVOID (*VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
+  typedef BOOL (*VirtualFreeExFn)(HANDLE, LPVOID, SIZE_T, DWORD);
+  typedef PVOID (*MapViewOfFile3Fn)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
+  typedef BOOL (*UnmapViewOfFile2Fn)(HANDLE, PVOID, ULONG);
+
+public:
+  static CreateFileMappingWFn CreateFileMappingW;
+  static VirtualAlloc2Fn      VirtualAlloc2;
+  static VirtualFreeExFn      VirtualFreeEx;
+  static MapViewOfFile3Fn     MapViewOfFile3;
+  static UnmapViewOfFile2Fn   UnmapViewOfFile2;
+
+  static void initialize();
+};
+
+#endif // OS_WINDOWS_GC_Z_ZSYSCALL_WINDOWS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zUtils_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zUtils.hpp"
+#include "utilities/debug.hpp"
+
+#include <malloc.h>
+
+uintptr_t ZUtils::alloc_aligned(size_t alignment, size_t size) {
+  void* const res = _aligned_malloc(size, alignment);
+
+  if (res == NULL) {
+    fatal("_aligned_malloc failed");
+  }
+
+  memset(res, 0, size);
+
+  return (uintptr_t)res;
+}
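One caveat worth noting (an assumption about call sites, not shown in the patch): memory obtained from _aligned_malloc must be released with _aligned_free, not free. A hypothetical counterpart to the allocator above:

    // Hypothetical helper; not part of the patch
    void free_aligned(uintptr_t addr) {
      _aligned_free((void*)addr);
    }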
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zMapper_windows.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+static void split_placeholder(uintptr_t start, size_t size) {
+  ZMapper::split_placeholder(ZAddress::marked0(start), size);
+  ZMapper::split_placeholder(ZAddress::marked1(start), size);
+  ZMapper::split_placeholder(ZAddress::remapped(start), size);
+}
+
+static void coalesce_placeholders(uintptr_t start, size_t size) {
+  ZMapper::coalesce_placeholders(ZAddress::marked0(start), size);
+  ZMapper::coalesce_placeholders(ZAddress::marked1(start), size);
+  ZMapper::coalesce_placeholders(ZAddress::remapped(start), size);
+}
+
+static void split_into_placeholder_granules(uintptr_t start, size_t size) {
+  for (uintptr_t addr = start; addr < start + size; addr += ZGranuleSize) {
+    split_placeholder(addr, ZGranuleSize);
+  }
+}
+
+static void coalesce_into_one_placeholder(uintptr_t start, size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+
+  if (size > ZGranuleSize) {
+    coalesce_placeholders(start, size);
+  }
+}
+
+static void create_callback(const ZMemory* area) {
+  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+  coalesce_into_one_placeholder(area->start(), area->size());
+}
+
+static void destroy_callback(const ZMemory* area) {
+  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+  // Don't try to split the last granule - VirtualFree will fail
+  split_into_placeholder_granules(area->start(), area->size() - ZGranuleSize);
+}
+
+static void shrink_from_front_callback(const ZMemory* area, size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+  split_into_placeholder_granules(area->start(), size);
+}
+
+static void shrink_from_back_callback(const ZMemory* area, size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+  // Don't try to split the last granule - VirtualFree will fail
+  split_into_placeholder_granules(area->end() - size, size - ZGranuleSize);
+}
+
+static void grow_from_front_callback(const ZMemory* area, size_t size) {
+  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+  coalesce_into_one_placeholder(area->start() - size, area->size() + size);
+}
+
+static void grow_from_back_callback(const ZMemory* area, size_t size) {
+  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+  coalesce_into_one_placeholder(area->start(), area->size() + size);
+}
+
+void ZVirtualMemoryManager::initialize_os() {
+  // Each reserved virtual memory address area registered in _manager is
+  // exactly covered by a single placeholder. Callbacks are installed so
+  // that whenever a memory area changes, the corresponding placeholder
+  // is adjusted.
+  //
+  // The create and grow callbacks are called when virtual memory is
+  // returned to the memory manager. The new memory area is then covered
+  // by a new single placeholder.
+  //
+  // The destroy and shrink callbacks are called when virtual memory is
+  // allocated from the memory manager. The memory area is then split
+  // into granule-sized placeholders.
+  //
+  // See comment in zMapper_windows.cpp explaining why placeholders are
+  // split into ZGranuleSize-sized placeholders.
+
+  ZMemoryManager::Callbacks callbacks;
+
+  callbacks._create = &create_callback;
+  callbacks._destroy = &destroy_callback;
+  callbacks._shrink_from_front = &shrink_from_front_callback;
+  callbacks._shrink_from_back = &shrink_from_back_callback;
+  callbacks._grow_from_front = &grow_from_front_callback;
+  callbacks._grow_from_back = &grow_from_back_callback;
+
+  _manager.register_callbacks(callbacks);
+}
+
+bool ZVirtualMemoryManager::reserve_contiguous_platform(uintptr_t start, size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+
+  // Reserve address views
+  const uintptr_t marked0 = ZAddress::marked0(start);
+  const uintptr_t marked1 = ZAddress::marked1(start);
+  const uintptr_t remapped = ZAddress::remapped(start);
+
+  // Reserve address space
+  if (ZMapper::reserve(marked0, size) != marked0) {
+    return false;
+  }
+
+  if (ZMapper::reserve(marked1, size) != marked1) {
+    ZMapper::unreserve(marked0, size);
+    return false;
+  }
+
+  if (ZMapper::reserve(remapped, size) != remapped) {
+    ZMapper::unreserve(marked0, size);
+    ZMapper::unreserve(marked1, size);
+    return false;
+  }
+
+  // Register address views with native memory tracker
+  nmt_reserve(marked0, size);
+  nmt_reserve(marked1, size);
+  nmt_reserve(remapped, size);
+
+  return true;
+}
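A worked example of the callback scheme (a sketch, not part of the patch): when a whole 3-granule area is allocated, destroy_callback runs split_into_placeholder_granules(start, 2 * ZGranuleSize), carving granule placeholders off the front. After the first two are carved off, the remainder is already exactly one granule, which is why the callbacks stop one granule early (per the comments above, VirtualFree fails on that final split). The loop arithmetic can be checked in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const uintptr_t G = 2 * 1024 * 1024;  // ZGranuleSize is 2 MB (assumption)
      const uintptr_t start = 0;
      const uintptr_t size = 3 * G;
      // Mirrors split_into_placeholder_granules(start, size - G):
      for (uintptr_t addr = start; addr < start + size - G; addr += G) {
        printf("split_placeholder at [0x%llx, 0x%llx)\n",
               (unsigned long long)addr, (unsigned long long)(addr + G));
      }
      return 0;  // prints two splits; [2G, 3G) stays a single placeholder
    }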
--- a/src/hotspot/os/windows/osThread_windows.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/windows/osThread_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -23,7 +23,6 @@
  */
 
 // no precompiled headers
-#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/osThread.hpp"
 
--- a/src/hotspot/os/windows/os_perf_windows.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/windows/os_perf_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -30,6 +30,7 @@
 #include "pdh_interface.hpp"
 #include "runtime/os_perf.hpp"
 #include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include CPU_HEADER(vm_version_ext)
 #include <math.h>
@@ -1355,8 +1356,7 @@
   bool _iphlp_attached;
 
   NetworkPerformance();
-  NetworkPerformance(const NetworkPerformance& rhs); // no impl
-  NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
+  NONCOPYABLE(NetworkPerformance);
   bool initialize();
   ~NetworkPerformance();
   int network_utilization(NetworkInterface** network_interfaces) const;
--- a/src/hotspot/os/windows/os_windows.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/windows/os_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -1591,6 +1591,11 @@
   os::print_os_info(st);
 }
 
+void os::win32::print_uptime_info(outputStream* st) {
+  unsigned long long ticks = GetTickCount64();
+  os::print_dhm(st, "OS uptime:", ticks/1000);
+}
+
 void os::print_os_info(outputStream* st) {
 #ifdef ASSERT
   char buffer[1024];
@@ -1604,6 +1609,8 @@
   st->print("OS:");
   os::win32::print_windows_version(st);
 
+  os::win32::print_uptime_info(st);
+
 #ifdef _LP64
   VM_Version::print_platform_virtualization_info(st);
 #endif
@@ -2096,7 +2103,7 @@
   while (true) {
     for (int i = 0; i < NSIG + 1; i++) {
       jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
         return i;
       }
     }
@@ -3747,15 +3754,15 @@
     // The first thread that reached this point, initializes the critical section.
     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
-    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
+    } else if (Atomic::load_acquire(&process_exiting) == 0) {
       if (what != EPT_THREAD) {
         // Atomically set process_exiting before the critical section
         // to increase the visibility between racing threads.
-        Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
+        Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
       }
       EnterCriticalSection(&crit_sect);
 
-      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
+      if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
         // Remove from the array those handles of the threads that have completed exiting.
         for (i = 0, j = 0; i < handle_count; ++i) {
           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
@@ -3868,7 +3875,7 @@
     }
 
     if (!registered &&
-        OrderAccess::load_acquire(&process_exiting) != 0 &&
+        Atomic::load_acquire(&process_exiting) != 0 &&
         process_exiting != GetCurrentThreadId()) {
       // Some other thread is about to call exit(), so we don't let
       // the current unregistered thread proceed to exit() or _endthreadex()
@@ -5136,7 +5143,7 @@
   int v;
   for (;;) {
     v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
   }
   guarantee((v == 0) || (v == 1), "invariant");
   if (v != 0) return OS_OK;
@@ -5198,7 +5205,7 @@
   int v;
   for (;;) {
     v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
   }
   guarantee((v == 0) || (v == 1), "invariant");
   if (v != 0) return;
@@ -5236,7 +5243,7 @@
   // from the first park() call after an unpark() call which will help
   // shake out uses of park() and unpark() without condition variables.
 
-  if (Atomic::xchg(1, &_Event) >= 0) return;
+  if (Atomic::xchg(&_Event, 1) >= 0) return;
 
   ::SetEvent(_ParkHandle);
 }
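The Atomic call-site rewrites in this file follow a single rule that recurs throughout the rest of the changeset: the destination pointer moves to the first parameter, giving Atomic::cmpxchg(dest, compare_value, exchange_value) and Atomic::xchg(dest, exchange_value). A before/after sketch with a hypothetical flag:

    volatile int flag = 0;

    // Old order: cmpxchg(exchange_value, dest, compare_value)
    //   int witness = Atomic::cmpxchg(1, &flag, 0);
    // New order: cmpxchg(dest, compare_value, exchange_value), which reads
    // left to right as "at dest, if the value equals 0, store 1"
    int witness = Atomic::cmpxchg(&flag, 0, 1);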
--- a/src/hotspot/os/windows/os_windows.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/windows/os_windows.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -55,6 +55,7 @@
   static bool   _has_exit_bug;
 
   static void print_windows_version(outputStream* st);
+  static void print_uptime_info(outputStream* st);
 
  public:
   // Windows-specific interface:
@@ -190,9 +191,7 @@
 // Platform specific implementations that underpin VM Mutex/Monitor classes
 
 class PlatformMutex : public CHeapObj<mtSynchronizer> {
-  // Disable copying
-  PlatformMutex(const PlatformMutex&);
-  PlatformMutex& operator=(const PlatformMutex&);
+  NONCOPYABLE(PlatformMutex);
 
  protected:
   CRITICAL_SECTION   _mutex; // Native mutex for locking
@@ -208,9 +207,7 @@
 class PlatformMonitor : public PlatformMutex {
  private:
   CONDITION_VARIABLE _cond;  // Native condition variable for blocking
-  // Disable copying
-  PlatformMonitor(const PlatformMonitor&);
-  PlatformMonitor& operator=(const PlatformMonitor&);
+  NONCOPYABLE(PlatformMonitor);
 
  public:
   PlatformMonitor();
--- a/src/hotspot/os/windows/semaphore_windows.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/windows/semaphore_windows.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -26,15 +26,14 @@
 #define OS_WINDOWS_SEMAPHORE_WINDOWS_HPP
 
 #include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 #include <windows.h>
 
 class WindowsSemaphore : public CHeapObj<mtInternal> {
   HANDLE _semaphore;
 
-  // Prevent copying and assignment.
-  WindowsSemaphore(const WindowsSemaphore&);
-  WindowsSemaphore& operator=(const WindowsSemaphore&);
+  NONCOPYABLE(WindowsSemaphore);
 
  public:
   WindowsSemaphore(uint value = 0);
--- a/src/hotspot/os/windows/threadCritical_windows.cpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os/windows/threadCritical_windows.cpp	Thu Dec 05 18:06:35 2019 -0800
@@ -56,7 +56,7 @@
 
   if (lock_owner != current_thread) {
     // Grab the lock before doing anything.
-    while (Atomic::cmpxchg(0, &lock_count, -1) != -1) {
+    while (Atomic::cmpxchg(&lock_count, -1, 0) != -1) {
       if (initialized) {
         DWORD ret = WaitForSingleObject(lock_event,  INFINITE);
         assert(ret == WAIT_OBJECT_0, "unexpected return value from WaitForSingleObject");
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -30,6 +30,7 @@
 #error "Atomic currently only implemented for PPC64"
 #endif
 
+#include "orderAccess_aix_ppc.hpp"
 #include "utilities/debug.hpp"
 
 // Implementation of class atomic
@@ -95,13 +96,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -126,8 +127,8 @@
 
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
@@ -152,8 +153,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
@@ -191,8 +192,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
@@ -231,9 +232,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
 
@@ -301,9 +302,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 
@@ -351,9 +352,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
 
@@ -399,4 +400,15 @@
   return old_value;
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE> {
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    T t = Atomic::load(p);
+    // Use twi-isync for load_acquire (faster than lwsync).
+    __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
+    return t;
+  }
+};
+
 #endif // OS_CPU_AIX_PPC_ATOMIC_AIX_PPC_HPP
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -64,8 +64,6 @@
 #define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
 #define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
 #define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
 
 inline void OrderAccess::loadload()   { inlasm_lwsync(); }
 inline void OrderAccess::storestore() { inlasm_lwsync(); }
@@ -78,13 +76,6 @@
 inline void OrderAccess::cross_modify_fence()
                                       { inlasm_isync();  }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -31,13 +31,13 @@
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -51,8 +51,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
@@ -64,9 +64,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
@@ -78,9 +78,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgl %1,(%3)"
@@ -92,8 +92,8 @@
 
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
@@ -107,8 +107,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
@@ -120,9 +120,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
@@ -142,12 +142,12 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
 }
 
 template<>
@@ -161,12 +161,62 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // AMD64
 
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+#ifdef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+#endif // AMD64
+
 #endif // OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Thu Dec 05 18:06:35 2019 -0800
@@ -633,9 +633,9 @@
         ret
 
 
-        # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
+        # Support for int64_t Atomic::cmpxchg(int64_t compare_value,
         #                                     volatile int64_t* dest,
-        #                                     int64_t compare_value)
+        #                                     int64_t exchange_value)
         #
         .p2align 4,,15
         ELF_TYPE(_Atomic_cmpxchg_long,@function)
@@ -665,4 +665,3 @@
         movl     8(%esp), %eax   # dest
         fistpll   (%eax)
         ret
-
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -64,54 +64,4 @@
   __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
 }
 
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgb (%2),%0"
-                      : "=q" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgw (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgl (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-#ifdef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgq (%2), %0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-#endif // AMD64
-
 #endif // OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -163,22 +163,22 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
 #ifdef ARM
-  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
+  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
 #else
 #ifdef M68K
-  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
+  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
@@ -186,8 +186,8 @@
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
@@ -197,15 +197,15 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
+  return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
 #else
 #ifdef M68K
-  return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
+  return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
 #else
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
   // operation.  Note that some platforms only support this with the
@@ -224,8 +224,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T result = __sync_lock_test_and_set (dest, exchange_value);
@@ -239,16 +239,16 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
 #else
 #ifdef M68K
-  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
 #else
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 #endif // M68K
@@ -257,9 +257,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -276,8 +276,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -32,16 +32,12 @@
 // Note that memory_order_conservative requires a full barrier after atomic stores.
 // See https://patchwork.kernel.org/patch/3575821/
 
-#define FULL_MEM_BARRIER  __sync_synchronize()
-#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
     FULL_MEM_BARRIER;
     return res;
@@ -50,8 +46,8 @@
 
 template<size_t byte_size>
 template<typename T>
-inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
-                                                     T volatile* dest,
+inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
+                                                     T exchange_value,
                                                      atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
   T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
@@ -61,9 +57,9 @@
 
 template<size_t byte_size>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
-                                                        T volatile* dest,
+inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                         T compare_value,
+                                                        T exchange_value,
                                                         atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
   if (order == memory_order_relaxed) {
@@ -81,4 +77,25 @@
   }
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
+};
+
 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
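With the ordered-access specializations now living in Atomic rather than OrderAccess (and with the store parameter order flipped to (p, v), matching the new dest-first convention), call sites use Atomic::load_acquire and Atomic::release_store, as the os_windows.cpp hunk earlier in this changeset already does. A hypothetical pair of call sites:

    volatile uint32_t ready = 0;

    Atomic::release_store(&ready, 1u);           // publish with release semantics
    const uint32_t seen = Atomic::load_acquire(&ready);  // read with acquire semantics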
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -37,6 +37,10 @@
 inline void OrderAccess::loadstore()  { acquire(); }
 inline void OrderAccess::storeload()  { fence(); }
 
+#define FULL_MEM_BARRIER  __sync_synchronize()
+#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
+
 inline void OrderAccess::acquire() {
   READ_MEM_BARRIER;
 }
@@ -51,25 +55,4 @@
 
 inline void OrderAccess::cross_modify_fence() { }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
-};
-
 #endif // OS_CPU_LINUX_AARCH64_ORDERACCESS_LINUX_AARCH64_HPP
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -54,8 +54,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
     PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
@@ -70,27 +70,27 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
-  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }
 
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
-  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
+  return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
 }
 
 
@@ -119,22 +119,22 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
-  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, dest, compare_value, exchange_value);
 }
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, dest, compare_value, exchange_value);
 }
 
 #endif // OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
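
[Editor's note] Every hunk in this file applies the same mechanical reordering: dest moves to the first parameter, and for cmpxchg the order becomes (dest, compare_value, exchange_value), matching the shape of C++11 compare-and-exchange. A minimal sketch of the new calling convention using the GCC builtin (cmpxchg here is an illustrative stand-in, not the HotSpot helper):

#include <cstdint>

// New-style signature: destination first, then compare, then exchange,
// mirroring the reordered Atomic::PlatformCmpxchg operator().
template <typename T>
inline T cmpxchg(T volatile* dest, T compare_value, T exchange_value) {
  T expected = compare_value;
  // Returns the value observed at *dest; equals compare_value on success.
  __atomic_compare_exchange_n(const_cast<T*>(dest), &expected, exchange_value,
                              /*weak=*/false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;
}

int main() {
  volatile int32_t v = 5;
  int32_t old = cmpxchg(&v, int32_t(5), int32_t(7));  // old == 5, v == 7
  return (old == 5 && v == 7) ? 0 : 1;
}

The helper-based paths are updated the same way: cmpxchg_using_helper, xchg_using_helper, and add_using_helper all now take dest first, so the argument order stays consistent from the public Atomic API down to the os::atomic_*_func entry points.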
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -30,6 +30,7 @@
 #error "Atomic currently only implemented for PPC64"
 #endif
 
+#include "orderAccess_linux_ppc.hpp"
 #include "utilities/debug.hpp"
 
 // Implementation of class atomic
@@ -95,13 +96,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -126,8 +127,8 @@
 
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
@@ -152,8 +153,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
@@ -191,8 +192,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
@@ -231,9 +232,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
 
@@ -301,9 +302,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 
@@ -351,9 +352,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
 
@@ -399,4 +400,16 @@
   return old_value;
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    T t = Atomic::load(p);
+    // Use twi-isync for load_acquire (faster than lwsync).
+    __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
+    return t;
+  }
+};
+
 #endif // OS_CPU_LINUX_PPC_ATOMIC_LINUX_PPC_HPP
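
[Editor's note] The load-acquire specialization moves from orderAccess_linux_ppc.hpp into this file, keeping the twi/isync idiom: twi 0,reg,0 never traps but creates an artificial dependency on the loaded value, and the following isync waits for all preceding instructions and discards speculatively executed later loads, which together are cheaper than an lwsync on this hardware. A standalone sketch of the same shape, falling back to a generic acquire load off PPC64 (names illustrative, not the HotSpot code):

#include <cstdint>

template <typename T>
inline T load_acquire(const volatile T* p) {
#if defined(__PPC64__)
  T t = *p;
  // twi 0,%0,0 depends on t without ever trapping; isync then acts as the
  // acquire barrier, bound to this particular load by that dependency.
  __asm__ __volatile__("twi 0,%0,0\n isync\n" : : "r"(t) : "memory");
  return t;
#else
  // Generic fallback for other targets.
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);
#endif
}

int main() {
  volatile int64_t ready = 1;
  return load_acquire(&ready) == 1 ? 0 : 1;
}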
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -68,8 +68,6 @@
 #define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
 #define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
 #define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
 
 inline void   OrderAccess::loadload()   { inlasm_lwsync(); }
 inline void   OrderAccess::storestore() { inlasm_lwsync(); }
@@ -82,17 +80,9 @@
 inline void   OrderAccess::cross_modify_fence()
                                         { inlasm_isync();  }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
 #undef inlasm_isync
-#undef inlasm_acquire_reg
 
 #endif // OS_CPU_LINUX_PPC_ORDERACCESS_LINUX_PPC_HPP
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -78,13 +78,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -137,8 +137,8 @@
 
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
@@ -208,8 +208,8 @@
 // replacement succeeded.
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order unused) const {
   STATIC_ASSERT(4 == sizeof(T));
   T old;
@@ -232,8 +232,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order unused) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old;
@@ -289,9 +289,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T cmp_val,
+                                                T xchg_val,
                                                 atomic_memory_order unused) const {
   STATIC_ASSERT(4 == sizeof(T));
   T old;
@@ -313,9 +313,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T cmp_val,
+                                                T xchg_val,
                                                 atomic_memory_order unused) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old;
@@ -335,4 +335,11 @@
   return old;
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; }
+};
+
 #endif // OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP
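
[Editor's note] The s390 load-acquire specialization likewise moves here from the orderAccess header. z/Architecture is strongly ordered, so the acquire following the plain load mainly needs to restrain the compiler. A sketch of that shape, assuming OrderAccess::acquire() reduces to a compiler-only barrier on this platform (names illustrative):

#include <cstdint>

// No instruction is emitted; this only forbids compiler reordering.
inline void compiler_barrier() {
  __asm__ __volatile__("" : : : "memory");
}

template <typename T>
inline T load_then_acquire(const volatile T* p) {
  T t = *p;            // the hardware already orders this load on s390
  compiler_barrier();  // keep the compiler from hoisting later accesses
  return t;
}

int main() {
  volatile int32_t x = 42;
  return load_then_acquire(&x) == 42 ? 0 : 1;
}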
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Mon Nov 25 14:29:46 2019 +0000
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Thu Dec 05 18:06:35 2019 -0800
@@ -76,13 +76,6 @@
 inline void OrderAccess::fence()      { inlasm_zarch_sync(); }
 inline void OrderAccess::cross_modify_fence() { inlasm_zarch_sync(); }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T t = *p; inlasm_zarch_acquire(); return t; }
-};
-
 #undef inlasm_compiler_barrier
 #undef inlasm_zarch_sync