changeset 57031:b83adf4bd4ee

Merge
author psadhukhan
date Wed, 20 Nov 2019 10:52:28 +0530
parents a56b7a304bac c4be5e03aff7
children 10385df5d986
files src/hotspot/share/gc/cms/adaptiveFreeList.cpp src/hotspot/share/gc/cms/adaptiveFreeList.hpp src/hotspot/share/gc/cms/allocationStats.cpp src/hotspot/share/gc/cms/allocationStats.hpp src/hotspot/share/gc/cms/cmsArguments.cpp src/hotspot/share/gc/cms/cmsArguments.hpp src/hotspot/share/gc/cms/cmsCardTable.cpp src/hotspot/share/gc/cms/cmsCardTable.hpp src/hotspot/share/gc/cms/cmsGCStats.cpp src/hotspot/share/gc/cms/cmsGCStats.hpp src/hotspot/share/gc/cms/cmsHeap.cpp src/hotspot/share/gc/cms/cmsHeap.hpp src/hotspot/share/gc/cms/cmsHeap.inline.hpp src/hotspot/share/gc/cms/cmsLockVerifier.cpp src/hotspot/share/gc/cms/cmsLockVerifier.hpp src/hotspot/share/gc/cms/cmsOopClosures.hpp src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp src/hotspot/share/gc/cms/cmsVMOperations.cpp src/hotspot/share/gc/cms/cmsVMOperations.hpp src/hotspot/share/gc/cms/cms_globals.hpp src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp src/hotspot/share/gc/cms/compactibleFreeListSpace.inline.hpp src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp src/hotspot/share/gc/cms/concurrentMarkSweepThread.hpp src/hotspot/share/gc/cms/freeChunk.cpp src/hotspot/share/gc/cms/freeChunk.hpp src/hotspot/share/gc/cms/gSpaceCounters.cpp src/hotspot/share/gc/cms/gSpaceCounters.hpp src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.hpp src/hotspot/share/gc/cms/parNewGeneration.cpp src/hotspot/share/gc/cms/parNewGeneration.hpp src/hotspot/share/gc/cms/parNewGeneration.inline.hpp src/hotspot/share/gc/cms/parOopClosures.hpp src/hotspot/share/gc/cms/parOopClosures.inline.hpp src/hotspot/share/gc/cms/promotionInfo.cpp src/hotspot/share/gc/cms/promotionInfo.hpp src/hotspot/share/gc/cms/promotionInfo.inline.hpp src/hotspot/share/gc/cms/vmStructs_cms.hpp src/hotspot/share/gc/cms/yieldingWorkgroup.cpp src/hotspot/share/gc/cms/yieldingWorkgroup.hpp src/hotspot/share/libadt/set.cpp src/hotspot/share/libadt/set.hpp src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/AdaptiveFreeList.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSBitMap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSCollector.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CompactibleFreeListSpace.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ConcurrentMarkSweepGeneration.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/LinearAllocBlock.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ParNewGeneration.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/PrintRegionClosure.java test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMS.java test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMSCondMark.java test/hotspot/jtreg/gc/TestMemoryInitializationWithCMS.java test/hotspot/jtreg/gc/arguments/TestCMSHeapSizeFlags.java test/hotspot/jtreg/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java test/hotspot/jtreg/gc/cms/DisableResizePLAB.java test/hotspot/jtreg/gc/cms/GuardShrinkWarning.java test/hotspot/jtreg/gc/cms/TestBubbleUpRef.java test/hotspot/jtreg/gc/cms/TestCMSScavengeBeforeRemark.java test/hotspot/jtreg/gc/cms/TestCriticalPriority.java test/hotspot/jtreg/gc/cms/TestMBeanCMS.java 
test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlCMS.java test/hotspot/jtreg/gc/metaspace/TestMetaspaceCMSCancel.java test/hotspot/jtreg/gc/startup_warnings/TestCMS.java test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithCMS.java test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithCMS.java test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithCMS.java test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithCMS.java test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithParNew.java test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithCMS.java test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithParNew.java test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithCMS.java test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorGCCMSTest.java test/jdk/ProblemList.txt test/jdk/java/lang/management/MemoryMXBean/MemoryManagementConcMarkSweepGC.sh test/jdk/java/util/Arrays/ParallelSorting.java test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithParNew.java test/jdk/jdk/jfr/event/gc/collection/TestYoungGarbageCollectionEventWithParNew.java test/jdk/jdk/jfr/event/gc/configuration/TestGCHeapConfigurationEventWith32BitOops.sh test/jdk/jdk/jfr/event/gc/configuration/TestGCHeapConfigurationEventWithHeapBasedOops.sh test/jdk/jdk/jfr/event/gc/configuration/TestGCHeapConfigurationEventWithZeroBasedOops.sh test/jdk/jdk/jfr/event/gc/detailed/TestCMSConcurrentModeFailureEvent.java test/jdk/jdk/jfr/event/gc/detailed/TestPromotionFailedEventWithParNew.java test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventConcurrentCMS.java test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventParNewCMS.java test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/stacktrace/TestConcMarkSweepAllocationPendingStackTrace.java test/jdk/jdk/jfr/event/gc/stacktrace/TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace.java test/jdk/jdk/jfr/event/gc/stacktrace/TestParNewAllocationPendingStackTrace.java test/jdk/jdk/jfr/event/io/MakeJAR.sh test/jdk/jdk/jfr/event/oldobject/TestCMS.java test/jdk/jdk/jfr/event/os/TestInitialEnvironmentVariable.sh test/jdk/jdk/jfr/event/runtime/TestVMInfoEvent.flags test/jdk/jdk/jfr/event/runtime/TestVMInfoEvent.sh test/jdk/jdk/jfr/javaagent/JavaAgentBuilder.java test/jdk/jdk/modules/etc/src/TestJson.java test/jdk/jdk/modules/etc/src/TestRootModules.java test/jdk/sun/security/tools/keytool/DeprecateKeyalg.java test/langtools/tools/javac/diags/examples/RestrictedTypeNotAllowedPreview.java
diffstat 1097 files changed, 21809 insertions(+), 43146 deletions(-)
--- a/.hgtags	Wed Nov 20 09:28:31 2019 +0900
+++ b/.hgtags	Wed Nov 20 10:52:28 2019 +0530
@@ -594,3 +594,5 @@
 9b67dd88a9313e982ec5f710a7747161bc8f0c23 jdk-14+19
 54ffb15c48399dd59922ee22bb592d815307e77c jdk-14+20
 c16ac7a2eba4e73cb4f7ee9294dd647860eebff0 jdk-14+21
+83810b7d12e7ff761ad3dd91f323a22dad96f108 jdk-14+22
+15936b142f86731afa4b1a2c0fe4a01e806c4944 jdk-14+23
--- a/doc/testing.html	Wed Nov 20 09:28:31 2019 +0900
+++ b/doc/testing.html	Wed Nov 20 10:52:28 2019 +0530
@@ -43,6 +43,7 @@
 <li><a href="#docker-tests">Docker Tests</a></li>
 <li><a href="#non-us-locale">Non-US locale</a></li>
 <li><a href="#pkcs11-tests">PKCS11 Tests</a></li>
+<li><a href="#client-ui-tests">Client UI Tests</a></li>
 </ul></li>
 </ul>
 </nav>
@@ -207,5 +208,15 @@
 <p>It is highly recommended to use the latest NSS version when running PKCS11 tests. Improper NSS version may lead to unexpected failures which are hard to diagnose. For example, sun/security/pkcs11/Secmod/AddTrustedCert.java may fail on Ubuntu 18.04 with the default NSS version in the system. To run these tests correctly, the system property <code>test.nss.lib.paths</code> is required on Ubuntu 18.04 to specify the alternative NSS lib directories. For example:</p>
 <pre><code>$ make test TEST=&quot;jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java&quot; JTREG=&quot;JAVA_OPTIONS=-Dtest.nss.lib.paths=/path/to/your/latest/NSS-libs&quot;</code></pre>
 <p>For more notes about the PKCS11 tests, please refer to test/jdk/sun/security/pkcs11/README.</p>
+<h3 id="client-ui-tests">Client UI Tests</h3>
+<p>Some Client UI tests use key sequences which may be reserved by the operating system. When a reserved key sequence is used, the test usually fails. It is therefore highly recommended to disable system key shortcuts prior to testing. The steps to find and disable system key shortcuts on the various platforms are provided below.</p>
+<h4 id="macos">MacOS</h4>
+<p>Choose Apple menu, then System Preferences; click Keyboard, then Shortcuts; select or deselect the desired shortcut.</p>
+<p>For example, test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java fails on macOS because it uses the <code>CTRL + F1</code> key sequence to show or hide a tooltip message, but that key combination is reserved by the operating system. To run the test correctly, disable the default global key shortcut using the steps described above: deselect the "Turn keyboard access on or off" option, which is responsible for the <code>CTRL + F1</code> combination.</p>
+<h4 id="linux">Linux</h4>
+<p>Open the Activities overview and start typing Settings; choose Settings, click Devices, then click Keyboard; set or override the desired shortcut.</p>
+<h4 id="windows">Windows</h4>
+<p>Type <code>gpedit</code> in the Search box and then click Edit group policy; navigate to User Configuration -&gt; Administrative Templates -&gt; Windows Components -&gt; File Explorer; in the right-hand pane, look for "Turn off Windows key hotkeys" and double-click it; enable or disable hotkeys.</p>
+<p>Note: a restart is required for the settings to take effect.</p>
 </body>
 </html>
--- a/doc/testing.md	Wed Nov 20 09:28:31 2019 +0900
+++ b/doc/testing.md	Wed Nov 20 10:52:28 2019 +0530
@@ -421,6 +421,35 @@
 
 For more notes about the PKCS11 tests, please refer to test/jdk/sun/security/pkcs11/README.
 
+### Client UI Tests
+
+Some Client UI tests use key sequences which may be reserved by the operating
+system. When a reserved key sequence is used, the test usually fails. It is
+therefore highly recommended to disable system key shortcuts prior to testing. The
+steps to find and disable system key shortcuts on the various platforms are provided below.
+
+#### macOS
+Choose Apple menu, then System Preferences; click Keyboard, then Shortcuts;
+select or deselect the desired shortcut.
+
+For example, test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java fails
+on macOS because it uses the `CTRL + F1` key sequence to show or hide a tooltip message,
+but that key combination is reserved by the operating system. To run the test correctly,
+disable the default global key shortcut using the steps described above: deselect the
+"Turn keyboard access on or off" option, which is responsible for the `CTRL + F1` combination.
+
+#### Linux
+Open the Activities overview and start typing Settings; choose Settings, click Devices,
+then click Keyboard; set or override the desired shortcut.
+
+#### Windows
+Type `gpedit` in the Search box and then click Edit group policy; navigate to
+User Configuration -> Administrative Templates -> Windows Components -> File Explorer;
+in the right-hand pane, look for "Turn off Windows key hotkeys" and double-click it;
+enable or disable hotkeys.
+
+Note: a restart is required for the settings to take effect.
+
 ---
 # Override some definitions in the global css file that are not optimal for
 # this document.
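
For illustration, such a test can be run on its own with the same selector
syntax as the PKCS11 example earlier in this document (a sketch; the test path
is the one quoted above, relative to test/jdk):

    $ make test TEST="jtreg:javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java"

If the system shortcut is still enabled, the key presses are intercepted by the
OS and the test fails as described.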
--- a/make/RunTests.gmk	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/RunTests.gmk	Wed Nov 20 10:52:28 2019 +0530
@@ -247,11 +247,29 @@
       CORES_DIVIDER := 4
     endif
   endif
+  # For some big multi-core machines with a low ulimit -u setting we hit the
+  # max threads/process limit. In such a setup the memory/cores-only-guided
+  # TEST_JOBS config is insufficient. From experience, a concurrency setting of
+  # 14 works reasonably well for low ulimit values (<= 4096). Thus, use the
+  # divider 4096/14. For high ulimit -u values this shouldn't make a difference.
+  ULIMIT_DIVIDER := (4096/14)
+  PROC_ULIMIT := -1
+  ifneq ($(OPENJDK_TARGET_OS), windows)
+    PROC_ULIMIT := $(shell $(ULIMIT) -u)
+    ifeq ($(PROC_ULIMIT), unlimited)
+      PROC_ULIMIT := -1
+    endif
+  endif
   MEMORY_DIVIDER := 2048
   TEST_JOBS := $(shell $(AWK) \
     'BEGIN { \
       c = $(NUM_CORES) / $(CORES_DIVIDER); \
       m = $(MEMORY_SIZE) / $(MEMORY_DIVIDER); \
+      u = $(PROC_ULIMIT); \
+      if (u > -1) { \
+        u = u / $(ULIMIT_DIVIDER); \
+        if (u < c) c = u; \
+      } \
       if (c > m) c = m; \
       c = c * $(TEST_JOBS_FACTOR); \
       c = c * $(TEST_JOBS_FACTOR_JDL); \
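
The effect of the new cap can be checked by evaluating the awk program above by
hand with hypothetical values (NUM_CORES=56, CORES_DIVIDER=2, MEMORY_SIZE=65536,
MEMORY_DIVIDER=2048, ulimit -u 4096, both factors 1):

    $ awk 'BEGIN { c=56/2; m=65536/2048; u=4096/(4096/14); if (u<c) c=u; if (c>m) c=m; print c }'
    14

Without the ulimit term the same machine would get 28 jobs; with ulimit -u at
4096, concurrency is clamped to 14, matching the comment above.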
--- a/make/RunTestsPrebuiltSpec.gmk	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/RunTestsPrebuiltSpec.gmk	Wed Nov 20 10:52:28 2019 +0530
@@ -175,6 +175,7 @@
 EXPR := expr
 FILE := file
 HG := hg
+ULIMIT := ulimit
 
 # On Solaris gnu versions of some tools are required.
 ifeq ($(OPENJDK_BUILD_OS), solaris)
--- a/make/autoconf/basics.m4	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/autoconf/basics.m4	Wed Nov 20 10:52:28 2019 +0530
@@ -574,6 +574,26 @@
 ])
 
 ###############################################################################
+# Like BASIC_REQUIRE_PROGS but also allows for bash built-ins
+# $1: variable to set
+# $2: executable name (or list of names) to look for
+# $3: [path]
+AC_DEFUN([BASIC_REQUIRE_BUILTIN_PROGS],
+[
+  BASIC_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)])
+  if test "x[$]$1" = x; then
+    AC_MSG_NOTICE([Required tool $2 not found in PATH, checking built-in])
+    if help $2 > /dev/null 2>&1; then
+      AC_MSG_NOTICE([Found $2 as shell built-in. Using it])
+      $1="$2"
+    else
+      AC_MSG_ERROR([Required tool $2 also not found as built-in.])
+    fi
+  fi
+  BASIC_CHECK_NONEMPTY($1)
+])
+
+###############################################################################
 # Setup the most fundamental tools that relies on not much else to set up,
 # but is used by much of the early bootstrap code.
 AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
@@ -1284,6 +1304,9 @@
   elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
     BASIC_REQUIRE_PROGS(ELFEDIT, elfedit)
   fi
+  if ! test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+    BASIC_REQUIRE_BUILTIN_PROGS(ULIMIT, ulimit)
+  fi
 ])
 
 ###############################################################################
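
The fallback probe in BASIC_REQUIRE_BUILTIN_PROGS can be reproduced in an
interactive bash session; ulimit typically has no standalone executable, but
help finds it as a built-in (bash output shown; other shells may differ):

    $ type -t ulimit
    builtin
    $ help ulimit > /dev/null 2>&1 && echo "usable as shell built-in"
    usable as shell built-in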
--- a/make/autoconf/hotspot.m4	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/autoconf/hotspot.m4	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,11 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
+    graal vm-structs jni-check services management epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
-DEPRECATED_JVM_FEATURES="trace"
+DEPRECATED_JVM_FEATURES="trace cmsgc"
 
 # All valid JVM variants
 VALID_JVM_VARIANTS="server client minimal core zero custom"
@@ -326,10 +326,6 @@
     AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
   fi
 
-  if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
-    AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
-  fi
-
   # Enable JFR by default, except for Zero, linux-sparcv9 and on minimal.
   if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
     if test "x$OPENJDK_TARGET_OS" != xaix; then
@@ -351,7 +347,8 @@
   # Only enable ZGC on supported platforms
   AC_MSG_CHECKING([if zgc can be built])
   if (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
-     (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64"); then
+     (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64") ||
+     (test "x$OPENJDK_TARGET_OS" = "xmacosx" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"); then
     AC_MSG_RESULT([yes])
   else
     DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
@@ -490,7 +487,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
 
   # Disable CDS on AIX.
   if test "x$OPENJDK_TARGET_OS" = "xaix"; then
--- a/make/autoconf/spec.gmk.in	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/autoconf/spec.gmk.in	Wed Nov 20 10:52:28 2019 +0530
@@ -767,6 +767,7 @@
 XCODEBUILD=@XCODEBUILD@
 DTRACE := @DTRACE@
 FIXPATH:=@FIXPATH@
+ULIMIT:=@ULIMIT@
 
 TAR_TYPE:=@TAR_TYPE@
 TAR_CREATE_EXTRA_PARAM:=@TAR_CREATE_EXTRA_PARAM@
--- a/make/autoconf/toolchain.m4	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/autoconf/toolchain.m4	Wed Nov 20 10:52:28 2019 +0530
@@ -481,7 +481,7 @@
     COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \
         $SED -e 's/ *Copyright .*//'`
     COMPILER_VERSION_NUMBER=`$ECHO $COMPILER_VERSION_OUTPUT | \
-        $SED -e 's/^.* \(@<:@1-9@:>@\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
+        $SED -e 's/^.* \(@<:@1-9@:>@<:@0-9@:>@*\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
   elif test  "x$TOOLCHAIN_TYPE" = xclang; then
     # clang --version output typically looks like
     #    Apple LLVM version 5.0 (clang-500.2.79) (based on LLVM 3.3svn)
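
After translating the autoconf quadrigraphs (@<:@ is [ and @:>@ is ]), the fix
widens the major-version pattern from [1-9]\. to [1-9][0-9]*\., so two-digit
major versions such as gcc 10 are extracted. A quick check against a
hypothetical gcc 10 banner:

    $ echo "gcc (GCC) 10.2.1 20210110" | sed -e 's/^.* \([1-9][0-9]*\.[0-9.]*\)[^0-9.].*$/\1/'
    10.2.1

With the old single-digit pattern the substitution finds no match and the whole
banner would be echoed back unchanged.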
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/cacerts/luxtrustglobalroot2ca	Wed Nov 20 10:52:28 2019 +0530
@@ -0,0 +1,40 @@
+Owner: CN=LuxTrust Global Root 2, O=LuxTrust S.A., C=LU
+Issuer: CN=LuxTrust Global Root 2, O=LuxTrust S.A., C=LU
+Serial number: a7ea6df4b449eda6a24859ee6b815d3167fbbb1
+Valid from: Thu Mar 05 13:21:57 GMT 2015 until: Mon Mar 05 13:21:57 GMT 2035
+Signature algorithm name: SHA256withRSA
+Subject Public Key Algorithm: 4096-bit RSA key
+Version: 3
+-----BEGIN CERTIFICATE-----
+MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
+BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
+BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
+MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
+LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
+ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
+hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
+EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
+Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
+zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
+96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
+j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
+DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
+8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
+X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
+hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
+KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
+Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
+BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
+BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
+jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
+loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
+qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
+2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
+JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
+zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
+LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
+x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
+oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
+-----END CERTIFICATE-----
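
The plain-text header above duplicates fields of the embedded certificate.
Since PEM readers skip leading text, the new file can be sanity-checked
directly (illustrative openssl invocation; output formatting varies by
version):

    $ openssl x509 -in make/data/cacerts/luxtrustglobalroot2ca -noout -subject -enddate
    subject=C = LU, O = LuxTrust S.A., CN = LuxTrust Global Root 2
    notAfter=Mar  5 13:21:57 2035 GMT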
--- a/make/hotspot/lib/CompileJvm.gmk	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/hotspot/lib/CompileJvm.gmk	Wed Nov 20 10:52:28 2019 +0530
@@ -57,7 +57,7 @@
 JVM_EXCLUDE_FILES += args.cc
 JVM_EXCLUDES += adlc
 
-# Needed by vm_version.cpp
+# Needed by abstract_vm_version.cpp
 ifeq ($(call isTargetCpu, x86_64), true)
   OPENJDK_TARGET_CPU_VM_VERSION := amd64
 else ifeq ($(call isTargetCpu, sparcv9), true)
@@ -183,7 +183,7 @@
     EXCLUDE_PATTERNS := $(JVM_EXCLUDE_PATTERNS), \
     EXTRA_OBJECT_FILES := $(DTRACE_EXTRA_OBJECT_FILES), \
     CFLAGS := $(JVM_CFLAGS), \
-    vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
+    abstract_vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc), \
     DISABLED_WARNINGS_clang := $(DISABLED_WARNINGS_clang), \
@@ -206,11 +206,11 @@
     DEFINE_THIS_FILE := false, \
 ))
 
-# Always recompile vm_version.cpp if libjvm needs to be relinked. This ensures
+# Always recompile abstract_vm_version.cpp if libjvm needs to be relinked. This ensures
 # that the internal vm version is updated as it relies on __DATE__ and __TIME__
 # macros.
-VM_VERSION_OBJ := $(JVM_OUTPUTDIR)/objs/vm_version$(OBJ_SUFFIX)
-$(VM_VERSION_OBJ): $(filter-out $(VM_VERSION_OBJ) $(JVM_MAPFILE), \
+ABSTRACT_VM_VERSION_OBJ := $(JVM_OUTPUTDIR)/objs/abstract_vm_version$(OBJ_SUFFIX)
+$(ABSTRACT_VM_VERSION_OBJ): $(filter-out $(ABSTRACT_VM_VERSION_OBJ) $(JVM_MAPFILE), \
     $(BUILD_LIBJVM_TARGET_DEPS))
 
 ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
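
The internal version string that motivates this rule embeds the build date and
time, which is why the renamed abstract_vm_version object must be refreshed on
every relink. It can be inspected from a built JDK (illustrative output):

    $ ./build/*/jdk/bin/java -Xinternalversion
    OpenJDK 64-Bit Server VM (14-internal+0-adhoc...) for linux-amd64 JRE (14-internal...), built on Nov 20 2019 ...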
--- a/make/hotspot/lib/JvmDtraceObjects.gmk	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/hotspot/lib/JvmDtraceObjects.gmk	Wed Nov 20 10:52:28 2019 +0530
@@ -79,12 +79,6 @@
         vmThread.o \
     )
 
-    ifeq ($(call check-jvm-feature, cmsgc), true)
-      DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
-          cmsVMOperations.o \
-      )
-    endif
-
     ifeq ($(call check-jvm-feature, parallelgc), true)
       DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
           psVMOperations.o \
--- a/make/hotspot/lib/JvmFeatures.gmk	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/hotspot/lib/JvmFeatures.gmk	Wed Nov 20 10:52:28 2019 +0530
@@ -138,11 +138,6 @@
       aotLoader.cpp compiledIC_aot.cpp
 endif
 
-ifneq ($(call check-jvm-feature, cmsgc), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
-  JVM_EXCLUDE_PATTERNS += gc/cms
-endif
-
 ifneq ($(call check-jvm-feature, g1gc), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
   JVM_EXCLUDE_PATTERNS += gc/g1
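
The deleted block followed the same per-feature pattern still visible for g1gc
below it: a feature reported disabled by check-jvm-feature has its INCLUDE_*
define forced to 0 and its source subtree excluded from the build. Which
features are disabled is decided at configure time, e.g. (illustrative, using a
still-supported collector):

    $ bash configure --with-jvm-features=-shenandoahgc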
--- a/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,6 @@
  */
 
 #pragma weak tty
-#pragma weak CMSExpAvgFactor
 
 #if defined(i386) || defined(__i386) || defined(__amd64)
 #pragma weak noreg
--- a/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Wed Nov 20 10:52:28 2019 +0530
@@ -391,26 +391,23 @@
     }
 
     private void handleMultipleInheritance(Map<String, Object> map, Map<String, Object> parents, String key) {
-        String formatKey = key + "/format";
-        Object format = map.get(formatKey);
+        String formatMapKey = key + "/format";
+        Object format = map.get(formatMapKey);
         if (format != null) {
-            map.remove(formatKey);
+            map.remove(formatMapKey);
             map.put(key, format);
-            if (fillInElements(parents, formatKey, format)) {
+            if (fillInElements(parents, formatMapKey, format)) {
                 map.remove(key);
             }
         }
-        String standaloneKey = key + "/stand-alone";
-        Object standalone = map.get(standaloneKey);
+        String standaloneMapKey = key + "/stand-alone";
+        Object standalone = map.get(standaloneMapKey);
         if (standalone != null) {
-            map.remove(standaloneKey);
-            String realKey = key;
-            if (format != null) {
-                realKey = "standalone." + key;
-            }
-            map.put(realKey, standalone);
-            if (fillInElements(parents, standaloneKey, standalone)) {
-                map.remove(realKey);
+            map.remove(standaloneMapKey);
+            String standaloneResourceKey = "standalone." + key;
+            map.put(standaloneResourceKey, standalone);
+            if (fillInElements(parents, standaloneMapKey, standalone)) {
+                map.remove(standaloneResourceKey);
             }
         }
     }
--- a/make/lib/Awt2dLibraries.gmk	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/lib/Awt2dLibraries.gmk	Wed Nov 20 10:52:28 2019 +0530
@@ -383,7 +383,7 @@
         libawt/java2d, \
     HEADERS_FROM_SRC := $(LIBLCMS_HEADERS_FROM_SRC), \
     DISABLED_WARNINGS_gcc := format-nonliteral type-limits \
-        misleading-indentation undef unused-function, \
+        misleading-indentation undef unused-function stringop-truncation, \
     DISABLED_WARNINGS_clang := tautological-compare format-nonliteral undef, \
     DISABLED_WARNINGS_solstudio := E_STATEMENT_NOT_REACHED, \
     DISABLED_WARNINGS_microsoft := 4819, \
--- a/make/lib/Lib-jdk.hotspot.agent.gmk	Wed Nov 20 09:28:31 2019 +0900
+++ b/make/lib/Lib-jdk.hotspot.agent.gmk	Wed Nov 20 10:52:28 2019 +0530
@@ -55,7 +55,7 @@
 
 SA_TOOLCHAIN := $(TOOLCHAIN_DEFAULT)
 ifeq ($(call isTargetOs, linux), true)
-  SA_TOOLCHAIN := TOOLCHAIN_BUILD_LINK_CXX
+  SA_TOOLCHAIN := TOOLCHAIN_LINK_CXX
 endif
 
 ################################################################################
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Wed Nov 20 10:52:28 2019 +0530
@@ -1192,9 +1192,6 @@
   // predicate controlling translation of CompareAndSwapX
   bool needs_acquiring_load_exclusive(const Node *load);
 
-  // predicate controlling translation of StoreCM
-  bool unnecessary_storestore(const Node *storecm);
-
   // predicate controlling addressing modes
   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 %}
@@ -1583,29 +1580,6 @@
   return true;
 }
 
-// predicate controlling translation of StoreCM
-//
-// returns true if a StoreStore must precede the card write otherwise
-// false
-
-bool unnecessary_storestore(const Node *storecm)
-{
-  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
-
-  // we need to generate a dmb ishst between an object put and the
-  // associated card mark when we are using CMS without conditional
-  // card marking
-
-  if (UseConcMarkSweepGC && !UseCondCardMark) {
-    return false;
-  }
-
-  // a storestore is unnecesary in all other cases
-
-  return true;
-}
-
-
 #define __ _masm.
 
 // advance declarations for helper functions to convert register
@@ -7220,7 +7194,6 @@
 instruct storeimmCM0(immI0 zero, memory mem)
 %{
   match(Set mem (StoreCM mem zero));
-  predicate(unnecessary_storestore(n));
 
   ins_cost(INSN_COST);
   format %{ "storestore (elided)\n\t"
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1978,6 +1978,9 @@
       case T_ADDRESS:
         imm = opr2->as_constant_ptr()->as_jint();
         break;
+      case T_METADATA:
+        imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
+        break;
       case T_OBJECT:
       case T_ARRAY:
         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -840,6 +840,7 @@
           __ sub(arr_size, arr_size, t1);  // body length
           __ add(t1, t1, obj);       // body start
           __ initialize_body(t1, arr_size, 0, t2);
+          __ membar(Assembler::StoreStore);
           __ verify_oop(obj);
 
           __ ret(lr);
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -22,6 +22,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 #include "gc/shenandoah/shenandoahForwarding.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
@@ -253,32 +254,16 @@
     dst = rscratch1;
   }
 
-  RegSet to_save_r1 = RegSet::of(r1);
-  // If outgoing register is r1, we can clobber it
-  if (result_dst != r1) {
-    __ push(to_save_r1, sp);
-  }
+  // Save r0 and r1, unless it is an output register
+  RegSet to_save = RegSet::of(r0, r1) - result_dst;
+  __ push(to_save, sp);
   __ lea(r1, load_addr);
-
-  RegSet to_save_r0 = RegSet::of(r0);
-  if (dst != r0) {
-    __ push(to_save_r0, sp);
-    __ mov(r0, dst);
-  }
+  __ mov(r0, dst);
 
   __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));
 
-  if (result_dst != r0) {
-    __ mov(result_dst, r0);
-  }
-
-  if (dst != r0) {
-    __ pop(to_save_r0, sp);
-  }
-
-  if (result_dst != r1) {
-    __ pop(to_save_r1, sp);
-  }
+  __ mov(result_dst, r0);
+  __ pop(to_save, sp);
 
   __ bind(done);
   __ leave();
@@ -346,29 +331,42 @@
   }
 }
 
+//
+// Arguments:
+//
+// Inputs:
+//   src:        oop location to load from, might be clobbered
+//
+// Output:
+//   dst:        oop loaded from src location
+//
+// Kill:
+//   rscratch1 (scratch reg)
+//
+// Alias:
+//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
+//
 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Register dst, Address src, Register tmp1, Register tmp_thread) {
-  bool on_oop = is_reference_type(type);
-  bool not_in_heap = (decorators & IN_NATIVE) != 0;
-  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
-  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
-  bool on_reference = on_weak || on_phantom;
-  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
-  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
+  // 1: non-reference load, no additional barrier is needed
+  if (!is_reference_type(type)) {
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+    return;
+  }
 
-  Register result_dst = dst;
+  // 2: load a reference from src location and apply LRB if needed
+  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
+    Register result_dst = dst;
 
-  if (on_oop) {
-    // We want to preserve src
+    // Preserve src location for LRB
     if (dst == src.base() || dst == src.index()) {
       dst = rscratch1;
     }
     assert_different_registers(dst, src.base(), src.index());
-  }
 
-  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
-  if (on_oop) {
-    if (not_in_heap && !is_traversal_mode) {
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+
+    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
       load_reference_barrier_native(masm, dst, src);
     } else {
       load_reference_barrier(masm, dst, src);
@@ -378,18 +376,21 @@
       __ mov(result_dst, dst);
       dst = result_dst;
     }
+  } else {
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+  }
 
-    if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
-      __ enter();
-      satb_write_barrier_pre(masm /* masm */,
-                             noreg /* obj */,
-                             dst /* pre_val */,
-                             rthread /* thread */,
-                             tmp1 /* tmp */,
-                             true /* tosca_live */,
-                             true /* expand_call */);
-      __ leave();
-    }
+  // 3: apply keep-alive barrier if needed
+  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
+    __ enter();
+    satb_write_barrier_pre(masm /* masm */,
+                           noreg /* obj */,
+                           dst /* pre_val */,
+                           rthread /* thread */,
+                           tmp1 /* tmp */,
+                           true /* tosca_live */,
+                           true /* expand_call */);
+    __ leave();
   }
 }
 
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -64,9 +64,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -287,8 +287,6 @@
 
 //-------------------------------------------------------------------
 
-address NativeMovRegMem::instruction_address() const      { return addr_at(instruction_offset); }
-
 int NativeMovRegMem::offset() const  {
   address pc = instruction_address();
   unsigned insn = *(unsigned*)pc;
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -381,11 +381,11 @@
 
  public:
   // helper
-  int instruction_start() const;
+  int instruction_start() const { return instruction_offset; }
 
-  address instruction_address() const;
+  address instruction_address() const { return addr_at(instruction_offset); }
 
-  address next_instruction_address() const;
+  int num_bytes_to_end_of_patch() const { return instruction_offset + instruction_size; }
 
   int   offset() const;
 
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -285,7 +285,6 @@
     }
     break;
   case Interpreter::java_lang_math_pow :
-    fpargs = 2;
     if (StubRoutines::dpow() == NULL) {
       fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
     } else {
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -30,8 +30,8 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_aarch64.hpp"
 
 #include OS_HEADER_INLINE(os)
 
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,8 +26,8 @@
 #ifndef CPU_AARCH64_VM_VERSION_AARCH64_HPP
 #define CPU_AARCH64_VM_VERSION_AARCH64_HPP
 
+#include "runtime/abstract_vm_version.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/vm_version.hpp"
 #include "utilities/sizes.hpp"
 
 class VM_Version : public Abstract_VM_Version {
--- a/src/hotspot/cpu/aarch64/vm_version_ext_aarch64.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/aarch64/vm_version_ext_aarch64.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_AARCH64_VM_VERSION_EXT_AARCH64_HPP
 #define CPU_AARCH64_VM_VERSION_EXT_AARCH64_HPP
 
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_aarch64.hpp"
 
 class VM_Version_Ext : public VM_Version {
  private:
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1817,6 +1817,11 @@
           assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
           __ cmp(opr1->as_register(), 0);
           break;
+        case T_METADATA:
+          assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
+          assert(opr2->as_constant_ptr()->as_metadata() == NULL, "cannot handle otherwise");
+          __ cmp(opr1->as_register(), 0);
+          break;
         default:
           ShouldNotReachHere();
       }
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1310,9 +1310,16 @@
                                         CodeEmitInfo* info) {
   if (value->is_double_cpu()) {
     assert(address->index()->is_illegal(), "should have a constant displacement");
-    LIR_Opr tmp = new_pointer_register();
-    add_large_constant(address->base(), address->disp(), tmp);
-    __ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
+    LIR_Address* store_addr = NULL;
+    if (address->disp() != 0) {
+      LIR_Opr tmp = new_pointer_register();
+      add_large_constant(address->base(), address->disp(), tmp);
+      store_addr = new LIR_Address(tmp, (intx)0, address->type());
+    } else {
+      // address->disp() can be 0 if the address is referenced via the unsafe intrinsic
+      store_addr = address;
+    }
+    __ volatile_store_mem_reg(value, store_addr, info);
     return;
   }
   __ store(value, address, info, lir_patch_none);
@@ -1322,9 +1329,16 @@
                                        CodeEmitInfo* info) {
   if (result->is_double_cpu()) {
     assert(address->index()->is_illegal(), "should have a constant displacement");
-    LIR_Opr tmp = new_pointer_register();
-    add_large_constant(address->base(), address->disp(), tmp);
-    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
+    LIR_Address* load_addr = NULL;
+    if (address->disp() != 0) {
+      LIR_Opr tmp = new_pointer_register();
+      add_large_constant(address->base(), address->disp(), tmp);
+      load_addr = new LIR_Address(tmp, (intx)0, address->type());
+    } else {
+      // address->disp() can be 0 if the address is referenced via the unsafe intrinsic
+      load_addr = address;
+    }
+    __ volatile_load_mem_reg(load_addr, result, info);
     return;
   }
   __ load(address, result, info, lir_patch_none);
--- a/src/hotspot/cpu/arm/globals_arm.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/globals_arm.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -63,9 +63,6 @@
 
 define_pd_global(bool,  PreserveFramePointer,     false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker,    16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 0);
 
 // No performance work done here yet.
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -349,6 +349,11 @@
 // (field access patching is handled differently in that case)
 class NativeMovRegMem: public NativeInstruction {
  public:
+  enum arm_specific_constants {
+    instruction_size = 8
+  };
+
+  int num_bytes_to_end_of_patch() const { return instruction_size; }
 
   int offset() const;
   void set_offset(int x);
--- a/src/hotspot/cpu/arm/register_arm.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/register_arm.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,7 +26,7 @@
 #define CPU_ARM_REGISTER_ARM_HPP
 
 #include "asm/register.hpp"
-#include "vm_version_arm.hpp"
+#include "runtime/vm_version.hpp"
 
 class VMRegImpl;
 typedef VMRegImpl* VMReg;
--- a/src/hotspot/cpu/arm/vm_version_arm.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/vm_version_arm.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_ARM_VM_VERSION_ARM_HPP
 #define CPU_ARM_VM_VERSION_ARM_HPP
 
+#include "runtime/abstract_vm_version.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/vm_version.hpp"
 
 class VM_Version: public Abstract_VM_Version {
   friend class JVMCIVMStructs;
--- a/src/hotspot/cpu/arm/vm_version_arm_32.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/vm_version_arm_32.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "runtime/java.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/stubCodeGenerator.hpp"
-#include "vm_version_arm.hpp"
+#include "runtime/vm_version.hpp"
 
 int  VM_Version::_stored_pc_adjustment = 4;
 int  VM_Version::_arm_arch             = 5;
--- a/src/hotspot/cpu/arm/vm_version_ext_arm.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/vm_version_ext_arm.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_ARM_VM_VERSION_EXT_ARM_HPP
 #define CPU_ARM_VM_VERSION_EXT_ARM_HPP
 
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_arm.hpp"
 
 class VM_Version_Ext : public VM_Version {
  private:
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "oops/compiledICHolder.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "vmreg_arm.inline.hpp"
 #ifdef COMPILER2
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -322,7 +322,7 @@
 void PatchingStub::emit_code(LIR_Assembler* ce) {
   // copy original code here
   assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
-         "not enough room for call");
+         "not enough room for call, need %d", _bytes_to_copy);
   assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");
 
   Label call_patch;
@@ -340,7 +340,7 @@
     __ load_const(_obj, addrlit, R0);
     DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
   } else {
-    // Make a copy the code which is going to be patched.
+    // Make a copy of the code which is going to be patched.
     for (int i = 0; i < _bytes_to_copy; i++) {
       address ptr = (address)(_pc_start + i);
       int a_byte = (*ptr) & 0xFF;
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -743,10 +743,11 @@
           if (UseCompressedOops && !wide) {
             // Encoding done in caller
             __ stw(from_reg->as_register(), offset, base);
+            __ verify_coop(from_reg->as_register(), FILE_AND_LINE);
           } else {
             __ std(from_reg->as_register(), offset, base);
+            __ verify_oop(from_reg->as_register(), FILE_AND_LINE);
           }
-          __ verify_oop(from_reg->as_register());
           break;
         }
       case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
@@ -783,10 +784,11 @@
         if (UseCompressedOops && !wide) {
           // Encoding done in caller.
           __ stwx(from_reg->as_register(), base, disp);
+          __ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
         } else {
           __ stdx(from_reg->as_register(), base, disp);
+          __ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
         }
-        __ verify_oop(from_reg->as_register()); // kills R0
         break;
       }
     case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
@@ -831,7 +833,7 @@
           } else {
             __ ld(to_reg->as_register(), offset, base);
           }
-          __ verify_oop(to_reg->as_register());
+          __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
           break;
         }
       case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
@@ -862,7 +864,7 @@
         } else {
           __ ldx(to_reg->as_register(), base, disp);
         }
-        __ verify_oop(to_reg->as_register());
+        __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
         break;
       }
     case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
@@ -1141,7 +1143,7 @@
   }
 
   if (addr->base()->type() == T_OBJECT) {
-    __ verify_oop(src);
+    __ verify_oop(src, FILE_AND_LINE);
   }
 
   PatchingStub* patch = NULL;
@@ -1238,7 +1240,7 @@
     ShouldNotReachHere();
   }
   if (is_reference_type(to_reg->type())) {
-    __ verify_oop(to_reg->as_register());
+    __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
   }
 }
 
@@ -1265,7 +1267,7 @@
   }
 
   if (addr->base()->is_oop_register()) {
-    __ verify_oop(src);
+    __ verify_oop(src, FILE_AND_LINE);
   }
 
   PatchingStub* patch = NULL;
@@ -1467,6 +1469,19 @@
           }
           break;
 
+        case T_METADATA:
+          // We only need, for now, comparison with NULL for metadata.
+          {
+            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
+            Metadata* p = opr2->as_constant_ptr()->as_metadata();
+            if (p == NULL) {
+              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
+            } else {
+              ShouldNotReachHere();
+            }
+          }
+          break;
+
         default:
           ShouldNotReachHere();
           break;
@@ -2308,7 +2323,7 @@
                      *op->stub()->entry());
 
   __ bind(*op->stub()->continuation());
-  __ verify_oop(op->obj()->as_register());
+  __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
 }
 
 
@@ -2533,7 +2548,7 @@
     Register Rtmp1 = op->tmp3()->as_register();
     bool should_profile = op->should_profile();
 
-    __ verify_oop(value);
+    __ verify_oop(value, FILE_AND_LINE);
     CodeStub* stub = op->stub();
     // Check if it needs to be profiled.
     ciMethodData* md = NULL;
@@ -3086,7 +3101,7 @@
   assert(do_null || do_update, "why are we here?");
   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
 
-  __ verify_oop(obj);
+  __ verify_oop(obj, FILE_AND_LINE);
 
   if (do_null) {
     if (!TypeEntries::was_null_seen(current_klass)) {
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -44,7 +44,7 @@
   const Register temp_reg = R12_scratch2;
   Label Lmiss;
 
-  verify_oop(receiver);
+  verify_oop(receiver, FILE_AND_LINE);
   MacroAssembler::null_check(receiver, oopDesc::klass_offset_in_bytes(), &Lmiss);
   load_klass(temp_reg, receiver);
 
@@ -100,7 +100,7 @@
   // Load object header.
   ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
 
-  verify_oop(Roop);
+  verify_oop(Roop, FILE_AND_LINE);
 
   // Save object being locked into the BasicObjectLock...
   std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
@@ -157,7 +157,7 @@
   if (UseBiasedLocking) {
     // Load the object out of the BasicObjectLock.
     ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
-    verify_oop(Roop);
+    verify_oop(Roop, FILE_AND_LINE);
     biased_locking_exit(CCR0, Roop, R0, done);
   }
   // Test first if it is a fast recursive unlock.
@@ -167,7 +167,7 @@
   if (!UseBiasedLocking) {
     // Load object.
     ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
-    verify_oop(Roop);
+    verify_oop(Roop, FILE_AND_LINE);
   }
 
   // Check if it is still a light weight lock; this is true if we see
@@ -316,7 +316,7 @@
 //         relocInfo::runtime_call_type);
   }
 
-  verify_oop(obj);
+  verify_oop(obj, FILE_AND_LINE);
 }
 
 
@@ -383,7 +383,7 @@
     //     relocInfo::runtime_call_type);
   }
 
-  verify_oop(obj);
+  verify_oop(obj, FILE_AND_LINE);
 }
 
 
@@ -399,8 +399,7 @@
   bne(CCR0, not_null);
   stop("non-null oop required");
   bind(not_null);
-  if (!VerifyOops) return;
-  verify_oop(r);
+  verify_oop(r, FILE_AND_LINE);
 }
 
 #endif // PRODUCT
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -335,12 +335,12 @@
   __ ld(value, 0, tmp1);      // Resolve (untagged) jobject.
 
   __ beq(CCR0, not_weak);     // Test for jweak tag.
-  __ verify_oop(value);
+  __ verify_oop(value, FILE_AND_LINE);
   g1_write_barrier_pre(masm, IN_NATIVE | ON_PHANTOM_OOP_REF,
                        noreg, noreg, value,
                        tmp1, tmp2, needs_frame);
   __ bind(not_weak);
-  __ verify_oop(value);
+  __ verify_oop(value, FILE_AND_LINE);
   __ bind(done);
 }
 
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -113,7 +113,7 @@
   __ clrrdi(tmp1, value, JNIHandles::weak_tag_size);
   __ ld(value, 0, tmp1);      // Resolve (untagged) jobject.
 
-  __ verify_oop(value);
+  __ verify_oop(value, FILE_AND_LINE);
   __ bind(done);
 }
 
--- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -30,6 +30,10 @@
 #error "CC_INTERP is no longer supported. Removed in change 8145117."
 #endif
 
+#ifndef FILE_AND_LINE
+#define FILE_AND_LINE __FILE__ ":" XSTR(__LINE__)
+#endif
+
 // Size of PPC Instructions
 const int BytesPerInstWord = 4;
 
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -67,9 +67,6 @@
 
 define_pd_global(bool, PreserveFramePointer,  false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // Default max size of CMS young gen, per GC worker thread.
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2313,7 +2313,7 @@
 }
 
 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
-  if (state == atos) { MacroAssembler::verify_oop(reg); }
+  if (state == atos) { MacroAssembler::verify_oop(reg, FILE_AND_LINE); }
 }
 
 // Local helper function for the verify_oop_or_return_address macro.
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -3120,7 +3120,7 @@
   li(R0, 0);
   std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
 
-  verify_oop(oop_result);
+  verify_oop(oop_result, FILE_AND_LINE);
 }
 
 void MacroAssembler::get_vm_result_2(Register metadata_result) {
@@ -4917,6 +4917,13 @@
   }
 }
 
+void MacroAssembler::verify_coop(Register coop, const char* msg) {
+  if (!VerifyOops) { return; }
+  if (UseCompressedOops) { decode_heap_oop(coop); }
+  verify_oop(coop, msg);
+  if (UseCompressedOops) { encode_heap_oop(coop, coop); }
+}
+
 // READ: oop. KILL: R0. Volatile floats perhaps.
 void MacroAssembler::verify_oop(Register oop, const char* msg) {
   if (!VerifyOops) {
@@ -4926,6 +4933,9 @@
   address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
   const Register tmp = R11; // Will be preserved.
   const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
+
+  BLOCK_COMMENT("verify_oop {");
+
   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
 
   mr_if_needed(R4_ARG2, oop);
@@ -4942,6 +4952,8 @@
   pop_frame();
   restore_LR_CR(tmp);
   restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+
+  BLOCK_COMMENT("} verify_oop");
 }
 
 void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, const char* msg) {
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -914,6 +914,9 @@
   // Verify R16_thread contents.
   void verify_thread();
 
+  // Calls verify_oop. If UseCompressedOops is on, decodes the oop.
+  // Preserves reg.
+  void verify_coop(Register reg, const char*);
   // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
   void verify_oop(Register reg, const char* s = "broken oop");
   void verify_oop_addr(RegisterOrConstant offs, Register base, const char* s = "contains broken oop");
--- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -77,7 +77,7 @@
   Klass* klass = SystemDictionary::well_known_klass(klass_id);
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_klass {");
-  __ verify_oop(obj_reg);
+  __ verify_oop(obj_reg, FILE_AND_LINE);
   __ cmpdi(CCR0, obj_reg, 0);
   __ beq(CCR0, L_bad);
   __ load_klass(temp_reg, obj_reg);
@@ -172,16 +172,16 @@
   assert(method_temp == R19_method, "required register for loading method");
 
   // Load the invoker, as MH -> MH.form -> LF.vmentry
-  __ verify_oop(recv);
+  __ verify_oop(recv, FILE_AND_LINE);
   __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv,
                    temp2, noreg, false, IS_NOT_NULL);
-  __ verify_oop(method_temp);
+  __ verify_oop(method_temp, FILE_AND_LINE);
   __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp,
                    temp2, noreg, false, IS_NOT_NULL);
-  __ verify_oop(method_temp);
+  __ verify_oop(method_temp, FILE_AND_LINE);
   __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp,
                    temp2, noreg, false, IS_NOT_NULL);
-  __ verify_oop(method_temp);
+  __ verify_oop(method_temp, FILE_AND_LINE);
   __ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);
 
   if (VerifyMethodHandles && !for_compiler_entry) {
@@ -318,7 +318,7 @@
 
     Register temp1_recv_klass = temp1;
     if (iid != vmIntrinsics::_linkToStatic) {
-      __ verify_oop(receiver_reg);
+      __ verify_oop(receiver_reg, FILE_AND_LINE);
       if (iid == vmIntrinsics::_linkToSpecial) {
         // Don't actually load the klass; just null-check the receiver.
         __ null_check_throw(receiver_reg, -1, temp1,
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -462,6 +462,8 @@
 
   address instruction_address() const { return addr_at(0); }
 
+  int num_bytes_to_end_of_patch() const { return instruction_size; }
+
   intptr_t offset() const {
 #ifdef VM_LITTLE_ENDIAN
     short *hi_ptr = (short*)(addr_at(0));
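
The new num_bytes_to_end_of_patch tells shared patching code how many bytes
from the instruction start a data patch may cover, so callers no longer need
per-platform next-instruction arithmetic (the s390 and sparc hunks below make
the same change). A hedged sketch of a caller, with flush_patched_range as a
hypothetical helper name:

  // Flush exactly the byte range a data patch may have modified.
  void flush_patched_range(NativeMovRegMem* insn) {
    address start = insn->instruction_address();
    int     len   = insn->num_bytes_to_end_of_patch();
    ICache::invalidate_range(start, len);
  }
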
--- a/src/hotspot/cpu/ppc/ppc.ad	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/ppc.ad	Wed Nov 20 10:52:28 2019 +0530
@@ -6928,25 +6928,6 @@
   ins_pipe(pipe_class_memory);
 %}
 
-// Card-mark for CMS garbage collection.
-// This cardmark does an optimization so that it must not always
-// do a releasing store. For this, it needs the constant address of
-// CMSCollectorCardTableBarrierSetBSExt::_requires_release.
-// This constant address is split off here by expand so we can use
-// adlc / matcher functionality to load it from the constant section.
-instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
-  match(Set mem (StoreCM mem zero));
-  predicate(UseConcMarkSweepGC);
-
-  expand %{
-    immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %}
-    iRegLdst releaseFieldAddress;
-    flagsReg crx;
-    loadConL_Ex(releaseFieldAddress, baseImm);
-    storeCM_CMS(mem, releaseFieldAddress, crx);
-  %}
-%}
-
 instruct storeCM_G1(memory mem, immI_0 zero) %{
   match(Set mem (StoreCM mem zero));
   predicate(UseG1GC);
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1742,9 +1742,9 @@
         assert(r->is_valid(), "bad oop arg");
         if (r->is_stack()) {
           __ ld(temp_reg, reg2offset(r), R1_SP);
-          __ verify_oop(temp_reg);
+          __ verify_oop(temp_reg, FILE_AND_LINE);
         } else {
-          __ verify_oop(r->as_Register());
+          __ verify_oop(r->as_Register(), FILE_AND_LINE);
         }
       }
     }
@@ -2107,7 +2107,7 @@
 
   __ cmpdi(CCR0, R3_ARG1, 0);
   __ beq(CCR0, ic_miss);
-  __ verify_oop(R3_ARG1);
+  __ verify_oop(R3_ARG1, FILE_AND_LINE);
   __ load_klass(receiver_klass, R3_ARG1);
 
   __ cmpd(CCR0, receiver_klass, ic);
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -440,7 +440,6 @@
     StubCodeMark mark(this, "StubRoutines", "forward_exception");
     address start = __ pc();
 
-#if !defined(PRODUCT)
     if (VerifyOops) {
       // Get pending exception oop.
       __ ld(R3_ARG1,
@@ -456,7 +455,6 @@
       }
       __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
     }
-#endif
 
     // Save LR/CR and copy exception pc (LR) into R4_ARG2.
     __ save_LR_CR(R4_ARG2);
@@ -702,9 +700,9 @@
 #if !defined(PRODUCT)
   // Wrapper which calls oopDesc::is_oop_or_null()
   // Only called by MacroAssembler::verify_oop
-  static void verify_oop_helper(const char* message, oop o) {
+  static void verify_oop_helper(const char* message, oopDesc* o) {
     if (!oopDesc::is_oop_or_null(o)) {
-      fatal("%s", message);
+      fatal("%s. oop: " PTR_FORMAT, message, p2i(o));
     }
     ++ StubRoutines::_verify_oop_count;
   }
@@ -725,7 +723,6 @@
     return start;
   }
 
-
   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
   //
   // The code is implemented(ported from sparc) as we believe it benefits JVM98, however
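
Two details in the verify_oop_helper hunk above: the parameter type becomes
oopDesc* rather than oop, plausibly because 'oop' is a wrapper class instead of
a raw pointer in debug builds with CheckUnhandledOops (an inference, not stated
in the change), and the fatal message now prints the failing pointer. A
stand-alone sketch of the strengthened message:

  #include <cstdio>
  #include <cstdint>

  struct oopDesc;                                // opaque stand-in

  static void verify_oop_helper(const char* message, oopDesc* o) {
    bool plausible = ((uintptr_t)o % 8 == 0);    // stand-in for is_oop_or_null()
    if (!plausible) {
      std::fprintf(stderr, "%s. oop: %p\n", message, (const void*)o);
    }
  }
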
--- a/src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_PPC_VM_VERSION_EXT_PPC_HPP
 #define CPU_PPC_VM_VERSION_EXT_PPC_HPP
 
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_ppc.hpp"
 
 #define CPU_INFO        "cpu_info"
 #define CPU_TYPE        "fpu_type"
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -32,10 +32,10 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
+#include "runtime/vm_version.hpp"
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "vm_version_ppc.hpp"
 
 #include <sys/sysinfo.h>
 #if defined(_AIX)
--- a/src/hotspot/cpu/ppc/vm_version_ppc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,8 +26,8 @@
 #ifndef CPU_PPC_VM_VERSION_PPC_HPP
 #define CPU_PPC_VM_VERSION_PPC_HPP
 
+#include "runtime/abstract_vm_version.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/vm_version.hpp"
 
 class VM_Version: public Abstract_VM_Version {
 protected:
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1322,6 +1322,15 @@
         } else {
           __ z_cfi(reg1, c->as_jint());
         }
+      } else if (c->type() == T_METADATA) {
+        // We only need, for now, comparison with NULL for metadata.
+        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
+        Metadata* m = c->as_metadata();
+        if (m == NULL) {
+          __ z_cghi(reg1, 0);
+        } else {
+          ShouldNotReachHere();
+        }
       } else if (is_reference_type(c->type())) {
         // In 64bit oops are single register.
         jobject o = c->as_jobject();
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,7 @@
   return stub;
 #else
   ShouldNotReachHere();
+  return NULL;
 #endif
 }
 
--- a/src/hotspot/cpu/s390/disassembler_s390.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/disassembler_s390.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -28,8 +28,6 @@
 #include "code/codeCache.hpp"
 #include "compiler/disassembler.hpp"
 #include "depChecker_s390.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/parOopClosures.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
--- a/src/hotspot/cpu/s390/globals_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/globals_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -69,9 +69,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // Default max size of CMS young gen, per GC worker thread.
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -37,9 +37,11 @@
 #include "oops/accessDecorators.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
+#ifdef COMPILER2
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
 #include "opto/matcher.hpp"
+#endif
 #include "prims/methodHandles.hpp"
 #include "registerSaver_s390.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -2925,7 +2927,7 @@
 }
 
 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
-  Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
+  Register ic_reg       = Z_inline_cache;
   int      klass_offset = oopDesc::klass_offset_in_bytes();
   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
     if (VM_Version::has_CompareBranch()) {
@@ -4590,6 +4592,7 @@
   return block_end - block_start;
 }
 
+#ifdef COMPILER2
 //------------------------------------------------------
 //   Special String Intrinsics. Implementation
 //------------------------------------------------------
@@ -5837,7 +5840,7 @@
 
   return offset() - block_start;
 }
-
+#endif
 
 //-------------------------------------------------
 //   Constants (scalar and oop) in constant pool
@@ -6150,96 +6153,6 @@
   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
 }
 
-
-void MacroAssembler::generate_type_profiling(const Register Rdata,
-                                             const Register Rreceiver_klass,
-                                             const Register Rwanted_receiver_klass,
-                                             const Register Rmatching_row,
-                                             bool is_virtual_call) {
-  const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
-                       in_bytes(ReceiverTypeData::receiver_offset(0));
-  const int num_rows = ReceiverTypeData::row_limit();
-  NearLabel found_free_row;
-  NearLabel do_increment;
-  NearLabel found_no_slot;
-
-  BLOCK_COMMENT("type profiling {");
-
-  // search for:
-  //    a) The type given in Rwanted_receiver_klass.
-  //    b) The *first* empty row.
-
-  // First search for a) only, just running over b) with no regard.
-  // This is possible because
-  //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
-  // is never true (receiver_class can't be zero).
-  for (int row_num = 0; row_num < num_rows; row_num++) {
-    // Row_offset should be a well-behaved positive number. The generated code relies
-    // on that wrt constant code size. Add2reg can handle all row_offset values, but
-    // will have to vary generated code size.
-    int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
-    assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
-
-    // Is Rwanted_receiver_klass in this row?
-    if (VM_Version::has_CompareBranch()) {
-      z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
-      // Rmatching_row = Rdata + row_offset;
-      add2reg(Rmatching_row, row_offset, Rdata);
-      // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
-      compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
-    } else {
-      add2reg(Rmatching_row, row_offset, Rdata);
-      z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
-      z_bre(do_increment);
-    }
-  }
-
-  // Now that we did not find a match, let's search for b).
-
-  // We could save the first calculation of Rmatching_row if we woud search for a) in reverse order.
-  // We would then end up here with Rmatching_row containing the value for row_num == 0.
-  // We would not see much benefit, if any at all, because the CPU can schedule
-  // two instructions together with a branch anyway.
-  for (int row_num = 0; row_num < num_rows; row_num++) {
-    int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
-
-    // Has this row a zero receiver_klass, i.e. is it empty?
-    if (VM_Version::has_CompareBranch()) {
-      z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
-      // Rmatching_row = Rdata + row_offset
-      add2reg(Rmatching_row, row_offset, Rdata);
-      // if (*row_recv == (intptr_t) 0) goto found_free_row
-      compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
-    } else {
-      add2reg(Rmatching_row, row_offset, Rdata);
-      load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
-      z_bre(found_free_row);  // zero -> Found a free row.
-    }
-  }
-
-  // No match, no empty row found.
-  // Increment total counter to indicate polymorphic case.
-  if (is_virtual_call) {
-    add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
-  }
-  z_bru(found_no_slot);
-
-  // Here we found an empty row, but we have not found Rwanted_receiver_klass.
-  // Rmatching_row holds the address to the first empty row.
-  bind(found_free_row);
-  // Store receiver_klass into empty slot.
-  z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
-
-  // Increment the counter of Rmatching_row.
-  bind(do_increment);
-  ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0);
-  add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata);
-
-  bind(found_no_slot);
-
-  BLOCK_COMMENT("} type profiling");
-}
-
 //---------------------------------------
 // Helpers for Intrinsic Emitters
 //---------------------------------------
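
The removed generate_type_profiling emitted a two-pass search over the receiver
rows of a ReceiverTypeData cell. Its policy, reduced to plain C++ (row layout
simplified; the real row count comes from ReceiverTypeData::row_limit()):

  #include <cstdint>

  struct Row { intptr_t receiver_klass; intptr_t count; };

  void profile_receiver(Row rows[], int num_rows, intptr_t receiver_klass,
                        intptr_t& total_count, bool is_virtual_call) {
    for (int i = 0; i < num_rows; i++) {     // pass a): find a matching row
      if (rows[i].receiver_klass == receiver_klass) { rows[i].count++; return; }
    }
    for (int i = 0; i < num_rows; i++) {     // pass b): claim the first free row
      if (rows[i].receiver_klass == 0) {     // a klass is never 0, so 0 == empty
        rows[i].receiver_klass = receiver_klass;
        rows[i].count++;
        return;
      }
    }
    if (is_virtual_call) {                   // no match, no free row:
      total_count++;                         // count the call as polymorphic
    }
  }
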
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -850,6 +850,7 @@
   //   Kills:    tmp, Z_R0, Z_R1.
   //   Early clobber: result.
   //   Boolean precise controls accuracy of result value.
+#ifdef COMPILER2
   unsigned int string_compress(Register result, Register src, Register dst, Register cnt,
                                Register tmp,    bool precise);
 
@@ -885,6 +886,7 @@
 
   unsigned int string_indexof_char(Register result, Register haystack, Register haycnt,
                                    Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte);
+#endif
 
   // Emit an oop const to the constant pool and set a relocation info
   // with address current_pc. Return the TOC offset of the constant.
@@ -918,13 +920,6 @@
   // Offset is +/- 2**32 -> use long.
   static long get_load_const_from_toc_offset(address a);
 
-
-  void generate_type_profiling(const Register Rdata,
-                               const Register Rreceiver_klass,
-                               const Register Rwanted_receiver_klass,
-                               const Register Rmatching_row,
-                               bool is_virtual_call);
-
   // Bit operations for single register operands.
   inline void lshift(Register r, int places, bool doubl = true);   // <<
   inline void rshift(Register r, int places, bool doubl = true);   // >>
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -535,6 +535,12 @@
 inline NativeMovRegMem* nativeMovRegMem_at (address address);
 class NativeMovRegMem: public NativeInstruction {
  public:
+  enum z_specific_constants {
+    instruction_size = 12 // load_const used with access_field_id
+  };
+
+  int num_bytes_to_end_of_patch() const { return instruction_size; }
+
   intptr_t offset() const {
     return nativeMovConstReg_at(addr_at(0))->data();
   }
--- a/src/hotspot/cpu/s390/register_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/register_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #define CPU_S390_REGISTER_S390_HPP
 
 #include "asm/register.hpp"
-#include "vm_version_s390.hpp"
+#include "runtime/vm_version.hpp"
 
 class Address;
 class VMRegImpl;
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -32,6 +32,7 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "memory/resourceArea.hpp"
+#include "nativeInst_s390.hpp"
 #include "oops/compiledICHolder.hpp"
 #include "oops/klass.inline.hpp"
 #include "registerSaver_s390.hpp"
@@ -1521,7 +1522,6 @@
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type,
                                                 address critical_entry) {
-#ifdef COMPILER2
   int total_in_args = method->size_of_parameters();
   if (method->is_method_handle_intrinsic()) {
     vmIntrinsics::ID iid = method->intrinsic_id();
@@ -2401,10 +2401,6 @@
   }
 
   return nm;
-#else
-  ShouldNotReachHere();
-  return NULL;
-#endif // COMPILER2
 }
 
 static address gen_c2i_adapter(MacroAssembler  *masm,
@@ -2880,7 +2876,7 @@
   // to Deoptimization::fetch_unroll_info below.
   // The (int) cast is necessary, because -((unsigned int)14)
   // is an unsigned int.
-  __ add2reg(Z_R14, -(int)HandlerImpl::size_deopt_handler());
+  __ add2reg(Z_R14, -(int)NativeCall::max_instruction_size());
 
   const Register   exec_mode_reg = Z_tmp_1;
 
--- a/src/hotspot/cpu/s390/vm_version_ext_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/vm_version_ext_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_S390_VM_VERSION_EXT_S390_HPP
 #define CPU_S390_VM_VERSION_EXT_S390_HPP
 
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_s390.hpp"
 
 #define CPU_INFO        "cpu_info"
 #define CPU_TYPE        "fpu_type"
--- a/src/hotspot/cpu/s390/vm_version_s390.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/vm_version_s390.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -31,7 +31,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
 #include "runtime/stubCodeGenerator.hpp"
-#include "vm_version_s390.hpp"
+#include "runtime/vm_version.hpp"
 
 # include <sys/sysinfo.h>
 
@@ -44,8 +44,8 @@
 unsigned int  VM_Version::_nfeatures                                = 0;
 unsigned int  VM_Version::_ncipher_features                         = 0;
 unsigned int  VM_Version::_nmsgdigest_features                      = 0;
-unsigned int  VM_Version::_Dcache_lineSize                          = 256;
-unsigned int  VM_Version::_Icache_lineSize                          = 256;
+unsigned int  VM_Version::_Dcache_lineSize                          = DEFAULT_CACHE_LINE_SIZE;
+unsigned int  VM_Version::_Icache_lineSize                          = DEFAULT_CACHE_LINE_SIZE;
 
 static const char* z_gen[]     = {"  ",   "G1",   "G2", "G3",    "G4",     "G5",      "G6",   "G7"   };
 static const char* z_machine[] = {"  ", "2064", "2084", "2094",  "2097",   "2817",    "  ",   "2964" };
@@ -61,7 +61,9 @@
 
   intx cache_line_size = Dcache_lineSize(0);
 
+#ifdef COMPILER2
   MaxVectorSize = 8;
+#endif
 
   if (has_PrefetchRaw()) {
     if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {  // not preset
@@ -217,6 +219,7 @@
     FLAG_SET_DEFAULT(UseSHA, false);
   }
 
+#ifdef COMPILER2
   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
     FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
   }
@@ -226,6 +229,7 @@
   if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
     FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
   }
+#endif
   if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
     FLAG_SET_DEFAULT(UsePopCountInstruction, true);
   }
--- a/src/hotspot/cpu/s390/vm_version_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/s390/vm_version_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,8 @@
 #define CPU_S390_VM_VERSION_S390_HPP
 
 
+#include "runtime/abstract_vm_version.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/vm_version.hpp"
 
 class VM_Version: public Abstract_VM_Version {
 
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1511,6 +1511,18 @@
           }
           break;
 
+        case T_METADATA:
+          // We only need, for now, comparison with NULL for metadata.
+          { assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
+            Metadata* m = opr2->as_constant_ptr()->as_metadata();
+            if (m == NULL) {
+              __ cmp(opr1->as_register(), 0);
+            } else {
+              ShouldNotReachHere();
+            }
+          }
+          break;
+
         default:
           ShouldNotReachHere();
           break;
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -66,9 +66,9 @@
 
 
 void C1_MacroAssembler::verified_entry() {
-  if (C1Breakpoint) breakpoint_trap();
-  // build frame
-  verify_FPU(0, "method_entry");
+  if (C1Breakpoint) {
+    breakpoint_trap();
+  }
 }
 
 
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -74,9 +74,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -91,7 +91,6 @@
 // dispatch.
 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
   assert_not_delayed();
-  verify_FPU(1, state);
   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
   jmp( IdispatchAddress, 0 );
   if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
@@ -264,7 +263,6 @@
 // dispatch value in Lbyte_code and increment Lbcp
 
 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify, bool generate_poll) {
-  verify_FPU(1, state);
   // %%%%% maybe implement +VerifyActivationFrameSize here
   //verify_thread(); //too slow; we will just verify on method entry & exit
   if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
@@ -2545,11 +2543,6 @@
 }
 
 
-void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
-  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
-}
-
-
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                         int increment, Address mask_addr,
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -321,7 +321,7 @@
   // Debugging
   void interp_verify_oop(Register reg, TosState state, const char * file, int line);    // only if +VerifyOops && state == atos
   void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
-  void verify_FPU(int stack_depth, TosState state = ftos); // only if +VerifyFPU  && (state == ftos || state == dtos)
+  void verify_FPU(int stack_depth, TosState state = ftos) {}      // No-op.
 
   // support for JVMTI/Dtrace
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1130,21 +1130,6 @@
   }
 }
 
-
-// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
-void MacroAssembler::push_fTOS() {
-  // %%%%%% need to implement this
-}
-
-// pops double TOS element from CPU stack and pushes on FPU stack
-void MacroAssembler::pop_fTOS() {
-  // %%%%%% need to implement this
-}
-
-void MacroAssembler::empty_FPU_stack() {
-  // %%%%%% need to implement this
-}
-
 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
   // plausibility check for oops
   if (!VerifyOops) return;
@@ -2826,47 +2811,6 @@
    bind(done);
 }
 
-
-
-void MacroAssembler::print_CPU_state() {
-  // %%%%% need to implement this
-}
-
-void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
-  // %%%%% need to implement this
-}
-
-void MacroAssembler::push_IU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_IU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::push_FPU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_FPU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::push_CPU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_CPU_state() {
-  // %%%%% need to implement this
-}
-
-
-
 void MacroAssembler::verify_tlab() {
 #ifdef ASSERT
   if (UseTLAB && VerifyOops) {
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -649,10 +649,6 @@
   inline void callr( Register s1, Register s2 );
   inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 
-  // Emits nothing on V8
-  inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
-  inline void iprefetch( Label& L);
-
   inline void tst( Register s );
 
   inline void ret(  bool trace = false );
@@ -1056,23 +1052,6 @@
   // check_and_forward_exception to handle exceptions when it is safe
   void check_and_forward_exception(Register scratch_reg);
 
-  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
-  void push_fTOS();
-
-  // pops double TOS element from CPU stack and pushes on FPU stack
-  void pop_fTOS();
-
-  void empty_FPU_stack();
-
-  void push_IU_state();
-  void pop_IU_state();
-
-  void push_FPU_state();
-  void pop_FPU_state();
-
-  void push_CPU_state();
-  void pop_CPU_state();
-
   // Returns the byte size of the instructions generated by decode_klass_not_null().
   static int instr_size_for_decode_klass_not_null();
 
@@ -1092,15 +1071,11 @@
 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 
-        // only if +VerifyOops
-  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
-        // only if +VerifyFPU
   void stop(const char* msg);                          // prints msg, dumps registers and stops execution
   void warn(const char* msg);                          // prints msg, but don't stop
   void untested(const char* what = "");
   void unimplemented(const char* what = "");
   void should_not_reach_here()                   { stop("should not reach here"); }
-  void print_CPU_state();
 
   // oops in code
   AddressLiteral allocate_oop_address(jobject obj);                          // allocate_index
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -278,13 +278,6 @@
 inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
 inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
 
-// prefetch instruction
-inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
-  Assembler::bp( never, true, xcc, pt, d, rt );
-    Assembler::bp( never, true, xcc, pt, d, rt );
-}
-inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
-
 inline void MacroAssembler::tst( Register s ) { orcc( G0, s, G0 ); }
 
 inline void MacroAssembler::ret( bool trace ) {
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -574,15 +574,6 @@
 //-------------------------------------------------------------------
 
 
-void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
-  Untested("copy_instruction_to");
-  int instruction_size = next_instruction_address() - instruction_address();
-  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
-    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
-  }
-}
-
-
 void NativeMovRegMem::verify() {
   NativeInstruction::verify();
   // make sure code pattern is actually a "ld" or "st" of some sort.
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -576,7 +576,8 @@
 // sethi and the add.  The nop is required to be in the delay slot of the call instruction
 // which overwrites the sethi during patching.
 class NativeMovConstRegPatching;
-inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);class NativeMovConstRegPatching: public NativeInstruction {
+inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
+class NativeMovConstRegPatching: public NativeInstruction {
  public:
   enum Sparc_specific_constants {
     sethi_offset           = 0,
@@ -664,10 +665,13 @@
     return (is_op(i0, Assembler::ldst_op));
   }
 
-  address instruction_address() const           { return addr_at(0); }
-  address next_instruction_address() const      {
-    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
+  address instruction_address() const { return addr_at(0); }
+
+  int num_bytes_to_end_of_patch() const {
+    return is_immediate()? BytesPerInstWord :
+                           NativeMovConstReg::instruction_size;
   }
+
   intptr_t   offset() const                             {
      return is_immediate()? inv_simm(long_at(0), offset_width) :
                             nativeMovConstReg_at(addr_at(0))->data();
@@ -684,8 +688,6 @@
       set_offset (offset() + radd_offset);
   }
 
-  void  copy_instruction_to(address new_instruction_address);
-
   void verify();
   void print ();
 
--- a/src/hotspot/cpu/sparc/vm_version_ext_sparc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/vm_version_ext_sparc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_SPARC_VM_VERSION_EXT_SPARC_HPP
 #define CPU_SPARC_VM_VERSION_EXT_SPARC_HPP
 
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_sparc.hpp"
 
 #if defined(SOLARIS)
 #include <kstat.h>
--- a/src/hotspot/cpu/sparc/vm_version_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/vm_version_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
-#include "vm_version_sparc.hpp"
+#include "runtime/vm_version.hpp"
 
 #include <sys/mman.h>
 
--- a/src/hotspot/cpu/sparc/vm_version_sparc.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/sparc/vm_version_sparc.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_SPARC_VM_VERSION_SPARC_HPP
 #define CPU_SPARC_VM_VERSION_SPARC_HPP
 
+#include "runtime/abstract_vm_version.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/vm_version.hpp"
 
 class VM_Version: public Abstract_VM_Version {
   friend class VMStructs;
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -4227,7 +4227,7 @@
 void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
          vector_len == AVX_256bit? VM_Version::supports_avx2() :
-         0, "");
+         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x00);
@@ -7197,7 +7197,6 @@
   emit_int8(0x7C);
   emit_int8((unsigned char)(0xC0 | encode));
 }
-
 void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   assert(dst != xnoreg, "sanity");
@@ -7212,7 +7211,6 @@
   emit_int8((unsigned char)0x90);
   emit_operand(dst, src);
 }
-
 // Carry-Less Multiplication Quadword
 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
   assert(VM_Version::supports_clmul(), "");
@@ -9165,6 +9163,26 @@
   emit_int8((unsigned char)(0xD0 | encode));
 }
 
+void Assembler::btsq(Address dst, int imm8) {
+  assert(isByte(imm8), "not a byte");
+  InstructionMark im(this);
+  prefixq(dst);
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0xBA);
+  emit_operand(rbp /* 5 */, dst, 1);
+  emit_int8(imm8);
+}
+
+void Assembler::btrq(Address dst, int imm8) {
+  assert(isByte(imm8), "not a byte");
+  InstructionMark im(this);
+  prefixq(dst);
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0xBA);
+  emit_operand(rsi /* 6 */, dst, 1);
+  emit_int8(imm8);
+}
+
 void Assembler::orq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefixq(dst);
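
The new btsq/btrq emitters encode BTS/BTR with a 64-bit memory operand and an
immediate bit index; the rbp and rsi arguments to emit_operand supply the
opcode extensions /5 and /6 in the ModRM reg field, as the inline comments
note. Their memory-word semantics, ignoring the carry flag the hardware also
sets:

  #include <cstdint>

  inline void btsq_semantics(uint64_t* dst, int imm8) {  // bit test-and-set
    *dst |=  (uint64_t(1) << (imm8 & 63));
  }

  inline void btrq_semantics(uint64_t* dst, int imm8) {  // bit test-and-reset
    *dst &= ~(uint64_t(1) << (imm8 & 63));
  }
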
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,7 +26,7 @@
 #define CPU_X86_ASSEMBLER_X86_HPP
 
 #include "asm/register.hpp"
-#include "vm_version_x86.hpp"
+#include "runtime/vm_version.hpp"
 
 class BiasedLockingCounters;
 
@@ -1592,6 +1592,9 @@
 
 #ifdef _LP64
   void notq(Register dst);
+
+  void btsq(Address dst, int imm8);
+  void btrq(Address dst, int imm8);
 #endif
 
   void orl(Address dst, int32_t imm32);
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2641,6 +2641,15 @@
       LIR_Const* c = opr2->as_constant_ptr();
       if (c->type() == T_INT) {
         __ cmpl(reg1, c->as_jint());
+      } else if (c->type() == T_METADATA) {
+        // All we need for now is a comparison with NULL for equality.
+        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
+        Metadata* m = c->as_metadata();
+        if (m == NULL) {
+          __ cmpptr(reg1, (int32_t)0);
+        } else {
+          ShouldNotReachHere();
+        }
       } else if (is_reference_type(c->type())) {
         // In 64bit oops are single register
         jobject o = c->as_jobject();
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -22,6 +22,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 #include "gc/shenandoah/shenandoahForwarding.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
@@ -445,21 +446,35 @@
   }
 }
 
+//
+// Arguments:
+//
+// Inputs:
+//   src:        oop location; might be clobbered
+//   tmp1:       scratch register; might not be valid
+//
+// Output:
+//   dst:        oop loaded from src location
+//
+// Kills:
+//   tmp1 (if it is valid)
+//
 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
              Register dst, Address src, Register tmp1, Register tmp_thread) {
-  bool on_oop = is_reference_type(type);
-  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
-  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
-  bool not_in_heap = (decorators & IN_NATIVE) != 0;
-  bool on_reference = on_weak || on_phantom;
-  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
-  bool keep_alive = ((decorators & AS_NO_KEEPALIVE) == 0) || is_traversal_mode;
+  // 1: non-reference load, no additional barrier is needed
+  if (!is_reference_type(type)) {
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+    return;
+  }
 
-  Register result_dst = dst;
-  bool use_tmp1_for_dst = false;
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");
 
-  if (on_oop) {
-    // We want to preserve src
+  // 2: load a reference from src location and apply LRB if needed
+  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
+    Register result_dst = dst;
+    bool use_tmp1_for_dst = false;
+
+    // Preserve src location for LRB
     if (dst == src.base() || dst == src.index()) {
       // Use tmp1 for dst if possible, as it is not used in BarrierAssembler::load_at()
       if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
@@ -469,19 +484,18 @@
         dst = rdi;
         __ push(dst);
       }
+      assert_different_registers(dst, src.base(), src.index());
     }
-    assert_different_registers(dst, src.base(), src.index());
-  }
 
-  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
 
-  if (on_oop) {
-    if (not_in_heap && !is_traversal_mode) {
+    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
       load_reference_barrier_native(masm, dst, src);
     } else {
       load_reference_barrier(masm, dst, src);
     }
 
+    // Move loaded oop to final destination
     if (dst != result_dst) {
       __ movptr(result_dst, dst);
 
@@ -491,21 +505,24 @@
 
       dst = result_dst;
     }
+  } else {
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+  }
 
-    if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
-      const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
-      assert_different_registers(dst, tmp1, tmp_thread);
-      NOT_LP64(__ get_thread(thread));
-      // Generate the SATB pre-barrier code to log the value of
-      // the referent field in an SATB buffer.
-      shenandoah_write_barrier_pre(masm /* masm */,
-                                   noreg /* obj */,
-                                   dst /* pre_val */,
-                                   thread /* thread */,
-                                   tmp1 /* tmp */,
-                                   true /* tosca_live */,
-                                   true /* expand_call */);
-    }
+  // 3: apply keep-alive barrier if needed
+  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
+    const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
+    assert_different_registers(dst, tmp1, tmp_thread);
+    NOT_LP64(__ get_thread(thread));
+    // Generate the SATB pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    shenandoah_write_barrier_pre(masm /* masm */,
+                                 noreg /* obj */,
+                                 dst /* pre_val */,
+                                 thread /* thread */,
+                                 tmp1 /* tmp */,
+                                 true /* tosca_live */,
+                                 true /* expand_call */);
   }
 }
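
The rewrite above turns a cluster of decorator flags into three explicit steps.
The control flow, reduced to plain C++, with the ShenandoahBarrierSet
predicates stood in by booleans (their definitions are not part of this hunk):

  enum class LoadKind { Plain, WithLRB, WithNativeLRB };

  LoadKind classify_load(bool is_reference, bool need_lrb, bool native_lrb) {
    if (!is_reference) return LoadKind::Plain;      // 1: non-reference load
    if (!need_lrb)     return LoadKind::Plain;      // 2: reference, but no LRB needed
    return native_lrb ? LoadKind::WithNativeLRB     //    IN_NATIVE reference
                      : LoadKind::WithLRB;          //    ordinary heap reference
  }

  // Step 3, the SATB keep-alive barrier, is applied afterwards and
  // independently, whenever need_keep_alive_barrier(decorators, type) holds.
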
 
--- a/src/hotspot/cpu/x86/globals_x86.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/globals_x86.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -81,9 +81,6 @@
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -3770,6 +3770,16 @@
   }
 }
 
+void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(UseAVX > 0, "requires some form of AVX");
+  if (reachable(src)) {
+    Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
+  } else {
+    lea(rscratch, src);
+    Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
+  }
+}
+
 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
   vandps(dst, nds, negate_field, vector_len);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -993,6 +993,8 @@
 public:
   void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
   void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
+  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
+                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);
 
 #endif
 
@@ -1238,6 +1240,10 @@
   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
 
+  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
+  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
+  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
+
   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
--- a/src/hotspot/cpu/x86/macroAssembler_x86_aes.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/macroAssembler_x86_aes.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018, Intel Corporation.
+* Copyright (c) 2019, Intel Corporation.
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -778,4 +778,493 @@
     vpxor(xmm3, xmm3, xmm3, Assembler::AVX_128bit);
     vpxor(xmm15, xmm15, xmm15, Assembler::AVX_128bit);
 }
+
+// AES Counter Mode using VAES instructions
+void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
+    Register len_reg, Register used, Register used_addr, Register saved_encCounter_start) {
+
+    const Register rounds = 0;
+    const Register pos = r12;
+
+    Label PRELOOP_START, EXIT_PRELOOP, REMAINDER, REMAINDER_16, LOOP, END, EXIT, END_LOOP,
+    AES192, AES256, AES192_REMAINDER16, REMAINDER16_END_LOOP, AES256_REMAINDER16,
+    REMAINDER_8, REMAINDER_4, AES192_REMAINDER8, REMAINDER_LOOP, AES256_REMINDER,
+    AES192_REMAINDER, END_REMAINDER_LOOP, AES256_REMAINDER8, REMAINDER8_END_LOOP,
+    AES192_REMAINDER4, AES256_REMAINDER4, AES256_REMAINDER, END_REMAINDER4, EXTRACT_TAILBYTES,
+    EXTRACT_TAIL_4BYTES, EXTRACT_TAIL_2BYTES, EXTRACT_TAIL_1BYTE, STORE_CTR;
+
+    cmpl(len_reg, 0);
+    jcc(Assembler::belowEqual, EXIT);
+
+    movl(pos, 0);
+    // If the number of used encrypted counter bytes is < 16,
+    // XOR plaintext (PT) with the saved encrypted counter to obtain ciphertext (CT).
+    bind(PRELOOP_START);
+    cmpl(used, 16);
+    jcc(Assembler::aboveEqual, EXIT_PRELOOP);
+    movb(rbx, Address(saved_encCounter_start, used));
+    xorb(rbx, Address(src_addr, pos));
+    movb(Address(dest_addr, pos), rbx);
+    addptr(pos, 1);
+    addptr(used, 1);
+    decrement(len_reg);
+    jmp(PRELOOP_START);
+
+    bind(EXIT_PRELOOP);
+    movl(Address(used_addr, 0), used);
+
+    // Calculate the number of rounds (10, 12, or 14) based on the key length (128, 192, or 256 bits).
+    movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+
+    vpxor(xmm0, xmm0, xmm0, Assembler::AVX_128bit);
+    // Move the initial counter value into xmm0.
+    movdqu(xmm0, Address(counter, 0));
+    // broadcast counter value to zmm8
+    evshufi64x2(xmm8, xmm0, xmm0, 0, Assembler::AVX_512bit);
+
+    // load lbswap mask
+    evmovdquq(xmm16, ExternalAddress(StubRoutines::x86::counter_mask_addr()), Assembler::AVX_512bit, r15);
+
+    // shuffle counter using lbswap_mask
+    vpshufb(xmm8, xmm8, xmm16, Assembler::AVX_512bit);
+
+    // Pre-increment and propagate counter values to the zmm9-zmm15 registers.
+    // Linc0 increments zmm8 by 1 (initial value being 0); Linc4 increments the counters zmm9-zmm15 by 4.
+    // The counter is incremented after each 16-byte block is processed;
+    // each zmm register holds 4 counter values (one per 128-bit lane), and
+    // the counters are incremented in parallel.
+    vpaddd(xmm8, xmm8, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 64), Assembler::AVX_512bit, r15);//linc0
+    vpaddd(xmm9, xmm8, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//linc4(rip)
+    vpaddd(xmm10, xmm9, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
+    vpaddd(xmm11, xmm10, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
+    vpaddd(xmm12, xmm11, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
+    vpaddd(xmm13, xmm12, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
+    vpaddd(xmm14, xmm13, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
+    vpaddd(xmm15, xmm14, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
+
+    // Load the linc32 mask into a zmm register; linc32 increments the counter by 32.
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 256), Assembler::AVX_512bit, r15);//Linc32
+
+    // xmm31 contains the key shuffle mask.
+    movdqu(xmm31, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+    // ev_load_key loads a 128-bit key and shuffles it, then broadcasts the shuffled key into a 512-bit value.
+    // For broadcasting to a ZMM register, vshufi64 is used instead of evbroadcasti64x2, as the source in this
+    // case is a ZMM register that already holds the shuffled key value.
+    ev_load_key(xmm20, key, 0, xmm31);
+    ev_load_key(xmm21, key, 1 * 16, xmm31);
+    ev_load_key(xmm22, key, 2 * 16, xmm31);
+    ev_load_key(xmm23, key, 3 * 16, xmm31);
+    ev_load_key(xmm24, key, 4 * 16, xmm31);
+    ev_load_key(xmm25, key, 5 * 16, xmm31);
+    ev_load_key(xmm26, key, 6 * 16, xmm31);
+    ev_load_key(xmm27, key, 7 * 16, xmm31);
+    ev_load_key(xmm28, key, 8 * 16, xmm31);
+    ev_load_key(xmm29, key, 9 * 16, xmm31);
+    ev_load_key(xmm30, key, 10 * 16, xmm31);
+
+    // Process 32 blocks or 512 bytes of data
+    bind(LOOP);
+    cmpl(len_reg, 512);
+    jcc(Assembler::less, REMAINDER);
+    subq(len_reg, 512);
+    // Shuffle the counters and XOR them with roundkey1; the results are stored in zmm0-zmm7.
+    vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm1, xmm9, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm1, xmm1, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm2, xmm10, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm2, xmm2, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm3, xmm11, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm3, xmm3, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm4, xmm12, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm4, xmm4, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm5, xmm13, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm5, xmm5, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm6, xmm14, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm6, xmm6, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm7, xmm15, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm7, xmm7, xmm20, Assembler::AVX_512bit);
+    // Perform AES encode operations and put results in zmm0-zmm7.
+    // This is followed by incrementing counter values in zmm8-zmm15.
+    // Since we will be processing 32 blocks at a time, the counter is incremented by 32.
+    roundEnc(xmm21, 7);
+    vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm22, 7);
+    vpaddq(xmm9, xmm9, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm23, 7);
+    vpaddq(xmm10, xmm10, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm24, 7);
+    vpaddq(xmm11, xmm11, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm25, 7);
+    vpaddq(xmm12, xmm12, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm26, 7);
+    vpaddq(xmm13, xmm13, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm27, 7);
+    vpaddq(xmm14, xmm14, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm28, 7);
+    vpaddq(xmm15, xmm15, xmm19, Assembler::AVX_512bit);
+    roundEnc(xmm29, 7);
+
+    cmpl(rounds, 52);
+    jcc(Assembler::aboveEqual, AES192);
+    lastroundEnc(xmm30, 7);
+    jmp(END_LOOP);
+
+    bind(AES192);
+    roundEnc(xmm30, 7);
+    ev_load_key(xmm18, key, 11 * 16, xmm31);
+    roundEnc(xmm18, 7);
+    cmpl(rounds, 60);
+    jcc(Assembler::aboveEqual, AES256);
+    ev_load_key(xmm18, key, 12 * 16, xmm31);
+    lastroundEnc(xmm18, 7);
+    jmp(END_LOOP);
+
+    bind(AES256);
+    ev_load_key(xmm18, key, 12 * 16, xmm31);
+    roundEnc(xmm18, 7);
+    ev_load_key(xmm18, key, 13 * 16, xmm31);
+    roundEnc(xmm18, 7);
+    ev_load_key(xmm18, key, 14 * 16, xmm31);
+    lastroundEnc(xmm18, 7);
+
+    // After the AES encode rounds, the encrypted counters lie in zmm0-zmm7;
+    // XOR them with the input plaintext and store the resulting ciphertext.
+    bind(END_LOOP);
+    evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
+    evpxorq(xmm1, xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 64), xmm1, Assembler::AVX_512bit);
+    evpxorq(xmm2, xmm2, Address(src_addr, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 2 * 64), xmm2, Assembler::AVX_512bit);
+    evpxorq(xmm3, xmm3, Address(src_addr, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 3 * 64), xmm3, Assembler::AVX_512bit);
+    evpxorq(xmm4, xmm4, Address(src_addr, pos, Address::times_1, 4 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 4 * 64), xmm4, Assembler::AVX_512bit);
+    evpxorq(xmm5, xmm5, Address(src_addr, pos, Address::times_1, 5 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 5 * 64), xmm5, Assembler::AVX_512bit);
+    evpxorq(xmm6, xmm6, Address(src_addr, pos, Address::times_1, 6 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 6 * 64), xmm6, Assembler::AVX_512bit);
+    evpxorq(xmm7, xmm7, Address(src_addr, pos, Address::times_1, 7 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 7 * 64), xmm7, Assembler::AVX_512bit);
+    addq(pos, 512);
+    jmp(LOOP);
+
+    // Encode 256, 128, 64, or 16 bytes at a time when the remaining length is less than 512 bytes.
+    bind(REMAINDER);
+    cmpl(len_reg, 0);
+    jcc(Assembler::equal, END);
+    cmpl(len_reg, 256);
+    jcc(Assembler::aboveEqual, REMAINDER_16);
+    cmpl(len_reg, 128);
+    jcc(Assembler::aboveEqual, REMAINDER_8);
+    cmpl(len_reg, 64);
+    jcc(Assembler::aboveEqual, REMAINDER_4);
+    // At this point, we will process 16 bytes of data at a time,
+    // so load xmm19 with the counter increment value of 1.
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);
+    jmp(REMAINDER_LOOP);
+
+    // Each ZMM register can be used to encode 64 bytes of data, so we have 4 ZMM registers to encode 256 bytes of data
+    bind(REMAINDER_16);
+    subq(len_reg, 256);
+    // As we process 16 blocks at a time, load mask for incrementing the counter value by 16
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 320), Assembler::AVX_512bit, r15);//Linc16(rip)
+    // shuffle counter and XOR counter with roundkey1
+    vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm1, xmm9, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm1, xmm1, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm2, xmm10, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm2, xmm2, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm3, xmm11, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm3, xmm3, xmm20, Assembler::AVX_512bit);
+    // Increment counter values by 16
+    vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
+    vpaddq(xmm9, xmm9, xmm19, Assembler::AVX_512bit);
+    // AES encode rounds
+    roundEnc(xmm21, 3);
+    roundEnc(xmm22, 3);
+    roundEnc(xmm23, 3);
+    roundEnc(xmm24, 3);
+    roundEnc(xmm25, 3);
+    roundEnc(xmm26, 3);
+    roundEnc(xmm27, 3);
+    roundEnc(xmm28, 3);
+    roundEnc(xmm29, 3);
+
+    cmpl(rounds, 52);
+    jcc(Assembler::aboveEqual, AES192_REMAINDER16);
+    lastroundEnc(xmm30, 3);
+    jmp(REMAINDER16_END_LOOP);
+
+    bind(AES192_REMAINDER16);
+    roundEnc(xmm30, 3);
+    ev_load_key(xmm18, key, 11 * 16, xmm31);
+    roundEnc(xmm18, 3);
+    ev_load_key(xmm5, key, 12 * 16, xmm31);
+
+    cmpl(rounds, 60);
+    jcc(Assembler::aboveEqual, AES256_REMAINDER16);
+    lastroundEnc(xmm5, 3);
+    jmp(REMAINDER16_END_LOOP);
+    bind(AES256_REMAINDER16);
+    roundEnc(xmm5, 3);
+    ev_load_key(xmm6, key, 13 * 16, xmm31);
+    roundEnc(xmm6, 3);
+    ev_load_key(xmm7, key, 14 * 16, xmm31);
+    lastroundEnc(xmm7, 3);
+
+    // After the AES encode rounds, the encrypted counters lie in zmm0-zmm3;
+    // XOR 256 bytes of PT with the encrypted counters to produce CT.
+    bind(REMAINDER16_END_LOOP);
+    evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
+    evpxorq(xmm1, xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 1 * 64), xmm1, Assembler::AVX_512bit);
+    evpxorq(xmm2, xmm2, Address(src_addr, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 2 * 64), xmm2, Assembler::AVX_512bit);
+    evpxorq(xmm3, xmm3, Address(src_addr, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 3 * 64), xmm3, Assembler::AVX_512bit);
+    addq(pos, 256);
+
+    cmpl(len_reg, 128);
+    jcc(Assembler::aboveEqual, REMAINDER_8);
+
+    cmpl(len_reg, 64);
+    jcc(Assembler::aboveEqual, REMAINDER_4);
+    // load mask for incrementing the counter value by 1
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);//Linc0 + 16(rip)
+    jmp(REMAINDER_LOOP);
+
+    // Each ZMM register can be used to encode 64 bytes of data, so we have 2 ZMM registers to encode 128 bytes of data
+    bind(REMAINDER_8);
+    subq(len_reg, 128);
+    // As we process 8 blocks at a time, load mask for incrementing the counter value by 8
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 192), Assembler::AVX_512bit, r15);//Linc8(rip)
+    // shuffle counters and xor with roundkey1
+    vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
+    vpshufb(xmm1, xmm9, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm1, xmm1, xmm20, Assembler::AVX_512bit);
+    // increment counter by 8
+    vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
+    // AES encode
+    roundEnc(xmm21, 1);
+    roundEnc(xmm22, 1);
+    roundEnc(xmm23, 1);
+    roundEnc(xmm24, 1);
+    roundEnc(xmm25, 1);
+    roundEnc(xmm26, 1);
+    roundEnc(xmm27, 1);
+    roundEnc(xmm28, 1);
+    roundEnc(xmm29, 1);
+
+    cmpl(rounds, 52);
+    jcc(Assembler::aboveEqual, AES192_REMAINDER8);
+    lastroundEnc(xmm30, 1);
+    jmp(REMAINDER8_END_LOOP);
+
+    bind(AES192_REMAINDER8);
+    roundEnc(xmm30, 1);
+    ev_load_key(xmm18, key, 11 * 16, xmm31);
+    roundEnc(xmm18, 1);
+    ev_load_key(xmm5, key, 12 * 16, xmm31);
+    cmpl(rounds, 60);
+    jcc(Assembler::aboveEqual, AES256_REMAINDER8);
+    lastroundEnc(xmm5, 1);
+    jmp(REMAINDER8_END_LOOP);
+
+    bind(AES256_REMAINDER8);
+    roundEnc(xmm5, 1);
+    ev_load_key(xmm6, key, 13 * 16, xmm31);
+    roundEnc(xmm6, 1);
+    ev_load_key(xmm7, key, 14 * 16, xmm31);
+    lastroundEnc(xmm7, 1);
+
+    bind(REMAINDER8_END_LOOP);
+    // After AES encode rounds, the encrypted block cipher lies in zmm0-zmm1
+    // XOR PT with the encrypted counter and store as CT
+    evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 0 * 64), xmm0, Assembler::AVX_512bit);
+    evpxorq(xmm1, xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 1 * 64), xmm1, Assembler::AVX_512bit);
+    addq(pos, 128);
+
+    cmpl(len_reg, 64);
+    jcc(Assembler::aboveEqual, REMAINDER_4);
+    // load mask for incrementing the counter value by 1
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);//Linc0 + 16(rip)
+    jmp(REMAINDER_LOOP);
+
+    // Each ZMM register encodes 64 bytes of data, so 1 ZMM register is used in this block of code
+    bind(REMAINDER_4);
+    subq(len_reg, 64);
+    // As we process 4 blocks at a time, load mask for incrementing the counter value by 4
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
+    // XOR counter with first roundkey
+    vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
+    evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
+    // Increment counter
+    vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm21, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm22, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm23, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm24, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm25, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm26, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm27, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm28, Assembler::AVX_512bit);
+    vaesenc(xmm0, xmm0, xmm29, Assembler::AVX_512bit);
+    cmpl(rounds, 52);
+    jcc(Assembler::aboveEqual, AES192_REMAINDER4);
+    vaesenclast(xmm0, xmm0, xmm30, Assembler::AVX_512bit);
+    jmp(END_REMAINDER4);
+
+    bind(AES192_REMAINDER4);
+    vaesenc(xmm0, xmm0, xmm30, Assembler::AVX_512bit);
+    ev_load_key(xmm18, key, 11 * 16, xmm31);
+    vaesenc(xmm0, xmm0, xmm18, Assembler::AVX_512bit);
+    ev_load_key(xmm5, key, 12 * 16, xmm31);
+
+    cmpl(rounds, 60);
+    jcc(Assembler::aboveEqual, AES256_REMAINDER4);
+    vaesenclast(xmm0, xmm0, xmm5, Assembler::AVX_512bit);
+    jmp(END_REMAINDER4);
+
+    bind(AES256_REMAINDER4);
+    vaesenc(xmm0, xmm0, xmm5, Assembler::AVX_512bit);
+    ev_load_key(xmm6, key, 13 * 16, xmm31);
+    vaesenc(xmm0, xmm0, xmm6, Assembler::AVX_512bit);
+    ev_load_key(xmm7, key, 14 * 16, xmm31);
+    vaesenclast(xmm0, xmm0, xmm7, Assembler::AVX_512bit);
+    // After AES encode rounds, the encrypted block cipher lies in zmm0.
+    // XOR encrypted block cipher with PT and store 64 bytes of ciphertext
+    bind(END_REMAINDER4);
+    evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
+    addq(pos, 64);
+    // load mask for incrementing the counter value by 1
+    evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);//Linc0 + 16(rip)
+
+    // For a single block, the AES rounds start here.
+    bind(REMAINDER_LOOP);
+    cmpl(len_reg, 0);
+    jcc(Assembler::belowEqual, END);
+    // XOR counter with first roundkey
+    vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_128bit);
+    evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm21, Assembler::AVX_128bit);
+    // Increment counter by 1
+    vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm22, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm23, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm24, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm25, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm26, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm27, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm28, Assembler::AVX_128bit);
+    vaesenc(xmm0, xmm0, xmm29, Assembler::AVX_128bit);
+
+    cmpl(rounds, 52);
+    jcc(Assembler::aboveEqual, AES192_REMAINDER);
+    vaesenclast(xmm0, xmm0, xmm30, Assembler::AVX_128bit);
+    jmp(END_REMAINDER_LOOP);
+
+    bind(AES192_REMAINDER);
+    vaesenc(xmm0, xmm0, xmm30, Assembler::AVX_128bit);
+    ev_load_key(xmm18, key, 11 * 16, xmm31);
+    vaesenc(xmm0, xmm0, xmm18, Assembler::AVX_128bit);
+    ev_load_key(xmm5, key, 12 * 16, xmm31);
+    cmpl(rounds, 60);
+    jcc(Assembler::aboveEqual, AES256_REMAINDER);
+    vaesenclast(xmm0, xmm0, xmm5, Assembler::AVX_128bit);
+    jmp(END_REMAINDER_LOOP);
+
+    bind(AES256_REMAINDER);
+    vaesenc(xmm0, xmm0, xmm5, Assembler::AVX_128bit);
+    ev_load_key(xmm6, key, 13 * 16, xmm31);
+    vaesenc(xmm0, xmm0, xmm6, Assembler::AVX_128bit);
+    ev_load_key(xmm7, key, 14 * 16, xmm31);
+    vaesenclast(xmm0, xmm0, xmm7, Assembler::AVX_128bit);
+
+    bind(END_REMAINDER_LOOP);
+    // If the length register is less than the block size (16), store only
+    // that many bytes of the CT to the destination; extracting the exact
+    // number of bytes is handled by EXTRACT_TAILBYTES
+    cmpl(len_reg, 16);
+    jcc(Assembler::less, EXTRACT_TAILBYTES);
+    subl(len_reg, 16);
+    // After AES encode rounds, the encrypted block cipher lies in xmm0.
+    // If the length register is equal to 16 bytes, store CT in dest after XOR operation.
+    evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0), Assembler::AVX_128bit);
+    evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_128bit);
+    addl(pos, 16);
+
+    jmp(REMAINDER_LOOP);
+
+    bind(EXTRACT_TAILBYTES);
+    // Save encrypted counter value in xmm0 for next invocation, before XOR operation
+    movdqu(Address(saved_encCounter_start, 0), xmm0);
+    // XOR encrypted block cipher in xmm0 with PT to produce CT
+    evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0), Assembler::AVX_128bit);
+    // extract up to 15 bytes of CT from xmm0 as specified by the length register
+    testptr(len_reg, 8);
+    jcc(Assembler::zero, EXTRACT_TAIL_4BYTES);
+    pextrq(Address(dest_addr, pos), xmm0, 0);
+    psrldq(xmm0, 8);
+    addl(pos, 8);
+    bind(EXTRACT_TAIL_4BYTES);
+    testptr(len_reg, 4);
+    jcc(Assembler::zero, EXTRACT_TAIL_2BYTES);
+    pextrd(Address(dest_addr, pos), xmm0, 0);
+    psrldq(xmm0, 4);
+    addq(pos, 4);
+    bind(EXTRACT_TAIL_2BYTES);
+    testptr(len_reg, 2);
+    jcc(Assembler::zero, EXTRACT_TAIL_1BYTE);
+    pextrw(Address(dest_addr, pos), xmm0, 0);
+    psrldq(xmm0, 2);
+    addl(pos, 2);
+    bind(EXTRACT_TAIL_1BYTE);
+    testptr(len_reg, 1);
+    jcc(Assembler::zero, END);
+    pextrb(Address(dest_addr, pos), xmm0, 0);
+    addl(pos, 1);
+
+    bind(END);
+    // If there are no tail bytes, skip straight to storing the counter value;
+    // otherwise record the tail length in 'used' first
+    cmpl(len_reg, 0);
+    jcc(Assembler::equal, STORE_CTR);
+    movl(Address(used_addr, 0), len_reg);
+
+    bind(STORE_CTR);
+    // shuffle updated counter and store it
+    vpshufb(xmm8, xmm8, xmm16, Assembler::AVX_128bit);
+    movdqu(Address(counter, 0), xmm8);
+    // Zero out counter and key registers
+    evpxorq(xmm8, xmm8, xmm8, Assembler::AVX_512bit);
+    evpxorq(xmm20, xmm20, xmm20, Assembler::AVX_512bit);
+    evpxorq(xmm21, xmm21, xmm21, Assembler::AVX_512bit);
+    evpxorq(xmm22, xmm22, xmm22, Assembler::AVX_512bit);
+    evpxorq(xmm23, xmm23, xmm23, Assembler::AVX_512bit);
+    evpxorq(xmm24, xmm24, xmm24, Assembler::AVX_512bit);
+    evpxorq(xmm25, xmm25, xmm25, Assembler::AVX_512bit);
+    evpxorq(xmm26, xmm26, xmm26, Assembler::AVX_512bit);
+    evpxorq(xmm27, xmm27, xmm27, Assembler::AVX_512bit);
+    evpxorq(xmm28, xmm28, xmm28, Assembler::AVX_512bit);
+    evpxorq(xmm29, xmm29, xmm29, Assembler::AVX_512bit);
+    evpxorq(xmm30, xmm30, xmm30, Assembler::AVX_512bit);
+    cmpl(rounds, 44);
+    jcc(Assembler::belowEqual, EXIT);
+    evpxorq(xmm18, xmm18, xmm18, Assembler::AVX_512bit);
+    evpxorq(xmm5, xmm5, xmm5, Assembler::AVX_512bit);
+    cmpl(rounds, 52);
+    jcc(Assembler::belowEqual, EXIT);
+    evpxorq(xmm6, xmm6, xmm6, Assembler::AVX_512bit);
+    evpxorq(xmm7, xmm7, xmm7, Assembler::AVX_512bit);
+    bind(EXIT);
+}
+
 #endif // _LP64
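
For orientation, the remainder handling above is the standard CTR recipe applied at three
vector widths (16/8/4 blocks) plus a scalar tail. A minimal C++ sketch of the same flow,
assuming a hypothetical one-block cipher standing in for the zmm round sequence (the stub's
vpaddq actually carries only within the low 64 bits of each counter lane, which the byte-wise
increment below glosses over):

    #include <cstdint>
    #include <cstddef>

    // Hypothetical one-block cipher standing in for the roundEnc/lastroundEnc
    // sequence performed in the zmm registers above.
    using BlockCipher = void (*)(const uint8_t in[16], uint8_t out[16], const void* key);

    // Scalar sketch: encrypt the big-endian counter, XOR with PT, increment;
    // on a sub-16-byte tail, save the encrypted counter (EXTRACT_TAILBYTES)
    // and emit only 'len' ciphertext bytes, recording the tail length in 'used'.
    static void ctr_crypt_sketch(const uint8_t* src, uint8_t* dst, size_t len,
                                 const void* key, uint8_t counter[16],
                                 uint8_t saved_enc_counter[16], uint32_t* used,
                                 BlockCipher encrypt) {
      uint8_t keystream[16];
      while (len >= 16) {
        encrypt(counter, keystream, key);
        for (int i = 0; i < 16; i++) dst[i] = src[i] ^ keystream[i];
        for (int i = 15; i >= 0 && ++counter[i] == 0; i--) {}  // counter += 1
        src += 16; dst += 16; len -= 16;
      }
      if (len > 0) {
        encrypt(counter, saved_enc_counter, key);  // kept for the next call
        for (size_t i = 0; i < len; i++) dst[i] = src[i] ^ saved_enc_counter[i];
        for (int i = 15; i >= 0 && ++counter[i] == 0; i--) {}
        *used = (uint32_t)len;
      }
    }
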
--- a/src/hotspot/cpu/x86/nativeInst_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -355,60 +355,7 @@
   return off;
 }
 
-address NativeMovRegMem::instruction_address() const {
-  return addr_at(instruction_start());
-}
-
-address NativeMovRegMem::next_instruction_address() const {
-  address ret = instruction_address() + instruction_size;
-  u_char instr_0 =  *(u_char*) instruction_address();
-  switch (instr_0) {
-  case instruction_operandsize_prefix:
-
-    fatal("should have skipped instruction_operandsize_prefix");
-    break;
-
-  case instruction_extended_prefix:
-    fatal("should have skipped instruction_extended_prefix");
-    break;
-
-  case instruction_code_mem2reg_movslq: // 0x63
-  case instruction_code_mem2reg_movzxb: // 0xB6
-  case instruction_code_mem2reg_movsxb: // 0xBE
-  case instruction_code_mem2reg_movzxw: // 0xB7
-  case instruction_code_mem2reg_movsxw: // 0xBF
-  case instruction_code_reg2mem:        // 0x89 (q/l)
-  case instruction_code_mem2reg:        // 0x8B (q/l)
-  case instruction_code_reg2memb:       // 0x88
-  case instruction_code_mem2regb:       // 0x8a
-
-  case instruction_code_lea:            // 0x8d
-
-  case instruction_code_float_s:        // 0xd9 fld_s a
-  case instruction_code_float_d:        // 0xdd fld_d a
-
-  case instruction_code_xmm_load:       // 0x10
-  case instruction_code_xmm_store:      // 0x11
-  case instruction_code_xmm_lpd:        // 0x12
-    {
-      // If there is an SIB then instruction is longer than expected
-      u_char mod_rm = *(u_char*)(instruction_address() + 1);
-      if ((mod_rm & 7) == 0x4) {
-        ret++;
-      }
-    }
-  case instruction_code_xor:
-    fatal("should have skipped xor lead in");
-    break;
-
-  default:
-    fatal("not a NativeMovRegMem");
-  }
-  return ret;
-
-}
-
-int NativeMovRegMem::offset() const{
+int NativeMovRegMem::patch_offset() const {
   int off = data_offset + instruction_start();
   u_char mod_rm = *(u_char*)(instruction_address() + 1);
   // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
@@ -417,19 +364,7 @@
   if ((mod_rm & 7) == 0x4) {
     off++;
   }
-  return int_at(off);
-}
-
-void NativeMovRegMem::set_offset(int x) {
-  int off = data_offset + instruction_start();
-  u_char mod_rm = *(u_char*)(instruction_address() + 1);
-  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
-  // the encoding to use an SIB byte. Which will have the nnnn
-  // field off by one byte
-  if ((mod_rm & 7) == 0x4) {
-    off++;
-  }
-  set_int_at(off, x);
+  return off;
 }
 
 void NativeMovRegMem::verify() {
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -361,7 +361,6 @@
     instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,
     instruction_EVEX_prefix_4bytes      = Assembler::EVEX_4bytes,
 
-    instruction_size                    = 4,
     instruction_offset                  = 0,
     data_offset                         = 2,
     next_instruction_offset             = 4
@@ -370,15 +369,26 @@
   // helper
   int instruction_start() const;
 
-  address instruction_address() const;
+  address instruction_address() const {
+    return addr_at(instruction_start());
+  }
 
-  address next_instruction_address() const;
+  int num_bytes_to_end_of_patch() const {
+    return patch_offset() + sizeof(jint);
+  }
 
-  int   offset() const;
+  int offset() const {
+    return int_at(patch_offset());
+  }
 
-  void  set_offset(int x);
+  void set_offset(int x) {
+    set_int_at(patch_offset(), x);
+  }
 
-  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }
+  void add_offset_in_bytes(int add_offset) {
+    int patch_off = patch_offset();
+    set_int_at(patch_off, int_at(patch_off) + add_offset);
+  }
 
   void verify();
   void print ();
@@ -387,6 +397,7 @@
   static void test() {}
 
  private:
+  int patch_offset() const;
   inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
 };
 
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -38,6 +38,7 @@
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
+#include "runtime/vm_version.hpp"
 #include "utilities/align.hpp"
 #include "vmreg_x86.inline.hpp"
 #ifdef COMPILER1
@@ -46,7 +47,6 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
-#include "vm_version_x86.hpp"
 
 #define __ masm->
 
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -45,9 +45,9 @@
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
+#include "runtime/vm_version.hpp"
 #include "utilities/align.hpp"
 #include "utilities/formatBuffer.hpp"
-#include "vm_version_x86.hpp"
 #include "vmreg_x86.inline.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -3982,6 +3982,123 @@
     return start;
   }
 
+  // These masks are used for byte-swapping and incrementing the counter value (lbswapmask, linc0, linc4, etc.)
+  address counter_mask_addr() {
+    __ align(64);
+    StubCodeMark mark(this, "StubRoutines", "counter_mask_addr");
+    address start = __ pc();
+    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);//lbswapmask
+    __ emit_data64(0x0001020304050607, relocInfo::none);
+    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
+    __ emit_data64(0x0001020304050607, relocInfo::none);
+    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
+    __ emit_data64(0x0001020304050607, relocInfo::none);
+    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
+    __ emit_data64(0x0001020304050607, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);//linc0 = counter_mask_addr() + 64
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000001, relocInfo::none);//counter_mask_addr() + 80
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000002, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000003, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000004, relocInfo::none);//linc4 = counter_mask_addr() + 128
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000004, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000004, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000004, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000008, relocInfo::none);//linc8 = counter_mask_addr() + 192
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000008, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000008, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000008, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000020, relocInfo::none);//linc32 = counter_mask_addr() + 256
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000020, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000020, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000020, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000010, relocInfo::none);//linc16 = counter_mask_addr() + 320
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000010, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000010, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    __ emit_data64(0x0000000000000010, relocInfo::none);
+    __ emit_data64(0x0000000000000000, relocInfo::none);
+    return start;
+  }
+
+  // Vector AES Counter (CTR) mode implementation
+  address generate_counterMode_VectorAESCrypt() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
+    address start = __ pc();
+    const Register from = c_rarg0; // source array address
+    const Register to = c_rarg1; // destination array address
+    const Register key = c_rarg2; // key array address r8
+    const Register counter = c_rarg3; // counter byte array initialized from counter array address
+    // and updated with the incremented counter in the end
+#ifndef _WIN64
+    const Register len_reg = c_rarg4;
+    const Register saved_encCounter_start = c_rarg5;
+    const Register used_addr = r10;
+    const Address  used_mem(rbp, 2 * wordSize);
+    const Register used = r11;
+#else
+    const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
+    const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
+    const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64
+    const Register len_reg = r10; // pick the first volatile windows register
+    const Register saved_encCounter_start = r11;
+    const Register used_addr = r13;
+    const Register used = r14;
+#endif
+    __ enter();
+    // Save state before entering routine
+    __ push(r12);
+    __ push(r13);
+    __ push(r14);
+    __ push(r15);
+#ifdef _WIN64
+    // On Win64, fill len_reg from its stack position
+    __ movl(len_reg, len_mem);
+    __ movptr(saved_encCounter_start, saved_encCounter_mem);
+    __ movptr(used_addr, used_mem);
+    __ movl(used, Address(used_addr, 0));
+#else
+    __ push(len_reg); // Save
+    __ movptr(used_addr, used_mem);
+    __ movl(used, Address(used_addr, 0));
+#endif
+    __ push(rbx);
+    __ aesctr_encrypt(from, to, key, counter, len_reg, used, used_addr, saved_encCounter_start);
+    // Restore state before leaving routine
+    __ pop(rbx);
+#ifdef _WIN64
+    __ movl(rax, len_mem); // return length
+#else
+    __ pop(rax); // return length
+#endif
+    __ pop(r15);
+    __ pop(r14);
+    __ pop(r13);
+    __ pop(r12);
+
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+    return start;
+  }
+
   // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
   // to hide instruction latency
   //
@@ -6111,9 +6228,14 @@
         StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
       }
     }
-    if (UseAESCTRIntrinsics){
-      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
-      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
+    if (UseAESCTRIntrinsics) {
+      if (VM_Version::supports_vaes() && VM_Version::supports_avx512bw() && VM_Version::supports_avx512vl()) {
+        StubRoutines::x86::_counter_mask_addr = counter_mask_addr();
+        StubRoutines::_counterMode_AESCrypt = generate_counterMode_VectorAESCrypt();
+      } else {
+        StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
+        StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
+      }
     }
 
     if (UseSHA1Intrinsics) {
--- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -62,7 +62,7 @@
 address StubRoutines::x86::_left_shift_mask = NULL;
 address StubRoutines::x86::_and_mask = NULL;
 address StubRoutines::x86::_url_charset = NULL;
-
+address StubRoutines::x86::_counter_mask_addr = NULL;
 #endif
 address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
 
--- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -154,6 +154,7 @@
   static address _k512_W_addr;
   // byte flip mask for sha512
   static address _pshuffle_byte_flip_mask_addr_sha512;
+  static address _counter_mask_addr;
   // Masks for base64
   static address _base64_charset;
   static address _bswap_mask;
@@ -258,6 +259,7 @@
   static address base64_right_shift_mask_addr() { return _right_shift_mask; }
   static address base64_left_shift_mask_addr() { return _left_shift_mask; }
   static address base64_and_mask_addr() { return _and_mask; }
+  static address counter_mask_addr() { return _counter_mask_addr; }
 #endif
   static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
   static void generate_CRC32C_table(bool is_pclmulqdq_supported);
--- a/src/hotspot/cpu/x86/vm_version_ext_x86.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/vm_version_ext_x86.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_X86_VM_VERSION_EXT_X86_HPP
 #define CPU_X86_VM_VERSION_EXT_X86_HPP
 
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_x86.hpp"
 
 class VM_Version_Ext : public VM_Version {
 
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -32,8 +32,8 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
+#include "runtime/vm_version.hpp"
 #include "utilities/virtualizationSupport.hpp"
-#include "vm_version_x86.hpp"
 
 #include OS_HEADER_INLINE(os)
 
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,8 +26,8 @@
 #define CPU_X86_VM_VERSION_X86_HPP
 
 #include "memory/universe.hpp"
+#include "runtime/abstract_vm_version.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/vm_version.hpp"
 
 class VM_Version : public Abstract_VM_Version {
   friend class VMStructs;
--- a/src/hotspot/cpu/x86/x86_64.ad	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/x86/x86_64.ad	Wed Nov 20 10:52:28 2019 +0530
@@ -3116,6 +3116,26 @@
   interface(CONST_INTER);
 %}
 
+operand immL_Pow2()
+%{
+  predicate(is_power_of_2_long(n->get_long()));
+  match(ConL);
+
+  op_cost(15);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immL_NotPow2()
+%{
+  predicate(is_power_of_2_long(~n->get_long()));
+  match(ConL);
+
+  op_cost(15);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // Long Immediate zero
 operand immL0()
 %{
@@ -9841,6 +9861,23 @@
   ins_pipe(ialu_mem_imm);
 %}
 
+instruct btrL_mem_imm(memory dst, immL_NotPow2 con, rFlagsReg cr)
+%{
+  // con must be a pure 64-bit immediate whose complement is a power of 2;
+  // for 8/32-bit values plain AND/OR encodings work well enough.
+  predicate(log2_long(~n->in(3)->in(2)->get_long()) > 30);
+
+  match(Set dst (StoreL dst (AndL (LoadL dst) con)));
+  effect(KILL cr);
+
+  ins_cost(125);
+  format %{ "btrq    $dst, log2(not($con))\t# long" %}
+  ins_encode %{
+    __ btrq($dst$$Address, log2_long(~$con$$constant));
+  %}
+  ins_pipe(ialu_mem_imm);
+%}
+
 // BMI1 instructions
 instruct andnL_rReg_rReg_mem(rRegL dst, rRegL src1, memory src2, immL_M1 minus_1, rFlagsReg cr) %{
   match(Set dst (AndL (XorL src1 minus_1) (LoadL src2)));
@@ -10034,6 +10071,23 @@
   ins_pipe(ialu_mem_imm);
 %}
 
+instruct btsL_mem_imm(memory dst, immL_Pow2 con, rFlagsReg cr)
+%{
+  // con must be a pure 64-bit power-of-2 immediate;
+  // for 8/32-bit values plain AND/OR encodings work well enough.
+  predicate(log2_long(n->in(3)->in(2)->get_long()) > 31);
+
+  match(Set dst (StoreL dst (OrL (LoadL dst) con)));
+  effect(KILL cr);
+
+  ins_cost(125);
+  format %{ "btsq    $dst, log2($con)\t# long" %}
+  ins_encode %{
+    __ btsq($dst$$Address, log2_long($con$$constant));
+  %}
+  ins_pipe(ialu_mem_imm);
+%}
+
 // Xor Instructions
 // Xor Register with Register
 instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
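
The two patterns added above replace a load/8-byte-immediate AND-or-OR/store sequence on a
long in memory with a single bit-test instruction. In C++ terms (sketch only; immediates
whose bit index falls below the predicate thresholds keep using the plain AND/OR forms):

    #include <cstdint>
    #include <cstdio>

    // btrq [mem], k  <=>  x &= ~(1ULL << k)   (immL_NotPow2 pattern)
    // btsq [mem], k  <=>  x |=  (1ULL << k)   (immL_Pow2 pattern)
    static inline void clear_bit(uint64_t& x, unsigned k) { x &= ~(1ULL << k); }
    static inline void set_bit(uint64_t& x, unsigned k)   { x |=  (1ULL << k); }

    int main() {
      uint64_t v = 0;
      set_bit(v, 40);    // would match btsL_mem_imm: log2(con) = 40 > 31
      clear_bit(v, 40);  // would match btrL_mem_imm: log2(not(con)) = 40 > 30
      printf("%llu\n", (unsigned long long)v);  // 0
      return 0;
    }
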
--- a/src/hotspot/cpu/zero/globals_zero.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/zero/globals_zero.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -66,9 +66,6 @@
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 0);
 
 define_pd_global(bool, PreserveFramePointer, false);
--- a/src/hotspot/cpu/zero/register_zero.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/zero/register_zero.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -27,7 +27,7 @@
 #define CPU_ZERO_REGISTER_ZERO_HPP
 
 #include "asm/register.hpp"
-#include "vm_version_zero.hpp"
+#include "runtime/vm_version.hpp"
 
 class VMRegImpl;
 typedef VMRegImpl* VMReg;
--- a/src/hotspot/cpu/zero/vm_version_ext_zero.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/zero/vm_version_ext_zero.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -25,8 +25,8 @@
 #ifndef CPU_ZERO_VM_VERSION_EXT_ZERO_HPP
 #define CPU_ZERO_VM_VERSION_EXT_ZERO_HPP
 
+#include "runtime/vm_version.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_zero.hpp"
 
 class VM_Version_Ext : public VM_Version {
  private:
--- a/src/hotspot/cpu/zero/vm_version_zero.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/zero/vm_version_zero.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,7 +28,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
 #include "runtime/stubCodeGenerator.hpp"
-#include "vm_version_zero.hpp"
+#include "runtime/vm_version.hpp"
 
 
 void VM_Version::initialize() {
--- a/src/hotspot/cpu/zero/vm_version_zero.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/cpu/zero/vm_version_zero.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,8 +26,8 @@
 #ifndef CPU_ZERO_VM_VERSION_ZERO_HPP
 #define CPU_ZERO_VM_VERSION_ZERO_HPP
 
+#include "runtime/abstract_vm_version.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/vm_version.hpp"
 
 class VM_Version : public Abstract_VM_Version {
  public:
--- a/src/hotspot/os/aix/os_aix.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/aix/os_aix.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2341,6 +2341,10 @@
   return 0;
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  return 0;
+}
+
 bool os::get_page_info(char *start, page_info* info) {
   return false;
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zBackingFile_bsd.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBackingFile_bsd.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+static int vm_flags_superpage() {
+  if (!ZLargePages::is_explicit()) {
+    return 0;
+  }
+
+  const int page_size_in_megabytes = ZGranuleSize >> 20;
+  return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
+}
+
+static ZErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
+  mach_vm_address_t remap_addr = to_addr;
+  vm_prot_t remap_cur_prot;
+  vm_prot_t remap_max_prot;
+
+  // Remap memory to an additional location
+  const kern_return_t res = mach_vm_remap(mach_task_self(),
+                                          &remap_addr,
+                                          size,
+                                          0 /* mask */,
+                                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
+                                          mach_task_self(),
+                                          from_addr,
+                                          FALSE /* copy */,
+                                          &remap_cur_prot,
+                                          &remap_max_prot,
+                                          VM_INHERIT_COPY);
+
+  return (res == KERN_SUCCESS) ? ZErrno(0) : ZErrno(EINVAL);
+}
+
+ZBackingFile::ZBackingFile() :
+    _base(0),
+    _size(0),
+    _initialized(false) {
+
+  // Reserve address space for virtual backing file
+  _base = (uintptr_t)os::reserve_memory(MaxHeapSize);
+  if (_base == 0) {
+    // Failed
+    log_error(gc)("Failed to reserve address space for virtual backing file");
+    return;
+  }
+
+  // Successfully initialized
+  _initialized = true;
+}
+
+bool ZBackingFile::is_initialized() const {
+  return _initialized;
+}
+
+size_t ZBackingFile::size() const {
+  return _size;
+}
+
+bool ZBackingFile::commit_inner(size_t offset, size_t length) {
+  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
+  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
+
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const uintptr_t addr = _base + offset;
+  const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    log_error(gc)("Failed to commit memory (%s)", err.to_string());
+    return false;
+  }
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Record new virtual file size
+    _size = end;
+  }
+
+  // Success
+  return true;
+}
+
+size_t ZBackingFile::commit(size_t offset, size_t length) {
+  // Try to commit the whole region
+  if (commit_inner(offset, length)) {
+    // Success
+    return length;
+  }
+
+  // Failed, try to commit as much as possible
+  size_t start = offset;
+  size_t end = offset + length;
+
+  for (;;) {
+    length = align_down((end - start) / 2, ZGranuleSize);
+    if (length == 0) {
+      // Done, don't commit more
+      return start - offset;
+    }
+
+    if (commit_inner(start, length)) {
+      // Success, try to commit more
+      start += length;
+    } else {
+      // Failed, try to commit less
+      end -= length;
+    }
+  }
+}
+
+size_t ZBackingFile::uncommit(size_t offset, size_t length) {
+  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
+  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
+
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const uintptr_t start = _base + offset;
+  const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
+    return 0;
+  }
+
+  return length;
+}
+
+void ZBackingFile::map(uintptr_t addr, size_t size, uintptr_t offset) const {
+  const ZErrno err = mremap(_base + offset, addr, size);
+  if (err) {
+    fatal("Failed to remap memory (%s)", err.to_string());
+  }
+}
+
+void ZBackingFile::unmap(uintptr_t addr, size_t size) const {
+  // Note that we must keep the address space reservation intact and just detach
+  // the backing memory. For this reason we map a new anonymous, non-accessible
+  // and non-reserved page over the mapping instead of actually unmapping.
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    fatal("Failed to map memory (%s)", err.to_string());
+  }
+}
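
The commit() fallback above is a halving search over the requested range. A standalone
sketch of just that loop, with try_commit standing in for commit_inner() and kGranule
assumed in place of ZGranuleSize:

    #include <cstddef>

    static const size_t kGranule = 2 * 1024 * 1024;  // assumed granule size

    static size_t align_down_sz(size_t v, size_t a) { return v - (v % a); }

    // Commit as much of [offset, offset+length) as possible: halve the
    // attempt (granule-aligned) on failure, advance 'start' on success.
    template <typename TryCommit>
    size_t commit_best_effort(size_t offset, size_t length, TryCommit try_commit) {
      if (try_commit(offset, length)) {
        return length;                    // whole region committed
      }
      size_t start = offset;
      size_t end = offset + length;
      for (;;) {
        length = align_down_sz((end - start) / 2, kGranule);
        if (length == 0) {
          return start - offset;          // nothing smaller left to try
        }
        if (try_commit(start, length)) {
          start += length;                // success: try to commit more
        } else {
          end -= length;                  // failure: try a smaller region
        }
      }
    }
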
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zBackingFile_bsd.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
+#define OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
+
+#include "memory/allocation.hpp"
+
+class ZPhysicalMemory;
+
+// On macOS, we use a virtual backing file. It is represented by a reserved virtual
+// address space, in which we commit physical memory using the mach_vm_map() API.
+// The multi-mapping API simply remaps these addresses using mach_vm_remap() into
+// the different heap views. This works as if there were a backing file; it's just
+// that the file is represented with memory mappings instead.
+
+class ZBackingFile {
+private:
+  uintptr_t _base;
+  size_t    _size;
+  bool      _initialized;
+
+  bool commit_inner(size_t offset, size_t length);
+
+public:
+  ZBackingFile();
+
+  bool is_initialized() const;
+
+  size_t size() const;
+
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
+
+  void map(uintptr_t addr, size_t size, uintptr_t offset) const;
+  void unmap(uintptr_t addr, size_t size) const;
+};
+
+#endif // OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zLargePages_bsd.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "runtime/globals.hpp"
+
+void ZLargePages::initialize_platform() {
+  if (UseLargePages) {
+    _state = Explicit;
+  } else {
+    _state = Disabled;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zNUMA.hpp"
+
+void ZNUMA::initialize_platform() {
+  _enabled = false;
+}
+
+uint32_t ZNUMA::count() {
+  return 1;
+}
+
+uint32_t ZNUMA::id() {
+  return 0;
+}
+
+uint32_t ZNUMA::memory_id(uintptr_t addr) {
+  // NUMA support not enabled, assume everything belongs to node zero
+  return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_bsd.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return _file.is_initialized();
+}
+
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+  // Does nothing
+}
+
+bool ZPhysicalMemoryBacking::supports_uncommit() {
+  assert(!is_init_completed(), "Invalid state");
+  assert(_file.size() >= ZGranuleSize, "Invalid size");
+
+  // Test if uncommit is supported by uncommitting and then re-committing a granule
+  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
+}
+
+size_t ZPhysicalMemoryBacking::commit(size_t size) {
+  size_t committed = 0;
+
+  // Fill holes in the backing file
+  while (committed < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - committed;
+    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
+    if (start == UINTPTR_MAX) {
+      // No holes to commit
+      break;
+    }
+
+    // Try commit hole
+    const size_t filled = _file.commit(start, allocated);
+    if (filled > 0) {
+      // Successful or partially successful
+      _committed.free(start, filled);
+      committed += filled;
+    }
+    if (filled < allocated) {
+      // Failed or partially failed
+      _uncommitted.free(start + filled, allocated - filled);
+      return committed;
+    }
+  }
+
+  // Expand backing file
+  if (committed < size) {
+    const size_t remaining = size - committed;
+    const uintptr_t start = _file.size();
+    const size_t expanded = _file.commit(start, remaining);
+    if (expanded > 0) {
+      // Successful or partially successful
+      _committed.free(start, expanded);
+      committed += expanded;
+    }
+  }
+
+  return committed;
+}
+
+size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
+  size_t uncommitted = 0;
+
+  // Punch holes in backing file
+  while (uncommitted < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - uncommitted;
+    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+
+    // Try punch hole
+    const size_t punched = _file.uncommit(start, allocated);
+    if (punched > 0) {
+      // Successful or partialy successful
+      _uncommitted.free(start, punched);
+      uncommitted += punched;
+    }
+    if (punched < allocated) {
+      // Failed or partially failed
+      _committed.free(start + punched, allocated - punched);
+      return uncommitted;
+    }
+  }
+
+  return uncommitted;
+}
+
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Invalid size");
+
+  ZPhysicalMemory pmem;
+
+  // Allocate segments
+  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
+    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
+  }
+
+  return pmem;
+}
+
+void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
+  const size_t nsegments = pmem.nsegments();
+
+  // Free segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    _committed.free(segment.start(), segment.size());
+  }
+}
+
+void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
+  const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
+  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
+  const size_t nsegments = pmem.nsegments();
+  size_t size = 0;
+
+  // Map segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    const uintptr_t segment_addr = addr + size;
+    _file.map(segment_addr, segment.size(), segment.start());
+    size += segment.size();
+  }
+
+  // Pre-touch memory
+  if (pretouch) {
+    pretouch_view(addr, size);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+  _file.unmap(addr, pmem.size());
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // From an NMT point of view we treat the first heap view (marked0) as committed
+  return ZAddress::marked0(offset);
+}
+
+void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  if (ZVerifyViews) {
+    // Map good view
+    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+  } else {
+    // Map all views
+    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  if (ZVerifyViews) {
+    // Unmap good view
+    unmap_view(pmem, ZAddress::good(offset));
+  } else {
+    // Unmap all views
+    unmap_view(pmem, ZAddress::marked0(offset));
+    unmap_view(pmem, ZAddress::marked1(offset));
+    unmap_view(pmem, ZAddress::remapped(offset));
+  }
+}
+
+void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  // Map good view
+  assert(ZVerifyViews, "Should be enabled");
+  map_view(pmem, ZAddress::good(offset), false /* pretouch */);
+}
+
+void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  // Unmap good view
+  assert(ZVerifyViews, "Should be enabled");
+  unmap_view(pmem, ZAddress::good(offset));
+}
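
The commit()/uncommit() pair above manages two ranges of the virtual backing file: holes
punched out of it (_uncommitted) are refilled before the file is grown. A toy model of
that policy only (std::map in place of ZMemoryManager, partial-failure paths omitted):

    #include <algorithm>
    #include <cstddef>
    #include <map>

    // Toy model: satisfy a commit first from previously punched holes,
    // then by extending the backing file.
    struct ToyBacking {
      std::map<size_t, size_t> holes;  // offset -> length of uncommitted holes
      size_t file_size = 0;

      size_t commit(size_t size) {
        size_t committed = 0;
        while (committed < size && !holes.empty()) {
          auto it = holes.begin();
          size_t off = it->first, len = it->second;
          size_t take = std::min(size - committed, len);
          holes.erase(it);
          if (take < len) holes[off + take] = len - take;  // keep the remainder
          committed += take;                               // hole refilled
        }
        if (committed < size) {
          file_size += size - committed;                   // expand the file
          committed = size;
        }
        return committed;
      }

      void uncommit(size_t off, size_t len) { holes[off] = len; }  // punch a hole
    };
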
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
+#define OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
+
+#include "gc/z/zBackingFile_bsd.hpp"
+#include "gc/z/zMemory.hpp"
+
+class ZPhysicalMemory;
+
+class ZPhysicalMemoryBacking {
+private:
+  ZBackingFile   _file;
+  ZMemoryManager _committed;
+  ZMemoryManager _uncommitted;
+
+  void pretouch_view(uintptr_t addr, size_t size) const;
+  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
+  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+
+public:
+  bool is_initialized() const;
+
+  void warn_commit_limits(size_t max) const;
+  bool supports_uncommit();
+
+  size_t commit(size_t size);
+  size_t uncommit(size_t size);
+
+  ZPhysicalMemory alloc(size_t size);
+  void free(const ZPhysicalMemory& pmem);
+
+  uintptr_t nmt_address(uintptr_t offset) const;
+
+  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+
+  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+};
+
+#endif // OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
--- a/src/hotspot/os/bsd/os_bsd.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2005,6 +2005,10 @@
   return 0;
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  return 0;
+}
+
 bool os::get_page_info(char *start, page_info* info) {
   return false;
 }
@@ -2845,15 +2849,11 @@
     // and if UserSignalHandler is installed all bets are off
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: libjsig is activated, all active signal checking is disabled");
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
         check_signals = false;
       }
     }
--- a/src/hotspot/os/bsd/os_perf_bsd.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/bsd/os_perf_bsd.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,7 +26,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/os.hpp"
 #include "runtime/os_perf.hpp"
-#include "vm_version_ext_x86.hpp"
+#include CPU_HEADER(vm_version_ext)
 
 #ifdef __APPLE__
   #import <libproc.h>
--- a/src/hotspot/os/linux/os_linux.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/linux/os_linux.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -3007,6 +3007,19 @@
   return 0;
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  void** pages = const_cast<void**>(&address);
+  int id = -1;
+
+  if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {
+    return -1;
+  }
+  if (id < 0) {
+    return -1;
+  }
+  return id;
+}
+
 int os::Linux::get_existing_num_nodes() {
   int node;
   int highest_node_number = Linux::numa_max_node();
@@ -3135,6 +3148,8 @@
                                           libnuma_v2_dlsym(handle, "numa_get_membind")));
       set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
                                                   libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
+      set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,
+                                         libnuma_dlsym(handle, "numa_move_pages")));
 
       if (numa_available() != -1) {
         set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
@@ -3269,6 +3284,7 @@
 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
 os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
+os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
 os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
 unsigned long* os::Linux::_numa_all_nodes;
 struct bitmask* os::Linux::_numa_all_nodes_ptr;
@@ -4789,15 +4805,11 @@
     // Log that signal checking is off only if -verbose:jni is specified.
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: libjsig is activated, all active signal checking is disabled");
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
         check_signals = false;
       }
     }
--- a/src/hotspot/os/linux/os_linux.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/linux/os_linux.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -216,6 +216,7 @@
   typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
   typedef struct bitmask* (*numa_get_membind_func_t)(void);
   typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);
+  typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags);
 
   typedef void (*numa_set_bind_policy_func_t)(int policy);
   typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
@@ -234,6 +235,7 @@
   static numa_distance_func_t _numa_distance;
   static numa_get_membind_func_t _numa_get_membind;
   static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
+  static numa_move_pages_func_t _numa_move_pages;
   static unsigned long* _numa_all_nodes;
   static struct bitmask* _numa_all_nodes_ptr;
   static struct bitmask* _numa_nodes_ptr;
@@ -253,6 +255,7 @@
   static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
   static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
   static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
+  static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
   static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
@@ -318,6 +321,9 @@
   static int numa_distance(int node1, int node2) {
     return _numa_distance != NULL ? _numa_distance(node1, node2) : -1;
   }
+  static long numa_move_pages(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags) {
+    return _numa_move_pages != NULL ? _numa_move_pages(pid, count, pages, nodes, status, flags) : -1;
+  }
   static int get_node_by_cpu(int cpu_id);
   static int get_existing_num_nodes();
   // Check if numa node is configured (non-zero memory node).
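
The new numa_get_group_id_for_address() relies on a documented quirk of move_pages(2):
with nodes == NULL nothing is moved, and status[] receives the node each page currently
resides on. A minimal standalone version calling libnuma's move_pages directly (Linux
only; link with -lnuma):

    #include <numaif.h>   // move_pages(2)
    #include <cstdio>
    #include <cstdlib>

    // Returns the NUMA node holding 'address', or -1 on failure or if the
    // page is not yet allocated (status gets a negative errno in that case).
    static int node_of(const void* address) {
      void* pages[1] = { const_cast<void*>(address) };
      int status[1] = { -1 };
      if (move_pages(0 /* self */, 1, pages, NULL, status, 0) == -1) {
        return -1;
      }
      return status[0] < 0 ? -1 : status[0];
    }

    int main() {
      int* p = (int*)malloc(sizeof(int));
      *p = 42;  // touch the page so it is actually backed
      printf("node: %d\n", node_of(p));
      free(p);
      return 0;
    }
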
--- a/src/hotspot/os/posix/os_posix.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/posix/os_posix.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2075,10 +2075,12 @@
   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
   ThreadBlockInVM tbivm(jt);
 
+  // Can't access interrupt state now that we are _thread_blocked. If we've
+  // been interrupted since we checked above then _counter will be > 0.
+
   // Don't wait if cannot get lock since interference arises from
-  // unparking. Also re-check interrupt before trying wait.
-  if (jt->is_interrupted(false) ||
-      pthread_mutex_trylock(_mutex) != 0) {
+  // unparking.
+  if (pthread_mutex_trylock(_mutex) != 0) {
     return;
   }
 
--- a/src/hotspot/os/solaris/os_solaris.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2072,7 +2072,7 @@
   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
   if (res != NULL) {
     if (UseNUMAInterleaving) {
-      numa_make_global(addr, bytes);
+        numa_make_global(addr, bytes);
     }
     return 0;
   }
@@ -2267,6 +2267,10 @@
   return ids[os::random() % r];
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  return 0;
+}
+
 // Request information about the page.
 bool os::get_page_info(char *start, page_info* info) {
   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
@@ -3684,15 +3688,11 @@
   // Log that signal checking is off only if -verbose:jni is specified.
   if (CheckJNICalls) {
     if (libjsig_is_loaded) {
-      if (PrintJNIResolving) {
-        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
-      }
+      log_debug(jni, resolve)("Info: libjsig is activated, all active signal checking is disabled");
       check_signals = false;
     }
     if (AllowUserSignalHandlers) {
-      if (PrintJNIResolving) {
-        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
-      }
+      log_debug(jni, resolve)("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
       check_signals = false;
     }
   }
@@ -4925,10 +4925,12 @@
   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
   ThreadBlockInVM tbivm(jt);
 
+  // Can't access interrupt state now that we are _thread_blocked. If we've
+  // been interrupted since we checked above then _counter will be > 0.
+
   // Don't wait if cannot get lock since interference arises from
-  // unblocking.  Also. check interrupt before trying wait
-  if (jt->is_interrupted(false) ||
-      os::Solaris::mutex_trylock(_mutex) != 0) {
+  // unblocking.
+  if (os::Solaris::mutex_trylock(_mutex) != 0) {
     return;
   }
 
--- a/src/hotspot/os/windows/os_perf_windows.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/windows/os_perf_windows.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -31,7 +31,7 @@
 #include "runtime/os_perf.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
-#include "vm_version_ext_x86.hpp"
+#include CPU_HEADER(vm_version_ext)
 #include <math.h>
 #include <psapi.h>
 #include <TlHelp32.h>
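
CPU_HEADER assembles the platform-qualified file name during preprocessing, so this include no longer hard-codes x86. Paraphrased from utilities/macros.hpp (the build system defines INCLUDE_SUFFIX_CPU, e.g. _x86):

#define PASTE_TOKENS(x, y) PASTE_TOKENS_AUX(x, y)  // indirection so arguments
#define PASTE_TOKENS_AUX(x, y) x ## y              // expand before pasting
#define STR(s) #s
#define XSTR(s) STR(s)                             // likewise for stringizing
#define CPU_HEADER_STEM(basename) PASTE_TOKENS(basename, INCLUDE_SUFFIX_CPU)
#define CPU_HEADER(basename) XSTR(CPU_HEADER_STEM(basename).hpp)

// With INCLUDE_SUFFIX_CPU defined as _x86:
//   CPU_HEADER(vm_version_ext) expands to "vm_version_ext_x86.hpp"
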
--- a/src/hotspot/os/windows/os_windows.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os/windows/os_windows.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -3447,6 +3447,10 @@
   }
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  return 0;
+}
+
 bool os::get_page_info(char *start, page_info* info) {
   return false;
 }
@@ -5688,7 +5692,7 @@
 // up the offset from FS of the thread pointer.
 void os::win32::initialize_thread_ptr_offset() {
   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
-                           NULL, NULL, NULL, NULL);
+                           NULL, methodHandle(), NULL, NULL);
 }
 
 bool os::supports_map_sync() {
--- a/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,4 +24,4 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_x86.hpp"
+#include "runtime/vm_version.hpp"
--- a/src/hotspot/os_cpu/bsd_zero/vm_version_bsd_zero.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/bsd_zero/vm_version_bsd_zero.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -25,6 +25,6 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_zero.hpp"
+#include "runtime/vm_version.hpp"
 
 // This file is intentionally empty
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,11 @@
 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
 
-#include "vm_version_aarch64.hpp"
+#include "runtime/vm_version.hpp"
 
 // Implementation of class atomic
+// Note that memory_order_conservative requires a full barrier after atomic stores.
+// See https://patchwork.kernel.org/patch/3575821/
 
 #define FULL_MEM_BARRIER  __sync_synchronize()
 #define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
@@ -52,7 +54,7 @@
                                                      T volatile* dest,
                                                      atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
-  T res = __sync_lock_test_and_set(dest, exchange_value);
+  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
   FULL_MEM_BARRIER;
   return res;
 }
@@ -70,7 +72,12 @@
                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
     return value;
   } else {
-    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+    T value = compare_value;
+    FULL_MEM_BARRIER;
+    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+    FULL_MEM_BARRIER;
+    return value;
   }
 }
 
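
Both rewrites keep AArch64's conservative mapping described in the comment above: the read-modify-write itself is at least release, and a trailing full barrier prevents later accesses from floating above it. A minimal standalone rendering of the two shapes using the GCC/Clang builtins (64-bit specialization shown for brevity):

#include <cstdint>

#define FULL_MEM_BARRIER __sync_synchronize()

inline int64_t xchg_conservative(volatile int64_t *dest, int64_t exchange_value) {
  // Release keeps prior accesses before the swap; the fence keeps later
  // accesses after it.
  int64_t res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

inline int64_t cmpxchg_conservative(volatile int64_t *dest,
                                    int64_t compare_value,
                                    int64_t exchange_value) {
  // A relaxed CAS bracketed by two full barriers yields the conservative
  // (fully fenced) flavor used for memory_order_conservative.
  int64_t expected = compare_value;
  FULL_MEM_BARRIER;
  __atomic_compare_exchange_n(dest, &expected, exchange_value, /*weak*/ false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return expected;  // the previous value, per HotSpot's cmpxchg contract
}
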
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 
 // Included in orderAccess.hpp header file.
 
-#include "vm_version_aarch64.hpp"
+#include "runtime/vm_version.hpp"
 
 // Implementation of class OrderAccess.
 
@@ -55,14 +55,14 @@
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
 {
   template <typename T>
-  T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
+  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
 };
 
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); }
+  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
 };
 
 template<size_t byte_size>
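
The const_cast works around the generic __atomic_load/__atomic_store builtins, whose pointer parameter is not cv-qualified on some GCC releases; the access itself is still a single atomic operation with the requested ordering. A minimal sketch of the acquire-load shape, under that assumption:

#include <cstdint>

template <typename T>
inline T ordered_load_acquire(const volatile T *p) {
  T data;
  // Strip cv-qualifiers to satisfy the builtin's signature; atomicity and
  // the acquire ordering come from the builtin, not from the qualifiers.
  __atomic_load(const_cast<T *>(p), &data, __ATOMIC_ACQUIRE);
  return data;
}
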
--- a/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,5 +25,5 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_aarch64.hpp"
+#include "runtime/vm_version.hpp"
 
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -26,7 +26,7 @@
 #define OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
 
 #include "runtime/os.hpp"
-#include "vm_version_arm.hpp"
+#include "runtime/vm_version.hpp"
 
 // Implementation of class atomic
 
--- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -28,7 +28,7 @@
 // Included in orderAccess.hpp header file.
 
 #include "runtime/os.hpp"
-#include "vm_version_arm.hpp"
+#include "runtime/vm_version.hpp"
 
 // Implementation of class OrderAccess.
 // - we define the high level barriers below and use the general
--- a/src/hotspot/os_cpu/linux_arm/vm_version_linux_arm_32.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_arm/vm_version_linux_arm_32.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_arm.hpp"
+#include "runtime/vm_version.hpp"
 
 # include <sys/utsname.h>
 
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_s390.hpp"
+#include "runtime/vm_version.hpp"
 
 // Note that the compare-and-swap instructions on System z perform
 // a serialization function before the storage operand is fetched
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 
 // Included in orderAccess.hpp header file.
 
-#include "vm_version_s390.hpp"
+#include "runtime/vm_version.hpp"
 
 // Implementation of class OrderAccess.
 
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -426,6 +426,7 @@
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
       }
 
+#ifdef COMPILER2
       // SIGTRAP-based implicit range check in compiled code.
       else if (sig == SIGFPE && TrapBasedRangeChecks &&
                (trap_pc != NULL) &&
@@ -435,6 +436,7 @@
         }
         stub = SharedRuntime::continuation_for_implicit_exception(thread, trap_pc, SharedRuntime::IMPLICIT_NULL);
       }
+#endif
 
       else if (sig == SIGFPE && info->si_code == FPE_INTDIV) {
         stub = SharedRuntime::continuation_for_implicit_exception(thread, trap_pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
--- a/src/hotspot/os_cpu/linux_sparc/vm_version_linux_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_sparc/vm_version_linux_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #include "logging/log.hpp"
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_sparc.hpp"
+#include "runtime/vm_version.hpp"
 
 
 #define CPUINFO_LINE_SIZE 1024
--- a/src/hotspot/os_cpu/linux_x86/vm_version_linux_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_x86/vm_version_linux_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,5 +24,5 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_x86.hpp"
+#include "runtime/vm_version.hpp"
 
--- a/src/hotspot/os_cpu/linux_zero/vm_version_linux_zero.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/linux_zero/vm_version_linux_zero.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -25,6 +25,6 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_zero.hpp"
+#include "runtime/vm_version.hpp"
 
 // This file is intentionally empty
--- a/src/hotspot/os_cpu/solaris_sparc/vm_version_solaris_sparc.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/solaris_sparc/vm_version_solaris_sparc.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_sparc.hpp"
+#include "runtime/vm_version.hpp"
 
 #include <sys/auxv.h>
 #include <sys/systeminfo.h>
--- a/src/hotspot/os_cpu/solaris_x86/vm_version_solaris_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/solaris_x86/vm_version_solaris_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,5 +24,5 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_x86.hpp"
+#include "runtime/vm_version.hpp"
 
--- a/src/hotspot/os_cpu/windows_x86/vm_version_windows_x86.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/os_cpu/windows_x86/vm_version_windows_x86.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,5 +24,5 @@
 
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
-#include "vm_version_x86.hpp"
+#include "runtime/vm_version.hpp"
 
--- a/src/hotspot/share/adlc/output_h.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/adlc/output_h.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -758,10 +758,6 @@
       fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask(uint mask1, uint mask2) : _mask((((uint64_t)mask1) << 32) | mask2) {}\n\n");
       fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask(uint64_t mask) : _mask(mask) {}\n\n");
     }
-    fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask& operator=(const Pipeline_Use_Cycle_Mask &in) {\n");
-    fprintf(fp_hpp, "    _mask = in._mask;\n");
-    fprintf(fp_hpp, "    return *this;\n");
-    fprintf(fp_hpp, "  }\n\n");
     fprintf(fp_hpp, "  bool overlaps(const Pipeline_Use_Cycle_Mask &in2) const {\n");
     fprintf(fp_hpp, "    return ((_mask & in2._mask) != 0);\n");
     fprintf(fp_hpp, "  }\n\n");
@@ -792,11 +788,6 @@
     for (l = 1; l <= masklen; l++)
       fprintf(fp_hpp, "_mask%d(mask%d)%s", l, l, l < masklen ? ", " : " {}\n\n");
 
-    fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask& operator=(const Pipeline_Use_Cycle_Mask &in) {\n");
-    for (l = 1; l <= masklen; l++)
-      fprintf(fp_hpp, "    _mask%d = in._mask%d;\n", l, l);
-    fprintf(fp_hpp, "    return *this;\n");
-    fprintf(fp_hpp, "  }\n\n");
     fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask intersect(const Pipeline_Use_Cycle_Mask &in2) {\n");
     fprintf(fp_hpp, "    Pipeline_Use_Cycle_Mask out;\n");
     for (l = 1; l <= masklen; l++)
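
Deleting the emitted operator= is a rule-of-zero cleanup: Pipeline_Use_Cycle_Mask holds nothing but plain mask words, so the implicitly-defined copy assignment already performs the identical memberwise copy (and a redundant user-defined one can provoke warnings such as -Wdeprecated-copy on newer compilers). In miniature:

#include <cstdint>

struct Mask {
  uint64_t _mask;
  // No user-defined operator= required: the implicit one copies _mask.
};

// Mask a{0x1}, b{0x2};
// a = b;  // memberwise copy, same effect as the removed hand-written version
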
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -34,6 +34,7 @@
 #include "interpreter/abstractInterpreter.hpp"
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/universe.hpp"
 #include "oops/compressedOops.hpp"
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -160,8 +160,6 @@
 }
 
 bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
-  // Make sure the method is not flushed in case of a safepoint in code below.
-  methodHandle the_method(method());
   NoSafepointVerifier nsv;
 
   {
@@ -208,10 +206,7 @@
 bool AOTCompiledMethod::make_entrant() {
   assert(!method()->is_old(), "reviving evolved method!");
 
-  // Make sure the method is not flushed in case of a safepoint in code below.
-  methodHandle the_method(method());
   NoSafepointVerifier nsv;
-
   {
     // Enter critical section.  Does not block for safepoint.
     MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
--- a/src/hotspot/share/c1/c1_Canonicalizer.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/c1/c1_Canonicalizer.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -354,25 +354,21 @@
     }
     if (t2->is_constant()) {
       if (t->tag() == intTag) {
-        int value = t->as_IntConstant()->value();
-        int shift = t2->as_IntConstant()->value() & 31;
-        jint mask = ~(~0 << (32 - shift));
-        if (shift == 0) mask = ~0;
+        jint value = t->as_IntConstant()->value();
+        jint shift = t2->as_IntConstant()->value();
         switch (x->op()) {
-          case Bytecodes::_ishl:  set_constant(value << shift); return;
-          case Bytecodes::_ishr:  set_constant(value >> shift); return;
-          case Bytecodes::_iushr: set_constant((value >> shift) & mask); return;
+          case Bytecodes::_ishl:  set_constant(java_shift_left(value, shift)); return;
+          case Bytecodes::_ishr:  set_constant(java_shift_right(value, shift)); return;
+          case Bytecodes::_iushr: set_constant(java_shift_right_unsigned(value, shift)); return;
           default:                break;
         }
       } else if (t->tag() == longTag) {
         jlong value = t->as_LongConstant()->value();
-        int shift = t2->as_IntConstant()->value() & 63;
-        jlong mask = ~(~jlong_cast(0) << (64 - shift));
-        if (shift == 0) mask = ~jlong_cast(0);
+        jint shift = t2->as_IntConstant()->value();
         switch (x->op()) {
-          case Bytecodes::_lshl:  set_constant(value << shift); return;
-          case Bytecodes::_lshr:  set_constant(value >> shift); return;
-          case Bytecodes::_lushr: set_constant((value >> shift) & mask); return;
+          case Bytecodes::_lshl:  set_constant(java_shift_left(value, shift)); return;
+          case Bytecodes::_lshr:  set_constant(java_shift_right(value, shift)); return;
+          case Bytecodes::_lushr: set_constant(java_shift_right_unsigned(value, shift)); return;
           default:                break;
         }
       }
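
The canonicalizer now defers to shared helpers instead of open-coding the mask computation, which is where the removed code spent most of its subtlety (building the mask for the unsigned case). The helpers' expected behavior follows the JLS: the shift distance is masked to 5 bits for int and 6 for long, and the unsigned variant is a logical shift. A sketch under those assumptions (the real helpers live in globalDefinitions.hpp):

#include <cstdint>

inline int32_t java_shift_left_sketch(int32_t value, int32_t shift) {
  // Shift through the unsigned type so bits may cross the sign position
  // without undefined behavior; mask the distance as the JLS requires.
  return (int32_t)((uint32_t)value << (shift & 31));
}

inline int32_t java_shift_right_sketch(int32_t value, int32_t shift) {
  return value >> (shift & 31);  // arithmetic shift, sign-extending
}

inline int32_t java_shift_right_unsigned_sketch(int32_t value, int32_t shift) {
  return (int32_t)((uint32_t)value >> (shift & 31));  // logical shift
}
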
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -409,7 +409,7 @@
     if (_id == PatchingStub::access_field_id) {
       // embed a fixed offset to handle long patches which need to be offset by a word.
       // the patching code will just add the field offset field to this offset so
-      // that we can refernce either the high or low word of a double word field.
+      // that we can reference either the high or low word of a double word field.
       int field_offset = 0;
       switch (patch_code) {
       case lir_patch_low:         field_offset = lo_word_offset_in_bytes; break;
@@ -419,6 +419,8 @@
       }
       NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
       n_move->set_offset(field_offset);
+      // Copy will never get executed, so only copy the part which is required for patching.
+      _bytes_to_copy = MAX2(n_move->num_bytes_to_end_of_patch(), (int)NativeGeneralJump::instruction_size);
     } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
       assert(_obj != noreg, "must have register object for load_klass/load_mirror");
 #ifdef ASSERT
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2590,7 +2590,7 @@
 
 #ifdef ASSERT
   for_each_phi_fun(b, phi,
-                   assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
+                   assert(phi->operand_count() != 1 || phi->subst() != phi || phi->is_illegal(), "missed trivial simplification");
   );
 
   ValueStack* state = b->state()->caller_state();
--- a/src/hotspot/share/c1/c1_Instruction.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/c1/c1_Instruction.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -29,6 +29,7 @@
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciObjArrayKlass.hpp"
 #include "ci/ciTypeArrayKlass.hpp"
+#include "utilities/bitMap.inline.hpp"
 
 
 // Implementation of Instruction
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1301,7 +1301,7 @@
   }
 
   __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), temp, info);
-  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::intConst(0));
+  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
   __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
 }
 
--- a/src/hotspot/share/c1/c1_Optimizer.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/c1/c1_Optimizer.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -366,6 +366,8 @@
           assert(sux_value == end_state->stack_at(index), "stack not equal");
         }
         for_each_local_value(sux_state, index, sux_value) {
+          Phi* sux_phi = sux_value->as_Phi();
+          if (sux_phi != NULL && sux_phi->is_illegal()) continue;
           assert(sux_value == end_state->local_at(index), "locals not equal");
         }
         assert(sux_state->caller_state() == end_state->caller_state(), "caller not equal");
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1422,7 +1422,7 @@
   assert (nm != NULL, "no more nmethod?");
   nm->make_not_entrant();
 
-  methodHandle m(nm->method());
+  methodHandle m(thread, nm->method());
   MethodData* mdo = m->method_data();
 
   if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
@@ -1443,7 +1443,7 @@
   if (TracePredicateFailedTraps) {
     stringStream ss1, ss2;
     vframeStream vfst(thread);
-    methodHandle inlinee = methodHandle(vfst.method());
+    Method* inlinee = vfst.method();
     inlinee->print_short_name(&ss1);
     m->print_short_name(&ss2);
     tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
--- a/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -151,7 +151,7 @@
 void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) {
   for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i)) {
-      bm >>= i;
+      bm.remove(i);
     }
   }
 }
@@ -1280,9 +1280,9 @@
     set_modified(var, OFFSET_ANY, 4);
     set_global_escape(var);
   }
-  _arg_local.Clear();
-  _arg_stack.Clear();
-  _arg_returned.Clear();
+  _arg_local.clear();
+  _arg_stack.clear();
+  _arg_returned.clear();
   _return_local = false;
   _return_allocated = false;
   _allocated_escapes = true;
@@ -1334,7 +1334,7 @@
 
   // Do not scan method if it has no object parameters and
   // does not return an object (_return_allocated is set in initialize()).
-  if (_arg_local.Size() == 0 && !_return_allocated) {
+  if (_arg_local.is_empty() && !_return_allocated) {
     // Clear all info since method's bytecode was not analysed and
     // set pessimistic escape information.
     clear_escape_info();
@@ -1457,10 +1457,10 @@
     , _parent(parent)
     , _level(parent == NULL ? 0 : parent->level() + 1) {
   if (!_conservative) {
-    _arg_local.Clear();
-    _arg_stack.Clear();
-    _arg_returned.Clear();
-    _dirty.Clear();
+    _arg_local.clear();
+    _arg_stack.clear();
+    _arg_returned.clear();
+    _dirty.clear();
     Arena* arena = CURRENT_ENV->arena();
     _arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint));
     Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint));
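
The VectorSet changes in this file swap the operator-based interface for named methods, so membership updates read as intent rather than bit tricks. A usage sketch restricted to the operations that appear above (VectorSet is HotSpot's libadt type; illustrative only):

// VectorSet live;
// live.remove(i);          // was: live >>= i
// live.clear();            // was: live.Clear()
// if (live.is_empty()) {   // was: live.Size() == 0
//   ...
// }
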
--- a/src/hotspot/share/ci/ciEnv.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/ciEnv.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -760,7 +760,7 @@
 
   InstanceKlass* accessor_klass = accessor->get_instanceKlass();
   Klass* holder_klass = holder->get_Klass();
-  methodHandle dest_method;
+  Method* dest_method;
   LinkInfo link_info(holder_klass, name, sig, accessor_klass, LinkInfo::needs_access_check, tag);
   switch (bc) {
   case Bytecodes::_invokestatic:
@@ -782,7 +782,7 @@
   default: ShouldNotReachHere();
   }
 
-  return dest_method();
+  return dest_method;
 }
 
 
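
Most of the compiler-interface churn from here on is a single theme: metadata handles (methodHandle, constantPoolHandle) now take the owning thread explicitly rather than implicitly calling Thread::current(), and code paths that cannot safepoint drop the handle for a raw Method*. The two shapes, sketched against HotSpot's handles API (not standalone-compilable; THREAD comes from the enclosing TRAPS/VM-entry context):

// Before: implicit conversions hid a Thread::current() call per handle.
//   methodHandle m(nm->method());
//   constantPoolHandle cpool(_method->get_Method()->constants());

// After: the thread is explicit at every construction site ...
methodHandle m(THREAD, nm->method());
constantPoolHandle cpool(THREAD, _method->get_Method()->constants());

// ... and where no safepoint can intervene, a bare pointer suffices:
Method* dest_method = LinkResolver::resolve_static_call_or_null(link_info);
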
--- a/src/hotspot/share/ci/ciExceptionHandler.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/ciExceptionHandler.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
   if (_catch_klass == NULL) {
     bool will_link;
     assert(_loading_klass->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
-    constantPoolHandle cpool(_loading_klass->get_instanceKlass()->constants());
+    constantPoolHandle cpool(THREAD, _loading_klass->get_instanceKlass()->constants());
     ciKlass* k = CURRENT_ENV->get_klass_by_index(cpool,
                                                  _catch_klass_index,
                                                  will_link,
--- a/src/hotspot/share/ci/ciField.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/ciField.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -391,7 +391,7 @@
 
   LinkInfo link_info(_holder->get_instanceKlass(),
                      _name->get_symbol(), _signature->get_symbol(),
-                     accessing_method->get_Method());
+                     methodHandle(THREAD, accessing_method->get_Method()));
   fieldDescriptor result;
   LinkResolver::resolve_field(result, link_info, bc, false, KILL_COMPILE_ON_FATAL_(false));
 
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -32,7 +32,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
--- a/src/hotspot/share/ci/ciMethod.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/ciMethod.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -72,25 +72,25 @@
   assert(h_m() != NULL, "no null method");
 
   if (LogTouchedMethods) {
-    h_m()->log_touched(Thread::current());
+    h_m->log_touched(Thread::current());
   }
   // These fields are always filled in in loaded methods.
-  _flags = ciFlags(h_m()->access_flags());
+  _flags = ciFlags(h_m->access_flags());
 
   // Easy to compute, so fill them in now.
-  _max_stack          = h_m()->max_stack();
-  _max_locals         = h_m()->max_locals();
-  _code_size          = h_m()->code_size();
-  _intrinsic_id       = h_m()->intrinsic_id();
-  _handler_count      = h_m()->exception_table_length();
-  _size_of_parameters = h_m()->size_of_parameters();
-  _uses_monitors      = h_m()->access_flags().has_monitor_bytecodes();
-  _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
-  _is_c1_compilable   = !h_m()->is_not_c1_compilable();
-  _is_c2_compilable   = !h_m()->is_not_c2_compilable();
+  _max_stack          = h_m->max_stack();
+  _max_locals         = h_m->max_locals();
+  _code_size          = h_m->code_size();
+  _intrinsic_id       = h_m->intrinsic_id();
+  _handler_count      = h_m->exception_table_length();
+  _size_of_parameters = h_m->size_of_parameters();
+  _uses_monitors      = h_m->access_flags().has_monitor_bytecodes();
+  _balanced_monitors  = !_uses_monitors || h_m->access_flags().is_monitor_matching();
+  _is_c1_compilable   = !h_m->is_not_c1_compilable();
+  _is_c2_compilable   = !h_m->is_not_c2_compilable();
   _can_be_parsed      = true;
-  _has_reserved_stack_access = h_m()->has_reserved_stack_access();
-  _is_overpass        = h_m()->is_overpass();
+  _has_reserved_stack_access = h_m->has_reserved_stack_access();
+  _is_overpass        = h_m->is_overpass();
   // Lazy fields, filled in on demand.  Require allocation.
   _code               = NULL;
   _exception_handlers = NULL;
@@ -114,8 +114,8 @@
     DEBUG_ONLY(CompilerThread::current()->check_possible_safepoint());
   }
 
-  if (h_m()->method_holder()->is_linked()) {
-    _can_be_statically_bound = h_m()->can_be_statically_bound();
+  if (h_m->method_holder()->is_linked()) {
+    _can_be_statically_bound = h_m->can_be_statically_bound();
   } else {
     // Have to use a conservative value in this case.
     _can_be_statically_bound = false;
@@ -123,25 +123,25 @@
 
   // Adjust the definition of this condition to be more useful:
   // %%% take these conditions into account in vtable generation
-  if (!_can_be_statically_bound && h_m()->is_private())
+  if (!_can_be_statically_bound && h_m->is_private())
     _can_be_statically_bound = true;
-  if (_can_be_statically_bound && h_m()->is_abstract())
+  if (_can_be_statically_bound && h_m->is_abstract())
     _can_be_statically_bound = false;
 
   // generating _signature may allow GC and therefore move m.
   // These fields are always filled in.
-  _name = env->get_symbol(h_m()->name());
-  ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
-  constantPoolHandle cpool = h_m()->constants();
+  _name = env->get_symbol(h_m->name());
+  ciSymbol* sig_symbol = env->get_symbol(h_m->signature());
+  constantPoolHandle cpool(Thread::current(), h_m->constants());
   _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
   _method_data = NULL;
-  _nmethod_age = h_m()->nmethod_age();
+  _nmethod_age = h_m->nmethod_age();
   // Take a snapshot of these values, so they will be commensurate with the MDO.
   if (ProfileInterpreter || TieredCompilation) {
-    int invcnt = h_m()->interpreter_invocation_count();
+    int invcnt = h_m->interpreter_invocation_count();
     // if the value overflowed report it as max int
     _interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ;
-    _interpreter_throwout_count   = h_m()->interpreter_throwout_count();
+    _interpreter_throwout_count   = h_m->interpreter_throwout_count();
   } else {
     _interpreter_invocation_count = 0;
     _interpreter_throwout_count = 0;
@@ -431,7 +431,7 @@
 ResourceBitMap ciMethod::live_local_oops_at_bci(int bci) {
   VM_ENTRY_MARK;
   InterpreterOopMap mask;
-  OopMapCache::compute_one_oop_map(get_Method(), bci, &mask);
+  OopMapCache::compute_one_oop_map(methodHandle(THREAD, get_Method()), bci, &mask);
   int mask_size = max_locals();
   ResourceBitMap result(mask_size);
   int i;
@@ -749,8 +749,8 @@
   {
     MutexLocker locker(Compile_lock);
     Klass* context = actual_recv->get_Klass();
-    target = Dependencies::find_unique_concrete_method(context,
-                                                       root_m->get_Method());
+    target = methodHandle(THREAD, Dependencies::find_unique_concrete_method(context,
+                                                       root_m->get_Method()));
     // %%% Should upgrade this ciMethod API to look for 1 or 2 concrete methods.
   }
 
@@ -810,7 +810,7 @@
 
    LinkInfo link_info(resolved, h_name, h_signature, caller_klass,
                       check_access ? LinkInfo::needs_access_check : LinkInfo::skip_access_check);
-   methodHandle m;
+   Method* m = NULL;
    // Only do exact lookup if receiver klass has been linked.  Otherwise,
    // the vtable has not been setup, and the LinkResolver will fail.
    if (recv->is_array_klass()
@@ -823,14 +823,14 @@
      }
    }
 
-   if (m.is_null()) {
+   if (m == NULL) {
      // Return NULL only if there was a problem with lookup (uninitialized class, etc.)
      return NULL;
    }
 
    ciMethod* result = this;
-   if (m() != get_Method()) {
-     result = CURRENT_THREAD_ENV->get_method(m());
+   if (m != get_Method()) {
+     result = CURRENT_THREAD_ENV->get_method(m);
    }
 
    // Don't return abstract methods because they aren't
@@ -1035,7 +1035,8 @@
   bool result = true;
   if (_method_data == NULL || _method_data->is_empty()) {
     GUARDED_VM_ENTRY({
-      result = ensure_method_data(get_Method());
+      methodHandle mh(Thread::current(), get_Method());
+      result = ensure_method_data(mh);
     });
   }
   return result;
@@ -1268,7 +1269,7 @@
     HandleMark hm(THREAD);
     constantPoolHandle pool (THREAD, get_Method()->constants());
     Bytecodes::Code code = (is_static ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual);
-    methodHandle spec_method = LinkResolver::resolve_method_statically(code, pool, refinfo_index, THREAD);
+    Method* spec_method = LinkResolver::resolve_method_statically(code, pool, refinfo_index, THREAD);
     if (HAS_PENDING_EXCEPTION) {
       CLEAR_PENDING_EXCEPTION;
       return false;
--- a/src/hotspot/share/ci/ciReplay.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/ciReplay.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -597,7 +597,7 @@
       nm->make_not_entrant();
     }
     replay_state = this;
-    CompileBroker::compile_method(method, entry_bci, comp_level,
+    CompileBroker::compile_method(methodHandle(THREAD, method), entry_bci, comp_level,
                                   methodHandle(), 0, CompileTask::Reason_Replay, THREAD);
     replay_state = NULL;
     reset();
@@ -634,7 +634,7 @@
       MutexLocker ml(MethodData_lock, THREAD);
       if (method->method_data() == NULL) {
         ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
-        MethodData* method_data = MethodData::allocate(loader_data, method, CHECK);
+        MethodData* method_data = MethodData::allocate(loader_data, methodHandle(THREAD, method), CHECK);
         method->set_method_data(method_data);
       }
     }
--- a/src/hotspot/share/ci/ciStreams.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/ci/ciStreams.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -186,7 +186,7 @@
 // or checkcast, get the referenced klass.
 ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   return CURRENT_ENV->get_klass_by_index(cpool, get_klass_index(), will_link, _holder);
 }
 
@@ -217,7 +217,7 @@
   int index = get_constant_raw_index();
   if (has_cache_index()) {
     VM_ENTRY_MARK;
-    constantPoolHandle cpool(_method->get_Method()->constants());
+    constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
     return cpool->object_to_cp_index(index);
   }
   return index;
@@ -236,7 +236,7 @@
     pool_index = -1;
   }
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   return CURRENT_ENV->get_constant_by_index(cpool, pool_index, cache_index, _holder);
 }
 
@@ -289,7 +289,7 @@
 // for checking linkability when retrieving the associated field.
 ciInstanceKlass* ciBytecodeStream::get_declared_field_holder() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   int holder_index = get_field_holder_index();
   bool ignore;
   return CURRENT_ENV->get_klass_by_index(cpool, holder_index, ignore, _holder)
@@ -431,7 +431,7 @@
 // constant pool cache at the current bci.
 bool ciBytecodeStream::has_appendix() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   return ConstantPool::has_appendix_at_if_loaded(cpool, get_method_index());
 }
 
@@ -442,7 +442,7 @@
 // the current bci.
 ciObject* ciBytecodeStream::get_appendix() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   oop appendix_oop = ConstantPool::appendix_at_if_loaded(cpool, get_method_index());
   return CURRENT_ENV->get_object(appendix_oop);
 }
@@ -454,7 +454,7 @@
 // pool cache at the current bci has a local signature.
 bool ciBytecodeStream::has_local_signature() {
   GUARDED_VM_ENTRY(
-    constantPoolHandle cpool(_method->get_Method()->constants());
+    constantPoolHandle cpool(Thread::current(), _method->get_Method()->constants());
     return ConstantPool::has_local_signature_at_if_loaded(cpool, get_method_index());
   )
 }
@@ -472,7 +472,7 @@
 // for checking linkability when retrieving the associated method.
 ciKlass* ciBytecodeStream::get_declared_method_holder() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   bool ignore;
   // report as MethodHandle for invokedynamic, which is syntactically classless
   if (cur_bc() == Bytecodes::_invokedynamic)
--- a/src/hotspot/share/classfile/bytecodeAssembler.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/classfile/bytecodeAssembler.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -54,7 +54,8 @@
       _orig->length() + _entries.length(), CHECK_NULL);
 
   cp->set_pool_holder(_orig->pool_holder());
-  _orig->copy_cp_to(1, _orig->length() - 1, cp, 1, CHECK_NULL);
+  constantPoolHandle cp_h(THREAD, cp);
+  _orig->copy_cp_to(1, _orig->length() - 1, cp_h, 1, CHECK_NULL);
 
   // Preserve dynamic constant information from the original pool
   if (_orig->has_dynamic_constant()) {
--- a/src/hotspot/share/classfile/classFileParser.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -47,7 +47,7 @@
 #include "memory/universe.hpp"
 #include "oops/annotations.hpp"
 #include "oops/constantPool.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -332,7 +332,7 @@
           hashValues[names_count++] = hash;
           if (names_count == SymbolTable::symbol_alloc_batch_size) {
             SymbolTable::new_symbols(_loader_data,
-                                     cp,
+                                     constantPoolHandle(THREAD, cp),
                                      names_count,
                                      names,
                                      lengths,
@@ -369,7 +369,7 @@
   // Allocate the remaining symbols
   if (names_count > 0) {
     SymbolTable::new_symbols(_loader_data,
-                             cp,
+                             constantPoolHandle(THREAD, cp),
                              names_count,
                              names,
                              lengths,
@@ -2870,7 +2870,7 @@
   }
 
   if (parsed_annotations.has_any_annotations())
-    parsed_annotations.apply_to(m);
+    parsed_annotations.apply_to(methodHandle(THREAD, m));
 
   // Copy annotations
   copy_method_annotations(m->constMethod(),
@@ -3753,7 +3753,7 @@
 #ifndef PRODUCT
 static void print_field_layout(const Symbol* name,
                                Array<u2>* fields,
-                               const constantPoolHandle& cp,
+                               ConstantPool* cp,
                                int instance_size,
                                int instance_fields_start,
                                int instance_fields_end,
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -47,38 +47,6 @@
 
 typedef enum { QUALIFIED, DISQUALIFIED } QualifiedState;
 
-// Because we use an iterative algorithm when iterating over the type
-// hierarchy, we can't use traditional scoped objects which automatically do
-// cleanup in the destructor when the scope is exited.  PseudoScope (and
-// PseudoScopeMark) provides a similar functionality, but for when you want a
-// scoped object in non-stack memory (such as in resource memory, as we do
-// here).  You've just got to remember to call 'destroy()' on the scope when
-// leaving it (and marks have to be explicitly added).
-class PseudoScopeMark : public ResourceObj {
- public:
-  virtual void destroy() = 0;
-};
-
-class PseudoScope : public ResourceObj {
- private:
-  GrowableArray<PseudoScopeMark*> _marks;
- public:
-
-  static PseudoScope* cast(void* data) {
-    return static_cast<PseudoScope*>(data);
-  }
-
-  void add_mark(PseudoScopeMark* psm) {
-   _marks.append(psm);
-  }
-
-  void destroy() {
-    for (int i = 0; i < _marks.length(); ++i) {
-      _marks.at(i)->destroy();
-    }
-  }
-};
-
 static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
   str->print("%s%s", name->as_C_string(), signature->as_C_string());
 }
@@ -108,13 +76,13 @@
  *
  * The ALGO class, must provide a visit() method, which each of which will be
  * called once for each node in the inheritance tree during the iteration.  In
- * addition, it can provide a memory block via new_node_data(InstanceKlass*),
- * which it can use for node-specific storage (and access via the
- * current_data() and data_at_depth(int) methods).
+ * addition, it can provide a memory block via new_node_data(), which it can
+ * use for node-specific storage (and access via the current_data() and
+ * data_at_depth(int) methods).
  *
  * Bare minimum needed to be an ALGO class:
  * class Algo : public HierarchyVisitor<Algo> {
- *   void* new_node_data(InstanceKlass* cls) { return NULL; }
+ *   void* new_node_data() { return NULL; }
  *   void free_node_data(void* data) { return; }
  *   bool visit() { return true; }
  * };
@@ -134,6 +102,12 @@
         : _class(cls), _super_was_visited(!visit_super),
           _interface_index(0), _algorithm_data(data) {}
 
+    void update(InstanceKlass* cls, void* data, bool visit_super) {
+      _class = cls;
+      _super_was_visited = !visit_super;
+      _interface_index = 0;
+      _algorithm_data = data;
+    }
     int number_of_interfaces() { return _class->local_interfaces()->length(); }
     int interface_index() { return _interface_index; }
     void set_super_visited() { _super_was_visited = true; }
@@ -155,19 +129,32 @@
   };
 
   bool _visited_Object;
+
   GrowableArray<Node*> _path;
+  GrowableArray<Node*> _free_nodes;
 
   Node* current_top() const { return _path.top(); }
-  bool has_more_nodes() const { return !_path.is_empty(); }
-  void push(InstanceKlass* cls, void* data) {
+  bool has_more_nodes() const { return _path.length() > 0; }
+  void push(InstanceKlass* cls, ALGO* algo) {
     assert(cls != NULL, "Requires a valid instance class");
-    Node* node = new Node(cls, data, has_super(cls));
     if (cls == SystemDictionary::Object_klass()) {
       _visited_Object = true;
     }
+    void* data = algo->new_node_data();
+    Node* node;
+    if (_free_nodes.is_empty()) { // Add a new node
+      node = new Node(cls, data, has_super(cls));
+    } else { // Reuse existing node and data
+      node = _free_nodes.pop();
+      node->update(cls, data, has_super(cls));
+    }
     _path.push(node);
   }
-  void pop() { _path.pop(); }
+  void pop() {
+    Node* node = _path.pop();
+    // Make the node available for reuse
+    _free_nodes.push(node);
+  }
 
   // Since the starting point can be an interface, we must ensure we catch
   // j.l.Object as the super once in those cases. The _visited_Object flag
@@ -183,6 +170,11 @@
 
  protected:
 
+  // Resets the visitor
+  void reset() {
+    _visited_Object = false;
+  }
+
   // Accessors available to the algorithm
   int current_depth() const { return _path.length() - 1; }
 
@@ -199,14 +191,13 @@
   void* current_data() { return data_at_depth(0); }
 
  public:
+  HierarchyVisitor() : _visited_Object(false), _path() {}
 
   void run(InstanceKlass* root) {
     ALGO* algo = static_cast<ALGO*>(this);
 
-    void* algo_data = algo->new_node_data(root);
-    push(root, algo_data);
+    push(root, algo);
     bool top_needs_visit = true;
-
     do {
       Node* top = current_top();
       if (top_needs_visit) {
@@ -232,8 +223,7 @@
           top->increment_visited_interface();
         }
         assert(next != NULL, "Otherwise we shouldn't be here");
-        algo_data = algo->new_node_data(next);
-        push(next, algo_data);
+        push(next, algo);
         top_needs_visit = true;
       }
     } while (has_more_nodes());
@@ -251,7 +241,7 @@
     return true;
   }
 
-  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void* new_node_data() { return NULL; }
   void free_node_data(void* data) { return; }
 
   PrintHierarchy(outputStream* st = tty) : _st(st) {}
@@ -270,7 +260,7 @@
   GrowableArray<ConstantPool*> _keep_alive;
 
  public:
-  KeepAliveRegistrar(Thread* thread) : _thread(thread), _keep_alive(20) {
+  KeepAliveRegistrar(Thread* thread) : _thread(thread), _keep_alive(6) {
     assert(thread == Thread::current(), "Must be current thread");
   }
 
@@ -299,7 +289,7 @@
  public:
   KeepAliveVisitor(KeepAliveRegistrar* registrar) : _registrar(registrar) {}
 
-  void* new_node_data(InstanceKlass* cls) { return NULL; }
+  void* new_node_data() { return NULL; }
   void free_node_data(void* data) { return; }
 
   bool visit() {
@@ -316,36 +306,41 @@
 // from the root of hierarchy to the method that contains an interleaving
 // erased method defined in an interface.
 
+class MethodState {
+ public:
+  Method* _method;
+  QualifiedState _state;
+
+  MethodState() : _method(NULL), _state(DISQUALIFIED) {}
+  MethodState(Method* method, QualifiedState state) : _method(method), _state(state) {}
+};
+
 class MethodFamily : public ResourceObj {
  private:
 
-  GrowableArray<Pair<Method*,QualifiedState> > _members;
-  ResourceHashtable<Method*, int> _member_index;
+  GrowableArray<MethodState> _members;
 
   Method* _selected_target;  // Filled in later, if a unique target exists
   Symbol* _exception_message; // If no unique target is found
   Symbol* _exception_name;    // If no unique target is found
 
-  bool contains_method(Method* method) {
-    int* lookup = _member_index.get(method);
-    return lookup != NULL;
+  MethodState* find_method(Method* method) {
+    for (int i = 0; i < _members.length(); i++) {
+      if (_members.at(i)._method == method) {
+        return &_members.at(i);
+      }
+    }
+    return NULL;
   }
 
   void add_method(Method* method, QualifiedState state) {
-    Pair<Method*,QualifiedState> entry(method, state);
-    _member_index.put(method, _members.length());
-    _members.append(entry);
-  }
-
-  void disqualify_method(Method* method) {
-    int* index = _member_index.get(method);
-    guarantee(index != NULL && *index >= 0 && *index < _members.length(), "bad index");
-    _members.at(*index).second = DISQUALIFIED;
+    MethodState method_state(method, state);
+    _members.append(method_state);
   }
 
   Symbol* generate_no_defaults_message(TRAPS) const;
   Symbol* generate_method_message(Symbol *klass_name, Method* method, TRAPS) const;
-  Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
+  Symbol* generate_conflicts_message(GrowableArray<MethodState>* methods, TRAPS) const;
 
  public:
 
@@ -358,23 +353,15 @@
     }
   }
 
-  void record_qualified_method(Method* m) {
-    // If the method already exists in the set as qualified, this operation is
-    // redundant.  If it already exists as disqualified, then we leave it as
-    // disqualfied.  Thus we only add to the set if it's not already in the
-    // set.
-    if (!contains_method(m)) {
-      add_method(m, QUALIFIED);
-    }
-  }
-
-  void record_disqualified_method(Method* m) {
-    // If not in the set, add it as disqualified.  If it's already in the set,
-    // then set the state to disqualified no matter what the previous state was.
-    if (!contains_method(m)) {
-      add_method(m, DISQUALIFIED);
-    } else {
-      disqualify_method(m);
+  void record_method(Method* m, QualifiedState state) {
+    // If not in the set, add it.  If it's already in the set, then leave it
+    // as is if state is qualified, or set it to disqualified if state is
+    // disqualified.
+    MethodState* method_state = find_method(m);
+    if (method_state == NULL) {
+      add_method(m, state);
+    } else if (state == DISQUALIFIED) {
+      method_state->_state = DISQUALIFIED;
     }
   }
 
@@ -386,30 +373,43 @@
   Symbol* get_exception_name() { return _exception_name; }
 
   // Either sets the target or the exception error message
-  void determine_target(InstanceKlass* root, TRAPS) {
+  void determine_target_or_set_exception_message(InstanceKlass* root, TRAPS) {
     if (has_target() || throws_exception()) {
       return;
     }
 
     // Qualified methods are maximally-specific methods
     // These include public, instance concrete (=default) and abstract methods
-    GrowableArray<Method*> qualified_methods;
     int num_defaults = 0;
     int default_index = -1;
-    int qualified_index = -1;
-    for (int i = 0; i < _members.length(); ++i) {
-      Pair<Method*,QualifiedState> entry = _members.at(i);
-      if (entry.second == QUALIFIED) {
-        qualified_methods.append(entry.first);
-        qualified_index++;
-        if (entry.first->is_default_method()) {
+    for (int i = 0; i < _members.length(); i++) {
+      MethodState &member = _members.at(i);
+      if (member._state == QUALIFIED) {
+        if (member._method->is_default_method()) {
           num_defaults++;
-          default_index = qualified_index;
-
+          default_index = i;
         }
       }
     }
 
+    if (num_defaults == 1) {
+      assert(_members.at(default_index)._state == QUALIFIED, "");
+      _selected_target = _members.at(default_index)._method;
+    } else {
+      generate_and_set_exception_message(root, num_defaults, default_index, CHECK);
+    }
+  }
+
+  void generate_and_set_exception_message(InstanceKlass* root, int num_defaults, int default_index, TRAPS) {
+    assert(num_defaults != 1, "invariant - should've been handled calling method");
+
+    GrowableArray<Method*> qualified_methods;
+    for (int i = 0; i < _members.length(); i++) {
+      MethodState& member = _members.at(i);
+      if (member._state == QUALIFIED) {
+        qualified_methods.push(member._method);
+      }
+    }
     if (num_defaults == 0) {
       // If the root klass has a static method with matching name and signature
       // then do not generate an overpass method because it will hide the
@@ -421,13 +421,8 @@
         _exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK);
       }
       _exception_name = vmSymbols::java_lang_AbstractMethodError();
-
-    // If only one qualified method is default, select that
-    } else if (num_defaults == 1) {
-        _selected_target = qualified_methods.at(default_index);
-
-    } else if (num_defaults > 1) {
-      _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
+    } else {
+      _exception_message = generate_conflicts_message(&_members,CHECK);
       _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
       LogTarget(Debug, defaultmethods) lt;
       if (lt.is_enabled()) {
@@ -475,23 +470,23 @@
   return SymbolTable::new_symbol(ss.base(), (int)ss.size());
 }
 
-Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
+Symbol* MethodFamily::generate_conflicts_message(GrowableArray<MethodState>* methods, TRAPS) const {
   stringStream ss;
   ss.print("Conflicting default methods:");
   for (int i = 0; i < methods->length(); ++i) {
-    Method* method = methods->at(i);
-    Symbol* klass = method->klass_name();
-    Symbol* name = method->name();
+    Method *method = methods->at(i)._method;
+    Symbol *klass = method->klass_name();
+    Symbol *name = method->name();
     ss.print(" ");
-    ss.write((const char*)klass->bytes(), klass->utf8_length());
+    ss.write((const char*) klass->bytes(), klass->utf8_length());
     ss.print(".");
-    ss.write((const char*)name->bytes(), name->utf8_length());
+    ss.write((const char*) name->bytes(), name->utf8_length());
   }
   return SymbolTable::new_symbol(ss.base(), (int)ss.size());
 }
 
 
-class StateRestorer;
+class StateRestorerScope;
 
 // StatefulMethodFamily is a wrapper around a MethodFamily that maintains the
 // qualification state during hierarchy visitation, and applies that state
@@ -517,32 +512,72 @@
 
   MethodFamily* get_method_family() { return &_method_family; }
 
-  StateRestorer* record_method_and_dq_further(Method* mo);
+  void record_method_and_dq_further(StateRestorerScope* scope, Method* mo);
 };
 
-class StateRestorer : public PseudoScopeMark {
- private:
+// Because we walk the type hierarchy iteratively rather than recursively,
+// we can't use traditional scoped objects, which do their cleanup in the
+// destructor when the scope is exited.  StateRestorerScope (and
+// StateRestorer) provide similar functionality for scoped objects kept in
+// non-stack memory (such as resource memory, as is done here).  Marks have
+// to be added explicitly, and 'restore_state()' has to be called on the
+// scope when leaving it; the scope is reusable after 'restore_state()' has
+// been called.  A minimal standalone sketch of the idiom follows the
+// StateRestorerScope definition below.
+class StateRestorer : public ResourceObj {
+ public:
   StatefulMethodFamily* _method;
   QualifiedState _state_to_restore;
- public:
-  StateRestorer(StatefulMethodFamily* dm, QualifiedState state)
-      : _method(dm), _state_to_restore(state) {}
-  ~StateRestorer() { destroy(); }
+
+  StateRestorer() : _method(NULL), _state_to_restore(DISQUALIFIED) {}
+
   void restore_state() { _method->set_qualification_state(_state_to_restore); }
-  virtual void destroy() { restore_state(); }
 };
 
-StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
-  StateRestorer* mark = new StateRestorer(this, _qualification_state);
-  if (_qualification_state == QUALIFIED) {
-    _method_family.record_qualified_method(mo);
-  } else {
-    _method_family.record_disqualified_method(mo);
+class StateRestorerScope : public ResourceObj {
+ private:
+  GrowableArray<StateRestorer*>  _marks;
+  GrowableArray<StateRestorer*>* _free_list; // Shared between scopes
+ public:
+  StateRestorerScope(GrowableArray<StateRestorer*>* free_list) : _marks(), _free_list(free_list) {}
+
+  static StateRestorerScope* cast(void* data) {
+    return static_cast<StateRestorerScope*>(data);
   }
+
+  void mark(StatefulMethodFamily* family, QualifiedState qualification_state) {
+    StateRestorer* restorer;
+    if (!_free_list->is_empty()) {
+      restorer = _free_list->pop();
+    } else {
+      restorer = new StateRestorer();
+    }
+    restorer->_method = family;
+    restorer->_state_to_restore = qualification_state;
+    _marks.append(restorer);
+  }
+
+#ifdef ASSERT
+  bool is_empty() {
+    return _marks.is_empty();
+  }
+#endif
+
+  void restore_state() {
+    while (!_marks.is_empty()) {
+      StateRestorer* restorer = _marks.pop();
+      restorer->restore_state();
+      _free_list->push(restorer);
+    }
+  }
+};
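
An aside for readers of this hunk: the explicit mark/restore idiom above replaces destructor-based cleanup because the hierarchy walk is iterative. Below is a minimal standalone sketch of the idiom, with std::vector and plain heap allocation standing in for GrowableArray and resource-area allocation; every name in it is invented for illustration and none is from the patch.

    #include <cassert>
    #include <vector>

    enum QualifiedState { QUALIFIED, DISQUALIFIED };

    struct Family { QualifiedState _state; };      // stands in for StatefulMethodFamily

    struct Restorer {                              // stands in for StateRestorer
      Family* _family;
      QualifiedState _saved;
      void restore() { _family->_state = _saved; }
    };

    struct RestorerScope {                         // stands in for StateRestorerScope
      std::vector<Restorer*> _marks;
      std::vector<Restorer*>* _free_list;          // shared between scopes

      explicit RestorerScope(std::vector<Restorer*>* fl) : _free_list(fl) {}

      void mark(Family* f) {
        Restorer* r;
        if (!_free_list->empty()) {                // reuse a pooled restorer
          r = _free_list->back();
          _free_list->pop_back();
        } else {
          r = new Restorer();
        }
        r->_family = f;
        r->_saved = f->_state;
        _marks.push_back(r);
      }

      void restore_state() {                       // must be called explicitly
        while (!_marks.empty()) {
          Restorer* r = _marks.back();
          _marks.pop_back();
          r->restore();
          _free_list->push_back(r);                // return to the shared pool
        }
      }
    };

    int main() {
      std::vector<Restorer*> free_list;
      Family fam = { QUALIFIED };
      RestorerScope scope(&free_list);
      scope.mark(&fam);                            // record state before mutating
      fam._state = DISQUALIFIED;                   // the walk disqualifies it
      scope.restore_state();                       // explicit, destructor-free cleanup
      assert(fam._state == QUALIFIED);
      return 0;
    }

The shared free list mirrors how new_node_data()/free_node_data() later in this hunk recycle whole scopes as well.
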
+
+void StatefulMethodFamily::record_method_and_dq_further(StateRestorerScope* scope, Method* mo) {
+  scope->mark(this, _qualification_state);
+  _method_family.record_method(mo, _qualification_state);
+
   // Everything found "above" this method in the hierarchy walk is set to
   // disqualified
   set_qualification_state(DISQUALIFIED);
-  return mark;
 }
 
 // Represents a location corresponding to a vtable slot for methods that
@@ -660,11 +695,19 @@
   Symbol* _method_signature;
   StatefulMethodFamily*  _family;
   bool _cur_class_is_interface;
+  // Free lists, used as an optimization
+  GrowableArray<StateRestorerScope*> _free_scopes;
+  GrowableArray<StateRestorer*> _free_restorers;
+ public:
+  FindMethodsByErasedSig() : _free_scopes(6), _free_restorers(6) {}
 
- public:
-  FindMethodsByErasedSig(Symbol* name, Symbol* signature, bool is_interf) :
-      _method_name(name), _method_signature(signature), _family(NULL),
-      _cur_class_is_interface(is_interf) {}
+  void prepare(Symbol* name, Symbol* signature, bool is_interf) {
+    reset();
+    _method_name = name;
+    _method_signature = signature;
+    _family = NULL;
+    _cur_class_is_interface = is_interf;
+  }
 
   void get_discovered_family(MethodFamily** family) {
       if (_family != NULL) {
@@ -674,15 +717,25 @@
       }
   }
 
-  void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
+  void* new_node_data() {
+    if (!_free_scopes.is_empty()) {
+      StateRestorerScope* free_scope = _free_scopes.pop();
+      assert(free_scope->is_empty(), "StateRestorerScope::_marks array not empty");
+      return free_scope;
+    }
+    return new StateRestorerScope(&_free_restorers);
+  }
   void free_node_data(void* node_data) {
-    PseudoScope::cast(node_data)->destroy();
+    StateRestorerScope* scope = StateRestorerScope::cast(node_data);
+    scope->restore_state();
+    // Reuse scopes
+    _free_scopes.push(scope);
   }
 
   // Find all methods on this hierarchy that match this
   // method's erased (name, signature)
   bool visit() {
-    PseudoScope* scope = PseudoScope::cast(current_data());
+    StateRestorerScope* scope = StateRestorerScope::cast(current_data());
     InstanceKlass* iklass = current_class();
 
     Method* m = iklass->find_method(_method_name, _method_signature);
@@ -702,8 +755,7 @@
       }
 
       if (iklass->is_interface()) {
-        StateRestorer* restorer = _family->record_method_and_dq_further(m);
-        scope->add_mark(restorer);
+        _family->record_method_and_dq_further(scope, m);
       } else {
         // This is the rule that methods in classes "win" (bad word) over
         // methods in interfaces. This works because of single inheritance.
@@ -724,16 +776,20 @@
     GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
 
 static void generate_erased_defaults(
-     InstanceKlass* klass, EmptyVtableSlot* slot, bool is_intf, TRAPS) {
+    FindMethodsByErasedSig* visitor,
+    InstanceKlass* klass, EmptyVtableSlot* slot, bool is_intf, TRAPS) {
 
+  // The visitor needs to be initialized (or re-initialized) before use;
+  // this lets the same visitor instance be reused across multiple
+  // generation passes as an optimization (a standalone sketch of the
+  // pattern follows this function).
+  visitor->prepare(slot->name(), slot->signature(), is_intf);
   // sets up a set of methods with the same exact erased signature
-  FindMethodsByErasedSig visitor(slot->name(), slot->signature(), is_intf);
-  visitor.run(klass);
+  visitor->run(klass);
 
   MethodFamily* family;
-  visitor.get_discovered_family(&family);
+  visitor->get_discovered_family(&family);
   if (family != NULL) {
-    family->determine_target(klass, CHECK);
+    family->determine_target_or_set_exception_message(klass, CHECK);
     slot->bind_family(family);
   }
 }
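
As referenced in the comment above, here is a standalone sketch of the visitor-reuse pattern: one instance, re-initialized via prepare() before each pass, so per-pass allocation is amortized. The names and std types below are invented stand-ins for illustration, not code from the patch.

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for FindMethodsByErasedSig: constructed once, reset per pass.
    class ReusableVisitor {
      std::string _name;
      std::string _signature;
      std::vector<int> _scratch;      // survives across passes, capacity retained
     public:
      void prepare(const std::string& name, const std::string& signature) {
        _name = name;
        _signature = signature;
        _scratch.clear();             // reset logical state, keep the buffer
      }
      void run() {
        _scratch.push_back(1);        // pretend to visit the hierarchy
        std::cout << "visited " << _name << _signature << "\n";
      }
    };

    int main() {
      const char* slots[][2] = { { "m", "()V" }, { "n", "(I)I" } };
      ReusableVisitor visitor;        // one instance for all slots
      for (auto& slot : slots) {
        visitor.prepare(slot[0], slot[1]);
        visitor.run();
      }
      return 0;
    }
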
@@ -788,6 +844,7 @@
   find_empty_vtable_slots(&empty_slots, klass, mirandas, CHECK);
 
   if (empty_slots.length() > 0) {
+    FindMethodsByErasedSig findMethodsByErasedSig;
     for (int i = 0; i < empty_slots.length(); ++i) {
       EmptyVtableSlot* slot = empty_slots.at(i);
       LogTarget(Debug, defaultmethods) lt;
@@ -798,7 +855,7 @@
         slot->print_on(&ls);
         ls.cr();
       }
-      generate_erased_defaults(klass, slot, klass->is_interface(), CHECK);
+      generate_erased_defaults(&findMethodsByErasedSig, klass, slot, klass->is_interface(), CHECK);
     }
     log_debug(defaultmethods)("Creating defaults and overpasses...");
     create_defaults_and_exceptions(&empty_slots, klass, CHECK);
@@ -898,12 +955,12 @@
   GrowableArray<Method*> defaults;
   BytecodeConstantPool bpool(klass->constants());
 
+  BytecodeBuffer* buffer = NULL; // Lazily create a reusable buffer (see the sketch after this hunk)
   for (int i = 0; i < slots->length(); ++i) {
     EmptyVtableSlot* slot = slots->at(i);
 
     if (slot->is_bound()) {
       MethodFamily* method = slot->get_binding();
-      BytecodeBuffer buffer;
 
       LogTarget(Debug, defaultmethods) lt;
       if (lt.is_enabled()) {
@@ -926,11 +983,16 @@
           defaults.push(selected);
         }
       } else if (method->throws_exception()) {
-        int max_stack = assemble_method_error(&bpool, &buffer,
+        if (buffer == NULL) {
+          buffer = new BytecodeBuffer();
+        } else {
+          buffer->clear();
+        }
+        int max_stack = assemble_method_error(&bpool, buffer,
            method->get_exception_name(), method->get_exception_message(), CHECK);
         AccessFlags flags = accessFlags_from(
           JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
-         Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+        Method* m = new_method(&bpool, buffer, slot->name(), slot->signature(),
           flags, max_stack, slot->size_of_parameters(),
           ConstMethod::OVERPASS, CHECK);
         // We push to the methods list:
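
A standalone sketch of the lazily created, reusable buffer introduced in this hunk: allocate on first use, clear (rather than reallocate) on later uses. std::vector<uint8_t> stands in for BytecodeBuffer, and all names are invented for illustration.

    #include <cstdint>
    #include <vector>

    typedef std::vector<uint8_t> Buffer;   // stands in for BytecodeBuffer

    static void assemble_error_method(Buffer* buf) {
      buf->push_back(0xbb);                // dummy bytecodes
      buf->push_back(0xb1);
    }

    int main() {
      Buffer* buffer = nullptr;            // allocated only if some slot needs it
      for (int i = 0; i < 4; i++) {
        bool throws_exception = (i % 2 == 0);
        if (!throws_exception) {
          continue;                        // many iterations never touch it
        }
        if (buffer == nullptr) {
          buffer = new Buffer();           // first use: allocate once
        } else {
          buffer->clear();                 // later uses: reset, keep capacity
        }
        assemble_error_method(buffer);
      }
      delete buffer;                       // safe even if it stayed null
      return 0;
    }
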
--- a/src/hotspot/share/classfile/javaClasses.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -42,7 +42,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.hpp"
@@ -1077,7 +1077,7 @@
       Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset));
       assert(ak != NULL || t == T_VOID, "should not be NULL");
       if (ak != NULL) {
-        Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak);
+        Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak, true);
         archived_m->metadata_field_put(_array_klass_offset, reloc_ak);
       }
 
@@ -1222,7 +1222,7 @@
   // The archived mirror's field at _klass_offset is still pointing to the original
   // klass. Updated the field in the archived mirror to point to the relocated
   // klass in the archive.
-  Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror));
+  Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror), true);
   log_debug(cds, heap, mirror)(
     "Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
     p2i(as_Klass(mirror)), p2i(reloc_k));
@@ -1232,7 +1232,7 @@
   // higher array klass if exists. Relocate the pointer.
   Klass *arr = array_klass_acquire(mirror);
   if (arr != NULL) {
-    Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr);
+    Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr, true);
     log_debug(cds, heap, mirror)(
       "Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
       p2i(arr), p2i(reloc_arr));
@@ -1241,6 +1241,33 @@
   return archived_mirror;
 }
 
+void java_lang_Class::update_archived_primitive_mirror_native_pointers(oop archived_mirror) {
+  if (MetaspaceShared::relocation_delta() != 0) {
+    assert(archived_mirror->metadata_field(_klass_offset) == NULL, "must be for primitive class");
+
+    Klass* ak = ((Klass*)archived_mirror->metadata_field(_array_klass_offset));
+    if (ak != NULL) {
+      archived_mirror->metadata_field_put(_array_klass_offset,
+          (Klass*)(address(ak) + MetaspaceShared::relocation_delta()));
+    }
+  }
+}
+
+void java_lang_Class::update_archived_mirror_native_pointers(oop archived_mirror) {
+  if (MetaspaceShared::relocation_delta() != 0) {
+    Klass* k = ((Klass*)archived_mirror->metadata_field(_klass_offset));
+    archived_mirror->metadata_field_put(_klass_offset,
+        (Klass*)(address(k) + MetaspaceShared::relocation_delta()));
+
+    Klass* ak = ((Klass*)archived_mirror->metadata_field(_array_klass_offset));
+    if (ak != NULL) {
+      archived_mirror->metadata_field_put(_array_klass_offset,
+          (Klass*)(address(ak) + MetaspaceShared::relocation_delta()));
+    }
+  }
+}
+
+
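
A standalone sketch of the relocation arithmetic these two helpers perform: a raw metadata pointer captured at dump time is shifted by the delta between where the archive was mapped and where it was dumped for. The relocate() helper below is invented for illustration; only the add-the-delta idea is from the patch.

    #include <cassert>
    #include <cstdint>

    typedef uintptr_t address;

    static void* relocate(void* dumped, intptr_t relocation_delta) {
      if (dumped == nullptr) {
        return nullptr;                    // e.g. no array klass was created yet
      }
      return (void*)((address)dumped + relocation_delta);
    }

    int main() {
      char region[32];
      void* dumped_klass = &region[0];
      intptr_t delta = 16;                 // archive mapped 16 bytes higher
      void* runtime_klass = relocate(dumped_klass, delta);
      assert((address)runtime_klass == (address)dumped_klass + 16);
      assert(relocate(nullptr, delta) == nullptr);
      return 0;
    }
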
 // Returns true if the mirror is updated, false if no archived mirror
 // data is present. After the archived mirror object is restored, the
 // shared klass' _has_raw_archived_mirror flag is cleared.
@@ -1256,15 +1283,15 @@
   }
 
   oop m = HeapShared::materialize_archived_object(k->archived_java_mirror_raw_narrow());
-
   if (m == NULL) {
     return false;
   }
 
+  // mirror is archived, restore
   log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m));
-
-  // mirror is archived, restore
   assert(HeapShared::is_archived_object(m), "must be archived mirror object");
+  update_archived_mirror_native_pointers(m);
+  assert(as_Klass(m) == k, "must be");
   Handle mirror(THREAD, m);
 
   if (!k->is_array_klass()) {
@@ -1681,17 +1708,20 @@
 }
 
 bool java_lang_Thread::interrupted(oop java_thread) {
-#if INCLUDE_JFR
-  if (java_thread == NULL) {
-    // can happen from Jfr::on_vm_init leading to call of JavaThread::sleep
-    assert(!is_init_completed(), "should only happen during init");
-    return false;
-  }
-#endif
+  // Make sure the caller can safely access oops.
+  assert(Thread::current()->is_VM_thread() ||
+         (JavaThread::current()->thread_state() != _thread_blocked &&
+          JavaThread::current()->thread_state() != _thread_in_native),
+         "Unsafe access to oop");
   return java_thread->bool_field_volatile(_interrupted_offset);
 }
 
 void java_lang_Thread::set_interrupted(oop java_thread, bool val) {
+  // Make sure the caller can safely access oops.
+  assert(Thread::current()->is_VM_thread() ||
+         (JavaThread::current()->thread_state() != _thread_blocked &&
+          JavaThread::current()->thread_state() != _thread_in_native),
+         "Unsafe access to oop");
   java_thread->bool_field_put_volatile(_interrupted_offset, val);
 }
 
@@ -2277,7 +2307,7 @@
   st->print_cr("%s", buf);
 }
 
-void java_lang_Throwable::print_stack_element(outputStream *st, const methodHandle& method, int bci) {
+void java_lang_Throwable::print_stack_element(outputStream *st, Method* method, int bci) {
   Handle mirror (Thread::current(),  method->method_holder()->java_mirror());
   int method_id = method->orig_method_idnum();
   int version = method->constants()->version();
@@ -2383,7 +2413,6 @@
   // trace as utilizing vframe.
 #ifdef ASSERT
   vframeStream st(thread);
-  methodHandle st_method(THREAD, st.method());
 #endif
   int total_count = 0;
   RegisterMap map(thread, false);
@@ -2433,14 +2462,9 @@
       }
     }
 #ifdef ASSERT
-    assert(st_method() == method && st.bci() == bci,
+    assert(st.method() == method && st.bci() == bci,
            "Wrong stack trace");
     st.next();
-    // vframeStream::method isn't GC-safe so store off a copy
-    // of the Method* in case we GC.
-    if (!st.at_end()) {
-      st_method = st.method();
-    }
 #endif
 
     // the format of the stacktrace will be:
@@ -2703,7 +2727,7 @@
     }
     java_lang_StackTraceElement::set_fileName(element(), source_file);
 
-    int line_number = Backtrace::get_line_number(method, bci);
+    int line_number = Backtrace::get_line_number(method(), bci);
     java_lang_StackTraceElement::set_lineNumber(element(), line_number);
   }
 }
@@ -2778,7 +2802,8 @@
   short version = stackFrame->short_field(_version_offset);
   int bci = stackFrame->int_field(_bci_offset);
   Symbol* name = method->name();
-  java_lang_StackTraceElement::fill_in(stack_trace_element, holder, method, version, bci, name, CHECK);
+  java_lang_StackTraceElement::fill_in(stack_trace_element, holder, methodHandle(THREAD, method),
+                                       version, bci, name, CHECK);
 }
 
 #define STACKFRAMEINFO_FIELDS_DO(macro) \
@@ -4661,6 +4686,28 @@
 }
 #endif
 
+#if INCLUDE_CDS_JAVA_HEAP
+bool JavaClasses::is_supported_for_archiving(oop obj) {
+  Klass* klass = obj->klass();
+
+  if (klass == SystemDictionary::ClassLoader_klass() ||  // ClassLoader::loader_data is malloc'ed.
+      klass == SystemDictionary::Module_klass() ||       // Module::module_entry is malloc'ed.
+      // The next 3 classes are used to implement java.lang.invoke, and are not used directly in
+      // regular Java code. The implementation of java.lang.invoke uses generated anonymous classes
+      // (e.g., as referenced by ResolvedMethodName::vmholder) that are not yet supported by CDS.
+      // So for now we cannot support these classes for archiving.
+      //
+      // These objects typically are not referenced by static fields, but rather by resolved
+      // constant pool entries, so excluding them shouldn't affect the archiving of static fields.
+      klass == SystemDictionary::ResolvedMethodName_klass() ||
+      klass == SystemDictionary::MemberName_klass() ||
+      klass == SystemDictionary::Context_klass()) {
+    return false;
+  }
+
+  return true;
+}
+#endif
 
 #ifndef PRODUCT
 
--- a/src/hotspot/share/classfile/javaClasses.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -281,6 +281,8 @@
                             Handle protection_domain, TRAPS);
   static void fixup_mirror(Klass* k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
+  static void update_archived_primitive_mirror_native_pointers(oop archived_mirror) NOT_CDS_JAVA_HEAP_RETURN;
+  static void update_archived_mirror_native_pointers(oop archived_mirror) NOT_CDS_JAVA_HEAP_RETURN;
 
   // Archiving
   static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
@@ -561,7 +563,7 @@
   static oop message(oop throwable);
   static void set_message(oop throwable, oop value);
   static Symbol* detail_message(oop throwable);
-  static void print_stack_element(outputStream *st, const methodHandle& method, int bci);
+  static void print_stack_element(outputStream *st, Method* method, int bci);
   static void print_stack_usage(Handle stream);
 
   static void compute_offsets();
@@ -1402,7 +1404,7 @@
   static int version_at(unsigned int merged);
   static int mid_at(unsigned int merged);
   static int cpref_at(unsigned int merged);
-  static int get_line_number(const methodHandle& method, int bci);
+  static int get_line_number(Method* method, int bci);
   static Symbol* get_source_file_name(InstanceKlass* holder, int version);
 
   // Debugging
@@ -1662,6 +1664,7 @@
   static void check_offsets() PRODUCT_RETURN;
   static void serialize_offsets(SerializeClosure* soc) NOT_CDS_RETURN;
   static InjectedField* get_injected(Symbol* class_name, int* field_count);
+  static bool is_supported_for_archiving(oop obj) NOT_CDS_JAVA_HEAP_RETURN_(false);
 };
 
 #undef DECLARE_INJECTED_FIELD_ENUM
--- a/src/hotspot/share/classfile/javaClasses.inline.hpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/classfile/javaClasses.inline.hpp	Wed Nov 20 10:52:28 2019 +0530
@@ -264,7 +264,7 @@
   return extract_low_short_from_int(merged);
 }
 
-inline int Backtrace::get_line_number(const methodHandle& method, int bci) {
+inline int Backtrace::get_line_number(Method* method, int bci) {
   int line_number = 0;
   if (method->is_native()) {
     // Negative value different from -1 below, enabling Java code in
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Wed Nov 20 09:28:31 2019 +0900
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Wed Nov 20 10:52:28 2019 +0530
@@ -2338,9 +2338,9 @@
 }
 
 
-methodHandle SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid,
-                                                            Symbol* signature,
-                                                            TRAPS) {
+Method* SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid,
+                                                       Symbol* signature,
+                                                       TRAPS) {
   methodHandle empty;
   assert(MethodHandles::is_signature_polymorphic(iid) &&
          MethodHandles::is_signature_polymorphic_intrinsic(iid) &&
@@ -2354,14 +2354,14 @@
   if (spe == NULL || spe->method() == NULL) {
     spe = NULL;
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
-    m = Method::make_method_handle_intrinsic(iid, signature, CHECK_(empty));
+    m = Method::make_method_handle_intrinsic(iid, signature, CHECK_NULL);
     if (!Arguments::is_interpreter_only()) {
       // Generate a compiled form of the MH intrinsic.
       AdapterHandlerLibrary::create_native_wrapper(m);
       // Check if have the compiled code.
       if (!m->has_compiled_code()) {
-        THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
-                   "Out of space in CodeCache for method handle intrinsic", empty);
+        THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(),
+                       "Out of space in CodeCache for method handle intrinsic");
       }
     }
     // Now grab the lock.  We might have to throw away the new method,
@@ -2384,12 +2384,11 @@
 }
 
 // Helper for unpacking the return value from linkMethod and linkCallSite.
-static methodHandle unpack_method_and_appendix(Handle mname,
-                                               Klass* accessing_klass,
-                                               objArrayHandle appendix_box,
-                                               Handle* appendix_result,
-                                               TRAPS) {
-  methodHandle empty;
+static Method* unpack_method_and_appendix(Handle mname,
+                                          Klass* accessing_klass,
+                                          objArrayHandle appendix_box,
+                                          Handle* appendix_result,
+                                          TRAPS) {
   if (mname.not_null()) {
     Method* m = java_lang_invoke_MemberName::vmtarget(mname());
     if (m != NULL) {
@@ -2407,35 +2406,34 @@
       // the target is stored in the cpCache and if a reference to this
       // MemberName is dropped we need a way to make sure the
       // class_loader containing this method is kept alive.
+      methodHandle mh(THREAD, m); // record_dependency can safepoint.
       ClassLoaderData* this_key = accessing_klass->class_loader_data();
       this_key->record_dependency(m->method_holder());
-      return methodHandle(THREAD, m);
+      return mh();
     }
   }
-  THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad value from MethodHandleNatives", empty);
-  return empty;
+  THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "bad value from MethodHandleNatives");
 }
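
A standalone sketch of why the methodHandle is taken before the safepointing record_dependency() call above: a raw Method* that is not registered anywhere visible to the collector could be reclaimed across a safepoint. The handle type and root list below are invented stand-ins for HotSpot's Handle machinery, not its actual implementation.

    #include <cassert>
    #include <vector>

    struct Method { int id; };                   // stand-in for Metadata

    static std::vector<Method**> g_handle_area;  // roots the "collector" scans

    // Stand-in for methodHandle: registers the pointer's location so a
    // safepoint can see what it refers to and keep it alive.
    struct HandleSketch {
      Method* _m;
      explicit HandleSketch(Method* m) : _m(m) { g_handle_area.push_back(&_m); }
      ~HandleSketch() { g_handle_area.pop_back(); }
      Method* operator()() const { return _m; }
    };

    static void call_that_can_safepoint() {
      // A real safepoint may reclaim metadata not reachable from
      // g_handle_area; here it is a no-op - the point is the ordering below.
    }

    static Method* unpack(Method* raw) {
      HandleSketch mh(raw);                      // wrap BEFORE the risky call
      call_that_can_safepoint();
      return mh();                               // now safe to hand back raw
    }

    int main() {
      Method m = { 42 };
      assert(unpack(&m)->id == 42);
      return 0;
    }
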
 
-methodHandle SystemDictionary::find_method_handle_invoker(Klass* klass,
-                                                          Symbol* name,
-                                                          Symbol* signature,
-                                                          Klass* accessing_klass,
-                                                          Handle *appendix_result,
-                                                          TRAPS) {
-  methodHandle empty;
+Method* SystemDictionary::find_method_handle_invoker(Klass* klass,
+                                                     Symbol* name,
+                                                     Symbol* signature,
+                                                     Klass* accessing_klass,
+                                                     Handle *appendix_result,
+                                                     TRAPS) {
   assert(THREAD->can_call_java(), "");
   Handle method_type =
-    SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_(empty));